diff --git a/.github/workflows/profiling.yml b/.github/workflows/profiling.yml
new file mode 100644
index 0000000000..b4963c0889
--- /dev/null
+++ b/.github/workflows/profiling.yml
@@ -0,0 +1,53 @@
+name: Profiling
+
+on:
+ pull_request:
+ branches:
+ - og-develop
+
+permissions:
+ # deployments permission to deploy GitHub pages website
+ deployments: write
+ # contents permission to update profiling contents in gh-pages branch
+ contents: write
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}
+ cancel-in-progress: true
+
+jobs:
+ profiling:
+ name: Speed Profiling
+ runs-on: [self-hosted, linux, gpu, dataset-enabled]
+
+ defaults:
+ run:
+ shell: micromamba run -n omnigibson /bin/bash -leo pipefail {0}
+
+ steps:
+ - name: Fix home
+ run: echo "HOME=/root" >> $GITHUB_ENV
+
+ - name: Checkout source
+ uses: actions/checkout@v3
+
+ - name: Install dev requirements
+ run: pip install -r requirements-dev.txt
+
+ - name: Install
+ run: pip install -e .
+
+ - name: Run performance benchmark
+ run: bash scripts/profiling.sh
+
+ - name: Store benchmark result
+ uses: benchmark-action/github-action-benchmark@v1
+ with:
+ tool: 'customSmallerIsBetter'
+ output-file-path: output.json
+ benchmark-data-dir-path: profiling
+ fail-on-alert: true
+ alert-threshold: '200%'
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ comment-on-alert: true
+ auto-push: true
diff --git a/omnigibson/macros.py b/omnigibson/macros.py
index d0bcff6ef4..1e097182f6 100644
--- a/omnigibson/macros.py
+++ b/omnigibson/macros.py
@@ -55,6 +55,9 @@
# CANNOT be set at runtime
gm.GUI_VIEWPORT_ONLY = False
+# Whether the viewer camera should be visible or not
+gm.VIEWER_CAMERA_VISIBLE = True
+
# Do not suppress known omni warnings / errors, and also put omnigibson in a debug state
# This includes extra information for things such as object sampling, and also any debug
# logging messages
diff --git a/omnigibson/simulator.py b/omnigibson/simulator.py
index a9a8e6f25a..271d3bd992 100644
--- a/omnigibson/simulator.py
+++ b/omnigibson/simulator.py
@@ -1236,6 +1236,7 @@ def _init_stage(
position=np.array(m.DEFAULT_VIEWER_CAMERA_POS),
orientation=np.array(m.DEFAULT_VIEWER_CAMERA_QUAT),
)
+ self.viewer_visibility = gm.VIEWER_CAMERA_VISIBLE
def close(self):
"""
diff --git a/omnigibson/systems/macro_particle_system.py b/omnigibson/systems/macro_particle_system.py
index 00d413caaf..0f85e98f76 100644
--- a/omnigibson/systems/macro_particle_system.py
+++ b/omnigibson/systems/macro_particle_system.py
@@ -1102,6 +1102,9 @@ class MacroPhysicalParticleSystem(PhysicalParticleSystem, MacroParticleSystem):
_particle_radius = None
_particle_offset = None
+ has_particles = False
+ has_refreshed = False
+
@classmethod
def initialize(cls):
# Run super method first
@@ -1189,6 +1192,12 @@ def get_particles_position_orientation(cls):
pos, ori = np.array([]).reshape(0, 3), np.array([]).reshape(0, 4)
return pos, ori
+ @classmethod
+ def update(cls):
+ if cls.has_particles and not cls.has_refreshed:
+ cls._refresh_particles_view()
+ cls.has_refreshed = True
+
@classmethod
def get_particles_local_pose(cls):
return cls.get_particles_position_orientation()
@@ -1344,6 +1353,9 @@ def generate_particles(
# Set the vels
cls.set_particles_velocities(lin_vels=velocities, ang_vels=angular_velocities)
+ if not cls.has_particles:
+ cls.has_particles = True
+
@classmethod
def create(cls, name, create_particle_template, particle_density, scale=None, **kwargs):
"""
diff --git a/omnigibson/systems/system_base.py b/omnigibson/systems/system_base.py
index 0e06b8966d..d78dd863f8 100644
--- a/omnigibson/systems/system_base.py
+++ b/omnigibson/systems/system_base.py
@@ -1233,6 +1233,10 @@ def is_physical_particle_system(system_name):
def get_system(system_name, force_active=True):
# Make sure scene exists
assert og.sim.scene is not None, "Cannot get systems until scene is imported!"
+ # Make sure prefixes preserve their double underscore
+ for prefix in SYSTEM_PREFIXES:
+ if f"{prefix}__" not in system_name:
+ system_name = system_name.replace(f"{prefix}_", f"{prefix}__")
# If system_name is not in REGISTERED_SYSTEMS, create from metadata
system = REGISTERED_SYSTEMS[system_name] if system_name in REGISTERED_SYSTEMS \
else _create_system_from_metadata(system_name=system_name)
diff --git a/omnigibson/transition_rules.py b/omnigibson/transition_rules.py
index 0b80ab5906..a6b5ba6160 100644
--- a/omnigibson/transition_rules.py
+++ b/omnigibson/transition_rules.py
@@ -799,9 +799,8 @@ def _generate_conditions(cls):
@classmethod
def transition(cls, object_candidates):
objs_to_remove = []
-
for diceable_obj in object_candidates["diceable"]:
- system = get_system(f"diced_{diceable_obj.category}")
+ system = get_system(f"diced__{diceable_obj.category}")
system.generate_particles_from_link(diceable_obj, diceable_obj.root_link, check_contact=False, use_visual_meshes=False)
# Delete original object from stage.
diff --git a/omnigibson/utils/profiling_utils.py b/omnigibson/utils/profiling_utils.py
new file mode 100644
index 0000000000..ccf7334a94
--- /dev/null
+++ b/omnigibson/utils/profiling_utils.py
@@ -0,0 +1,83 @@
+import gym
+import omnigibson as og
+import os
+import psutil
+from pynvml.smi import nvidia_smi
+
+from time import time
+from omnigibson.envs.env_base import Environment
+
+PROFILING_FIELDS = ["total time", "physics time", "render time", "non physics time", "get observation time", "task time", "action time"]
+
+class ProfilingEnv(Environment):
+ def step(self, action):
+ try:
+ start = time()
+ # If the action is not a dictionary, convert into a dictionary
+ if not isinstance(action, dict) and not isinstance(action, gym.spaces.Dict):
+ action_dict = dict()
+ idx = 0
+ for robot in self.robots:
+ action_dim = robot.action_dim
+ action_dict[robot.name] = action[idx: idx + action_dim]
+ idx += action_dim
+ else:
+ # Our inputted action is the action dictionary
+ action_dict = action
+
+ # Iterate over all robots and apply actions
+ for robot in self.robots:
+ robot.apply_action(action_dict[robot.name])
+
+ # Run simulation step
+ sim_start = time()
+ if len(og.sim._objects_to_initialize) > 0:
+ og.sim.render()
+ super(type(og.sim), og.sim).step(render=True)
+ omni_time = (time() - sim_start) * 1e3
+
+ # Additionally run non physics things
+ og.sim._non_physics_step()
+
+ # Grab observations
+ obs = self.get_obs()
+
+ # Step the scene graph builder if necessary
+ if self._scene_graph_builder is not None:
+ self._scene_graph_builder.step(self.scene)
+
+ # Grab reward, done, and info, and populate with internal info
+ reward, done, info = self.task.step(self, action)
+ self._populate_info(info)
+
+ if done and self._automatic_reset:
+ # Add last observation to our information dict, and reset
+ info["last_observation"] = obs
+ obs = self.reset()
+
+ # Increment step
+ self._current_step += 1
+
+ # collect profiling data
+ total_frame_time = (time() - start) * 1e3
+ og_time = total_frame_time - omni_time
+ # memory usage in GB
+ memory_usage = psutil.Process(os.getpid()).memory_info().rss / 1024 ** 3
+ vram_usage = 0  # VRAM usage in GB (default if this PID is not reported by nvidia-smi)
+ for gpu in nvidia_smi.getInstance().DeviceQuery()['gpu']:
+ found = False
+ for process in gpu['processes']:
+ if process['pid'] == os.getpid():
+ vram_usage = process['used_memory'] / 1024
+ found = True
+ break
+ if found:
+ break
+
+ ret = [total_frame_time, omni_time, og_time, memory_usage, vram_usage]
+ if self._current_step % 100 == 0:
+ print("total time: {:.3f} ms, Omni time: {:.3f} ms, OG time: {:.3f} ms, memory: {:.3f} GB, vram: {:.3f} GB.".format(*ret))
+
+ return obs, reward, done, info, ret
+ except Exception as e:
+ raise ValueError(f"Failed to execute environment step {self._current_step} in episode {self._current_episode}") from e
diff --git a/requirements-dev.txt b/requirements-dev.txt
index f0a16f7f5d..3337e951e4 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -7,4 +7,5 @@ mkdocs-material
mkdocs-material-extensions
mkdocstrings[python]
mkdocs-section-index
-mkdocs-literate-nav
\ No newline at end of file
+mkdocs-literate-nav
+nvidia-ml-py
\ No newline at end of file
diff --git a/scripts/benchmark.html b/scripts/benchmark.html
deleted file mode 100644
index 3d9f5c1a59..0000000000
--- a/scripts/benchmark.html
+++ /dev/null
@@ -1,134 +0,0 @@
-
-
-
- OmniGibson Profiling
-
-
-
-
-
-
-
-
-
-
- Baselines
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Non-physics Features
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Scenes
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/scripts/benchmark.css b/scripts/profiling.css
similarity index 100%
rename from scripts/benchmark.css
rename to scripts/profiling.css
diff --git a/scripts/profiling.html b/scripts/profiling.html
new file mode 100644
index 0000000000..02b7304934
--- /dev/null
+++ b/scripts/profiling.html
@@ -0,0 +1,164 @@
+
+
+
+ OmniGibson Profiling
+
+
+
+
+
+
+
+
+
+
+ Baselines
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Non-physics Features
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Scenes
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/scripts/benchmark.js b/scripts/profiling.js
similarity index 50%
rename from scripts/benchmark.js
rename to scripts/profiling.js
index 67e2f56a3a..fcec87a632 100644
--- a/scripts/benchmark.js
+++ b/scripts/profiling.js
@@ -1,16 +1,22 @@
const canvasDict = {
- 'baseline_total_canvas': ["Total frame time", ["Empty scene, flatcache on", "Rs_int, flatcache on", "Rs_int, with 1 Fetch robot, flatcache on", "Rs_int, with 3 Fetch robot, flatcache on"]],
- 'baseline_physics_canvas': ["Physics step time", ["Empty scene, flatcache on", "Rs_int, flatcache on", "Rs_int, with 1 Fetch robot, flatcache on", "Rs_int, with 3 Fetch robot, flatcache on"]],
- 'baseline_rendering_canvas': ["Render step time", ["Empty scene, flatcache on", "Rs_int, flatcache on", "Rs_int, with 1 Fetch robot, flatcache on", "Rs_int, with 3 Fetch robot, flatcache on"]],
- 'baseline_non_physics_canvas': ["Non-physics step time", ["Empty scene, flatcache on", "Rs_int, flatcache on", "Rs_int, with 1 Fetch robot, flatcache on", "Rs_int, with 3 Fetch robot, flatcache on"]],
- 'np_total_canvas': ["Total frame time", ["Rs_int, with 1 Fetch robot, fluids", "Rs_int, with 1 Fetch robot, cloth", "Rs_int, with 1 Fetch robot, macro particles", "Rs_int, with 1 Fetch robot, cloth, fluids, macro particles"]],
- 'np_physics_canvas': ["Physics step time", ["Rs_int, with 1 Fetch robot, fluids", "Rs_int, with 1 Fetch robot, cloth", "Rs_int, with 1 Fetch robot, macro particles", "Rs_int, with 1 Fetch robot, cloth, fluids, macro particles"]],
- 'np_rendering_canvas': ["Render step time", ["Rs_int, with 1 Fetch robot, fluids", "Rs_int, with 1 Fetch robot, cloth", "Rs_int, with 1 Fetch robot, macro particles", "Rs_int, with 1 Fetch robot, cloth, fluids, macro particles"]],
- 'np_non_physics_canvas': ["Non-physics step time", ["Rs_int, with 1 Fetch robot, fluids", "Rs_int, with 1 Fetch robot, cloth", "Rs_int, with 1 Fetch robot, macro particles", "Rs_int, with 1 Fetch robot, cloth, fluids, macro particles"]],
- 'scene_total_canvas': ["Total frame time", ["Rs_int, with 1 Fetch robot, flatcache on", "Rs_int, with 1 Fetch robot, flatcache on", "Rs_int, with 1 Fetch robot, flatcache on", "Rs_int, with 1 Fetch robot, flatcache on"]],
- 'scene_physics_canvas': ["Physics step time", ["Rs_int, with 1 Fetch robot, fluids", "Rs_int, with 1 Fetch robot, cloth", "Rs_int, with 1 Fetch robot, macro particles", "Rs_int, with 1 Fetch robot, cloth, fluids, macro particles"]],
- 'scene_rendering_canvas': ["Render step time", ["Rs_int, with 1 Fetch robot, fluids", "Rs_int, with 1 Fetch robot, cloth", "Rs_int, with 1 Fetch robot, macro particles", "Rs_int, with 1 Fetch robot, cloth, fluids, macro particles"]],
- 'scene_non_physics_canvas': ["Non-physics step time", ["Rs_int, with 1 Fetch robot, fluids", "Rs_int, with 1 Fetch robot, cloth", "Rs_int, with 1 Fetch robot, macro particles", "Rs_int, with 1 Fetch robot, cloth, fluids, macro particles"]],
+ 'baseline_total_canvas': ["Total frame time", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch robot", "Rs_int, with 3 Fetch robot"]],
+ 'baseline_loading_canvas': ["Loading time", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch robot", "Rs_int, with 3 Fetch robot"]],
+ 'baseline_omni_canvas': ["Omni step time", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch robot", "Rs_int, with 3 Fetch robot"]],
+ 'baseline_og_canvas': ["Non-omni step time", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch robot", "Rs_int, with 3 Fetch robot"]],
+ 'baseline_mem_canvas': ["Memory usage", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch robot", "Rs_int, with 3 Fetch robot"]],
+ 'baseline_vram_canvas': ["Vram usage", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch robot", "Rs_int, with 3 Fetch robot"]],
+ 'np_total_canvas': ["Total frame time", ["Empty scene, with 1 Fetch robot, fluids", "Empty scene, with 1 Fetch robot, cloth", "Empty scene, with 1 Fetch robot, macro particles", "Empty scene, with 1 Fetch robot, cloth, fluids, macro particles"]],
+ 'np_loading_canvas': ["Loading time", ["Empty scene, with 1 Fetch robot, fluids", "Empty scene, with 1 Fetch robot, cloth", "Empty scene, with 1 Fetch robot, macro particles", "Empty scene, with 1 Fetch robot, cloth, fluids, macro particles"]],
+ 'np_omni_canvas': ["Omni step time", ["Empty scene, with 1 Fetch robot, fluids", "Empty scene, with 1 Fetch robot, cloth", "Empty scene, with 1 Fetch robot, macro particles", "Empty scene, with 1 Fetch robot, cloth, fluids, macro particles"]],
+ 'np_og_canvas': ["Non-omni step time", ["Empty scene, with 1 Fetch robot, fluids", "Empty scene, with 1 Fetch robot, cloth", "Empty scene, with 1 Fetch robot, macro particles", "Empty scene, with 1 Fetch robot, cloth, fluids, macro particles"]],
+ 'np_mem_canvas': ["Memory usage", ["Empty scene, with 1 Fetch robot, fluids", "Empty scene, with 1 Fetch robot, cloth", "Empty scene, with 1 Fetch robot, macro particles", "Empty scene, with 1 Fetch robot, cloth, fluids, macro particles"]],
+ 'np_vram_canvas': ["Vram usage", ["Empty scene, with 1 Fetch robot, fluids", "Empty scene, with 1 Fetch robot, cloth", "Empty scene, with 1 Fetch robot, macro particles", "Empty scene, with 1 Fetch robot, cloth, fluids, macro particles"]],
+ // 'scene_total_canvas': ["Total frame time", ["Rs_int, with 1 Fetch robot", "house_single_floor, with 1 Fetch robot", "grocery_store_cafe, with 1 Fetch robot", "Pomaria_0_garden, with 1 Fetch robot"]],
+ // 'scene_loading_canvas': ["Loading time", ["Rs_int, with 1 Fetch robot", "house_single_floor, with 1 Fetch robot", "grocery_store_cafe, with 1 Fetch robot", "Pomaria_0_garden, with 1 Fetch robot"]],
+ // 'scene_omni_canvas': ["Omni step time", ["Rs_int, with 1 Fetch robot", "house_single_floor, with 1 Fetch robot", "grocery_store_cafe, with 1 Fetch robot", "Pomaria_0_garden, with 1 Fetch robot"]],
+ // 'scene_og_canvas': ["Non-omni step time", ["Rs_int, with 1 Fetch robot", "house_single_floor, with 1 Fetch robot", "grocery_store_cafe, with 1 Fetch robot", "Pomaria_0_garden, with 1 Fetch robot"]],
+ // 'scene_mem_canvas': ["Memory usage", ["Rs_int, with 1 Fetch robot", "house_single_floor, with 1 Fetch robot", "grocery_store_cafe, with 1 Fetch robot", "Pomaria_0_garden, with 1 Fetch robot"]],
+ // 'scene_vram_canvas': ["Vram usage", ["Rs_int, with 1 Fetch robot", "house_single_floor, with 1 Fetch robot", "grocery_store_cafe, with 1 Fetch robot", "Pomaria_0_garden, with 1 Fetch robot"]],
}
diff --git a/scripts/profiling.sh b/scripts/profiling.sh
new file mode 100644
index 0000000000..30acb156f7
--- /dev/null
+++ b/scripts/profiling.sh
@@ -0,0 +1,16 @@
+# 1st batch: baselines
+python tests/benchmark/profiling.py # baseline (fastest config possible)
+python tests/benchmark/profiling.py -s Rs_int # for vision research
+python tests/benchmark/profiling.py -s Rs_int -r 1 # for robotics research
+python tests/benchmark/profiling.py -s Rs_int -r 3 # for multi-agent research
+
+# 2nd batch: compare different scenes
+# python tests/benchmark/profiling.py -r 1 -s house_single_floor
+# python tests/benchmark/profiling.py -r 1 -s grocery_store_cafe
+# python tests/benchmark/profiling.py -r 1 -s Pomaria_0_garden
+
+# 3rd batch: OG non-physics features
+python tests/benchmark/profiling.py -r 1 -w # fluids (water)
+python tests/benchmark/profiling.py -r 1 -c # soft body (cloth)
+python tests/benchmark/profiling.py -r 1 -p # macro particle system (diced objects)
+python tests/benchmark/profiling.py -r 1 -w -c -p # everything
\ No newline at end of file
diff --git a/tests/benchmark/profiling.py b/tests/benchmark/profiling.py
new file mode 100644
index 0000000000..072f1632d4
--- /dev/null
+++ b/tests/benchmark/profiling.py
@@ -0,0 +1,180 @@
+import os
+import argparse
+import json
+import omnigibson as og
+import numpy as np
+import omnigibson.utils.transform_utils as T
+import time
+
+from omnigibson.macros import gm
+from omnigibson.systems import get_system
+from omnigibson.object_states import Covered, SlicerActive
+from omnigibson.utils.profiling_utils import ProfilingEnv
+from omnigibson.utils.constants import PrimType
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument("-r", "--robot", type=int, default=0)
+parser.add_argument("-s", "--scene", default="")
+parser.add_argument("-c", "--cloth", action='store_true')
+parser.add_argument("-w", "--fluids", action='store_true')
+parser.add_argument("-p", "--macro_particle_system", action='store_true')
+
+PROFILING_FIELDS = ["Total frame time", "Omni step time", "Non-omni step time", "Memory usage", "Vram usage"]
+NUM_CLOTH = 5
+NUM_SLICE_OBJECT = 3
+
+SCENE_OFFSET = {
+ "": [0, 0],
+ "Rs_int": [0, 0],
+ "Pomaria_0_garden": [0.3, 0],
+ "grocery_store_cafe": [-3.5, 3.5],
+ "house_single_floor": [0, 0],
+}
+
+
+def main():
+ args = parser.parse_args()
+ # Modify macros settings
+ gm.VIEWER_CAMERA_VISIBLE = True
+ gm.ENABLE_HQ_RENDERING = args.fluids
+ gm.ENABLE_OBJECT_STATES = True
+ gm.ENABLE_TRANSITION_RULES = True
+ gm.USE_GPU_DYNAMICS = True
+
+ cfg = {
+ "env": {
+ "action_frequency": 60,
+ "physics_frequency": 300,
+ }
+ }
+ if args.robot > 0:
+ cfg["robots"] = []
+ for i in range(args.robot):
+ cfg["robots"].append({
+ "type": "Fetch",
+ "obs_modalities": "all",
+ "position": [-1.3 + 0.75 * i + SCENE_OFFSET[args.scene][0], 0.5 + SCENE_OFFSET[args.scene][1], 0],
+ "orientation": [0., 0., 0.7071, -0.7071]
+ })
+
+ if args.scene:
+ assert args.scene in SCENE_OFFSET, f"Scene {args.scene} not found in SCENE_OFFSET"
+ cfg["scene"] = {
+ "type": "InteractiveTraversableScene",
+ "scene_model": args.scene,
+ }
+ else:
+ cfg["scene"] = {"type": "Scene"}
+
+ cfg["objects"] = [{
+ "type": "DatasetObject",
+ "name": "table",
+ "category": "breakfast_table",
+ "model": "rjgmmy",
+ "fixed_base": True,
+ "scale": [0.75] * 3,
+ "position": [0.5 + SCENE_OFFSET[args.scene][0], -1 + SCENE_OFFSET[args.scene][1], 0.3],
+ "orientation": [0., 0., 0.7071, -0.7071]
+ }]
+
+ if args.cloth:
+ cfg["objects"].extend([{
+ "type": "DatasetObject",
+ "name": f"cloth_{n}",
+ "category": "t_shirt",
+ "model": "kvidcx",
+ "prim_type": PrimType.CLOTH,
+ "abilities": {"cloth": {}},
+ "bounding_box": [0.3, 0.5, 0.7],
+ "position": [-0.4, -1, 0.7 + n * 0.4],
+ "orientation": [0.7071, 0., 0.7071, 0.],
+ } for n in range(NUM_CLOTH)])
+
+ cfg["objects"].extend([{
+ "type": "DatasetObject",
+ "name": f"apple_{n}",
+ "category": "apple",
+ "model": "agveuv",
+ "scale": [1.5] * 3,
+ "position": [0.5 + SCENE_OFFSET[args.scene][0], -1.25 + SCENE_OFFSET[args.scene][1] + n * 0.2, 0.5],
+ "abilities": {"diceable": {}} if args.macro_particle_system else {}
+ } for n in range(NUM_SLICE_OBJECT)])
+ cfg["objects"].extend([{
+ "type": "DatasetObject",
+ "name": f"knife_{n}",
+ "category": "table_knife",
+ "model": "jxdfyy",
+ "scale": [2.5] * 3
+ } for n in range(NUM_SLICE_OBJECT)])
+
+ load_start = time.time()
+ env = ProfilingEnv(configs=cfg)
+ table = env.scene.object_registry("name", "table")
+ apples = [env.scene.object_registry("name", f"apple_{n}") for n in range(NUM_SLICE_OBJECT)]
+ knifes = [env.scene.object_registry("name", f"knife_{n}") for n in range(NUM_SLICE_OBJECT)]
+ if args.cloth:
+ clothes = [env.scene.object_registry("name", f"cloth_{n}") for n in range(NUM_CLOTH)]
+ for cloth in clothes:
+ cloth.root_link.mass = 1.0
+ env.reset()
+
+ for n, knife in enumerate(knifes):
+ knife.set_position_orientation(
+ position=apples[n].get_position() + np.array([-0.15, 0.0, 0.1 * (n + 2)]),
+ orientation=T.euler2quat([-np.pi / 2, 0, 0]),
+ )
+ knife.keep_still()
+ if args.fluids:
+ table.states[Covered].set_value(get_system("water"), True)
+
+ output, results = [], []
+
+ # Update the simulator's viewer camera's pose so it points towards the robot
+ og.sim.viewer_camera.set_position([SCENE_OFFSET[args.scene][0], -3 + SCENE_OFFSET[args.scene][1], 1])
+ # record total load time
+ total_load_time = time.time() - load_start
+
+ for i in range(500):
+ print('\n')
+ if args.robot:
+ result = env.step(np.array([np.random.uniform(-0.3, 0.3, env.robots[i].action_dim) for i in range(args.robot)]).flatten())[4]
+ else:
+ result = env.step(None)[4]
+ results.append(result)
+
+ field = f"{args.scene}" if args.scene else "Empty scene"
+ if args.robot:
+ field += f", with {args.robot} Fetch robot"
+ if args.cloth:
+ field += ", cloth"
+ if args.fluids:
+ field += ", fluids"
+ if args.macro_particle_system:
+ field += ", macro particles"
+ output.append({
+ "name": field,
+ "unit": "time (ms)",
+ "value": total_load_time,
+ "extra": ["Loading time", "Loading time"]
+ })
+ results = np.array(results)
+ for i, title in enumerate(PROFILING_FIELDS):
+ output.append({
+ "name": field,
+ "unit": "time (ms)" if 'time' in title else "GB",
+ "value": np.mean(results[:, i]),
+ "extra": [title, title]
+ })
+
+ ret = []
+ if os.path.exists("output.json"):
+ with open("output.json", "r") as f:
+ ret = json.load(f)
+ ret.extend(output)
+ with open("output.json", "w") as f:
+ json.dump(ret, f, indent=4)
+ og.shutdown()
+
+if __name__ == "__main__":
+ main()