Adds blueprint environment for Franka stacking mimic #1944

Merged: 19 commits, merged on Mar 10, 2025

1 change: 1 addition & 0 deletions docs/conf.py
@@ -129,6 +129,7 @@
# Mock out modules that are not available on RTD
autodoc_mock_imports = [
"torch",
"torchvision",
"numpy",
"matplotlib",
"scipy",
178 changes: 24 additions & 154 deletions scripts/imitation_learning/isaaclab_mimic/generate_dataset.py
@@ -44,149 +44,34 @@
"""Rest everything follows."""

import asyncio
import contextlib
import gymnasium as gym
import numpy as np
import os
import random
import torch

from isaaclab.envs.mdp.recorders.recorders_cfg import ActionStateRecorderManagerCfg
from isaaclab.managers import DatasetExportMode
from isaaclab.utils.datasets import HDF5DatasetFileHandler

import isaaclab_mimic.envs # noqa: F401
from isaaclab_mimic.datagen.data_generator import DataGenerator
from isaaclab_mimic.datagen.datagen_info_pool import DataGenInfoPool
from isaaclab_mimic.datagen.generation import env_loop, setup_async_generation, setup_env_config
from isaaclab_mimic.datagen.utils import get_env_name_from_dataset, setup_output_paths

import isaaclab_tasks # noqa: F401
from isaaclab_tasks.utils.parse_cfg import parse_env_cfg

# global variable to keep track of the data generation statistics
num_success = 0
num_failures = 0
num_attempts = 0


async def run_data_generator(env, env_id, env_action_queue, data_generator, success_term, pause_subtask=False):
"""Run data generator."""
global num_success, num_failures, num_attempts
while True:
results = await data_generator.generate(
env_id=env_id,
success_term=success_term,
env_action_queue=env_action_queue,
select_src_per_subtask=env.unwrapped.cfg.datagen_config.generation_select_src_per_subtask,
transform_first_robot_pose=env.unwrapped.cfg.datagen_config.generation_transform_first_robot_pose,
interpolate_from_last_target_pose=env.unwrapped.cfg.datagen_config.generation_interpolate_from_last_target_pose,
pause_subtask=pause_subtask,
)
if bool(results["success"]):
num_success += 1
else:
num_failures += 1
num_attempts += 1


def env_loop(env, env_action_queue, shared_datagen_info_pool, asyncio_event_loop):
"""Main loop for the environment."""
global num_success, num_failures, num_attempts
prev_num_attempts = 0
# simulate environment -- run everything in inference mode
with contextlib.suppress(KeyboardInterrupt) and torch.inference_mode():
while True:

actions = torch.zeros(env.unwrapped.action_space.shape)

# get actions from all the data generators
for i in range(env.unwrapped.num_envs):
# an async-blocking call to get an action from a data generator
env_id, action = asyncio_event_loop.run_until_complete(env_action_queue.get())
actions[env_id] = action

# perform action on environment
env.step(actions)

# mark done so the data generators can continue with the step results
for i in range(env.unwrapped.num_envs):
env_action_queue.task_done()

if prev_num_attempts != num_attempts:
prev_num_attempts = num_attempts
print("")
print("*" * 50)
print(f"have {num_success} successes out of {num_attempts} trials so far")
print(f"have {num_failures} failures out of {num_attempts} trials so far")
print("*" * 50)

# termination condition is on enough successes if @guarantee_success or enough attempts otherwise
generation_guarantee = env.unwrapped.cfg.datagen_config.generation_guarantee
generation_num_trials = env.unwrapped.cfg.datagen_config.generation_num_trials
check_val = num_success if generation_guarantee else num_attempts
if check_val >= generation_num_trials:
print(f"Reached {generation_num_trials} successes/attempts. Exiting.")
break

# check that simulation is stopped or not
if env.unwrapped.sim.is_stopped():
break

env.close()


def main():
num_envs = args_cli.num_envs

# get directory path and file name (without extension) from cli arguments
output_dir = os.path.dirname(args_cli.output_file)
output_file_name = os.path.splitext(os.path.basename(args_cli.output_file))[0]

# create directory if it does not exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)

# Get env name from the dataset file
if not os.path.exists(args_cli.input_file):
raise FileNotFoundError(f"The dataset file {args_cli.input_file} does not exist.")
dataset_file_handler = HDF5DatasetFileHandler()
dataset_file_handler.open(args_cli.input_file)
env_name = dataset_file_handler.get_env_name()

if args_cli.task is not None:
env_name = args_cli.task
if env_name is None:
raise ValueError("Task/env name was not specified nor found in the dataset.")

# parse configuration
env_cfg = parse_env_cfg(env_name, device=args_cli.device, num_envs=num_envs)

# Override the datagen_config.generation_num_trials with the value from the command line arg
if args_cli.generation_num_trials is not None:
env_cfg.datagen_config.generation_num_trials = args_cli.generation_num_trials

env_cfg.env_name = env_name

# extract success checking function to invoke manually
success_term = None
if hasattr(env_cfg.terminations, "success"):
success_term = env_cfg.terminations.success
env_cfg.terminations.success = None
else:
raise NotImplementedError("No success termination term was found in the environment.")

# data generator is in charge of resetting the environment
env_cfg.terminations = None

env_cfg.observations.policy.concatenate_terms = False

env_cfg.recorders = ActionStateRecorderManagerCfg()
env_cfg.recorders.dataset_export_dir_path = output_dir
env_cfg.recorders.dataset_filename = output_file_name

if env_cfg.datagen_config.generation_keep_failed:
env_cfg.recorders.dataset_export_mode = DatasetExportMode.EXPORT_SUCCEEDED_FAILED_IN_SEPARATE_FILES
else:
env_cfg.recorders.dataset_export_mode = DatasetExportMode.EXPORT_SUCCEEDED_ONLY
# Setup output paths and get env name
output_dir, output_file_name = setup_output_paths(args_cli.output_file)
env_name = args_cli.task or get_env_name_from_dataset(args_cli.input_file)

# Configure environment
env_cfg, success_term = setup_env_config(
env_name=env_name,
output_dir=output_dir,
output_file_name=output_file_name,
num_envs=num_envs,
device=args_cli.device,
generation_num_trials=args_cli.generation_num_trials,
)

# create environment
env = gym.make(env_name, cfg=env_cfg)
@@ -199,36 +84,21 @@ def main():
# reset before starting
env.reset()

# Set up asyncio stuff
asyncio_event_loop = asyncio.get_event_loop()
env_action_queue = asyncio.Queue()

shared_datagen_info_pool_lock = asyncio.Lock()
shared_datagen_info_pool = DataGenInfoPool(
env.unwrapped, env.unwrapped.cfg, env.unwrapped.device, asyncio_lock=shared_datagen_info_pool_lock
# Setup and run async data generation
async_components = setup_async_generation(
env=env,
num_envs=args_cli.num_envs,
input_file=args_cli.input_file,
success_term=success_term,
pause_subtask=args_cli.pause_subtask,
)
shared_datagen_info_pool.load_from_dataset_file(args_cli.input_file)
print(f"Loaded {shared_datagen_info_pool.num_datagen_infos} to datagen info pool")

# make data generator object
data_generator = DataGenerator(env=env.unwrapped, src_demo_datagen_info_pool=shared_datagen_info_pool)
data_generator_asyncio_tasks = []
for i in range(num_envs):
data_generator_asyncio_tasks.append(
asyncio_event_loop.create_task(
run_data_generator(
env, i, env_action_queue, data_generator, success_term, pause_subtask=args_cli.pause_subtask
)
)
)

try:
asyncio.ensure_future(asyncio.gather(*data_generator_asyncio_tasks))
asyncio.ensure_future(asyncio.gather(*async_components["tasks"]))
env_loop(env, async_components["action_queue"], async_components["info_pool"], async_components["event_loop"])
except asyncio.CancelledError:
print("Tasks were cancelled.")

env_loop(env, env_action_queue, shared_datagen_info_pool, asyncio_event_loop)


if __name__ == "__main__":
try:
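
The refactor above collapses the hand-rolled asyncio plumbing in ``main()`` into reusable helpers from ``isaaclab_mimic.datagen``. A minimal sketch of how those helpers compose, assuming the simulation app has already been launched (the real script does this in its CLI boilerplate before these imports) and using illustrative dataset paths, device, and counts:

```python
import asyncio
import gymnasium as gym

import isaaclab_mimic.envs  # noqa: F401  # registers the Mimic environments with gymnasium
from isaaclab_mimic.datagen.generation import env_loop, setup_async_generation, setup_env_config
from isaaclab_mimic.datagen.utils import get_env_name_from_dataset, setup_output_paths

import isaaclab_tasks  # noqa: F401

# Resolve the output directory/file name and read the env name stored in the source dataset.
# The dataset paths and counts below are illustrative, not part of this PR.
output_dir, output_file_name = setup_output_paths("./datasets/generated_dataset.hdf5")
env_name = get_env_name_from_dataset("./datasets/annotated_dataset.hdf5")

# Build the environment config with recorders wired to the output paths and
# extract the success termination term for manual checking.
env_cfg, success_term = setup_env_config(
    env_name=env_name,
    output_dir=output_dir,
    output_file_name=output_file_name,
    num_envs=8,
    device="cuda:0",
    generation_num_trials=1000,
)

env = gym.make(env_name, cfg=env_cfg)
env.reset()

# Create the shared datagen info pool, per-env generator coroutines, and action queue.
async_components = setup_async_generation(
    env=env,
    num_envs=8,
    input_file="./datasets/annotated_dataset.hdf5",
    success_term=success_term,
    pause_subtask=False,
)

# Schedule the generator tasks and drive the simulation until enough trials complete.
asyncio.ensure_future(asyncio.gather(*async_components["tasks"]))
env_loop(env, async_components["action_queue"], async_components["info_pool"], async_components["event_loop"])
```

Returning the queue, info pool, and event loop from a single ``setup_async_generation()`` call is what should let a notebook reuse the same pieces without the CLI wrapper, per the changelog note below.
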
2 changes: 1 addition & 1 deletion source/isaaclab/config/extension.toml
@@ -1,7 +1,7 @@
[package]

# Note: Semantic Versioning is used: https://semver.org/
version = "0.36.0"
version = "0.36.1"

# Description
title = "Isaac Lab framework for Robot Learning"
9 changes: 9 additions & 0 deletions source/isaaclab/docs/CHANGELOG.rst
@@ -1,6 +1,15 @@
Changelog
---------

0.36.1 (2025-03-10)
~~~~~~~~~~~~~~~~~~~

Added
^^^^^

* Added :attr:`semantic_segmentation_mapping` for camera configs to allow specifying colors for semantics.


0.36.0 (2025-03-07)
~~~~~~~~~~~~~~~~~~~

5 changes: 3 additions & 2 deletions source/isaaclab/isaaclab/envs/manager_based_rl_env.py
@@ -72,14 +72,15 @@ def __init__(self, cfg: ManagerBasedRLEnvCfg, render_mode: str | None = None, **
render_mode: The render mode for the environment. Defaults to None, which
is similar to ``"human"``.
"""
# -- counter for curriculum
self.common_step_counter = 0

# initialize the base class to setup the scene.
super().__init__(cfg=cfg)
# store the render mode
self.render_mode = render_mode

# initialize data and constants
# -- counter for curriculum
self.common_step_counter = 0
# -- init buffers
self.episode_length_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
# -- set the framerate of the gym video recorder wrapper so that the playback speed of the produced video matches the simulation
6 changes: 5 additions & 1 deletion source/isaaclab/isaaclab/sensors/camera/camera.py
@@ -5,6 +5,7 @@

from __future__ import annotations

import json
import numpy as np
import re
import torch
@@ -455,7 +456,10 @@ def _initialize_impl(self):
# if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned.
# if colorize is false, the data is returned as a uint32 image with ids as values.
if name == "semantic_segmentation":
init_params = {"colorize": self.cfg.colorize_semantic_segmentation}
init_params = {
"colorize": self.cfg.colorize_semantic_segmentation,
"mapping": json.dumps(self.cfg.semantic_segmentation_mapping),
}
elif name == "instance_segmentation_fast":
init_params = {"colorize": self.cfg.colorize_instance_segmentation}
elif name == "instance_id_segmentation_fast":
16 changes: 16 additions & 0 deletions source/isaaclab/isaaclab/sensors/camera/camera_cfg.py
@@ -114,3 +114,19 @@ class OffsetCfg:
If True, instance segmentation is converted to an image where instance IDs are mapped to colors.
and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array.
"""

semantic_segmentation_mapping: dict = {}
"""Dictionary mapping semantics to specific colours

Eg.
```
{
"class:cube_1": (255, 36, 66, 255),
"class:cube_2": (255, 184, 48, 255),
"class:cube_3": (55, 255, 139, 255),
"class:table": (255, 237, 218, 255),
"class:ground": (100, 100, 100, 255),
"class:robot": (61, 178, 255, 255),
}
```
"""
6 changes: 5 additions & 1 deletion source/isaaclab/isaaclab/sensors/camera/tiled_camera.py
@@ -5,6 +5,7 @@

from __future__ import annotations

import json
import math
import numpy as np
import torch
@@ -224,7 +225,10 @@ def _initialize_impl(self):
else:
init_params = None
if annotator_type == "semantic_segmentation":
init_params = {"colorize": self.cfg.colorize_semantic_segmentation}
init_params = {
"colorize": self.cfg.colorize_semantic_segmentation,
"mapping": json.dumps(self.cfg.semantic_segmentation_mapping),
}
elif annotator_type == "instance_segmentation_fast":
init_params = {"colorize": self.cfg.colorize_instance_segmentation}
elif annotator_type == "instance_id_segmentation_fast":
2 changes: 1 addition & 1 deletion source/isaaclab_mimic/config/extension.toml
@@ -1,7 +1,7 @@
[package]

# Semantic Versioning is used: https://semver.org/
version = "1.0.2"
version = "1.0.3"

# Description
category = "isaaclab"
15 changes: 14 additions & 1 deletion source/isaaclab_mimic/docs/CHANGELOG.rst
@@ -1,11 +1,24 @@
Changelog
---------

1.0.3 (2025-03-10)
~~~~~~~~~~~~~~~~~~

Changed
^^^^^^^

* Refactored dataset generation code into leaner modules to prepare for Jupyter notebook.

Added
^^^^^

* Added ``Isaac-Stack-Cube-Franka-IK-Rel-Blueprint-Mimic-v0`` environment for blueprint vision stacking.

1.0.2 (2025-01-10)
~~~~~~~~~~~~~~~~~~

Fixed
^^^^^^^
^^^^^

* Fixed test_selection_strategy.py test case by starting omniverse app to import needed dependencies.

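
For completeness, a hedged sketch of instantiating the new blueprint environment added by this PR: the task ID comes from the changelog entry above, the config parsing mirrors the generation script, and it assumes the Omniverse simulation app is already running.

```python
import gymnasium as gym

import isaaclab_mimic.envs  # noqa: F401  # registers the Mimic task IDs
import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.utils.parse_cfg import parse_env_cfg

task_id = "Isaac-Stack-Cube-Franka-IK-Rel-Blueprint-Mimic-v0"
env_cfg = parse_env_cfg(task_id, device="cuda:0", num_envs=1)
env = gym.make(task_id, cfg=env_cfg)
env.reset()
```
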
2 changes: 2 additions & 0 deletions source/isaaclab_mimic/isaaclab_mimic/datagen/__init__.py
@@ -8,5 +8,7 @@
from .data_generator import *
from .datagen_info import *
from .datagen_info_pool import *
from .generation import *
from .selection_strategy import *
from .utils import *
from .waypoint import *