IsaacLab¶
A unified framework for robot learning built on NVIDIA Isaac Sim.
Overview¶
IsaacLab is optimized for robot learning with:
- GPU-accelerated RL training (10,000+ parallel envs)
- Pre-built robot environments
- Modular design for easy customization
- PyTorch integration
- Sim-to-real transfer tools
Installation¶
# Clone repository
git clone https://github.com/isaac-sim/IsaacLab.git
cd IsaacLab
# Run installer
./isaaclab.sh --install
# Or create a conda environment first, then run the installer inside it
./isaaclab.sh --conda    # creates a conda environment for Isaac Lab
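After installation, a quick sanity check is to launch the simulation app and list the registered Isaac-* tasks. This is a minimal sketch; it assumes the standard AppLauncher entry point and that the omni.isaac.lab_tasks extension was installed by the script.
import argparse
import gymnasium as gym
from omni.isaac.lab.app import AppLauncher

# The simulation app must be started before importing other Isaac Lab modules
parser = argparse.ArgumentParser()
AppLauncher.add_app_launcher_args(parser)
args = parser.parse_args()
simulation_app = AppLauncher(args).app

import omni.isaac.lab_tasks  # noqa: F401  (importing this package registers the Isaac-* tasks)

print([task_id for task_id in gym.registry if task_id.startswith("Isaac-")])
simulation_app.close()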
Quick Start¶
Pre-built Environments¶
import gymnasium as gym
# Create environment with 4096 parallel instances
env = gym.make("Isaac-Reach-Franka-v0", num_envs=4096)
# Reset
obs, info = env.reset()
# Run
for _ in range(1000):
    actions = env.action_space.sample()
    obs, rewards, terminated, truncated, info = env.step(actions)
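Because all instances step together on the GPU, observations, rewards, and actions are batched torch tensors with a leading num_envs dimension. A hedged sketch of driving the whole batch with tensors directly (the 7-dimensional action size is an assumption matching the Franka reach task):
import torch

num_envs = env.unwrapped.num_envs
device = env.unwrapped.device
# One action per environment instance: shape (num_envs, 7), values in [-1, 1]
actions = 2.0 * torch.rand((num_envs, 7), device=device) - 1.0
obs, rewards, terminated, truncated, info = env.step(actions)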
Training with RL¶
import gymnasium as gym
from omni.isaac.lab_tasks.utils import parse_env_cfg
from stable_baselines3 import PPO
# Create environment
env_cfg = parse_env_cfg("Isaac-Reach-Franka-v0", num_envs=4096)
env = gym.make("Isaac-Reach-Franka-v0", cfg=env_cfg)
# Note: for real training runs, wrap the environment with Isaac Lab's
# Stable-Baselines3 wrapper (omni.isaac.lab_tasks.utils.wrappers.sb3) so that
# SB3 sees the parallel instances as a vectorized environment
# Train
model = PPO("MultiInputPolicy", env, verbose=1)
model.learn(total_timesteps=10_000_000)
# Save
model.save("franka_reach")
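A hedged sketch of loading the saved policy back and rolling it out in the same environment (model.predict may require converting tensor observations to NumPy, depending on the wrapper used):
# Reload the trained policy and run a short evaluation rollout
model = PPO.load("franka_reach")
obs, info = env.reset()
for _ in range(500):
    action, _ = model.predict(obs, deterministic=True)
    obs, rewards, terminated, truncated, info = env.step(action)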
Available Environments¶
| Environment | Description | Action Space | Observation |
|---|---|---|---|
| Isaac-Reach-Franka-v0 | Reach target | Continuous (7) | Joint states, target |
| Isaac-Lift-Cube-Franka-v0 | Lift cube | Continuous (7) | States, cube pose |
| Isaac-Cartpole-v0 | Classic cartpole | Discrete (2) | Cart pos, pole angle |
| Isaac-Ant-v0 | Quadruped locomotion | Continuous (8) | Joint states, IMU |
| Isaac-Humanoid-v0 | Humanoid walking | Continuous (21) | Full body state |
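Each task id is created the same way as in the Quick Start; inspecting the spaces is a quick way to confirm the action and observation layout listed above (a minimal sketch, assuming the simulation app is running and the task registry is loaded):
import gymnasium as gym

env = gym.make("Isaac-Cartpole-v0", num_envs=64)
print(env.action_space)
print(env.observation_space)
env.close()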
Custom Environments¶
Creating Custom Env¶
import gymnasium as gym

from omni.isaac.lab.envs import ManagerBasedRLEnv, ManagerBasedRLEnvCfg
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.utils import configclass

@configclass
class MyRobotEnvCfg(ManagerBasedRLEnvCfg):
    # Scene settings
    scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=2.0)
    # Observations
    observations: ObservationsCfg = ObservationsCfg()
    # Actions
    actions: ActionsCfg = ActionsCfg()
    # Rewards (available on the RL variant of the manager-based config)
    rewards: RewardsCfg = RewardsCfg()

class MyRobotEnv(ManagerBasedRLEnv):
    cfg: MyRobotEnvCfg

    def __init__(self, cfg: MyRobotEnvCfg, **kwargs):
        super().__init__(cfg, **kwargs)
        # Observations, actions, rewards, and resets are computed by the
        # managers declared in the config; override hooks here only for
        # behavior the managers cannot express.

# Register environment
gym.register(
    id="Isaac-MyRobot-v0",
    entry_point="my_module:MyRobotEnv",
    kwargs={"env_cfg_entry_point": MyRobotEnvCfg},
)
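Once registered, the custom task is created like any built-in one; the config instance is passed at gym.make time (a hedged sketch reusing MyRobotEnvCfg from above):
env = gym.make("Isaac-MyRobot-v0", cfg=MyRobotEnvCfg())
obs, info = env.reset()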
Scene Configuration¶
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR

@configclass
class MySceneCfg(InteractiveSceneCfg):
    # Ground plane
    ground = AssetBaseCfg(
        prim_path="/World/ground",
        spawn=sim_utils.GroundPlaneCfg(),
    )
    # Robot (one clone per environment instance, hence the env_.* pattern)
    robot = ArticulationCfg(
        prim_path="/World/envs/env_.*/Robot",
        spawn=sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Franka/franka_instanceable.usd",
        ),
        init_state=ArticulationCfg.InitialStateCfg(
            pos=(0.0, 0.0, 0.0),
            joint_pos={"panda_joint.*": 0.0},
        ),
    )
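At runtime, the entities declared in the scene config are looked up by name on the environment's scene, which is how observation, reward, and event functions typically reach them (a minimal sketch; names follow the config above):
# Inside a running environment, e.g. from an observation or reward function:
robot = env.scene["robot"]           # Articulation created from MySceneCfg.robot
joint_pos = robot.data.joint_pos     # shape: (num_envs, num_joints)
root_pos = robot.data.root_pos_w     # root position in the world frame, (num_envs, 3)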
Observations and Actions¶
Observation Manager¶
from omni.isaac.lab.managers import ObservationGroupCfg, ObservationTermCfg
from omni.isaac.lab.utils import configclass

@configclass
class ObservationsCfg:
    @configclass
    class PolicyCfg(ObservationGroupCfg):
        # Joint positions
        joint_pos = ObservationTermCfg(func=lambda env: env.robot.data.joint_pos)
        # Joint velocities
        joint_vel = ObservationTermCfg(func=lambda env: env.robot.data.joint_vel)
        # Target position
        target_pos = ObservationTermCfg(func=lambda env: env.target_pos)

    # Observation group fed to the policy
    policy: PolicyCfg = PolicyCfg()
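Per-term corruption can be configured directly on an observation term, which is the usual place for the observation noise mentioned in the sim-to-real section below (a hedged sketch using the additive uniform noise config):
from omni.isaac.lab.utils.noise import AdditiveUniformNoiseCfg as Unoise

joint_pos = ObservationTermCfg(
    func=lambda env: env.robot.data.joint_pos,
    noise=Unoise(n_min=-0.01, n_max=0.01),  # uniform noise added to this term every step
)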
Action Manager¶
from omni.isaac.lab.envs import mdp
from omni.isaac.lab.utils import configclass

@configclass
class ActionsCfg:
    # Joint position control (the mdp module ships ready-made action term configs)
    joint_pos = mdp.JointPositionActionCfg(
        asset_name="robot",
        joint_names=["panda_joint.*"],
        scale=0.5,
    )
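The scale (and optional offset) determine how raw policy outputs map to joint position targets; the sketch below only illustrates that idea and is not the library's internal implementation:
import torch

def apply_position_action(robot, raw_actions: torch.Tensor, scale: float = 0.5) -> None:
    """Illustrative only: scale raw actions, offset by the default pose,
    and hand the result to the joint position controller."""
    targets = robot.data.default_joint_pos + scale * raw_actions
    robot.set_joint_position_target(targets)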
Rewards¶
import torch

from omni.isaac.lab.managers import RewardTermCfg
from omni.isaac.lab.utils import configclass

@configclass
class RewardsCfg:
    # Reward for reaching the target (accessors illustrative; in practice
    # terms usually come from omni.isaac.lab.envs.mdp)
    reaching_target = RewardTermCfg(
        func=lambda env: -torch.norm(env.robot.data.ee_pos - env.target_pos, dim=-1),
        weight=1.0,
    )
    # Penalty for large actions
    action_penalty = RewardTermCfg(
        func=lambda env: -torch.sum(env.actions**2, dim=-1),
        weight=0.01,
    )
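Every term returns one value per environment instance; the reward manager evaluates all terms each control step and combines them as a weighted sum (scaled by the step duration). An illustrative sketch of that combination, not the manager's actual code:
import torch

def combine_reward_terms(env, term_cfgs, step_dt: float) -> torch.Tensor:
    """Weighted sum of reward terms, one value per environment instance."""
    total = torch.zeros(env.num_envs, device=env.device)
    for cfg in term_cfgs:
        total += cfg.weight * cfg.func(env) * step_dt
    return total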
Domain Randomization¶
from omni.isaac.lab.envs import mdp
from omni.isaac.lab.managers import EventTermCfg, SceneEntityCfg
from omni.isaac.lab.utils import configclass

@configclass
class EventsCfg:
    # Randomize robot mass on every reset
    # (parameter names abbreviated; see the mdp.events functions for full signatures)
    randomize_mass = EventTermCfg(
        func=mdp.randomize_rigid_body_mass,
        params={
            "asset_cfg": SceneEntityCfg("robot"),
            "mass_range": (0.5, 1.5),
        },
        mode="reset",
    )
    # Randomize joint friction on every reset
    randomize_friction = EventTermCfg(
        func=mdp.randomize_joint_parameters,
        params={
            "asset_cfg": SceneEntityCfg("robot"),
            "friction_range": (0.1, 1.0),
        },
        mode="reset",
    )
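Event groups are attached to the environment configuration like observations, actions, and rewards; the mode field controls when a term fires ("startup" once at load, "reset" on episode resets, "interval" periodically during an episode). A hedged sketch extending the earlier MyRobotEnvCfg:
@configclass
class MyRobotEnvCfg(ManagerBasedRLEnvCfg):
    scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=2.0)
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    rewards: RewardsCfg = RewardsCfg()
    events: EventsCfg = EventsCfg()  # domain-randomization terms applied on reset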
Performance Tips¶
GPU Utilization¶
# Maximize parallel environments
env = gym.make("Isaac-Reach-Franka-v0", num_envs=8192)
# Pick the physics timestep and control decimation together: the policy acts
# every dt * decimation seconds, i.e. at 30 Hz with the values below
env_cfg.sim.dt = 1/120  # Physics timestep (120 Hz)
env_cfg.decimation = 4  # Control decimation (one action every 4 physics steps)
Memory Management¶
# Reduce rendering cost by training headless (pass --headless to the workflow
# scripts or the AppLauncher) instead of keeping a viewer open
# Use smaller environments
env_cfg.scene.env_spacing = 1.5  # Reduce spacing between cloned envs
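A hedged sketch of launching headless from a standalone script; the headless keyword mirrors the --headless CLI flag:
from omni.isaac.lab.app import AppLauncher

# Start Isaac Sim without a GUI; rendering cost and GPU memory use drop accordingly
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app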
Sim-to-Real Transfer¶
Apply Domain Randomization¶
@configclass
class Sim2RealEventsCfg:
    # Physics randomization
    randomize_robot_properties = EventTermCfg(...)
    randomize_object_properties = EventTermCfg(...)
    # Visual randomization
    randomize_lighting = EventTermCfg(...)
    randomize_materials = EventTermCfg(...)
    # Sensor noise
    add_sensor_noise = EventTermCfg(...)
Train with Realistic Settings¶
# Match real robot constraints (attribute paths illustrative; in practice the
# limits live on the robot's actuator configuration, see the sketch below)
env_cfg.robot.max_velocity = 2.0  # Real robot limit
env_cfg.robot.max_acceleration = 5.0
# Add observation noise (configured per observation term, as shown earlier)
env_cfg.observations.noise_std = 0.01
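A hedged sketch of setting such limits where Isaac Lab actually expects them, on the articulation's actuator configuration (the numeric values are illustrative, not the Franka's real limits):
from omni.isaac.lab.actuators import ImplicitActuatorCfg

robot = ArticulationCfg(
    # ... spawn and init_state as in the scene configuration above ...
    actuators={
        "arm": ImplicitActuatorCfg(
            joint_names_expr=["panda_joint.*"],
            velocity_limit=2.0,   # rad/s, matched to the real robot
            effort_limit=87.0,    # N*m
            stiffness=400.0,
            damping=40.0,
        ),
    },
)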
Example Workflows¶
Training Loop¶
# Train with RSL-RL (workflow script locations can differ between releases)
./isaaclab.sh -p source/standalone/workflows/rsl_rl/train.py --task Isaac-Reach-Franka-v0 --num_envs 4096
# Train with Stable-Baselines3
./isaaclab.sh -p source/standalone/workflows/sb3/train.py --task Isaac-Reach-Franka-v0
# Train with RL-Games
./isaaclab.sh -p source/standalone/workflows/rl_games/train.py --task Isaac-Reach-Franka-v0
Evaluation¶
# Play a trained policy
./isaaclab.sh -p source/standalone/workflows/rsl_rl/play.py --task Isaac-Reach-Franka-v0 --checkpoint runs/*/model.pt
# Record a video of the rollout
./isaaclab.sh -p source/standalone/workflows/rsl_rl/play.py --task Isaac-Reach-Franka-v0 --checkpoint runs/*/model.pt --video
Next Steps¶
- IsaacSim - Underlying simulation platform
- RL Training - Detailed RL training guide
- Comparison - Compare with other simulators