diff --git a/docs/_static/css/theme.css b/docs/_static/css/theme.css new file mode 100644 index 0000000000000000000000000000000000000000..79183bddca1ff8c7084ea7ff8930ed5946fee9f5 --- /dev/null +++ b/docs/_static/css/theme.css @@ -0,0 +1,327 @@ +/* From https://github.com/rusty1s/pytorch_geometric */ + +.wy-side-nav-search { + background: rgb(143,144,147); +} + +.wy-side-nav-search > div.version { + color: black; +} + + +.wy-nav-content-wrap { + background: inherit; +} + +.wy-side-nav-search input[type="text"] { + border: none; + box-shadow: none; + background: white; + border-radius: 0; + font-size: 100%; +} + +.wy-menu-vertical li.current a, +.wy-menu-vertical li.toctree-l1.current > a { + border: none; +} + +.ethical-rtd > div.ethical-sidebar, +.ethical-rtd > div.ethical-footer { + display: none !important; +} + +h1 { + /* text-transform: uppercase; */ + font-family: inherit; + font-weight: 200; +} + +h2, +.rst-content .toctree-wrapper p.caption { + font-family: inherit; + font-weight: 200; +} + +.rst-content a:visited { + color: #3091d1; +} + +/* Begin code */ +.rst-content pre.literal-block, +.rst-content div[class^="highlight"] { + border: none; +} + +.rst-content pre.literal-block, +.rst-content div[class^="highlight"] pre, +.rst-content .linenodiv pre { + font-size: 80%; +} + +.highlight { + background: #f6f8fa; + border-radius: 6px; +} + +.highlight .kn, +.highlight .k { + color: #d73a49; +} + +.highlight .nn { + color: inherit; + font-weight: inherit; +} + +.highlight .nc { + color: #e36209; + font-weight: inherit; +} + +.highlight .fm, +.highlight .nd, +.highlight .nf, +.highlight .nb { + color: #6f42c1; +} + +.highlight .bp, +.highlight .n { + color: inherit; +} + +.highlight .kc, +.highlight .s1, +.highlight .s2, +.highlight .mi, +.highlight .mf, +.highlight .bp, +.highlight .bn, +.highlight .ow { + color: #005cc5; + font-weight: inherit; +} + +.highlight .c1 { + color: #6a737d; +} + +.rst-content code.xref { + padding: .2em .4em; + background: 
rgba(27,31,35,.05); + border-radius: 6px; + border: none; +} +/* End code */ + +.rst-content dl:not(.docutils) dt, +.rst-content dl:not(.docutils) dl dt { + background: rgb(243,244,247); +} + +.rst-content dl:not(.docutils) dt.field-odd, +.rst-content dl:not(.docutils) dt.field-odd { + text-transform: uppercase; + background: inherit; + border: none; + padding: 6px 0; +} + +.rst-content dl:not(.docutils) .property { + text-transform: uppercase; + font-style: normal; + padding-right: 12px; +} + +em.sig-param span.n:first-child, em.sig-param span.n:nth-child(2) { + font-style: normal; +} + +em.sig-param span.n:nth-child(3), +em.sig-param span.n:nth-child(3) a { + color: inherit; + font-weight: normal; + font-style: normal; +} + +em.sig-param span.default_value { + font-family: SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace; + font-style: normal; + font-size: 90%; +} + +.sig-paren { + padding: 0 4px; +} + +.wy-table-responsive table td, +.wy-table-responsive table th { + white-space: normal; +} + +.wy-table-bordered-all, +.rst-content table.docutils { + border: none; +} + +.wy-table-bordered-all td, +.rst-content table.docutils td { + border: none; +} + +.wy-table-odd td, +.wy-table-striped tr:nth-child(2n-1) td, +.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td { + background: rgb(243,244,247); +} + +.wy-table td, +.rst-content table.docutils td, +.rst-content table.field-list td, +.wy-table th, +.rst-content table.docutils th, +.rst-content table.field-list th { + padding: 16px; +} +/* +.admonition { + content: '\f12a'; + font-family: FontAwesome; +} */ + +.admonition.note, div.admonition.note { + border-color: rgba(var(--pst-color-admonition-note),1); +} + +.admonition.note>.admonition-title:before, div.admonition.note>.admonition-title:before { + color: rgba(var(--pst-color-admonition-note),1); + content: '\f12a'!important; + /* content: var(--pst-icon-admonition-note); */ +} + 
+.admonition.question>.admonition-title:before, div.admonition.question>.admonition-title:before { + color: rgba(var(--pst-color-admonition-note),1); + content: '\003f'!important; + /* content: var(--pst-icon-admonition-note); */ +} + +.admonition.explanation>.admonition-title:before, div.admonition.explanation>.admonition-title:before { + color: rgba(var(--pst-color-admonition-note),1); + content: '\f02d'!important; + /* content: var(--pst-icon-admonition-note); */ +} + +.card { + /* Add shadows to create the "card" effect */ + box-shadow: 0 4px 8px 0 rgba(0,0,0,0.2); + transition: 0.3s; + border-radius: 5px; /* 5px rounded corners */ + width: 100%; + padding-bottom: 10px; +} + +/* On mouse-over, add a deeper shadow */ +.card:hover { + box-shadow: 0 8px 16px 0 rgba(0,0,0,0.2); + +} + +/* Add some padding inside the card container */ +.container { + padding: 2px 16px; +} + +.row:after { + content: ""; + display: table; + clear: both; +} + +.column { + float: left; + width: 50%; + padding: 20px 10px; +} + +.box{ + display: none; + width: 100%; +} + +a:hover + .box,.box:hover{ + display: block; + position: absolute; + z-index: 100; + border-radius: 50px!important; + margin-left: 60px; + margin-top: 0px; +} + +a:hover + .card:hover{ + display: block; + position: absolute; + z-index: 100; + border-radius: 50px!important; + margin-left: 60px; + margin-top: 0px; +} + +a.reference.external { + color: #6695B0!important; +} + +#p1 a { + color: #E98D64!important; +} + + +#frame { zoom: 0.75; -moz-transform: scale(0.75); -moz-transform-origin: 0 0; } + +/* Google Fonts */ +@import url(https://fonts.googleapis.com/css?family=Anonymous+Pro); + +/* Global */ + +#typewriter body{ + height: calc(100vh - 8em); + padding: 4em; + color: rgba(255,255,255,.75); + font-family: 'Anonymous Pro', monospace; + background-color: rgb(25,25,25); +} +#typewriter .line-1{ + position: relative; + top: 50%; + width: 24em; + margin: 0 auto; + border-right: 2px solid rgba(255,255,255,.75); + 
font-size: 180%; + text-align: center; + white-space: nowrap; + overflow: hidden; + transform: translateY(-50%); +} + +/* Animation */ +.anim-typewriter{ + animation: typewriter 4s steps(44) 1s 1 normal both, + blinkTextCursor 500ms steps(44) infinite normal; +} +@keyframes typewriter{ + from{width: 0;} + to{width: 24em;} +} +@keyframes blinkTextCursor{ + from{border-right-color: rgba(255,255,255,.75);} + to{border-right-color: transparent;} +} + + +.trimmed-cover { + object-fit: cover; + width: 120%; + height: 177px; + object-position: center 40%; + margin-right: -100px; +} \ No newline at end of file diff --git a/docs/_static/js/custom.js b/docs/_static/js/custom.js new file mode 100644 index 0000000000000000000000000000000000000000..8489b8f02a97a11c230678865f0c7957e7bcd4c0 --- /dev/null +++ b/docs/_static/js/custom.js @@ -0,0 +1,3 @@ +$(document).ready(function () { + $('a[href^="http://"], a[href^="https://"]').not('a[class*=internal]').attr('target', '_blank'); + }); \ No newline at end of file diff --git a/docs/_static/theme_overrides.css b/docs/_static/theme_overrides.css new file mode 100644 index 0000000000000000000000000000000000000000..174fade5f219b8b87141bae4c0f68f1ab10aba79 --- /dev/null +++ b/docs/_static/theme_overrides.css @@ -0,0 +1,13 @@ +/* override table width restrictions */ +@media screen and (min-width: 767px) { + + .wy-table-responsive table td { + /* !important prevents the common CSS stylesheets from overriding + this as on RTD they are loaded after this stylesheet */ + white-space: normal !important; + } + + .wy-table-responsive { + overflow: visible !important; + } +} \ No newline at end of file diff --git a/docs/algorithms/benchmarking.md b/docs/algorithms/benchmarking.md new file mode 100644 index 0000000000000000000000000000000000000000..a304460a4512f7539999a1772926f3e4749a098a --- /dev/null +++ b/docs/algorithms/benchmarking.md @@ -0,0 +1,18 @@ +# Benchmarking + +Benchmarking results of standard policy learning algorithms. 
+ +## v1.0 + +We provide a standardized set of benchmarking experiments as baselines for future experiments. Specifically, we test [Soft Actor-Critic](https://arxiv.org/abs/1812.05905), a state of the art model-free RL algorithm, on a select combination of tasks (all) using a combination of proprioceptive and object-specific observations, robots (`Panda`, `Sawyer`), and controllers (`OSC_POSE`, `JOINT_VELOCITY`). Our experiments were implemented and executed in an extended version of [rlkit](https://github.com/vitchyr/rlkit), a popular PyTorch-based RL framework and algorithm library. For ease of replicability, we have released our official benchmarking results on a [benchmark repository](https://github.com/ARISE-Initiative/robosuite-benchmark). + +![benchmarking_results](../images/benchmarking/benchmarking_results.png) + +All agents were trained for 500 epochs with 500 steps per episode, and utilize the same standardized algorithm hyperparameters (see our benchmarking repo above for exact parameter values). The agents receive the low-dimensional physical states as input to the policy. These experiments ran on 2 CPUs and 12G VRAM and no GPU, each taking about two days to complete. We normalize the per-step rewards to 1.0 such that the maximum possible per-episode return is 500. Above, we show the per-task experiments conducted, with each experiment's training curve showing the evaluation return mean's average and standard deviation over five random seeds. + +We select two of the easiest environments, **Block Lifting** and **Door Opening**, for an ablation study between the operational space controllers (`OSC_POSE`) and the joint velocity controllers (`JOINT_VELOCITY`). We observe that the choice of controllers alone has an evident impact on the efficiency of learning. 
Both robots learn to solve the tasks faster with the operational space controllers, which we hypothesize is credited to the accelerated exploration in task space; this highlights the potential of this impedance-based controller to improve task performance on robotic tasks that were previously limited by their action space parameterization. The SAC algorithm is able to solve three of the nine environments, including **Block Lifting**, **Door Opening**, and **Two Arm Peg-In-Hole**, while making slow progress in the other environments, which requires intelligent exploration in longer task horizons. For future experiments, we recommend using the nine environments with the Panda robot and the operational space controller, i.e., the blue curves of Panda (OSC) in our benchmarking figure above, for standardized and fair comparisons. + +## v0.3 + +- Please see the [Surreal](http://svl.stanford.edu/assets/papers/fan2018corl.pdf) paper for benchmarking results. Code to reproduce results available [here](https://github.com/SurrealAI/surreal). +- For imitation learning results on [RoboTurk](https://roboturk.stanford.edu/) datasets please see the original [RoboTurk](https://arxiv.org/abs/1811.02790) paper and also the [IRIS](https://arxiv.org/abs/1911.05321) paper. \ No newline at end of file diff --git a/docs/algorithms/demonstrations.md b/docs/algorithms/demonstrations.md new file mode 100644 index 0000000000000000000000000000000000000000..a066363a54ae3b88905e5f1eae9ea742bf6cb2b8 --- /dev/null +++ b/docs/algorithms/demonstrations.md @@ -0,0 +1,65 @@ +# Human Demonstrations + +## Collecting Human Demonstrations + +We provide teleoperation utilities that allow users to control the robots with input devices, such as the keyboard, [SpaceMouse](https://www.3dconnexion.com/spacemouse_compact/en/), [DualSense](https://www.playstation.com/en-us/accessories/dualsense-wireless-controller/) and mujoco-gui. 
Such functionality allows us to collect a dataset of human demonstrations for learning. We provide an example script to illustrate how to collect demonstrations. Our [collect_human_demonstrations](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/scripts/collect_human_demonstrations.py) script takes the following arguments: + +- `directory:` path to a folder for where to store the pickle file of collected demonstrations +- `environment:` name of the environment you would like to collect the demonstrations for +- `device:` either "keyboard" or "spacemouse" or "dualsense" or "mjgui" +- `renderer:` Mujoco's builtin interactive viewer (mjviewer) or OpenCV viewer (mujoco) +- `camera:` Pass multiple camera names to enable multiple views. Note that the "mujoco" renderer must be enabled when using multiple views, while "mjviewer" is not supported. + +See the [devices page](https://robosuite.ai/docs/modules/devices.html) for details on how to use the devices. + +## Replaying Human Demonstrations + +We have included an example script that illustrates how demonstrations can be loaded and played back. Our [playback_demonstrations_from_hdf5](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/scripts/playback_demonstrations_from_hdf5.py) script selects demonstration episodes at random from a demonstration pickle file and replays them. + + +## Existing Datasets + +We have included some sample demonstrations for each task at `models/assets/demonstrations`. + + +## Structure of collected demonstrations + +Every set of demonstrations is collected as a `demo.hdf5` file. The `demo.hdf5` file is structured as follows. 
+ +- data (group) + + - date (attribute) - date of collection + + - time (attribute) - time of collection + + - repository_version (attribute) - repository version used during collection + + - env (attribute) - environment name on which demos were collected + + - demo1 (group) - group for the first demonstration (every demonstration has a group) + + - model_file (attribute) - the xml string corresponding to the MJCF mujoco model + + - states (dataset) - flattened mujoco states, ordered by time + + - actions (dataset) - environment actions, ordered by time + + - demo2 (group) - group for the second demonstration + + ... + + (and so on) + +The reason for storing mujoco states instead of raw observations is to make it easy to retrieve different kinds of observations in a postprocessing step. This also saves disk space (image datasets are much larger). + + +## Using Demonstrations for Learning + +The [robomimic](https://arise-initiative.github.io/robomimic-web/) framework makes it easy to train policies using your own [datasets collected with robosuite](https://arise-initiative.github.io/robomimic-web/docs/introduction/datasets.html#robosuite-hdf5-datasets). The framework also contains many useful examples for how to integrate hdf5 datasets into your own learning pipeline. + +The robosuite repository also has some utilities for using the demonstrations to alter the start state distribution of training episodes for learning RL policies - this have proved effective in [several](https://arxiv.org/abs/1802.09564) [prior](https://arxiv.org/abs/1807.06919) [works](https://arxiv.org/abs/1804.02717). For example, we provide a generic utility for setting various types of learning curriculums which dictate how to sample from demonstration episodes when doing an environment reset. For more information see the `DemoSamplerWrapper` class. 
+ +## Warnings +We have verified that deterministic action playback works specifically when playing back demonstrations on the *same machine* that the demonstrations were originally collected upon. However, this means that deterministic action playback is NOT guaranteed (in fact, very unlikely) to work across platforms or even across different machines using the same OS. + +While action playback trajectories are quite similar even if not completely identical to the original collected state trajectories, they do tend to drift over time, and should not be relied upon to accurately replicate demonstrations. Instead, we recommend directly setting states to reproduce the collected trajectories, as shown in [playback_demonstrations_from_hdf5](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/scripts/playback_demonstrations_from_hdf5.py). \ No newline at end of file diff --git a/docs/algorithms/sim2real.md b/docs/algorithms/sim2real.md new file mode 100644 index 0000000000000000000000000000000000000000..e619a5932a5813cefdca6e8c5a4abeab8c222e0e --- /dev/null +++ b/docs/algorithms/sim2real.md @@ -0,0 +1,155 @@ +# Sim-to-Real Transfer +This page covers the randomization techniques to narrow the reality gap of our robotics simulation. These techniques, which concerns about [visual observations](#visuals), [system dynamics](#dynamics), and [sensors](#sensors), are employed to improve the efficacy of transferring our simulation-trained models to the real world. + + +## Visuals + +It is well shown that randomizing the visuals in simulation can play an important role in sim2real applications. **robosuite** provides various `Modder` classes to control different aspects of the visual environment. 
This includes: + +- `CameraModder`: Modder for controlling camera parameters, including FOV and pose +- `TextureModder`: Modder for controlling visual objects' appearances, including texture and material properties +- `LightingModder`: Modder for controlling lighting parameters, including light source properties and pose + +Each of these Modders can be used by the user to directly override default simulation settings, or to randomize their respective properties mid-sim. We provide [demo_domain_randomization.py](../demos.html#domain-randomization) to showcase all of these modders being applied to randomize an environment during every sim step. + + +## Dynamics + +In order to achieve reasonable runtime speeds, many physics simulation platforms often must simplify the underlying physics model. Mujoco is no different, and as a result, many parameters such as friction, damping, and contact constraints do not fully capture real-world dynamics. + +To better compensate for this, **robosuite** provides the `DynamicsModder` class, which can control individual dynamics parameters for each model within an environment. 
These parameters are sorted by element group
A brief example is given below: + +```python +import robosuite as suite +from robosuite.utils.mjmod import DynamicsModder +import numpy as np + +# Create environment and modder +env = suite.make("Lift", robots="Panda") +modder = DynamicsModder(sim=env.sim, random_state=np.random.RandomState(5)) + +# Define function for easy printing +cube_body_id = env.sim.model.body_name2id(env.cube.root_body) +cube_geom_ids = [env.sim.model.geom_name2id(geom) for geom in env.cube.contact_geoms] + +def print_params(): + print(f"cube mass: {env.sim.model.body_mass[cube_body_id]}") + print(f"cube frictions: {env.sim.model.geom_friction[cube_geom_ids]}") + print() + +# Print out initial parameter values +print("INITIAL VALUES") +print_params() + +# Modify the cube's properties +modder.mod(env.cube.root_body, "mass", 5.0) # make the cube really heavy +for geom_name in env.cube.contact_geoms: + modder.mod(geom_name, "friction", [2.0, 0.2, 0.04]) # greatly increase the friction +modder.update() # make sure the changes propagate in sim + +# Print out modified parameter values +print("MODIFIED VALUES") +print_params() + +# We can also restore defaults (original values) at any time +modder.restore_defaults() + +# Print out restored initial parameter values +print("RESTORED VALUES") +print_params() +``` + +Running [demo_domain_randomization.py](../demos.html#domain-randomization) is another method for demo'ing (albeit an extreme example of) this functionality. + +Note that the modder already has some sanity checks in place to prevent presumably undesired / non-sensical behavior, such as adding damping / frictionloss to a free joint or setting a non-zero stiffness value to a joint that is normally non-stiff to begin with. + + +## Sensors + +By default, Mujoco sensors are deterministic and delay-free, which is often an unrealistic assumption to make in the real world. 
To better close this domain gap, **robosuite** provides a realistic, customizable interface via the [Observable](../source/robosuite.utils.html#module-robosuite.utils.observables) class API. Observables model realistic sensor sampling, in which ground truth data is sampled (`sensor`), passed through a corrupting function (`corrupter`), and then finally passed through a filtering function (`filter`). Moreover, each observable has its own `sampling_rate` and `delayer` function which simulates sensor delay. While default values are used to instantiate each observable during environment creation, each of these components can be modified by the user at runtime using `env.modify_observable(...)` . Moreover, each observable is assigned a modality, and are grouped together in the returned observation dictionary during the `env.step()` call. For example, if an environment consists of camera observations and a single robot's proprioceptive observations, the observation dict structure might look as follows: + +```python +{ + "frontview_image": np.array(...), # this has modality "image" + "frontview_depth": np.array(...), # this has modality "image" + "robot0_joint_pos": np.array(...), # this has modality "robot0_proprio" + "robot0_gripper_pos": np.array(...), # this has modality "robot0_proprio" + "image-state": np.array(...), # this is a concatenation of all image observations + "robot0_proprio-state": np.array(...), # this is a concatenation of all robot0_proprio observations +} +``` + +Note that for memory efficiency the `image-state` is not returned by default (this can be toggled in `robosuite/macros.py`). + +We showcase how the `Observable` functionality can be used to model sensor corruption and delay via [demo_sensor_corruption.py](../demos.html#sensor-realism). We also highlight that each of the `sensor`, `corrupter`, and `filter` functions can be arbitrarily specified to suit the end-user's usage. 
For example, a common use case for these observables is to keep track of sampled values from a sensor operating at a higher frequency than the environment step (control) frequency. In this case, the `filter` function can be leveraged to keep track of the real-time sensor values as they're being sampled. We provide a minimal script showcasing this ability below: + +```python +import robosuite as suite +import numpy as np +from robosuite.utils.buffers import RingBuffer + +# Create env instance +control_freq = 10 +env = suite.make("Lift", robots="Panda", has_offscreen_renderer=False, use_camera_obs=False, control_freq=control_freq) + +# Define a ringbuffer to store joint position values +buffer = RingBuffer(dim=env.robots[0].robot_model.dof, length=10) + +# Create a function that we'll use as the "filter" for the joint position Observable +# This is a pass-through operation, but we record the value every time it gets called +# As per the Observables API, this should take in an arbitrary numeric and return the same type / shape +def filter_fcn(corrupted_value): + # Record the inputted value + buffer.push(corrupted_value) + # Return this value (no-op performed) + return corrupted_value + +# Now, let's enable the joint position Observable with this filter function +env.modify_observable( + observable_name="robot0_joint_pos", + attribute="filter", + modifier=filter_fcn, +) + +# Let's also increase the sampling rate to showcase the Observable's ability to update multiple times per env step +obs_sampling_freq = control_freq * 4 +env.modify_observable( + observable_name="robot0_joint_pos", + attribute="sampling_rate", + modifier=obs_sampling_freq, +) + +# Take a single environment step with positive joint velocity actions +action = np.ones(env.robots[0].robot_model.dof) * 1.0 +env.step(action) + +# Now we can analyze what values were recorded +np.set_printoptions(precision=2) +print(f"\nPolicy Frequency: {control_freq}, Observable Sampling Frequency: {obs_sampling_freq}") 
+print(f"Number of recorded samples after 1 policy step: {buffer._size}\n") +for i in range(buffer._size): + print(f"Recorded value {i}: {buffer.buf[i]}") +``` diff --git a/docs/modeling/arena.rst b/docs/modeling/arena.rst new file mode 100644 index 0000000000000000000000000000000000000000..4889c05894ee59f517af8b91f394ab8b2e48cea1 --- /dev/null +++ b/docs/modeling/arena.rst @@ -0,0 +1,67 @@ +Arena +===== + +The ``Arena`` class serves as a base model for building the simulation environment. +By default, this includes a ground plane and visual walls, and child classes extend this +to additionally include other objects, e.g., a table or bins. + +Base Arena +---------- + +.. autoclass:: robosuite.models.arenas.arena.Arena + + .. automethod:: __init__ + .. automethod:: set_origin + .. automethod:: set_camera + +Empty Arena +----------- + +.. autoclass:: robosuite.models.arenas.empty_arena.EmptyArena + + .. automethod:: __init__ + +Bins Arena +---------- + +.. autoclass:: robosuite.models.arenas.bins_arena.BinsArena + + .. automethod:: __init__ + .. automethod:: configure_location + +Pegs Arena +---------- + +.. autoclass:: robosuite.models.arenas.pegs_arena.PegsArena + + .. automethod:: __init__ + +Table Arena +----------- + +.. autoclass:: robosuite.models.arenas.table_arena.TableArena + + .. automethod:: __init__ + .. automethod:: configure_location + .. autoproperty:: table_top_abs + +Wipe Arena +---------- + +.. autoclass:: robosuite.models.arenas.wipe_arena.WipeArena + + .. automethod:: __init__ + .. automethod:: configure_location + .. automethod:: reset_arena + .. automethod:: sample_start_pos + .. automethod:: sample_path_pos + +MultiTable Arena +---------------- + +.. autoclass:: robosuite.models.arenas.multi_table_arena.MultiTableArena + + .. automethod:: __init__ + .. automethod:: _add_table + .. automethod:: configure_location + .. 
automethod:: _postprocess_arena diff --git a/docs/modeling/mujoco_model.rst b/docs/modeling/mujoco_model.rst new file mode 100644 index 0000000000000000000000000000000000000000..f164ac83a17b3c2b60d421cf408bfac5deb65cdb --- /dev/null +++ b/docs/modeling/mujoco_model.rst @@ -0,0 +1,38 @@ +Mujoco Model +============ + +The ``MujocoModel`` class is the foundational class from which all other model classes extend from in robosuite. This class represents a standardized API for all models used in simulation and is the core modeling component that other model classes build upon. The ``MujocoXMLModel`` is an extension of this class that represents models based on an XML file. + +Base Mujoco Model +----------------- + +.. autoclass:: robosuite.models.base.MujocoModel + + .. automethod:: correct_naming + .. automethod:: set_sites_visibility + .. automethod:: exclude_from_prefixing + .. autoproperty:: name + .. autoproperty:: naming_prefix + .. autoproperty:: root_body + .. autoproperty:: bodies + .. autoproperty:: joints + .. autoproperty:: actuators + .. autoproperty:: sites + .. autoproperty:: sensors + .. autoproperty:: contact_geoms + .. autoproperty:: visual_geoms + .. autoproperty:: important_geoms + .. autoproperty:: important_sites + .. autoproperty:: important_sensors + .. autoproperty:: bottom_offset + .. autoproperty:: top_offset + .. autoproperty:: horizontal_radius + + +XML Mujoco Model +---------------- + +.. autoclass:: robosuite.models.base.MujocoXMLModel + + .. autoproperty:: base_offset + .. autoproperty:: contact_geom_rgba diff --git a/docs/modeling/object_model.rst b/docs/modeling/object_model.rst new file mode 100644 index 0000000000000000000000000000000000000000..12c8a46596d861236c27cb212efdadddd52c136a --- /dev/null +++ b/docs/modeling/object_model.rst @@ -0,0 +1,48 @@ +Object Model +============ + +The ``MujocoObject`` class serves as a catch-all base class that is used to capture individual simulation objects to +instantiate within a given simulation. 
This is done in one of two ways via extended classes -- the ``MujocoXMLObject`` +reads in information from a corresponding object XML file, whereas the ``MujocoGeneratedObject`` proecedurally generates a +custom object using a suite of utility mj modeling functions. In conjunction with the ``RobotModel``, and +``Arena`` classes, these classes serve as the basis for forming the higher level ``Task`` class which is used to +ultimately generate the ``MjSim`` simulation object. + +Base Object Model +----------------- + +.. autoclass:: robosuite.models.objects.objects.MujocoObject + + .. automethod:: __init__ + .. automethod:: merge_assets + .. automethod:: get_obj + .. automethod:: exclude_from_prefixing + .. automethod:: _get_object_subtree + .. automethod:: _get_object_properties + .. autoproperty:: important_geoms + .. autoproperty:: important_sites + .. autoproperty:: important_sensors + .. autoproperty:: get_site_attrib_template + .. autoproperty:: get_joint_attrib_template + + +XML Object Model +---------------- + +.. autoclass:: robosuite.models.objects.objects.MujocoXMLObject + + .. automethod:: __init__ + .. automethod:: _duplicate_visual_from_collision + .. automethod:: _get_geoms + + +Generated Object Model +---------------------- + +.. autoclass:: robosuite.models.objects.objects.MujocoGeneratedObject + + .. automethod:: __init__ + .. automethod:: sanity_check + .. automethod:: get_collision_attrib_template + .. automethod:: get_visual_attrib_template + .. 
automethod:: append_material \ No newline at end of file diff --git a/docs/modeling/robot_model.rst b/docs/modeling/robot_model.rst new file mode 100644 index 0000000000000000000000000000000000000000..71673dad784a8561aef91ee7ff9b404d6fbcc099 --- /dev/null +++ b/docs/modeling/robot_model.rst @@ -0,0 +1,72 @@ +Robot Model +=========== + +Robot Model +----------- +The ``RobotModel`` class serves as a direct intermediary class that reads in information from a corresponding robot XML +file and also contains relevant hard-coded information from that XML. This represents an arbitrary robot optionally equipped with a mount via the ``RobotBaseModel`` class and is the core modeling component of the higher-level ``Robot`` class used in simulation. + +.. autoclass:: robosuite.models.robots.robot_model.RobotModel + + .. automethod:: set_base_xpos + .. automethod:: set_base_ori + .. automethod:: set_joint_attribute + .. automethod:: add_base + .. automethod:: add_mount + .. automethod:: add_mobile_base + .. automethod:: add_leg_base + .. autoproperty:: dof + .. autoproperty:: default_base + .. autoproperty:: default_controller_config + .. autoproperty:: init_qpos + .. autoproperty:: base_xpos_offset + .. autoproperty:: _horizontal_radius + .. autoproperty:: _important_sites + .. autoproperty:: _important_geoms + .. autoproperty:: _important_sensors + + +Manipulator Model +----------------- +The ``ManipulatorModel`` class extends from the base ``RobotModel`` class, and represents an armed, mounted robot with an optional gripper attached to its end effector. In conjunction with the corresponding ``GripperModel`` class and ``RobotBaseModel`` class, this serves as the core modeling component of the higher-level ``Manipulator`` class used in simulation. + +.. autoclass:: robosuite.models.robots.manipulators.manipulator_model.ManipulatorModel + + .. automethod:: add_gripper + .. autoproperty:: default_gripper + .. autoproperty:: arm_type + .. autoproperty:: base_xpos_offset + .. 
autoproperty:: eef_name + .. autoproperty:: _important_sites + + +Gripper Model +------------- +The ``GripperModel`` class serves as a direct intermediary class that reads in information from a corresponding gripper XML file and also contains relevant hard-coded information from that XML. In conjunction with the ``ManipulatorModel`` class, this serves as the core modeling component of the higher-level `Manipulator` class used in simulation. + +.. autoclass:: robosuite.models.grippers.gripper_model.GripperModel + + .. automethod:: format_action + .. autoproperty:: speed + .. autoproperty:: dof + .. autoproperty:: init_qpos + .. autoproperty:: _important_sites + .. autoproperty:: _important_geoms + .. autoproperty:: _important_sensors + + +Robot Base Model +----------- + +The ``RobotBaseModel`` class represents the base of the robot. User can use ``add_base`` method in the ``RobotModel`` class to add a base model to the robot. + +There are mainly three types of base models: ``MountModel``, ``MobileBaseModel``, and ``LegBaseModel``. + +.. autoclass:: robosuite.models.bases.robot_base_model.RobotBaseModel + + .. autoproperty:: top_offset + .. autoproperty:: horizontal_radius + .. autoproperty:: naming_prefix + .. autoproperty:: _important_sites + .. autoproperty:: _important_geoms + .. autoproperty:: _important_sensors diff --git a/docs/modeling/task.rst b/docs/modeling/task.rst new file mode 100644 index 0000000000000000000000000000000000000000..5e7338ae6acf0adb7441d403d413f32286f5e661 --- /dev/null +++ b/docs/modeling/task.rst @@ -0,0 +1,14 @@ +Task +===== + +The ``Task`` class is responsible for integrating a given ``Arena``, ``RobotModel``, and set of ``MujocoObjects`` into a single element tree that is then parsed and converted into an ``MjSim`` object. + +Base Task +--------- + +.. autoclass:: robosuite.models.tasks.task.Task + + .. automethod:: __init__ + .. automethod:: merge_robot + .. automethod:: merge_arena + .. 
automethod:: merge_objects \ No newline at end of file diff --git a/docs/modules/controllers.rst b/docs/modules/controllers.rst new file mode 100644 index 0000000000000000000000000000000000000000..47ba76c3be3cd5cb51c2474adf8218b449a7953e --- /dev/null +++ b/docs/modules/controllers.rst @@ -0,0 +1,281 @@ +Controllers +============== + +Composite Controllers +--------------------- + +Robosuite's composite controllers assumes that a robot consists of multiple "body parts", such as arms, torso, head, base, and legs, and that each body part has +a "body part" controller (e.g., ``OSC_POSE``, ``JOINT_POSITION``). The composite controller orchestrates these body part controllers. +Composite controllers are controllers that are composed of multiple body-part controllers. +They are used to control the entire robot, including all of its parts. + +When an action vector is commanded to the robot, the action will be split into multiple body-part actions, each of which will be sent to the corresponding body-part +controller. To understand the action split, use the function ``robosuite.robots.robot.print_action_info()``. +To create the action easily, we also provide a helper function ``robosuite.robots.robot.create_action_vector()`` which takes the action dictionary as +inputs and return the action vector with correct dimensions. For controller actions whose input dimentions does not match the robot's degrees of freedoms, +you need to write your own ``create_action_vector()`` function inside the custom composite controller so that the robot's function can retrieve the information properly. + +**Basic** +****** + +The "Basic" composite controller consists of individual part controllers that operate independently to control various parts of the robot, such as arms, torso, head, base, and legs. +Each part can be assigned a specific controller type (e.g., ``OSC_POSE``, ``JOINT_POSITION``) depending on the desired control behavior for that part. 
+For example, arms may use ``OSC_POSE`` for precise end-effector control, while the base may use JOINT_VELOCITY for movement across the ground. + + +**WholeBodyIK** +************* + +The "WholeBodyIK" composite controller takes in end effector targets, and converts them into joint angle targets for the corresponding body parts' joints. + + +**Third-party Controllers** +*********************** + +Third-party controllers integrate custom or external control algorithms into robosuite. Examples include https://github.com/kevinzakka/mink. We provide +an example of adding a third-party controller in https://robosuite.ai/docs/tutorials/add_controller.html. + + +Workflow of Loading Configs +**************************** +Loading configs for composite controllers is critical for selecting the correct control mode with well-tuned parameters. We provide a list of default controller configs for the composite controllers, and also support easy specification of your custom controller config file. A config file is defined in a json file. + +An example of the controller config file is shown below (many parameters are omitted in `...` for brevity): + +.. toggle:: + + Example for defining BASIC controller. + + .. code-block:: json + + { + "type": "BASIC", + "body_parts": { + "arms": { + "right": { + "type": "OSC_POSE", + "input_max": 1, + "input_min": -1, + "output_max": [0.05, 0.05, 0.05, 0.5, 0.5, 0.5], + "output_min": [-0.05, -0.05, -0.05, -0.5, -0.5, -0.5], + "kp": 150, + ... + }, + "left": { + "type": "OSC_POSE", + ... + } + }, + "torso": { + "type" : "JOINT_POSITION", + ... + }, + "head": { + "type" : "JOINT_POSITION", + ... + }, + "base": { + "type": "JOINT_VELOCITY", + ... + }, + "legs": { + "type": "JOINT_POSITION", + ... + } + } + } + + + +Part Controllers +------------------ + +Part controllers are used to determine the type of high-level control over a given robot arm. 
While all arms are directly controlled via their joint torques, the inputted action space for a given environment can vary depending on the type of desired control. Our controller options include ``OSC_POSE``, ``OSC_POSITION``, ``JOINT_POSITION``, ``JOINT_VELOCITY``, and ``JOINT_TORQUE``. + +For ``OSC_POSE``, ``OSC_POSITION``, and ``JOINT_POSITION``, we include three variants that determine the action space. The most common variant is to use a predefined and constant set of impedance parameters; in that case, the action space only includes the desired pose, position, or joint configuration. We also include the option to specify either the stiffness values (and the damping will be automatically set to values that lead to a critically damped system), or all impedance parameters, both stiffness and damping, as part of the action at each step. These two variants lead to extended action spaces that can control the stiffness and damping behavior of the controller in a variable manner, providing full control to the policy/solution over the contact and dynamic behavior of the robot. + +When using any position-based control (``OSC``, ``IK``, or ``JOINT_POSITION`` controllers), input actions are, by default, +interpreted as delta values from the current state. + +When using any end-effector pose controller (``IK``, ``OSC_POSE``), delta rotations from the current end-effector orientation +in the form of axis-angle coordinates ``(ax, ay, az)``, where the direction represents the axis and the magnitude +represents the angle (in radians). Note that for ``OSC``, the rotation axes are taken relative to the global world +coordinate frame, whereas for ``IK``, the rotation axes are taken relative to the end-effector origin, NOT the global world coordinate frame! + +During runtime, the execution of the controllers is as follows. 
Controllers receive a desired configuration (reference value) and output joint torques that try to minimize the error between the current configuration and the desired one. Policies and solutions provide these desired configurations, elements of some action space, at what we call simulated policy frequency (:math:`f_{p}`), e.g., 20Hz or 30Hz. **robosuite** will execute several iterations composed of a controller execution and a simulation step at simulation frequency, :math:`f_s` (:math:`f_s = N\cdot f_p`), using the same reference signal until a new action is provided by the policy/solution. In these iterations, while the desired configuration is kept constant, the current state of the robot is changing, and thus, the error. + +In the following we summarize the options, variables, and the control laws (equations) that convert desired values from the policy/solution and current robot states into executable joint torques to minimize the difference. + +Joint Space Control - Torque +********************************* +Controller Type: ``JOINT_TORQUE`` + +Action Dimensions (not including gripper): ``n`` (number of joints) + +Since our controllers transform the desired values from the policies/solutions into joint torques, if these values are already joint torques, there is a one-to-one mapping between the reference value from the policy/solution and the output value from the joint torque controller at each step: :math:`\tau = \tau_d`. + +.. math:: + \begin{equation} + \tau = \tau_d + \end{equation} + +Joint Space Control - Velocity +********************************* +Controller Type: ``JOINT_VELOCITY`` + +Action Dimensions (not including gripper): ``n`` (number of joints) + +To control joint velocities, we create a proportional (P) control law between the desired value provided by the policy/solution (interpreted as desired velocity of each joint) and the current joint velocity of the robot. 
This control law, parameterized by a proportional constant, :math:`k_p`, generates joint torques to execute at each simulation step: + +.. math:: + \tau = k_p (\dot{q}_d - \dot{q}) + + +Joint Space Control - Position with Fixed Impedance +******************************************************** +Controller Type: ``JOINT_POSITION`` + +Impedance: fixed + +Action Dimensions (not including gripper): ``n`` (number of joints) + +In joint position control, we create a proportional-derivative (PD) control law between the desired value provided by the policy/solution (interpreted as desired configuration for each joint) and the current joint positions of the robot. The control law that generates the joint torques to execute is parameterized by proportional and derivative gains, :math:`k_p` and :math:`k_v`, and defined as + +.. math:: + \begin{equation} + \tau = \Lambda \left[k_p \Delta_q - k_d\dot{q}\right] + \end{equation} + +where :math:`\Delta_q = q_d - q` is the difference between current and desired joint configurations, and :math:`\Lambda` is the inertia matrix, that we use to scale the error to remove the dynamic effects of the mechanism. The stiffness and damping parameters, :math:`k_p` and :math:`k_d`, are determined in construction and kept fixed. + +Joint Space Control - Position with Variable Stiffness +*********************************************************** +Controller Type: ``JOINT_POSITION`` + +Impedance: variable_kp + +Action Dimensions (not including gripper): ``2n`` (number of joints) + +The control law is the same as for fixed impedance but, in this controller, :math:`k_p`` can be determined by the policy/solution at each policy step. 
+ +Joint Space Control - Position with Variable Impedance +*********************************************************** +Controller Type: ``JOINT_POSITION`` + +Impedance: variable + +Action Dimensions (not including gripper): ``3n`` (number of joints) + +Again, the control law is the same in the two previous control types, but now both the stiffness and damping parameters, :math:`k_p` and :math:`k_d`, are controllable by the policy/solution and can be changed at each step. + +Operational Space Control - Pose with Fixed Impedance +********************************************************** +Controller Type: ``OSC_POSE`` + +Impedance: fixed + +Action Dimensions (not including gripper): ``6`` + +In the ``OSC_POSE`` controller, the desired value is the 6D pose (position and orientation) of a controlled frame. We follow the formalism from `[Khatib87] `_. Our control frame is always the ``eef_site`` defined in the `Gripper Model `_, placed at the end of the last link for robots without gripper or between the fingers for robots with gripper. The operational space control framework (OSC) computes the necessary joint torques to minimize the error between the desired and the current pose of the ``eef_site`` with the minimal kinematic energy. + +Given a desired pose :math:`\mathbf{x}_{\mathit{des}}` and the current end-effector pose, , we first compute the end-effector acceleration that would help minimize the error between both, assumed. PD (proportional-derivative) control schema to improve convergence and stability. For that, we first decompose into a desired position, :math:`p_d \in \mathbb{R}^3`, and a desired orientation, :math:`R_d \in \mathbb{SO}(3)`. The end-effector acceleration to minimize the error should increase with the difference between desired end-effector pose and current pose, :math:`p` and :math:`R` (proportional term), and decrease with the current end-effector velocity, :math:`v` and :math:`\omega` (derivative term). 
+ +We then compute the robot actuation (joint torques) to achieve the desired end-effector space accelerations leveraging the kinematic and dynamic models of the robot with the dynamically-consistent operational space formulation in `[Khatib1995a] `_. First, we compute the wrenches at the end-effector that corresponds to the desired accelerations, :math:`{f}\in\mathbb{R}^{6}`. +Then, we map the wrenches in end-effector space :math:`{f}` to joint torque commands with the end-effector Jacobian at the current joint configuration :math:`J=J(q)`: :math:`\tau = J^T{f}`. + +Thus, the function that maps end-effector space position and orientation to low-level robot commands is (:math:`\textrm{ee} = \textrm{\it end-effector space}`): + +.. math:: + + \begin{equation} + \begin{aligned} + \tau &= J_p[\Lambda_p[k_p^p (p_d - p) - k_v^p v]] + J_R[\Lambda_R\left[k_p^R(R_d \ominus R) - k_d^R \omega \right]] + \end{aligned} + \end{equation} + +where :math:`\Lambda_p` and :math:`\Lambda_R` are the parts corresponding to position and orientation in :math:`\Lambda \in \mathbb{R}^{6\times6}`, the inertial matrix in the end-effector frame that decouples the end-effector motions, :math:`J_p` and :math:`J_R` are the position and orientation parts of the end-effector Jacobian, and :math:`\ominus` corresponds to the subtraction in :math:`\mathbb{SO}(3)`. The difference between current and desired position (:math:`\Delta_p= p_d - p`) and between current and desired orientation (:math:`\Delta_R = R_d \ominus R`) can be used as alternative policy action space, :math:`\mathcal{A}`. :math:`k_p^p`, :math:`k_p^d`, :math:`k_p^R`, and :math:`k_d^R` are vectors of proportional and derivative gains for position and orientation (parameters :math:`\kappa`), respectively, set once at initialization and kept fixed. 
+ +Operational Space Control - Pose with Variable Stiffness +************************************************************* +Controller Type: ``OSC_POSE`` + +Impedance: variable_kp + +Action Dimensions (not including gripper): ``12`` + +The control law is the same as ``OSC_POSE`` but, in this case, the stiffness of the controller, :math:`k_p`, is part of the action space and can be controlled and changed at each time step by the policy/solution. The damping parameters, :math:`k_d`, are set to maintain the critically damped behavior of the controller. + +Operational Space Control - Pose with Variable Impedance +********************************************************* +Controller Type: ``OSC_POSE`` + +Impedance: variable + +Action Dimensions (not including gripper): ``18`` + +The control law is the same as in the to previous controllers, but now both the stiffness and the damping, :math:`k_p` and :math:`k_d`, are part of the action space and can be controlled and changed at each time step by the policy/solution. + + +Configurations +--------------- + +The `config directory `_ provides a set of default configuration files that hold default examples of parameters relevant to individual controllers. Note that when creating your controller config templates of a certain type of controller, the listed parameters in the default example are required and should be specified accordingly. + +Note: Each robot has its own default controller configuration which is called by default unless a different controller config is called. + +Below, a brief overview and description of each subset of controller parameters are shown: + +Controller Settings +******************** +* ``type``: Type of controller to control. Can be ``OSC_POSE``, ``OSC_POSITION``, ``IK_POSE``, ``JOINT_POSITION``, ``JOINT_VELOCITY``, or ``JOINT_TORQUE`` +* ``interpolation``: If not ``null``, specified type of interpolation to use between desired actions. Currently only ``linear`` is supported. 
+* ``ramp_ratio``: If using ``linear`` interpolation, specifies the proportion of allotted timesteps (value from [0, 1]) over which to execute the interpolated commands. +* ``{...}_limits``: Limits for that specific controller. E.g.: for a ``JOINT_POSITION``, the relevant limits are its joint positions, ``qpos_limits`` . Can be either a 2-element list (same min/max limits across entire relevant space), or a list of lists (specific limits for each component) +* ``ik_{pos, ori}_limit``: Only applicable for IK controller. Limits the magnitude of the desired relative change in position / orientation. +* ``{input,output}_{min,max}``: Scaling ranges for mapping action space inputs into controller inputs. Settings these limits will automatically clip the action space input to be within the ``input_{min,max}`` before mapping the requested value into the specified ``output_{min,max}`` range. Can be either a scalar (same limits across entire action space), or a list (specific limits for each action component) +* ``kp``: Where relevant, specifies the proportional gain for the controller. Can be either be a scalar (same value for all controller dimensions), or a list (specific values for each dimension) +* ``damping_ratio``: Where relevant, specifies the damping ratio constant for the controller. +* ``impedance_mode``: For impedance-based controllers (``OSC_*``, ``JOINT_POSITION``), determines the impedance mode for the controller, i.e. the nature of the impedance parameters. It can be ``fixed``, ``variable``, or ``variable_kp`` (kd is adjusted to provide critically damped behavior). +* ``kp_limits, damping_ratio_limits``: Only relevant if ``impedance_mode`` is set to ``variable`` or ``variable_kp``. Sets the limits for the resulting action space for variable impedance gains. +* ``control_delta``: Only relevant for ``OSC_POSE`` or ``OSC_POSITION`` controllers. ``true`` interprets input actions as delta values from the current robot end-effector position. 
Otherwise, assumed to be absolute (global) values +* ``uncouple_pos_ori``: Only relevant for ``OSC_POSE``. ``true`` decouples the desired position and orientation torques when executing the controller + +Loading a Controller +--------------------- +By default, user will use the `load_composite_controller_config()` method to create a controller configuration. + +Using a Default Controller Configuration +***************************************** +Any controller can be used with its default configuration, and can be easily loaded into a given environment by calling its name as shown below (where ``controller`` is one of acceptable controller ``type`` strings): + +.. code-block:: python + + import robosuite as suite + from robosuite import load_composite_controller_config + + # Load the desired controller config with default Basic controller + config = load_composite_controller_config(controller="BASIC") + + # Create environment + env = suite.make("Lift", robots="Panda", controller_configs=config, ... ) + + +Using a Custom Controller Configuration +**************************************** +A custom controller configuration can also be used by simply creating a new config (``.json``) file with the relevant parameters as specified above. All robosuite environments have an optional ``controller_configs`` argument that can be used to pass in specific controller settings. Note that this is expected to be a ``dict``, so the new configuration must be read in and parsed as a ``dict`` before passing it during the environment ``robosuite.make(...)`` call. A brief example script showing how to import a custom controller configuration is shown below. + + +.. 
code-block:: python + + import robosuite as suite + from robosuite import load_composite_controller_config + + # Path to config file + controller_fpath = "/your/custom/config/filepath/here/filename.json" + + # Import the file as a dict + config = load_composite_controller_config(controller=controller_fpath) + + # Create environment + env = suite.make("Lift", robots="Panda", controller_configs=config, ... ) + diff --git a/docs/modules/devices.md b/docs/modules/devices.md new file mode 100644 index 0000000000000000000000000000000000000000..76c84edfb5824ea77537cd2be7261f77ecfbaf8e --- /dev/null +++ b/docs/modules/devices.md @@ -0,0 +1,89 @@ +# I/O Devices + +Devices are used to read user input and teleoperate simulated robots in real-time. This is achieved by either using a keyboard, a [SpaceMouse](https://www.3dconnexion.com/spacemouse_compact/en/) or a [DualSense](https://www.playstation.com/en-us/accessories/dualsense-wireless-controller/) joystick, and whose teleoperation capabilities can be demonstrated with the [demo_device_control.py](../demos.html#teleoperation) script. More generally, we support any interface that implements the [Device](../simulation/device) abstract base class. In order to support your own custom device, simply subclass this base class and implement the required methods. + +## Keyboard + +We support keyboard input through the OpenCV2 window created by the mujoco renderer. + +**Keyboard controls** + +Note that the rendering window must be active for these commands to work. 
+ +| Keys | Command | +| :------------------ | :----------------------------------------- | +| Ctrl+q | reset simulation | +| spacebar | toggle gripper (open/close) | +| up-right-down-left | move horizontally in x-y plane | +| .-; | move vertically | +| o-p | rotate (yaw) | +| y-h | rotate (pitch) | +| e-r | rotate (roll) | +| b | toggle arm/base mode (if applicable) | +| s | switch active arm (if multi-armed robot) | +| = | switch active robot (if multi-robot env) | +| Ctrl+C | quit | + + +## 3Dconnexion SpaceMouse + +We support the use of a [SpaceMouse](https://www.3dconnexion.com/spacemouse_compact/en/) as well. + +**3Dconnexion SpaceMouse controls** + +| Control | Command | +| :------------------------ | :------------------------------------ | +| Right button | reset simulation | +| Left button (hold) | close gripper | +| Move mouse laterally | move arm horizontally in x-y plane | +| Move mouse vertically | move arm vertically | +| Twist mouse about an axis | rotate arm about a corresponding axis | +| b | toggle arm/base mode (if applicable) | +| s | switch active arm (if multi-armed robot) | +| = | switch active robot (if multi-robot environment) | +| Ctrl+C (keyboard) | quit | + +## Sony DualSense + +we support the use of a [Sony DualSense](https://www.playstation.com/en-us/accessories/dualsense-wireless-controller/) as well. 
+ +**Sony DualSense controls** + +| Control | Command | +| :--------------------------- | :------------------------------------ | +| Square button | reset simulation | +| Circle button (hold) | close gripper | +| Move LX/LY Stick | move arm horizontally in x-y plane | +| Press L2 Trigger with or without L1 button | move arm vertically | +| Move RX/RY Stick | rotate arm about x/y axis (roll/pitch) | +| Press R2 Trigger with or without R1 button | rotate arm about z axis (yaw) | +| Triangle button | toggle arm/base mode (if applicable) | +| Left/Right Direction Pad | switch active arm (if multi-armed robot) | +| Up/Down Direction Pad | switch active robot (if multi-robot environment) | +| Ctrl+C (keyboard) | quit | + +## Mujoco GUI Device + +To use the Mujoco GUI device for teleoperation, follow these steps: + +1. Set renderer as `"mjviewer"`. For example: + +```python +env = suite.make( + **options, + renderer="mjviewer", + has_renderer=True, + has_offscreen_renderer=False, + ignore_done=True, + use_camera_obs=False, +) +``` + +Note: if using Mac, please use `mjpython` instead of `python`. For example: + +```mjpython robosuite/scripts/collect_human_demonstrations.py --environment Lift --robots Panda --device mjgui --camera frontview --controller WHOLE_BODY_IK``` + +2. Double click on a mocap body to select a body to drag, then: + +On Linux: `Ctrl` + right click to drag the body's position. `Ctrl` + left click to control the body's orientation. +On Mac: `fn` + `Ctrl` + right click. diff --git a/docs/modules/environments.md b/docs/modules/environments.md new file mode 100644 index 0000000000000000000000000000000000000000..50583c32e94856e2a136b45ad4b27be4c594c2f7 --- /dev/null +++ b/docs/modules/environments.md @@ -0,0 +1,265 @@ +# Environments + +Environments are the main **robosuite** API objects that external code will interact with. 
Each environment corresponds to a robot manipulation task and provides a standard interface for an agent to interact with the environment. While **robosuite** can support environments from different robotic domains, the current release focuses is on manipulation environments. + +Next, we will describe how to create an environment, how to interact with an environment, and how each environment creates a simulated task in the MuJoCo physics engine. We will use the `TwoArmLift` environment as a running example for each section. + +## Making an Environment + +Environments are created by calling `robosuite.make` with the name of the task and with a set of arguments that configure environment properties. We provide a few examples of different use cases below. + +```python +import robosuite +from robosuite.controllers import load_composite_controller_config + +# BASIC controller: arms controlled using OSC, mobile base (if present) using JOINT_VELOCITY, other parts controlled using JOINT_POSITION +controller_config = load_composite_controller_config(controller="BASIC") + +# create an environment to visualize on-screen +env = robosuite.make( + "TwoArmLift", + robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot + gripper_types="default", # use default grippers per robot arm + controller_configs=controller_config, # arms controlled via OSC, other parts via JOINT_POSITION/JOINT_VELOCITY + env_configuration="opposed", # (two-arm envs only) arms face each other + has_renderer=True, # on-screen rendering + render_camera="frontview", # visualize the "frontview" camera + has_offscreen_renderer=False, # no off-screen rendering + control_freq=20, # 20 hz control for applied actions + horizon=200, # each episode terminates after 200 steps + use_object_obs=False, # no observations needed + use_camera_obs=False, # no observations needed +) + +# create an environment for policy learning from low-dimensional observations +env = robosuite.make( + "TwoArmLift", + 
robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot + gripper_types="default", # use default grippers per robot arm + controller_configs=controller_config, # arms controlled via OSC, other parts via JOINT_POSITION/JOINT_VELOCITY + env_configuration="opposed", # (two-arm envs only) arms face each other + has_renderer=False, # no on-screen rendering + has_offscreen_renderer=False, # no off-screen rendering + control_freq=20, # 20 hz control for applied actions + horizon=200, # each episode terminates after 200 steps + use_object_obs=True, # provide object observations to agent + use_camera_obs=False, # don't provide image observations to agent + reward_shaping=True, # use a dense reward signal for learning +) + +# create an environment for policy learning from pixels +env = robosuite.make( + "TwoArmLift", + robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot + gripper_types="default", # use default grippers per robot arm + controller_configs=controller_config, # arms controlled via OSC, other parts via JOINT_POSITION/JOINT_VELOCITY + env_configuration="opposed", # (two-arm envs only) arms face each other + has_renderer=False, # no on-screen rendering + has_offscreen_renderer=True, # off-screen rendering needed for image obs + control_freq=20, # 20 hz control for applied actions + horizon=200, # each episode terminates after 200 steps + use_object_obs=False, # don't provide object observations to agent + use_camera_obs=True, # provide image observations to agent + camera_names="agentview", # use "agentview" camera for observations + camera_heights=84, # image height + camera_widths=84, # image width + reward_shaping=True, # use a dense reward signal for learning +) +``` + +### Modular Design + +We provide a few additional details on a few keyword arguments below to highlight the modular structure of creating **robosuite** environments, and how easy it is to configure different environment features. 
+ +- `robots` : this argument can be used to easily instantiate tasks with different robot arms. For example, we could change the task to use two "Jaco" robots by passing `robots=["Jaco", "Jaco"]`. Once the environment is initialized, these robots (as captured by the [Robot](../simulation/robot.html#robot) class) can be accessed via the `robots` array attribute within the environment, i.e.: `env.robots[i]` for the `ith` robot arm in the environment. +- `gripper_types` : this argument can be used to easily swap out different grippers for each robot arm. For example, suppose we want to swap the default grippers for the arms in the example above. We could just pass `gripper_types=["PandaGripper", "RethinkGripper"]` to achieve this. Note that a single type can also be used to automatically broadcast the same gripper type across all arms. +- `controller_configs` : this argument can be used to easily replace the action space for each robot. For example, if we would like to control the robot using IK instead of OSC, we could use `load_composite_controller_config(controller="WHOLE_BODY_IK")` in the example above. +- `env_configuration` : this argument is mainly used for two-arm tasks to easily configure how the robots are oriented with respect to one another. For example, in the `TwoArmLift` environment, we could pass `env_configuration="parallel"` instead so that the robot arms are located next to each other, instead of opposite each other +- `placement_initializer` : this argument is optional, but can be used to specify a custom `ObjectPositionSampler` to override the default start state distribution for Mujoco objects. Samplers are responsible for sampling a set of valid, non-colliding placements for all of the objects in the scene at the start of each episode (e.g. when `env.reset()` is called). 
+ +## Interacting with an Environment + +### Policy Loop + +```python +# this example assumes an env has already been created, and performs one agent rollout +import numpy as np + +def get_policy_action(obs): + # a trained policy could be used here, but we choose a random action + low, high = env.action_spec + return np.random.uniform(low, high) + +# reset the environment to prepare for a rollout +obs = env.reset() + +done = False +ret = 0. +while not done: + action = get_policy_action(obs) # use observation to decide on an action + obs, reward, done, _ = env.step(action) # play action + ret += reward +print("rollout completed with return {}".format(ret)) +``` + +### Observations + +**robosuite** observations are dictionaries that include key-value pairs per modality. This makes it easy for agents to work with modalities of different shapes (for example, flat proprioception observations, and pixel observations). Note that any observation entry ending with `*-state` represents a concatenation of all individual observations that belong to `*` modality. Below, we list commonly used observation keys. + +- `robot0_proprio-state`, `robot1_proprio-state` : proprioception observations for each robot arm. This includes the arm joint positions (encoded using `sin` and `cos`), arm joint velocities, end effector pose, gripper finger positions, and gripper finger velocities. The shape for this modality is flat (e.g. `(N,)`). +- `object-state` : task-specific object observations. For example, the `TwoArmLift` environment provides the pose of the pot, the position of each handle, and the relative position of each robot gripper with respect to each handle. The shape for this modality is flat (e.g. `(N,)`). +- `{camera_name}_image` : image observations for camera with name `camera_name`. The shape for this modality is `(H, W, 3)` where `H` and `W` are the height and width of the image respectively. By default, the returned image convention is mujoco's native `opengl` ("flipped"). 
This can alternatively be set to `opencv` convention (unflipped) via the `IMAGE_CONVENTION` macro in `macros.py`. +- `{camera_name}_depth` : depth image observations for camera with name `camera_name`. The shape for this modality is `(H, W)` where `H` and `W` are the height and width of the image respectively. By default, the returned image convention is mujoco's native `opengl` ("flipped"). This can alternatively be set to `opencv` convention (unflipped) via the `IMAGE_CONVENTION` macro in `macros.py`. +- `image-state` : (optional) stacked image observations. Note that this is disabled by default, and can be toggled via the `CONCATENATE_IMAGES` macro in `macros.py`. + +### Rewards and Termination + +Each environment implements a reward function in the `reward` method of each environment class. The reward can either be a binary success or failure reward (nonzero if the current state is a task completion state) or a dense, shaped reward that is crafted to be (mostly) non-negative and non-decreasing along trajectories that solve the task. The reward function that is used is determined by the `reward_shaping` argument. The binary success check that is used to compute the sparse reward is implemented in the `_check_success` method of each environment class. + +Importantly, **robosuite** environments do not terminate if a success criterion is reached, but always continue for a fixed number of timesteps, determined by the `horizon` argument. This is a standard design decision for reinforcement learning in robot manipulation domains. + +We provide an example via the reward function and success criteria for `TwoArmLift` below. Note that for simplicity, we provide function aliases instead of actual implementation details so that the logic remains easy to follow: + +For the success criteria, we simply want to check if the pot is successfully lifted above a certain height threshold over the table, and return `True` or `False` accordingly. 
+ +```python +def _check_success(self): + pot_height = get_pot_height() + table_height = get_table_height() + return pot_height > table_height + 0.10 +``` + +The reward function is a bit more involved. First, we initialize our reward variable to 0 and grab relevant sensory data from the environment, checking to see if the pot is tilted or not. +```python +def reward(self, action=None): + reward = 0 + pot_tilt = get_pot_tilt() + + # check if the pot is tilted more than 30 degrees + cos_30 = np.cos(np.pi / 6) + direction_coef = 1 if pot_tilt >= cos_30 else 0 +``` + +Next, we first check to see if we have completed the task (the pot being lifted above the table and not overly tilted), and if so, apply the un-normalized reward. +```python + if self._check_success(): + reward = 3.0 * direction_coef +``` + +Otherwise, we'll only provide partial rewards if we're using reward shaping, and calculate the appropriate reward. +```python + elif self.reward_shaping: + + # lifting reward (smooth value between [0, 1.5]) + pot_height = get_pot_height() + r_lift = min(max(pot_height - 0.05, 0), 0.15) + reward += 10. * direction_coef * r_lift + + # reaching reward (smooth value between [0, 1]) + left_hand_handle_distance = get_left_distance() + right_hand_handle_distance = get_right_distance() + reward += 0.5 * (1 - np.tanh(10.0 * left_hand_handle_distance)) + reward += 0.5 * (1 - np.tanh(10.0 * right_hand_handle_distance)) + + # grasping reward (discrete values between [0, 0.5]) + left_hand_handle_contact = is_left_contact() + right_hand_handle_contact = is_right_contact() + if left_hand_handle_contact: + reward += 0.25 + if right_hand_handle_contact: + reward += 0.5 +``` + +Lastly, we need to normalize our reward and then re-scale its value to `reward_scale` if it is specified before finally returning the calculated reward. 
+```python + if self.reward_scale is not None: + reward *= self.reward_scale / 3.0 + + return reward +``` + +## Task Models + +Every environment owns its own `MJCF` model that sets up the MuJoCo physics simulation by loading the robots, the workspace, and the objects into the simulator appropriately. This MuJoCo simulation model is programmatically instantiated in the `_load_model` function of each environment, by creating an instance of the `Task` class. + +Each `Task` class instance owns an `Arena` model, a list of `RobotModel` instances, and a list of `ObjectModel` instances. These are **robosuite** classes that introduce a useful abstraction in order to make designing scenes in MuJoCo easy. Every `Arena` is based off of an xml that defines the workspace (for example, table or bins) and camera locations. Every `RobotModel` is a MuJoCo model representing an arbitrary robot (for `ManipulationModel`s, these represent armed robots, e.g. Sawyer, Panda, etc.). Every `ObjectModel` corresponds to a physical object loaded into the simulation (e.g. cube, pot with handles, etc.). + +## Task Descriptions + +While **robosuite** can support environments from different robotic domains, the current release focuses on manipulation environments (`ManipulationEnv`). We provide a brief description of each environment below. For benchmarking results on these standardized environments, please check out the [Benchmarking](../algorithms/benchmarking) page. + +### Single-Arm Tasks + +#### Block Lifting + +![env_lift](../images/env_lift.png) + +- **Scene Description**: A cube is placed on the tabletop in front of a single robot arm. +- **Goal**: The robot arm must lift the cube above a certain height. +- **Start State Distribution**: The cube location is randomized at the beginning of each episode. + +#### Block Stacking + +![env_stack](../images/env_stack.png) + +- **Scene Description**: Two cubes are placed on the tabletop in front of a single robot arm. 
+- **Goal**: The robot must place one cube on top of the other cube. +- **Start State Distribution**: The cube locations are randomized at the beginning of each episode. + +#### Pick-and-Place + +![env_pick_place](../images/env_pick_place.png) + +- **Scene Description**: Four objects are placed in a bin in front of a single robot arm. There are four containers next to the bin. +- **Goal**: The robot must place each object into its corresponding container. This task also has easier single-object variants. +- **Start State Distribution**: The object locations are randomized at the beginning of each episode. + +#### Nut Assembly + +![env_nut_assembly](../images/env_nut_assembly.png) + +- **Scene Description**: Two colored pegs (one square and one round) are mounted on the tabletop, and two colored nuts (one square and one round) are placed on the table in front of a single robot arm. +- **Goal**: The robot must fit the square nut onto the square peg and the round nut onto the round peg. This task also has easier single nut-and-peg variants. +- **Start State Distribution**: The nut locations are randomized at the beginning of each episode. + +#### Door Opening + +![env_door](../images/env_door.png) + +- **Scene Description**: A door with a handle is mounted in free space in front of a single robot arm. +- **Goal**: The robot arm must learn to turn the handle and open the door. +- **Start State Distribution**: The door location is randomized at the beginning of each episode. + +#### Table Wiping + +![env_wipe](../images/env_wipe.png) + +- **Scene Description**: A table with a whiteboard surface and some markings is placed in front of a single robot arm, which has a whiteboard eraser mounted on its hand. +- **Goal**: The robot arm must learn to wipe the whiteboard surface and clean all of the markings. +- **Start State Distribution**: The whiteboard markings are randomized at the beginning of each episode. 
+ +### Two-Arm Tasks + +#### Two Arm Lifting + +![env_two_arm_lift](../images/env_two_arm_lift.png) + +- **Scene Description**: A large pot with two handles is placed on a table top. Two robot arms are placed on the same side of the table or on opposite ends of the table. +- **Goal**: The two robot arms must each grab a handle and lift the pot together, above a certain height, while keeping the pot level. +- **Start State Distribution**: The pot location is randomized at the beginning of each episode. + +#### Two Arm Peg-In-Hole + +![env_two_arm_peg_in_hole](../images/env_two_arm_peg_in_hole.png) + +- **Scene Description**: Two robot arms are placed either next to each other or opposite each other. One robot arm holds a board with a square hole in the center, and the other robot arm holds a long peg. +- **Goal**: The two robot arms must coordinate to insert the peg into the hole. +- **Start State Distribution**: The initial arm configurations are randomized at the beginning of each episode. + +#### Two Arm Handover + +![env_two_arm_handover](../images/env_two_arm_handover.png) + +- **Scene Description**: A hammer is placed on a narrow table. Two robot arms are placed on the same side of the table or on opposite ends of the table. +- **Goal**: The two robot arms must coordinate so that the arm closer to the hammer picks it up and hands it to the other arm. +- **Start State Distribution**: The hammer location and size is randomized at the beginning of each episode. diff --git a/docs/modules/objects.md b/docs/modules/objects.md new file mode 100644 index 0000000000000000000000000000000000000000..6bd907acb23f05d8441a609818bc33315259c714 --- /dev/null +++ b/docs/modules/objects.md @@ -0,0 +1,158 @@ +# Objects + +## How to create a custom object +Objects, such as boxes and cans, are essential to building manipulation environments. 
We designed the [MujocoObject](../source/robosuite.models.objects.html#robosuite.models.objects.objects.MujocoObject) interfaces to standardize and simplify the procedure for importing 3D models into the scene or procedurally generating new objects. MuJoCo defines models via the [MJCF](http://www.mujoco.org/book/modeling.html) XML format. These MJCF files can either be stored as XML files on disk and loaded into the simulator, or be created on-the-fly by code prior to simulation. Based on these two mechanisms of how MJCF models are created, we offer two main ways of creating your own object: + +* Define an object in an MJCF XML file; +* Use procedural generation APIs to dynamically create an MJCF model. + +## The MujocoObject class +```python +class MujocoObject(MujocoModel): + def __init__(...): + + ... + + # Attributes that should be filled in within the subclass + self._name = None + self._obj = None + + # Attributes that are auto-filled by _get_object_properties call + self._root_body = None + self._bodies = None + self._joints = None + self._actuators = None + self._sites = None + self._contact_geoms = None + self._visual_geoms = None +``` +`MujocoObject` is the base class of all objects. One must note that it is not a subclass of `MujocoXML`, but does extend from the unifying `MujocoModel` class from which all simulation models (including robots, grippers, etc.) should extend. All of the attributes shown above prepended with a `_` are intended to be private variables and not accessed by external objects. Instead, any of these properties can be accessed via its public version, without the `_` (e.g.: to access all the object's joints, call `obj.joints` instead of `obj._joints`). This is because all public attributes are automatically post-processed from their private counterparts and have naming prefixes appended to it. 
+ +The XML of an object is generated once during initialization via the `_get_object_subtree` call, after which any external object can extract a reference to this XML via the `get_obj` call. +```python + def _get_object_subtree(self): + pass + + def get_obj(self): + pass +``` + +Additionally, objects are usually placed relatively. For example, we want to put an object on a table or place a cube on top of another. Instance methods `get_bottom_offset`, `get_top_offset`, `get_horizontal_radius` provide the necessary information to place objects properly. +```python + def get_bottom_offset(self): + pass + + def get_top_offset(self): + pass + + def get_horizontal_radius(self): + pass +``` +This allows us to do things like the following. +```python +table_top = np.array([0, 1, 0]) +bottom_offset = obj.get_bottom_offset() +pos = table_top - bottom_offset # pos + bottom_offset = table_top +obj_xml = obj.get_obj().set("pos", array_to_string(pos)) # Set the top-level body of this object +``` + +## Creating a XMLObject +One can use MuJoCo MJCF XML to generate an object, either as a composition of primitive [geoms](http://mujoco.org/book/modeling.html#geom) or imported from STL files of triangulated [meshes](http://www.mujoco.org/book/modeling.html#mesh). An example is `robosuite.models.objects.xml_objects.BreadObject`. Its [python definition](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/models/objects/xml_objects.py#L49) is short. Note that all `MujocoXMLObject` classes require both a `fname` and `name` argument, the former which specifies the filepath to the raw XML file and the latter which specifies the in-sim name of the object instantiated. The optional `joints` argument can also specify a custom set of joints to apply to the given object (defaults to "default", which is a single free joint). This joint argument determines the DOF of the object as a whole and does not interfere with the joints already in the object. 
Additionally, the type of object created can be specified via the `obj_type` argument, and must be one of (`'collision'`, `'visual'`, or `'all'`). Lastly, setting `duplicate_collision_geoms` makes sure that all collision geoms automatically have an associated visual geom as well. Generally, the normal use case is to define a single class corresponding to a specific XML file, as shown below: +```python +class BreadObject(MujocoXMLObject): + def __init__(self, name): + super().__init__(xml_path_completion("objects/bread.xml"), + name=name, joints=[dict(type="free", damping="0.0005")], + obj_type="all", duplicate_collision_geoms=True) +``` + +In the corresponding XML file, a few key definitions must be present. The top-level, un-named body must contain as immediate children tags (a) the actual object bodie(s) (the top-level **must** be named `object`) and (b) three site tags named `bottom_site`, `top_site`, and `horizontal_radius_site` and whose `pos` values must be specified. The example for the `BreadObject`, [bread.xml](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/models/assets/objects/bread.xml), is shown below: +```xml + + + + + + + + + + + + + + + + + +``` +Concretely, +* `_get_object_subtree` looks for the object bodie(s) as defined by all nested bodie(s) beginning with the `object`-named body tag. +* `bottom_site` should be the bottom of the object, i.e. contact point with the surface it is placed on. +* `top_site` should be the top of the object, i.e. contact point if something is placed on it. +* `horizontal_radius_site` can be any point on a circle in the x-y plane that does not intersect the object. This allows us to place multiple objects without having them collide into one another. 
+ +## Creating a procedurally generated object +Procedurally generated objects have been used in [several](https://arxiv.org/abs/1802.09564) [recent](https://arxiv.org/abs/1806.09266) [works](https://arxiv.org/abs/1709.07857) to train control policies with improved robustness and generalization. Here you can programmatically generate an MJCF XML of an object from scratch using `xml.etree.ElementTree`, and compose an object of multiple geom primitives. The base class for this type of object is `MujocoGeneratedObject`. +**robosuite** natively supports all Mujoco primitive objects with procedurally-generated `PrimitiveObject` classes (`BoxObject`, `BallObject`, `CapsuleObject`, and `CylinderObject`). + +Additionally, **robosuite** supports custom, complex objects that can be defined by collections of primitive geoms (the [CompositeObject](../source/robosuite.models.objects.html#robosuite.models.objects.generated_objects.CompositeObject) class) or even other objects (the [CompositeBodyObject](../source/robosuite.models.objects.html#robosuite.models.objects.generated_objects.CompositeBodyObject) class). The APIs for each of these classes have been standardized for ease of usage, and interested readers should consult the docstrings for each of these classes, as well as provided examples of each class ([HammerObject](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/models/objects/composite/hammer.py#L10), [HingedBoxObject](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/models/objects/composite_body/hinged_box.py#L8)). + +It should also be noted that all of the above classes extending from the `MujocoGeneratedObject` class automatically support custom texture definitions on a per-geom level, where specific texture images can be mapped to individual geoms. The above `HammerObject` showcases an example applying custom textures to different geoms of the resulting object. 
+ +## Placing Objects + +Object locations are initialized on every environment reset using instances of the [ObjectPositionSampler](../source/robosuite.utils.html#robosuite.utils.placement_samplers.ObjectPositionSampler) class. Object samplers use the `bottom_site` and `top_site` sites of each object in order to place objects on top of other objects, and the `horizontal_radius_site` site in order to ensure that objects do not collide with one another. The most basic sampler is the [UniformRandomSampler](../source/robosuite.utils.html#robosuite.utils.placement_samplers.UniformRandomSampler) class - this just uses rejection sampling to place objects randomly. As an example, consider the following code snippet from the `__init__` method of the `Lift` environment class. + +```python +self.placement_initializer = UniformRandomSampler( + name="ObjectSampler", + mujoco_objects=self.cube, + x_range=[-0.03, 0.03], + y_range=[-0.03, 0.03], + rotation_axis='z', + rotation=None, + ensure_object_boundary_in_range=False, + ensure_valid_placement=True, + reference_pos=self.table_offset, + z_offset=0.01, +) +``` + +This will sample the `self.cube`'s object location uniformly at random in a box of size `0.03` (`x_range`, `y_range`) with random (`rotation`) z-rotation (`rotation_axis`), and with an offset of `0.01` (`z_offset`) above the table surface location (`reference_pos`). The sampler will also make sure that the entire object boundary falls within the sampling box size (`ensure_object_boundary_in_range`) and does not collide with any placed objects (`ensure_valid_placement`). + +Another common sampler is the [SequentialCompositeSampler](../source/robosuite.utils.html#robosuite.utils.placement_samplers.SequentialCompositeSampler), which is useful for composing multiple arbitrary placement samplers together. As an example, consider the following code snippet from the `__init__` method of the `NutAssembly` environment class. 
+ +```python +# Establish named references to each nut object +nut_names = ("SquareNut", "RoundNut") + +# Initialize the top-level sampler +self.placement_initializer = SequentialCompositeSampler(name="ObjectSampler") + +# Create individual samplers per nut +for nut_name, default_y_range in zip(nut_names, ([0.11, 0.225], [-0.225, -0.11])): + self.placement_initializer.append_sampler( + sampler=UniformRandomSampler( + name=f"{nut_name}Sampler", + x_range=[-0.115, -0.11], + y_range=default_y_range, + rotation=None, + rotation_axis='z', + ensure_object_boundary_in_range=False, + ensure_valid_placement=True, + reference_pos=self.table_offset, + z_offset=0.02, + ) + ) + +# No objects have been assigned to any samplers yet, so we do that now +for i, (nut_cls, nut_name) in enumerate(zip( + (SquareNutObject, RoundNutObject), + nut_names, +)): + nut = nut_cls(name=nut_name) + self.placement_initializer.add_objects_to_sampler(sampler_name=f"{nut_name}Sampler", mujoco_objects=nut) +``` + +The code snippet above results in two `UniformRandomSampler` instances being used to place the nuts onto the table surface - one for each type of nut. Notice this also allows the nuts to be initialized in separate regions of the table, and with arbitrary sampling settings. The `SequentialCompositeSampler` makes it easy to compose multiple placement initializers together and assign objects to each sub-sampler in a modular way. 
\ No newline at end of file diff --git a/docs/modules/overview.md b/docs/modules/overview.md new file mode 100644 index 0000000000000000000000000000000000000000..08dfadd74a807e05fecebfebf45292c5f1cff0a6 --- /dev/null +++ b/docs/modules/overview.md @@ -0,0 +1,10 @@ +# Overview + +![module_overview_diagram](../images/module_overview.png) + +Our framework offers two main categories of APIs: 1) **Modeling APIs** for defining simulation environments in a modular and programmatic fashion, and 2) **Simulation APIs** for interfacing with external inputs such as from a **Policy** or an **I/O Device**. A **Simulation Model** specified by the Modeling APIs is instantiated by the **MuJoCo Engine** to create a simulation runtime, called **Environment**. The Environment generates observations through the **Sensors**, such as cameras and proprioception, and receives action commands from policies or devices through the **Controllers** of the **Robots**. The diagram above illustrates the key components in our framework and their relationships. + +A simulation model is defined by a [Task](../modeling/task) object, which encapsulates three essential constituents of robotic simulation: [Robot Model](../modeling/robot_model)s, [Object Model](../modeling/object_model)s, and [Arena](../modeling/arena). A task may contain one or more robots, zero to many objects, and a single arena. The Robot Model loads models of robots and optionally other models as well; for example, the [Manipulator](../modeling/robot_model.html#manipulator-model) robot model class also loads a corresponding [Gripper Model](../modeling/robot_model.html#gripper-model)s from XML files. The Object Model can be either loaded from 3D object assets or procedurally generated with programmatic APIs. The Arena defines the workspace of the robot, including the environment fixtures, such as a tabletop, and their placements. 
The task class combines these constituents into a single XML object in MuJoCo's [MJCF modeling language](http://www.mujoco.org/book/XMLreference.html). This MJCF object is passed to the MuJoCo engine through the [mujoco](https://mujoco.readthedocs.io/en/latest/python.html) library to instantiate the [MjModel](https://mujoco.readthedocs.io/en/latest/APIreference.html?highlight=MjModel#mjmodel) object for simulation runtime. + +The [Environment](environments) object provides [OpenAI Gym](https://gym.openai.com/)-style APIs for external inputs to interface with the simulation. External inputs correspond to the action commands used to control the [Robots](robots) and any actuators it owns (for example, in the case of a manipulator robot the arm joints and gripper), where the kinematic component of the action spaces are specific to the [Controllers](controllers) used by the robots. For instance, for joint-space controllers of a robot manipulator arm, the action space corresponds to the number of joints of the robot, and for operational space controllers, the action space corresponds to 3D Cartesian movement or 6D pose of the end-effector. These action commands can either be automatically generated by an algorithm (such as a deep neural network policy) or come from [I/O devices](devices) for human teleoperation (such as the keyboard). The controllers of the robots are responsible for interpreting these action commands and transforming them into the low-level torques passing to the underlying physics engine, which performs internal computations to determine the next state of the simulation. [Sensors](./sensors) retrieve information from the MjSim object and generate observations as the physical signals that the robots receive as response to their actions. Our framework supports multimodal sensing modalities, such as RGB-D cameras, force-torque measurements, and proprioceptive data, and also provides modular APIs to model realistic sensor dynamics. 
In addition to these sensory data, environments also provide additional information about the task progress and success conditions, including reward functions (for reinforcement learning) and other meta-data. For more information, please check out the descriptions of the individual components in this section. + diff --git a/docs/modules/renderers.md b/docs/modules/renderers.md new file mode 100644 index 0000000000000000000000000000000000000000..85aea0eab820e150aef38ea9d01a2621df6230cb --- /dev/null +++ b/docs/modules/renderers.md @@ -0,0 +1,74 @@ +# Renderers + +[Renderers](../source/robosuite.renderers) are used to visualize the simulation and can be used either in on-screen mode or headless (off-screen) mode. Renderers are also responsible for generating image-based observations that are returned from a given environment, and compute virtual images of the environment based on the properties defined in the cameras. + +Currently, the following ground-truth vision modalities are supported by the MuJoCo renderer: + +- **RGB**: Standard 3-channel color frames with values in range `[0, 255]`. This is set during environment construction with the `use_camera_obs` argument. +- **Depth**: 1-channel frame with normalized values in range `[0, 1]`. This is set during environment construction with the `camera_depths` argument. +- **Segmentation**: 1-channel frames with pixel values corresponding to integer IDs for various objects. Segmentation can + occur by class, instance, or geom, and is set during environment construction with the `camera_segmentations` argument. + +**robosuite** presents the following rendering options: + + + +## MuJoCo Default Renderer + +MuJoCo exposes users to an OpenGL context supported by [mujoco](https://mujoco.readthedocs.io/en/latest/python.html#rendering). Based on [OpenGL](https://www.opengl.org/), our assets and environment definitions have been tuned to look good with this renderer. 
+ The rendered frames can be displayed in a window with [OpenCV's imshow](https://pythonexamples.org/python-opencv-imshow/). + +![MuJoCo rendering](../images/gr1_cereal_mujoco.png "MuJoCo Default Renderer") + +## Isaac Rendering + +Users are also able to render using photorealistic methods through Isaac Sim. Specifically, users are able to choose between two rendering modes: ray tracing and path tracing. For more information about Isaac Sim rendering options, please visit [here](https://docs.omniverse.nvidia.com/materials-and-rendering/latest/rtx-renderer.html). Isaac renderers are only available to those who are running on a Linux or Windows machine. + +To install Isaac on your local system, please follow the instructions listed [here](https://isaac-sim.github.io/IsaacLab/main/source/setup/installation/pip_installation.html). Make sure to follow instructions to install both Isaac Sim and Isaac Lab. + +### Ray tracing +![Ray tracing](../images/gr1_cereal_ray_tracing.png "Ray tracing") + +Ray tracing can be performed in real time. We are currently working on enhancing the rendering pipeline to support an online viewer with ray tracing capabilities. + +### Path tracing +![Path tracing](../images/gr1_cereal_path_tracing.png "Path tracing") + +Path tracing typically offers higher quality and is ideal for offline learning. If you have the time to collect data and plan to train algorithms using offline data, we recommend using path tracing for its photorealistic results. + +### Basic usage + +Once all dependencies for Isaac rendering have been installed, users can run the `robosuite/scripts/render_dataset_with_omniverse.py` to render previously collected demonstrations using either ray tracing or path tracing. Below we highlight the arguments that can be passed into the script. + +- **dataset**: Path to hdf5 dataset with the demonstrations to render. 
+- **ds_format**: Dataset format (options include `robosuite` and `robomimic` depending on if the dataset was collected using robosuite or robomimic, respectively). +- **episode**: Episode/demonstration to render. If no episode is provided, all demonstrations will be rendered. +- **output_directory**: Directory to store outputs from Isaac rendering and USD generation. +- **cameras**: List of cameras to render images. Cameras must be defined in robosuite. +- **width**: Width of the rendered output. +- **height**: Height of the rendered output. +- **renderer**: Renderer mode to use (options include `RayTracedLighting` or `PathTracing`). +- **save_video**: Whether to save the outputs renderings as a video. +- **online**: Enables online rendering and will not save the USD for future rendering offline. +- **skip_frames**: Renders every nth frame. +- **hide_sites**: Hides all sites in the scene. +- **reload_model**: Reloads the model from the Mujoco XML file. +- **keep_models**: List of names of models to keep from the original Mujoco XML file. +- **rgb**: Render with the RGB modality. If no other modality is selected, we default to rendering with RGB. +- **normals**: Render with normals. +- **semantic_segmentation**: Render with semantic segmentation. + +Here is an example command to render an video of a demonstration using ray tracing with the RGB and normal modality. + +```bash +$ python robosuite/scripts/render_dataset_with_omniverse.py --dataset /home/abhishek/Documents/research/rpl/robosuite/robosuite/models/assets/demonstrations_private/1734107564_9898326/demo.hdf5 --ds_format robosuite --episode 1 --camera agentview frontview --width 1920 --height 1080 --renderer RayTracedLighting --save_video --hide_sites --rgb --normals +``` + +### Rendering Speed + +Below, we present a table showing the estimated frames per second when using these renderers. Note that the exact speed of rendering might depend on your machine and scene size. 
Larger scenes may take longer to render. Additionally, changing renderer inputs such as samples per pixel (spp) or max bounces might affect rendering speeds. The values below are estimates using the `Lift` task with an NVIDIA GeForce RTX 4090. We use an spp of 64 when rendering with path tracing. + +| Renderer | Estimated FPS | +|----------------|---------------| +| MuJoCo | 3500 | +| Ray Tracing | 58 | +| Path Tracing | 2.8 | diff --git a/docs/modules/robots.rst b/docs/modules/robots.rst new file mode 100644 index 0000000000000000000000000000000000000000..70a20f5c663d10d216696d62d881840aa6e9261a --- /dev/null +++ b/docs/modules/robots.rst @@ -0,0 +1,210 @@ +Robots +======= + +.. figure:: ../images/robots_module_v15.png + +**Robots** are a key component in **robosuite**, and serve as the embodiment of a given agent as well as the central interaction point within an environment and key interface to MuJoCo for the robot-related state and control. **robosuite** captures this level of abstraction with the `Robot <../simulation/robot>`_-based classes, with support for both single-armed and bimanual variations, as well as robots with mobile manipulation capabilities, including both legged and wheeled variants. In turn, the Robot class is centrally defined by a `RobotModel <../modeling/robot_model>`_, `RobotBaseModel <../modeling/robot_model.html#robot-base-model>`_, `GripperModel <../modeling/robot_model.html#gripper-model>`_, and `Controller(s) <../simulation/controller>`_. Subclasses of the ``RobotModel`` class may also include additional models as well; for example, the `ManipulatorModel <../modeling/robot_model.html#manipulator-model>`_ class also includes `GripperModel(s) <../modeling/robot_model.html#gripper-model>`_ (with no gripper being represented by a dummy class). 
+ +The high-level features of **robosuite**'s robots are described as follows: + +* **Diverse and Realistic Models**: **robosuite** provides models for 10 commercially-available robots (including the humanoid GR1 Robot), 9 grippers (including the inspire dexterous hand model), 4 bases (including the Omron wheeled mobile base), and 6 body-part controllers, with model properties either taken directly from official product documentation or raw spec sheets. An additional 8 robots, 8 grippers, and 3 bases can be installed separately from the `robosuite-models `_ repository. + +* **Modularized Support**: Robots are designed to be plug-n-play -- any combinations of robots, models, and controllers can be used, assuming the given environment is intended for the desired robot configuration. Because each robot is assigned a unique ID number, multiple instances of identical robots can be instantiated within the simulation without error. + +* **Self-Enclosed Abstraction**: For a given task and environment, any information relevant to the specific robot instance can be found within the properties and methods within that instance. This means that each robot is responsible for directly setting its initial state within the simulation at the start of each episode, and also directly controls the robot in simulation via torques outputted by its controller's transformed actions. + +Usage +===== +Below, we discuss the usage and functionality of the robots over the course of its program lifetime. + +Initialization +-------------- +During environment creation (``suite.make(...)``), individual robots are both instantiated and initialized. The desired RobotModel, RobotBaseModel, and Controller(s) (where multiple and/or additional models may be specified, e.g. for manipulator bimanual robots) are loaded into each robot, with the models being passed into the environment to compose the final MuJoCo simulation object. Each robot is then set to its initial state. 
+ +Runtime +------- +During a given simulation episode (each ``env.step(...)`` call), the environment will receive a set of actions and distribute them accordingly to each robot, according to their respective action spaces. Each robot then converts these actions into low-level torques via their respective controllers, and directly executes these torques in the simulation. At the conclusion of the environment step, each robot will pass its set of robot-specific observations to the environment, which will then concatenate and append additional task-level observations before passing them as output from the ``env.step(...)`` call. + +Callables +--------- +At any given time, each robot has a set of ``properties`` whose real-time values can be accessed at any time. These include specifications for a given robot, such as its DoF, action dimension, and torque limits, as well as proprioceptive values, such as its joint positions and velocities. Additionally, if the robot is enabled with any sensors, those readings can also be polled as well. A full list of robot properties can be found in the `Robots API <../simulation/robot.html>`_ section. + +Models +====== +**robosuite** is designed to be generalizable to multiple robotic domains. The current release focuses on manipulator robots. For adding new robots, we provide a `rudimentary guide `_ on how to import raw Robot and Gripper models (based on a URDF source file) into robosuite. + +Manipulators +------------ + +.. list-table:: + :widths: 15 50 35 + :header-rows: 1 + + * - Robot + - Image + - Description + * - **Panda** + - .. image:: ../images/models/robot_model_Panda_isaac.png + :width: 90% + :align: center + - - **DoF:** 7 + - **Default Gripper:** PandaGripper + - **Default Base:** RethinkMount + * - **Sawyer** + - .. image:: ../images/models/robot_model_Sawyer_isaac.png + :width: 90% + :align: center + - - **DoF:** 7 + - **Default Gripper:** RethinkGripper + - **Default Base:** RethinkMount + * - **IIWA** + - .. 
image:: ../images/models/robot_model_IIWA_isaac.png + :width: 90% + :align: center + - - **DoF:** 7 + - **Default Gripper:** Robotiq140Gripper + - **Default Base:** RethinkMount + * - **Jaco** + - .. image:: ../images/models/robot_model_Jaco_isaac.png + :width: 90% + :align: center + - - **DoF:** 7 + - **Default Gripper:** JacoThreeFingerGripper + - **Default Base:** RethinkMount + * - **Kinova3** + - .. image:: ../images/models/robot_model_Kinova3_isaac.png + :width: 90% + :align: center + - - **DoF:** 7 + - **Default Gripper:** Robotiq85Gripper + - **Default Base:** RethinkMount + * - **UR5e** + - .. image:: ../images/models/robot_model_UR5e_isaac.png + :width: 90% + :align: center + - - **DoF:** 6 + - **Default Gripper:** Robotiq85Gripper + - **Default Base:** RethinkMount + * - **Baxter** + - .. image:: ../images/models/robot_model_Baxter_isaac.png + :width: 90% + :align: center + - - **DoF:** 14 + - **Default Gripper:** RethinkGripper + - **Default Base:** RethinkMount + * - **GR1** + - .. image:: ../images/models/robot_model_GR1_isaac.png + :width: 90% + :align: center + - - **DoF:** 24 + - **Default Gripper:** InspireHands + - **Default Base:** NoActuationBase + - **Variants**: GR1FixedLowerBody, GR1FloatingBody, GR1ArmsOnly + * - **Spot** + - .. image:: ../images/models/robot_model_Spot_isaac.png + :width: 90% + :align: center + - - **DoF:** 19 + - **Default Gripper:** BDGripper + - **Default Base:** Spot + - **Variants**: SpotWithArmFloating + * - **Tiago** + - .. image:: ../images/models/robot_model_Tiago_isaac.png + :width: 90% + :align: center + - - **DoF:** 20 + - **Default Gripper:** Robotiq85Gripper + - **Default Base:** NullMobileBase + +Grippers +-------- + +.. list-table:: + :widths: 20 45 35 + :header-rows: 1 + + * - Gripper + - Image + - Description + * - **BD Gripper** + - .. image:: ../images/models/bd_gripper.png + :width: 90% + :align: center + - - **DoF:** 1 + * - **Inspire Hands** + - .. 
image:: ../images/models/inspire_hands.png + :width: 90% + :align: center + - - **DoF:** 6 + * - **Jaco Three Finger Gripper** + - .. image:: ../images/models/jaco_gripper.png + :width: 90% + :align: center + - - **DoF:** 1 (3 for dexterous version) + * - **Panda Gripper** + - .. image:: ../images/models/panda_gripper.png + :width: 90% + :align: center + - - **DoF:** 1 + * - **Rethink Gripper** + - .. image:: ../images/models/rethink_gripper.png + :width: 90% + :align: center + - - **DoF:** 1 + * - **Robotiq 85 Gripper** + - .. image:: ../images/models/robotiq85_gripper.png + :width: 90% + :align: center + - - **DoF:** 1 + * - **Robotiq 140 Gripper** + - .. image:: ../images/models/robotiq140_gripper.png + :width: 90% + :align: center + - - **DoF:** 1 + * - **Robotiq Three Finger Gripper** + - .. image:: ../images/models/robotiq_three_gripper.png + :width: 90% + :align: center + - - **DoF:** 1 + * - **Wiping Gripper** + - .. image:: ../images/models/wiping_gripper.png + :width: 90% + :align: center + - - **DoF:** 0 + +Bases +----- + +.. list-table:: + :widths: 20 45 35 + :header-rows: 1 + + * - Base + - Image + - Description + * - **Rethink Mount** + - .. image:: ../images/models/rethink_base.png + :width: 90% + :align: center + - - **Type:** Fixed + * - **Rethink Minimal Mount** + - .. image:: ../images/models/rethink_minimal_base.png + :width: 90% + :align: center + - - **Type:** Fixed + * - **Omron Mobile Base** + - .. image:: ../images/models/omron_base.png + :width: 90% + :align: center + - - **Type:** Mobile + * - **Spot Base** + - .. image:: ../images/models/spot_base.png + :width: 90% + :align: center + - - **Type:** Legged + +Create Your Own Robot +---------------------- + +As of v1.5, users can create composite robots to match their specification. Specifically, arms, grippers, and bases can be swapped to create new robot configurations. We also provide several other robot models in an external repo. For more information, please refer to the `robosuite_models <https://github.com/ARISE-Initiative/robosuite_models>`_ repository.
+ diff --git a/docs/modules/sensors.md b/docs/modules/sensors.md new file mode 100644 index 0000000000000000000000000000000000000000..4c5bf7e3a5b9eb0637fddc3434adc8533b3f3ab4 --- /dev/null +++ b/docs/modules/sensors.md @@ -0,0 +1,33 @@ +# Sensors + +Sensors are an important aspect of **robosuite**, and encompass an agent's feedback from interaction with the environment. Mujoco provides low-level APIs to directly interface with raw simulation data, though we provide a more realistic interface via the `Observable` class API to model obtained sensory information. + +#### Mujoco-Native Sensors + +The simulator generates virtual physical signals in response to a robot's interactions. Virtual signals include images, force-torque measurements (from a force-torque sensor like the one included by default in the wrist of all [Gripper models](../modeling/robot_model.html#gripper-model)), pressure signals (e.g. from a sensor on the robot's finger or on the environment), etc. Raw sensor information (except cameras and joint sensors) can be accessed via the function `get_sensor_measurement` provided the name of the sensor. + +Joint sensors provide information about the state of each robot's joint including position and velocity. In MuJoCo these are not measured by sensors, but resolved and set by the simulator as the result of the actuation forces. Therefore, they are not accessed through the common `get_sensor_measurement` function but as properties of the [Robot simulation API](../simulation/robot), i.e., `_joint_positions` and `_joint_velocities`. + +Cameras bundle a name to a set of properties to render images of the environment such as the pose and pointing direction, field of view, and resolution. Inheriting from MuJoCo, cameras are defined in the [robot](../modeling/robot_model) and [arena models](../modeling/arena) and can be attached to any body.
Images, as they would be generated from the cameras, are not accessed through `get_sensor_measurement` but via the renderer (see below). In a common user pipeline, images are not queried directly; we specify one or several cameras we want to use images from when we create the environment, and the images are generated and appended automatically to the observation dictionary. + +#### Observables + +**robosuite** provides a realistic, customizable interface via the [Observable](../source/robosuite.utils.html#robosuite.utils.observables.Observable) class API. Observables model realistic sensor sampling, in which ground truth data is sampled (`sensor`), passed through a corrupting function (`corrupter`), and then finally passed through a filtering function (`filter`). Moreover, each observable has its own `sampling_rate` and `delayer` function which simulates sensor delay. While default values are used to instantiate each observable during environment creation, each of these components can be modified by the user at runtime using `env.modify_observable(...)`. Additionally, each observable is assigned a modality, and observables are grouped together in the returned observation dictionary during the `env.step()` call.
For example, if an environment consists of camera observations (RGB, depth, and instance segmentation) and a single robot's proprioceptive observations, the observation dict structure might look as follows: + +```python +{ + "frontview_image": np.array(...), # this has modality "image" + "frontview_depth": np.array(...), # this has modality "image" + "frontview_segmentation_instance": np.array(...), # this has modality "image" + "robot0_joint_pos": np.array(...), # this has modality "robot0_proprio" + "robot0_gripper_pos": np.array(...), # this has modality "robot0_proprio" + "image-state": np.array(...), # this is a concatenation of all image observations + "robot0_proprio-state": np.array(...), # this is a concatenation of all robot0_proprio observations +} +``` + +For more information on the vision ground-truth sensors supported, please see the [Renderer](./renderers) section. + +Note that for memory efficiency the `image-state` is not returned by default (this can be toggled in `robosuite/macros.py`). + +Observables can also be used to model sensor corruption and delay; we refer the reader to the [Sensor Randomization](../algorithms/sim2real.html#sensors) section for additional information. diff --git a/docs/source/robosuite.controllers.interpolators.rst b/docs/source/robosuite.controllers.interpolators.rst new file mode 100644 index 0000000000000000000000000000000000000000..0c8afe8ace6db5c22d71c2edf9092e5beb80daf6 --- /dev/null +++ b/docs/source/robosuite.controllers.interpolators.rst @@ -0,0 +1,29 @@ +robosuite.controllers.interpolators package +=========================================== + +Submodules +---------- + +robosuite.controllers.interpolators.base\_interpolator module +------------------------------------------------------------- + +..
automodule:: robosuite.controllers.interpolators.base_interpolator + :members: + :undoc-members: + :show-inheritance: + +robosuite.controllers.interpolators.linear\_interpolator module +--------------------------------------------------------------- + +.. automodule:: robosuite.controllers.interpolators.linear_interpolator + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: robosuite.controllers.interpolators + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/robosuite.controllers.parts.gripper.rst b/docs/source/robosuite.controllers.parts.gripper.rst new file mode 100644 index 0000000000000000000000000000000000000000..8652c23a0c7fd169ec190b9b3faa0ffac40e73dd --- /dev/null +++ b/docs/source/robosuite.controllers.parts.gripper.rst @@ -0,0 +1,29 @@ +robosuite.controllers.parts.gripper package +=========================================== + +Submodules +---------- + +robosuite.controllers.parts.gripper.gripper\_controller module +-------------------------------------------------------------- + +.. automodule:: robosuite.controllers.parts.gripper.gripper_controller + :members: + :undoc-members: + :show-inheritance: + +robosuite.controllers.parts.gripper.simple\_grip module +------------------------------------------------------- + +.. automodule:: robosuite.controllers.parts.gripper.simple_grip + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: robosuite.controllers.parts.gripper + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/robosuite.controllers.parts.rst b/docs/source/robosuite.controllers.parts.rst new file mode 100644 index 0000000000000000000000000000000000000000..333df06bba13422390a2ff6e83e52738dd010d9f --- /dev/null +++ b/docs/source/robosuite.controllers.parts.rst @@ -0,0 +1,40 @@ +robosuite.controllers.parts package +=================================== + +Subpackages +----------- + +.. 
toctree:: + :maxdepth: 4 + + robosuite.controllers.parts.arm + robosuite.controllers.parts.generic + robosuite.controllers.parts.gripper + robosuite.controllers.parts.mobile_base + +Submodules +---------- + +robosuite.controllers.parts.controller module +--------------------------------------------- + +.. automodule:: robosuite.controllers.parts.controller + :members: + :undoc-members: + :show-inheritance: + +robosuite.controllers.parts.controller\_factory module +------------------------------------------------------ + +.. automodule:: robosuite.controllers.parts.controller_factory + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: robosuite.controllers.parts + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/robosuite.controllers.rst b/docs/source/robosuite.controllers.rst new file mode 100644 index 0000000000000000000000000000000000000000..8eb30b4c4a9102934f00df5a185a8d5221ec541d --- /dev/null +++ b/docs/source/robosuite.controllers.rst @@ -0,0 +1,19 @@ +robosuite.controllers package +============================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + robosuite.controllers.composite + robosuite.controllers.parts + +Module contents +--------------- + +.. automodule:: robosuite.controllers + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/robosuite.environments.rst b/docs/source/robosuite.environments.rst new file mode 100644 index 0000000000000000000000000000000000000000..7af0754e1ce6ac5ac84aa00d3e2751ecb69b4b5d --- /dev/null +++ b/docs/source/robosuite.environments.rst @@ -0,0 +1,37 @@ +robosuite.environments package +============================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + robosuite.environments.manipulation + +Submodules +---------- + +robosuite.environments.base module +---------------------------------- + +.. 
automodule:: robosuite.environments.base + :members: + :undoc-members: + :show-inheritance: + +robosuite.environments.robot\_env module +---------------------------------------- + +.. automodule:: robosuite.environments.robot_env + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: robosuite.environments + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/robosuite.models.arenas.rst b/docs/source/robosuite.models.arenas.rst new file mode 100644 index 0000000000000000000000000000000000000000..54b12506d59c41cc7bfec8ef6c1decd711d3ba0d --- /dev/null +++ b/docs/source/robosuite.models.arenas.rst @@ -0,0 +1,69 @@ +robosuite.models.arenas package +=============================== + +Submodules +---------- + +robosuite.models.arenas.arena module +------------------------------------ + +.. automodule:: robosuite.models.arenas.arena + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.arenas.bins\_arena module +------------------------------------------ + +.. automodule:: robosuite.models.arenas.bins_arena + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.arenas.empty\_arena module +------------------------------------------- + +.. automodule:: robosuite.models.arenas.empty_arena + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.arenas.multi\_table\_arena module +-------------------------------------------------- + +.. automodule:: robosuite.models.arenas.multi_table_arena + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.arenas.pegs\_arena module +------------------------------------------ + +.. automodule:: robosuite.models.arenas.pegs_arena + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.arenas.table\_arena module +------------------------------------------- + +.. 
automodule:: robosuite.models.arenas.table_arena + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.arenas.wipe\_arena module +------------------------------------------ + +.. automodule:: robosuite.models.arenas.wipe_arena + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: robosuite.models.arenas + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/robosuite.models.bases.rst b/docs/source/robosuite.models.bases.rst new file mode 100644 index 0000000000000000000000000000000000000000..aca6fd371fe3b7aa5acf24930f4fd1d00fb7bc18 --- /dev/null +++ b/docs/source/robosuite.models.bases.rst @@ -0,0 +1,117 @@ +robosuite.models.bases package +============================== + +Submodules +---------- + +robosuite.models.bases.floating\_legged\_base module +---------------------------------------------------- + +.. automodule:: robosuite.models.bases.floating_legged_base + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.leg\_base\_model module +---------------------------------------------- + +.. automodule:: robosuite.models.bases.leg_base_model + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.mobile\_base\_model module +------------------------------------------------- + +.. automodule:: robosuite.models.bases.mobile_base_model + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.mount\_model module +------------------------------------------ + +.. automodule:: robosuite.models.bases.mount_model + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.no\_actuation\_base module +------------------------------------------------- + +.. automodule:: robosuite.models.bases.no_actuation_base + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.null\_mobile\_base module +------------------------------------------------ + +.. 
automodule:: robosuite.models.bases.null_mobile_base + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.null\_mount module +----------------------------------------- + +.. automodule:: robosuite.models.bases.null_mount + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.omron\_mobile\_base module +------------------------------------------------- + +.. automodule:: robosuite.models.bases.omron_mobile_base + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.rethink\_minimal\_mount module +----------------------------------------------------- + +.. automodule:: robosuite.models.bases.rethink_minimal_mount + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.rethink\_mount module +-------------------------------------------- + +.. automodule:: robosuite.models.bases.rethink_mount + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.robot\_base\_factory module +-------------------------------------------------- + +.. automodule:: robosuite.models.bases.robot_base_factory + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.robot\_base\_model module +------------------------------------------------ + +.. automodule:: robosuite.models.bases.robot_base_model + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.bases.spot\_base module +---------------------------------------- + +.. automodule:: robosuite.models.bases.spot_base + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: robosuite.models.bases + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/robosuite.models.objects.composite_body.rst b/docs/source/robosuite.models.objects.composite_body.rst new file mode 100644 index 0000000000000000000000000000000000000000..1bf48f8a776db2c56b50c9f6e48aca465ab6fa62 --- /dev/null +++ b/docs/source/robosuite.models.objects.composite_body.rst @@ -0,0 +1,29 @@ +robosuite.models.objects.composite\_body package +================================================ + +Submodules +---------- + +robosuite.models.objects.composite\_body.hinged\_box module +----------------------------------------------------------- + +.. automodule:: robosuite.models.objects.composite_body.hinged_box + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.objects.composite\_body.ratcheting\_wrench module +------------------------------------------------------------------ + +.. automodule:: robosuite.models.objects.composite_body.ratcheting_wrench + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: robosuite.models.objects.composite_body + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/robosuite.models.objects.primitive.rst b/docs/source/robosuite.models.objects.primitive.rst new file mode 100644 index 0000000000000000000000000000000000000000..0e16a8294ef81eee868b8479186c6dfd147b7503 --- /dev/null +++ b/docs/source/robosuite.models.objects.primitive.rst @@ -0,0 +1,45 @@ +robosuite.models.objects.primitive package +========================================== + +Submodules +---------- + +robosuite.models.objects.primitive.ball module +---------------------------------------------- + +.. automodule:: robosuite.models.objects.primitive.ball + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.objects.primitive.box module +--------------------------------------------- + +.. 
automodule:: robosuite.models.objects.primitive.box + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.objects.primitive.capsule module +------------------------------------------------- + +.. automodule:: robosuite.models.objects.primitive.capsule + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.objects.primitive.cylinder module +-------------------------------------------------- + +.. automodule:: robosuite.models.objects.primitive.cylinder + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: robosuite.models.objects.primitive + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/robosuite.models.objects.rst b/docs/source/robosuite.models.objects.rst new file mode 100644 index 0000000000000000000000000000000000000000..c820d2fcd4e8f5e217d1e9dde6dd58fc46439446 --- /dev/null +++ b/docs/source/robosuite.models.objects.rst @@ -0,0 +1,56 @@ +robosuite.models.objects package +================================ + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + robosuite.models.objects.composite + robosuite.models.objects.composite_body + robosuite.models.objects.group + robosuite.models.objects.primitive + +Submodules +---------- + +robosuite.models.objects.generated\_objects module +-------------------------------------------------- + +.. automodule:: robosuite.models.objects.generated_objects + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.objects.object\_groups module +---------------------------------------------- + +.. automodule:: robosuite.models.objects.object_groups + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.objects.objects module +--------------------------------------- + +.. automodule:: robosuite.models.objects.objects + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.objects.xml\_objects module +-------------------------------------------- + +.. 
automodule:: robosuite.models.objects.xml_objects + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: robosuite.models.objects + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/robosuite.models.rst b/docs/source/robosuite.models.rst new file mode 100644 index 0000000000000000000000000000000000000000..be78f5ccb49c2cbd93ba039eeb6c12e66bbd6896 --- /dev/null +++ b/docs/source/robosuite.models.rst @@ -0,0 +1,42 @@ +robosuite.models package +======================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + robosuite.models.arenas + robosuite.models.bases + robosuite.models.grippers + robosuite.models.objects + robosuite.models.robots + robosuite.models.tasks + +Submodules +---------- + +robosuite.models.base module +---------------------------- + +.. automodule:: robosuite.models.base + :members: + :undoc-members: + :show-inheritance: + +robosuite.models.world module +----------------------------- + +.. automodule:: robosuite.models.world + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: robosuite.models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/robosuite.wrappers.rst b/docs/source/robosuite.wrappers.rst new file mode 100644 index 0000000000000000000000000000000000000000..39839d7d4bb5b4c6bd06c998e34341999e8b5110 --- /dev/null +++ b/docs/source/robosuite.wrappers.rst @@ -0,0 +1,61 @@ +robosuite.wrappers package +========================== + +Submodules +---------- + +robosuite.wrappers.data\_collection\_wrapper module +--------------------------------------------------- + +.. automodule:: robosuite.wrappers.data_collection_wrapper + :members: + :undoc-members: + :show-inheritance: + +robosuite.wrappers.demo\_sampler\_wrapper module +------------------------------------------------ + +.. 
automodule:: robosuite.wrappers.demo_sampler_wrapper + :members: + :undoc-members: + :show-inheritance: + +robosuite.wrappers.domain\_randomization\_wrapper module +-------------------------------------------------------- + +.. automodule:: robosuite.wrappers.domain_randomization_wrapper + :members: + :undoc-members: + :show-inheritance: + +robosuite.wrappers.gym\_wrapper module +-------------------------------------- + +.. automodule:: robosuite.wrappers.gym_wrapper + :members: + :undoc-members: + :show-inheritance: + +robosuite.wrappers.visualization\_wrapper module +------------------------------------------------ + +.. automodule:: robosuite.wrappers.visualization_wrapper + :members: + :undoc-members: + :show-inheritance: + +robosuite.wrappers.wrapper module +--------------------------------- + +.. automodule:: robosuite.wrappers.wrapper + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: robosuite.wrappers + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/tutorials/add_controller.md b/docs/tutorials/add_controller.md new file mode 100644 index 0000000000000000000000000000000000000000..9a25ce178c30b14e3602c3dd8f37ca660edb0e54 --- /dev/null +++ b/docs/tutorials/add_controller.md @@ -0,0 +1,55 @@ + +## Adding Third Party Controllers + +To use a third-party controller with robosuite, you'll need to: +1. Create a new class that subclasses one of the composite controllers in `robosuite/controllers/composite/composite_controller.py`. +2. Register the composite controller with the decorator `@register_composite_controller`. +3. Implement composite specific functionality that ultimately provides control input to the underlying `part_controller`'s. +4. Import the new class so that it gets added to robosuite's `REGISTERED_COMPOSITE_CONTROLLERS_DICT` via the `@register_composite_controller` decorator. +5. Provide controller specific configs and the new controller's `type` in a json file. 
+ +For the new composite controllers subclassing `WholeBody`, you'll mainly need to update `joint_action_policy`. + +We provide an example of how to use a third-party `WholeBodyMinkIK` composite controller with robosuite, in the `robosuite/examples/third_party_controller/` directory. You can run the `python teleop_mink.py` example script to see a third-party controller in action. Note: to run this specific example, you'll need to `pip install mink`. + + +Steps 1 and 2: + +In `robosuite/examples/third_party_controller/mink_controller.py`: + +``` +@register_composite_controller +class WholeBodyMinkIK(WholeBody): + name = "WHOLE_BODY_MINK_IK" +``` + +Step 3: + +In `robosuite/examples/third_party_controller/mink_controller.py`, add logic specific to the new composite controller: + +``` +self.joint_action_policy = IKSolverMink(...) +``` + +Step 4: + +In `teleop_mink.py`, we import: + +``` +from robosuite.examples.third_party_controller.mink_controller import WholeBodyMinkIK +``` + +Step 5: + +In `robosuite/examples/third_party_controller/default_mink_ik_gr1.json`, we add configs specific to our new composite controller, and also set the `type` to +match the `name` specified in `WholeBodyMinkIK`: + +``` +{ + "type": "WHOLE_BODY_MINK_IK", # set the correct type + "composite_controller_specific_configs": { + ... + }, + ... +} +``` \ No newline at end of file diff --git a/docs/tutorials/add_environment.md b/docs/tutorials/add_environment.md new file mode 100644 index 0000000000000000000000000000000000000000..9865cd69d17a9a9cd46bb96332b6cd3f455d814e --- /dev/null +++ b/docs/tutorials/add_environment.md @@ -0,0 +1,65 @@ +## Building Your Own Environments + +**robosuite** offers great flexibility in creating your own environments.
A [task](../modeling/task) typically involves the participation of a [robot](../modeling/robot_model) with [grippers](../modeling/robot_model.html#gripper-model) as its end-effectors, an [arena](../modeling/arena) (workspace), and [objects](../modeling/object_model) that the robot interacts with. For a detailed overview of our design architecture, please check out the [Overview](../modules/overview) page in Modules. Our Modeling APIs provide methods of composing these modularized elements into a scene, which can be loaded in MuJoCo for simulation. To build your own environments, we recommend you take a look at the [Environment classes](../simulation/environment) which have used these APIs to define robotics environments and tasks and the [source code](https://github.com/ARISE-Initiative/robosuite/tree/master/robosuite/environments) of our standardized environments. Below we walk through a step-by-step example of building a new tabletop manipulation environment with our APIs. + +**Step 1: Creating the world.** All mujoco object definitions are housed in an xml. We create a [MujocoWorldBase](../source/robosuite.models) class to do it. +```python +from robosuite.models import MujocoWorldBase + +world = MujocoWorldBase() +``` + +**Step 2: Creating the robot.** The class housing the xml of a robot can be created as follows. +```python +from robosuite.models.robots import Panda + +mujoco_robot = Panda() +``` +We can add a gripper to the robot by creating a gripper instance and calling the add_gripper method on a robot. 
+ +```python +from robosuite.models.grippers import gripper_factory + +gripper = gripper_factory('PandaGripper') +mujoco_robot.add_gripper(gripper) +``` +To add the robot to the world, we place the robot at a desired position and merge it into the world +```python +mujoco_robot.set_base_xpos([0, 0, 0]) +world.merge(mujoco_robot) +``` + +**Step 3: Creating the table.** We can initialize the [TableArena](../source/robosuite.models.arenas) instance that creates a table and the floor plane +```python +from robosuite.models.arenas import TableArena + +mujoco_arena = TableArena() +mujoco_arena.set_origin([0.8, 0, 0]) +world.merge(mujoco_arena) +``` + +**Step 4: Adding the object.** For details of `MujocoObject`, refer to the documentation about [MujocoObject](../modeling/object_model). Here we create a ball and add it to the world. +```python +from robosuite.models.objects import BallObject +from robosuite.utils.mjcf_utils import new_joint + +sphere = BallObject( + name="sphere", + size=[0.04], + rgba=[0, 0.5, 0.5, 1]).get_obj() +sphere.set('pos', '1.0 0 1.0') +world.worldbody.append(sphere) +``` + +**Step 5: Running Simulation.** Once we have created the object, we can obtain a `mujoco.MjModel` model by running +```python +model = world.get_model(mode="mujoco") +``` +This is an `MjModel` instance that can then be used for simulation.
For example, +```python +import mujoco + +data = mujoco.MjData(model) +while data.time < 1: + mujoco.mj_step(model, data) +``` + diff --git a/robosuite.egg-info/PKG-INFO b/robosuite.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..9f5dfc1df0655d91e848c0378d3448453a384144 --- /dev/null +++ b/robosuite.egg-info/PKG-INFO @@ -0,0 +1,85 @@ +Metadata-Version: 2.4 +Name: robosuite +Version: 1.5.2 +Summary: robosuite: A Modular Simulation Framework and Benchmark for Robot Learning +Home-page: https://github.com/ARISE-Initiative/robosuite +Author: Yuke Zhu +Author-email: yukez@cs.utexas.edu +Requires-Python: >=3 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS +Requires-Dist: numpy>=1.13.3 +Requires-Dist: numba>=0.49.1 +Requires-Dist: scipy>=1.2.3 +Requires-Dist: mujoco>=3.3.0 +Requires-Dist: qpsolvers[quadprog]>=4.3.1 +Requires-Dist: Pillow +Requires-Dist: opencv-python +Requires-Dist: pynput +Requires-Dist: termcolor +Requires-Dist: pytest +Requires-Dist: tqdm +Provides-Extra: mink +Requires-Dist: mink==0.0.5; extra == "mink" +Dynamic: author +Dynamic: author-email +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: license-file +Dynamic: provides-extra +Dynamic: requires-dist +Dynamic: requires-python +Dynamic: summary + +# robosuite + + +[**[Homepage]**](https://robosuite.ai/)   [**[White Paper]**](https://arxiv.org/abs/2009.12293)   [**[Documentations]**](https://robosuite.ai/docs/overview.html)   [**[ARISE Initiative]**](https://github.com/ARISE-Initiative) + +------- +## Latest Updates + +- [10/28/2024] **v1.5**: Added support for diverse robot embodiments (including humanoids), custom robot composition, composite controllers (including whole body controllers), more teleoperation devices, photo-realistic rendering. 
[[release notes]](https://github.com/ARISE-Initiative/robosuite/releases/tag/v1.5.0) [[documentation]](http://robosuite.ai/docs/overview.html) + +- [11/15/2022] **v1.4**: Backend migration to DeepMind's official [MuJoCo Python binding](https://github.com/deepmind/mujoco), robot textures, and bug fixes :robot: [[release notes]](https://github.com/ARISE-Initiative/robosuite/releases/tag/v1.4.0) [[documentation]](http://robosuite.ai/docs/v1.4/) + +- [10/19/2021] **v1.3**: Ray tracing and physically based rendering tools :sparkles: and access to additional vision modalities 🎥 [[video spotlight]](https://www.youtube.com/watch?v=2xesly6JrQ8) [[release notes]](https://github.com/ARISE-Initiative/robosuite/releases/tag/v1.3) [[documentation]](http://robosuite.ai/docs/v1.3/) + +- [02/17/2021] **v1.2**: Added observable sensor models :eyes: and dynamics randomization :game_die: [[release notes]](https://github.com/ARISE-Initiative/robosuite/releases/tag/v1.2) + +- [12/17/2020] **v1.1**: Refactored infrastructure and standardized model classes for much easier environment prototyping :wrench: [[release notes]](https://github.com/ARISE-Initiative/robosuite/releases/tag/v1.1) + +------- + +**robosuite** is a simulation framework powered by the [MuJoCo](http://mujoco.org/) physics engine for robot learning. It also offers a suite of benchmark environments for reproducible research. The current release (v1.5) features support for diverse robot embodiments (including humanoids), custom robot composition, composite controllers (including whole body controllers), more teleoperation devices, photo-realistic rendering. This project is part of the broader [Advancing Robot Intelligence through Simulated Environments (ARISE) Initiative](https://github.com/ARISE-Initiative), with the aim of lowering the barriers of entry for cutting-edge research at the intersection of AI and Robotics. 
+ +Data-driven algorithms, such as reinforcement learning and imitation learning, provide a powerful and generic tool in robotics. These learning paradigms, fueled by new advances in deep learning, have achieved some exciting successes in a variety of robot control problems. However, the challenges of reproducibility and the limited accessibility of robot hardware (especially during a pandemic) have impaired research progress. The overarching goal of **robosuite** is to provide researchers with: + +* a standardized set of benchmarking tasks for rigorous evaluation and algorithm development; +* a modular design that offers great flexibility in designing new robot simulation environments; +* a high-quality implementation of robot controllers and off-the-shelf learning algorithms to lower the barriers to entry. + +This framework was originally developed in late 2017 by researchers in [Stanford Vision and Learning Lab](http://svl.stanford.edu) (SVL) as an internal tool for robot learning research. Now, it is actively maintained and used for robotics research projects in SVL, the [UT Robot Perception and Learning Lab](http://rpl.cs.utexas.edu) (RPL) and NVIDIA [Generalist Embodied Agent Research Group](https://research.nvidia.com/labs/gear/) (GEAR). We welcome community contributions to this project. For details, please check out our [contributing guidelines](CONTRIBUTING.md). + +**Robosuite** offers a modular design of APIs for building new environments, robot embodiments, and robot controllers with procedural generation. We highlight these primary features below: + +* **standardized tasks**: a set of standardized manipulation tasks of large diversity and varying complexity and RL benchmarking results for reproducible research; +* **procedural generation**: modular APIs for programmatically creating new environments and new tasks as combinations of robot models, arenas, and parameterized 3D objects. 
Check out our repo [robosuite_models](https://github.com/ARISE-Initiative/robosuite_models) for extra robot models tailored to robosuite. +* **robot controllers**: a selection of controller types to command the robots, such as joint-space velocity control, inverse kinematics control, operational space control, and whole body control; +* **teleoperation devices**: a selection of teleoperation devices including keyboard, spacemouse and MuJoCo viewer drag-drop; +* **multi-modal sensors**: heterogeneous types of sensory signals, including low-level physical states, RGB cameras, depth maps, and proprioception; +* **human demonstrations**: utilities for collecting human demonstrations, replaying demonstration datasets, and leveraging demonstration data for learning. Check out our sister project [robomimic](https://arise-initiative.github.io/robomimic-web/); +* **photorealistic rendering**: integration with advanced graphics tools that provide real-time photorealistic renderings of simulated scenes, including support for NVIDIA Isaac Sim rendering. 
+ +## Citation +Please cite [**robosuite**](https://robosuite.ai) if you use this framework in your publications: +```bibtex +@inproceedings{robosuite2020, + title={robosuite: A Modular Simulation Framework and Benchmark for Robot Learning}, + author={Yuke Zhu and Josiah Wong and Ajay Mandlekar and Roberto Mart\'{i}n-Mart\'{i}n and Abhishek Joshi and Soroush Nasiriany and Yifeng Zhu and Kevin Lin}, + booktitle={arXiv preprint arXiv:2009.12293}, + year={2020} +} +``` diff --git a/robosuite.egg-info/SOURCES.txt b/robosuite.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..398a939a9925dd8edca6689e95124e5823bec6bc --- /dev/null +++ b/robosuite.egg-info/SOURCES.txt @@ -0,0 +1,1183 @@ +AUTHORS +LICENSE +MANIFEST.in +README.md +pyproject.toml +setup.py +robosuite/__init__.py +robosuite/macros.py +robosuite.egg-info/PKG-INFO +robosuite.egg-info/SOURCES.txt +robosuite.egg-info/dependency_links.txt +robosuite.egg-info/eager_resources.txt +robosuite.egg-info/requires.txt +robosuite.egg-info/top_level.txt +robosuite/controllers/__init__.py +robosuite/controllers/composite/__init__.py +robosuite/controllers/composite/composite_controller.py +robosuite/controllers/composite/composite_controller_factory.py +robosuite/controllers/config/default/composite/basic.json +robosuite/controllers/config/default/composite/hybrid_mobile_base.json +robosuite/controllers/config/default/composite/whole_body_ik.json +robosuite/controllers/config/default/composite/whole_body_mink_ik.json +robosuite/controllers/config/default/parts/ik_pose.json +robosuite/controllers/config/default/parts/joint_position.json +robosuite/controllers/config/default/parts/joint_torque.json +robosuite/controllers/config/default/parts/joint_velocity.json +robosuite/controllers/config/default/parts/osc_pose.json +robosuite/controllers/config/default/parts/osc_position.json +robosuite/controllers/config/robots/default_baxter.json +robosuite/controllers/config/robots/default_gr1.json 
+robosuite/controllers/config/robots/default_gr1_fixed_lower_body.json +robosuite/controllers/config/robots/default_gr1_floating_body.json +robosuite/controllers/config/robots/default_iiwa.json +robosuite/controllers/config/robots/default_kinova3.json +robosuite/controllers/config/robots/default_panda.json +robosuite/controllers/config/robots/default_panda_dex.json +robosuite/controllers/config/robots/default_pandaomron.json +robosuite/controllers/config/robots/default_pandaomron_whole_body_ik.json +robosuite/controllers/config/robots/default_sawyer.json +robosuite/controllers/config/robots/default_spotwitharm.json +robosuite/controllers/config/robots/default_tiago.json +robosuite/controllers/config/robots/default_tiago_whole_body_ik.json +robosuite/controllers/config/robots/default_ur5e.json +robosuite/controllers/parts/__init__.py +robosuite/controllers/parts/controller.py +robosuite/controllers/parts/controller_factory.py +robosuite/controllers/parts/arm/__init__.py +robosuite/controllers/parts/arm/ik.py +robosuite/controllers/parts/arm/osc.py +robosuite/controllers/parts/generic/__init__.py +robosuite/controllers/parts/generic/joint_pos.py +robosuite/controllers/parts/generic/joint_tor.py +robosuite/controllers/parts/generic/joint_vel.py +robosuite/controllers/parts/gripper/__init__.py +robosuite/controllers/parts/gripper/gripper_controller.py +robosuite/controllers/parts/gripper/simple_grip.py +robosuite/controllers/parts/mobile_base/__init__.py +robosuite/controllers/parts/mobile_base/joint_vel.py +robosuite/controllers/parts/mobile_base/mobile_base_controller.py +robosuite/demos/demo_collect_and_playback_data.py +robosuite/demos/demo_composite_robot.py +robosuite/demos/demo_control.py +robosuite/demos/demo_device_control.py +robosuite/demos/demo_domain_randomization.py +robosuite/demos/demo_gripper_interaction.py +robosuite/demos/demo_gripper_selection.py +robosuite/demos/demo_gym_functionality.py +robosuite/demos/demo_multi_camera.py 
+robosuite/demos/demo_random_action.py +robosuite/demos/demo_renderers.py +robosuite/demos/demo_segmentation.py +robosuite/demos/demo_sensor_corruption.py +robosuite/demos/demo_usd_export.py +robosuite/demos/demo_video_recording.py +robosuite/devices/__init__.py +robosuite/devices/device.py +robosuite/devices/dualsense.py +robosuite/devices/keyboard.py +robosuite/devices/mjgui.py +robosuite/devices/spacemouse.py +robosuite/environments/__init__.py +robosuite/environments/base.py +robosuite/environments/robot_env.py +robosuite/environments/manipulation/__init__.py +robosuite/environments/manipulation/door.py +robosuite/environments/manipulation/lift.py +robosuite/environments/manipulation/manipulation_env.py +robosuite/environments/manipulation/nut_assembly.py +robosuite/environments/manipulation/pick_place.py +robosuite/environments/manipulation/stack.py +robosuite/environments/manipulation/tool_hang.py +robosuite/environments/manipulation/two_arm_env.py +robosuite/environments/manipulation/two_arm_handover.py +robosuite/environments/manipulation/two_arm_lift.py +robosuite/environments/manipulation/two_arm_peg_in_hole.py +robosuite/environments/manipulation/two_arm_transport.py +robosuite/environments/manipulation/wipe.py +robosuite/models/__init__.py +robosuite/models/base.py +robosuite/models/world.py +robosuite/models/arenas/__init__.py +robosuite/models/arenas/arena.py +robosuite/models/arenas/bins_arena.py +robosuite/models/arenas/empty_arena.py +robosuite/models/arenas/multi_table_arena.py +robosuite/models/arenas/pegs_arena.py +robosuite/models/arenas/table_arena.py +robosuite/models/arenas/wipe_arena.py +robosuite/models/assets/base.xml +robosuite/models/assets/arenas/bins_arena.xml +robosuite/models/assets/arenas/empty_arena.xml +robosuite/models/assets/arenas/multi_table_arena.xml +robosuite/models/assets/arenas/pegs_arena.xml +robosuite/models/assets/arenas/table_arena.xml +robosuite/models/assets/bases/floating_legged_base.xml 
+robosuite/models/assets/bases/no_actuation_base.xml +robosuite/models/assets/bases/null_base.xml +robosuite/models/assets/bases/null_mobile_base.xml +robosuite/models/assets/bases/null_mount.xml +robosuite/models/assets/bases/omron_mobile_base.xml +robosuite/models/assets/bases/rethink_minimal_mount.xml +robosuite/models/assets/bases/rethink_mount.xml +robosuite/models/assets/bases/meshes/omron_mobile_base/omron_0.obj +robosuite/models/assets/bases/meshes/omron_mobile_base/omron_1.obj +robosuite/models/assets/bases/meshes/omron_mobile_base/omron_2.obj +robosuite/models/assets/bases/meshes/omron_mobile_base/omron_3.obj +robosuite/models/assets/bases/meshes/omron_mobile_base/omron_4.obj +robosuite/models/assets/bases/meshes/omron_mobile_base/omron_5.obj +robosuite/models/assets/bases/meshes/omron_mobile_base/omron_6.obj +robosuite/models/assets/bases/meshes/omron_mobile_base/omron_7.obj +robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_collision.mtl +robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_collision.obj +robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_collision.stl +robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_vis.mtl +robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_vis.obj +robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_vis.stl +robosuite/models/assets/bases/meshes/rethink_mount/pedestal.dae +robosuite/models/assets/bases/meshes/rethink_mount/pedestal.mtl +robosuite/models/assets/bases/meshes/rethink_mount/pedestal.obj +robosuite/models/assets/bases/meshes/rethink_mount/pedestal.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/base/PEDESTAL.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/base/PEDESTAL.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/base/pedestal_link_collision.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/base/pedestal_link_collision.stl 
+robosuite/models/assets/bullet_data/baxter_description/meshes/head/H0.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/head/H0.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/head/H1.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/head/H1.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/lower_elbow/E1.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/lower_elbow/E1.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/lower_forearm/W1.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/lower_forearm/W1.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/lower_shoulder/S1.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/lower_shoulder/S1.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/torso/base_link.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/torso/base_link.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/torso/base_link_collision.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/torso/base_link_collision.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/upper_elbow/E0.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/upper_elbow/E0.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/upper_forearm/W0.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/upper_forearm/W0.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/upper_shoulder/S0.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/upper_shoulder/S0.stl +robosuite/models/assets/bullet_data/baxter_description/meshes/wrist/W2.dae +robosuite/models/assets/bullet_data/baxter_description/meshes/wrist/W2.stl +robosuite/models/assets/bullet_data/baxter_description/urdf/baxter_arm.urdf +robosuite/models/assets/bullet_data/panda_description/CMakeLists.txt 
+robosuite/models/assets/bullet_data/panda_description/mainpage.dox +robosuite/models/assets/bullet_data/panda_description/package.xml +robosuite/models/assets/bullet_data/panda_description/rosdoc.yaml +robosuite/models/assets/bullet_data/panda_description/meshes/collision/finger.stl +robosuite/models/assets/bullet_data/panda_description/meshes/collision/hand.stl +robosuite/models/assets/bullet_data/panda_description/meshes/collision/link0.stl +robosuite/models/assets/bullet_data/panda_description/meshes/collision/link1.stl +robosuite/models/assets/bullet_data/panda_description/meshes/collision/link2.stl +robosuite/models/assets/bullet_data/panda_description/meshes/collision/link3.stl +robosuite/models/assets/bullet_data/panda_description/meshes/collision/link4.stl +robosuite/models/assets/bullet_data/panda_description/meshes/collision/link5.stl +robosuite/models/assets/bullet_data/panda_description/meshes/collision/link6.stl +robosuite/models/assets/bullet_data/panda_description/meshes/collision/link7.stl +robosuite/models/assets/bullet_data/panda_description/meshes/visual/finger.dae +robosuite/models/assets/bullet_data/panda_description/meshes/visual/hand.dae +robosuite/models/assets/bullet_data/panda_description/meshes/visual/link0.dae +robosuite/models/assets/bullet_data/panda_description/meshes/visual/link1.dae +robosuite/models/assets/bullet_data/panda_description/meshes/visual/link2.dae +robosuite/models/assets/bullet_data/panda_description/meshes/visual/link3.dae +robosuite/models/assets/bullet_data/panda_description/meshes/visual/link4.dae +robosuite/models/assets/bullet_data/panda_description/meshes/visual/link5.dae +robosuite/models/assets/bullet_data/panda_description/meshes/visual/link6.dae +robosuite/models/assets/bullet_data/panda_description/meshes/visual/link7.dae +robosuite/models/assets/bullet_data/panda_description/urdf/hand.urdf +robosuite/models/assets/bullet_data/panda_description/urdf/hand.urdf.xacro 
+robosuite/models/assets/bullet_data/panda_description/urdf/hand.xacro +robosuite/models/assets/bullet_data/panda_description/urdf/panda_arm.urdf +robosuite/models/assets/bullet_data/panda_description/urdf/panda_arm.urdf.xacro +robosuite/models/assets/bullet_data/panda_description/urdf/panda_arm.xacro +robosuite/models/assets/bullet_data/panda_description/urdf/panda_arm_hand.urdf +robosuite/models/assets/bullet_data/panda_description/urdf/panda_arm_hand.urdf.xacro +robosuite/models/assets/bullet_data/sawyer_description/CMakeLists.txt +robosuite/models/assets/bullet_data/sawyer_description/package.xml +robosuite/models/assets/bullet_data/sawyer_description/config/sawyer.rviz +robosuite/models/assets/bullet_data/sawyer_description/launch/test_sawyer_description.launch.test +robosuite/models/assets/bullet_data/sawyer_description/meshes/base.dae +robosuite/models/assets/bullet_data/sawyer_description/meshes/base.stl +robosuite/models/assets/bullet_data/sawyer_description/meshes/l0.dae +robosuite/models/assets/bullet_data/sawyer_description/meshes/l0.stl +robosuite/models/assets/bullet_data/sawyer_description/meshes/l1.dae +robosuite/models/assets/bullet_data/sawyer_description/meshes/l1.stl +robosuite/models/assets/bullet_data/sawyer_description/meshes/l2.dae +robosuite/models/assets/bullet_data/sawyer_description/meshes/l2.stl +robosuite/models/assets/bullet_data/sawyer_description/meshes/l3.dae +robosuite/models/assets/bullet_data/sawyer_description/meshes/l3.stl +robosuite/models/assets/bullet_data/sawyer_description/meshes/l4.dae +robosuite/models/assets/bullet_data/sawyer_description/meshes/l4.stl +robosuite/models/assets/bullet_data/sawyer_description/meshes/l5.dae +robosuite/models/assets/bullet_data/sawyer_description/meshes/l5.stl +robosuite/models/assets/bullet_data/sawyer_description/meshes/l6.dae +robosuite/models/assets/bullet_data/sawyer_description/meshes/l6.stl +robosuite/models/assets/bullet_data/sawyer_description/params/named_poses.yaml 
+robosuite/models/assets/bullet_data/sawyer_description/urdf/sawyer_arm.urdf +robosuite/models/assets/grippers/bd_gripper.xml +robosuite/models/assets/grippers/fourier_left_hand.xml +robosuite/models/assets/grippers/fourier_right_hand.xml +robosuite/models/assets/grippers/inspire_left_hand.xml +robosuite/models/assets/grippers/inspire_right_hand.xml +robosuite/models/assets/grippers/jaco_three_finger_gripper.xml +robosuite/models/assets/grippers/null_gripper.xml +robosuite/models/assets/grippers/panda_gripper.xml +robosuite/models/assets/grippers/rethink_gripper.xml +robosuite/models/assets/grippers/robotiq_gripper_140.xml +robosuite/models/assets/grippers/robotiq_gripper_85.xml +robosuite/models/assets/grippers/robotiq_gripper_s.xml +robosuite/models/assets/grippers/wiping_gripper.xml +robosuite/models/assets/grippers/xarm7_gripper.xml +robosuite/models/assets/grippers/meshes/bd_gripper/arm_link_fngr_0.obj +robosuite/models/assets/grippers/meshes/bd_gripper/arm_link_fngr_1.obj +robosuite/models/assets/grippers/meshes/bd_gripper/arm_link_wr1.obj +robosuite/models/assets/grippers/meshes/bd_gripper/arm_link_wr1_0.obj +robosuite/models/assets/grippers/meshes/bd_gripper/arm_link_wr1_1.obj +robosuite/models/assets/grippers/meshes/bd_gripper/front_jaw.obj +robosuite/models/assets/grippers/meshes/bd_gripper/jaw_tooth.obj +robosuite/models/assets/grippers/meshes/bd_gripper/left_finger.obj +robosuite/models/assets/grippers/meshes/bd_gripper/left_hinge.obj +robosuite/models/assets/grippers/meshes/bd_gripper/left_tooth.obj +robosuite/models/assets/grippers/meshes/bd_gripper/middle_jaw.obj +robosuite/models/assets/grippers/meshes/bd_gripper/right_finger.obj +robosuite/models/assets/grippers/meshes/bd_gripper/right_hinge.obj +robosuite/models/assets/grippers/meshes/bd_gripper/right_tooth.obj +robosuite/models/assets/grippers/meshes/fourier_hands/L_hand_base_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/L_index_intermediate_link.STL 
+robosuite/models/assets/grippers/meshes/fourier_hands/L_index_proximal_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/L_middle_intermediate_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/L_middle_proximal_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/L_pinky_intermediate_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/L_pinky_proximal_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/L_ring_intermediate_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/L_ring_proximal_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_distal_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_proximal_base_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_proximal_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_hand_base_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_index_intermediate_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_index_proximal_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_middle_intermediate_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_middle_proximal_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_pinky_intermediate_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_pinky_proximal_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_ring_intermediate_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_ring_proximal_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_distal_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_proximal_base_link.STL +robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_proximal_link.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link11.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link12.STL 
+robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link13.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link14.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link15.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link16.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link17.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link18.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link19.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link20.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link21.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link22.STL +robosuite/models/assets/grippers/meshes/inspire_hands/lh_base_link.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link11.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link12.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link13.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link14.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link15.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link16.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link17.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link18.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link19.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link20.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link21.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link22.STL +robosuite/models/assets/grippers/meshes/inspire_hands/rh_base_link.STL +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_distal.dae +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_distal.mtl +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_distal.obj 
+robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_distal.stl +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_proximal.dae +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_proximal.mtl +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_proximal.obj +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_proximal.stl +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/hand_3finger.dae +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/hand_3finger.mtl +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/hand_3finger.obj +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/hand_3finger.stl +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/ring_small.dae +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/ring_small.mtl +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/ring_small.obj +robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/ring_small.stl +robosuite/models/assets/grippers/meshes/panda_gripper/finger.stl +robosuite/models/assets/grippers/meshes/panda_gripper/finger_longer.stl +robosuite/models/assets/grippers/meshes/panda_gripper/finger_vis.dae +robosuite/models/assets/grippers/meshes/panda_gripper/finger_vis.mtl +robosuite/models/assets/grippers/meshes/panda_gripper/finger_vis.obj +robosuite/models/assets/grippers/meshes/panda_gripper/finger_vis.stl +robosuite/models/assets/grippers/meshes/panda_gripper/hand.stl +robosuite/models/assets/grippers/meshes/panda_gripper/hand_vis.dae +robosuite/models/assets/grippers/meshes/panda_gripper/hand_vis.mtl +robosuite/models/assets/grippers/meshes/panda_gripper/hand_vis.obj +robosuite/models/assets/grippers/meshes/panda_gripper/hand_vis.stl +robosuite/models/assets/grippers/meshes/rethink_gripper/connector_plate.mtl 
+robosuite/models/assets/grippers/meshes/rethink_gripper/connector_plate.obj +robosuite/models/assets/grippers/meshes/rethink_gripper/connector_plate.stl +robosuite/models/assets/grippers/meshes/rethink_gripper/electric_gripper_base.mtl +robosuite/models/assets/grippers/meshes/rethink_gripper/electric_gripper_base.obj +robosuite/models/assets/grippers/meshes/rethink_gripper/electric_gripper_base.stl +robosuite/models/assets/grippers/meshes/rethink_gripper/half_round_tip.mtl +robosuite/models/assets/grippers/meshes/rethink_gripper/half_round_tip.obj +robosuite/models/assets/grippers/meshes/rethink_gripper/half_round_tip.stl +robosuite/models/assets/grippers/meshes/rethink_gripper/standard_narrow.mtl +robosuite/models/assets/grippers/meshes/rethink_gripper/standard_narrow.obj +robosuite/models/assets/grippers/meshes/rethink_gripper/standard_narrow.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_finger.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_finger_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_finger_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_knuckle.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_knuckle_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_knuckle_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_finger.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_finger_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_finger_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_knuckle.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_knuckle_vis.obj 
+robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_knuckle_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_base_link.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_base_link_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_base_link_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_coupling.stl +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_coupling_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_coupling_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_adapter_plate.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_base.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_0_L.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_0_R.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_1_L.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_1_R.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_2_L.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_2_R.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_3_L.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_3_R.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_base_link.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_base_link_vis.dae +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger.dae +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger.mtl 
+robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger_vis.dae +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger_vis.mtl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle.dae +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle.mtl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle_vis.dae +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle_vis.mtl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger.dae +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger.mtl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger_vis.dae +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger_vis.mtl 
+robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle.dae +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle.mtl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle_vis.dae +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle_vis.mtl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_pad_vis.dae +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_pad_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_base_link.mtl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_base_link.obj +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_base_link.stl +robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_gripper_coupling_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_0.stl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_0_vis.mtl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_0_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_0_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_1.stl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_1_vis.mtl 
+robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_1_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_1_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_2.stl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_2_vis.mtl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_2_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_2_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_3.stl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_3_vis.mtl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_3_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_3_vis.stl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/palm.stl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/palm_vis.mtl +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/palm_vis.obj +robosuite/models/assets/grippers/meshes/robotiq_s_gripper/palm_vis.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/base_link.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/end_tool.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/left_finger.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/left_inner_knuckle.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/left_outer_knuckle.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/link1.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/link2.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/link3.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/link4.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/link5.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/link6.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/link7.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/link_base.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/right_finger.stl 
+robosuite/models/assets/grippers/meshes/xarm7_gripper/right_inner_knuckle.stl +robosuite/models/assets/grippers/meshes/xarm7_gripper/right_outer_knuckle.stl +robosuite/models/assets/grippers/obj_meshes/rethink_gripper/connector_plate.mtl +robosuite/models/assets/grippers/obj_meshes/rethink_gripper/connector_plate.obj +robosuite/models/assets/grippers/obj_meshes/rethink_gripper/electric_gripper_base.mtl +robosuite/models/assets/grippers/obj_meshes/rethink_gripper/electric_gripper_base.obj +robosuite/models/assets/grippers/obj_meshes/rethink_gripper/connector_plate/connector_plate.obj +robosuite/models/assets/grippers/obj_meshes/rethink_gripper/connector_plate/connector_plate.xml +robosuite/models/assets/grippers/obj_meshes/rethink_gripper/electric_gripper_base/electric_gripper_base_0.obj +robosuite/models/assets/grippers/obj_meshes/rethink_gripper/electric_gripper_base/electric_gripper_base_1.obj +robosuite/models/assets/objects/bottle.xml +robosuite/models/assets/objects/bread-visual.xml +robosuite/models/assets/objects/bread.xml +robosuite/models/assets/objects/can-visual.xml +robosuite/models/assets/objects/can.xml +robosuite/models/assets/objects/cereal-visual.xml +robosuite/models/assets/objects/cereal.xml +robosuite/models/assets/objects/door.xml +robosuite/models/assets/objects/door_lock.xml +robosuite/models/assets/objects/lemon.xml +robosuite/models/assets/objects/milk-visual.xml +robosuite/models/assets/objects/milk.xml +robosuite/models/assets/objects/plate-with-hole.xml +robosuite/models/assets/objects/round-nut.xml +robosuite/models/assets/objects/square-nut.xml +robosuite/models/assets/objects/meshes/bottle.msh +robosuite/models/assets/objects/meshes/bottle.mtl +robosuite/models/assets/objects/meshes/bottle.obj +robosuite/models/assets/objects/meshes/bottle.stl +robosuite/models/assets/objects/meshes/bread.msh +robosuite/models/assets/objects/meshes/bread.mtl +robosuite/models/assets/objects/meshes/bread.obj 
+robosuite/models/assets/objects/meshes/bread.stl +robosuite/models/assets/objects/meshes/can.msh +robosuite/models/assets/objects/meshes/can.mtl +robosuite/models/assets/objects/meshes/can.obj +robosuite/models/assets/objects/meshes/can.stl +robosuite/models/assets/objects/meshes/cereal.msh +robosuite/models/assets/objects/meshes/cereal.mtl +robosuite/models/assets/objects/meshes/cereal.obj +robosuite/models/assets/objects/meshes/cereal.stl +robosuite/models/assets/objects/meshes/cube.obj +robosuite/models/assets/objects/meshes/cylinder.msh +robosuite/models/assets/objects/meshes/cylinder.obj +robosuite/models/assets/objects/meshes/handles.msh +robosuite/models/assets/objects/meshes/handles.mtl +robosuite/models/assets/objects/meshes/handles.obj +robosuite/models/assets/objects/meshes/handles.stl +robosuite/models/assets/objects/meshes/lemon.msh +robosuite/models/assets/objects/meshes/lemon.mtl +robosuite/models/assets/objects/meshes/lemon.obj +robosuite/models/assets/objects/meshes/lemon.stl +robosuite/models/assets/objects/meshes/milk.msh +robosuite/models/assets/objects/meshes/milk.mtl +robosuite/models/assets/objects/meshes/milk.obj +robosuite/models/assets/objects/meshes/milk.stl +robosuite/models/assets/objects/meshes/sphere8.obj +robosuite/models/assets/robots/baxter/robot.xml +robosuite/models/assets/robots/baxter/obj_meshes/head/H0.mtl +robosuite/models/assets/robots/baxter/obj_meshes/head/H0.obj +robosuite/models/assets/robots/baxter/obj_meshes/head/H1.mtl +robosuite/models/assets/robots/baxter/obj_meshes/head/H1.obj +robosuite/models/assets/robots/baxter/obj_meshes/head/H0/H0.obj +robosuite/models/assets/robots/baxter/obj_meshes/head/H1/H1_0.obj +robosuite/models/assets/robots/baxter/obj_meshes/head/H1/H1_1.obj +robosuite/models/assets/robots/baxter/obj_meshes/lower_elbow/E1.mtl +robosuite/models/assets/robots/baxter/obj_meshes/lower_elbow/E1.obj +robosuite/models/assets/robots/baxter/obj_meshes/lower_elbow/E1/E1_0.obj 
+robosuite/models/assets/robots/baxter/obj_meshes/lower_elbow/E1/E1_1.obj +robosuite/models/assets/robots/baxter/obj_meshes/lower_forearm/W1.mtl +robosuite/models/assets/robots/baxter/obj_meshes/lower_forearm/W1.obj +robosuite/models/assets/robots/baxter/obj_meshes/lower_forearm/W1/W1_0.obj +robosuite/models/assets/robots/baxter/obj_meshes/lower_forearm/W1/W1_1.obj +robosuite/models/assets/robots/baxter/obj_meshes/lower_shoulder/S1.mtl +robosuite/models/assets/robots/baxter/obj_meshes/lower_shoulder/S1.obj +robosuite/models/assets/robots/baxter/obj_meshes/lower_shoulder/S1/S1.obj +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link.mtl +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link.obj +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link_collision.mtl +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link_collision.obj +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link_collision.stl +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_0.obj +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_1.obj +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_2.obj +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_3.obj +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_4.obj +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_5.obj +robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link_collision/base_link_collision.obj +robosuite/models/assets/robots/baxter/obj_meshes/upper_elbow/E0.mtl +robosuite/models/assets/robots/baxter/obj_meshes/upper_elbow/E0.obj +robosuite/models/assets/robots/baxter/obj_meshes/upper_elbow/E0/E0_0.obj +robosuite/models/assets/robots/baxter/obj_meshes/upper_elbow/E0/E0_1.obj +robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0.mtl +robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0.obj 
+robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0/W0_0.obj +robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0/W0_1.obj +robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0/W0_2.obj +robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0/W0_3.obj +robosuite/models/assets/robots/baxter/obj_meshes/upper_shoulder/S0.mtl +robosuite/models/assets/robots/baxter/obj_meshes/upper_shoulder/S0.obj +robosuite/models/assets/robots/baxter/obj_meshes/upper_shoulder/S0/S0_0.obj +robosuite/models/assets/robots/baxter/obj_meshes/upper_shoulder/S0/S0_1.obj +robosuite/models/assets/robots/baxter/obj_meshes/wrist/W2.mtl +robosuite/models/assets/robots/baxter/obj_meshes/wrist/W2.obj +robosuite/models/assets/robots/baxter/obj_meshes/wrist/W2/W2_0.obj +robosuite/models/assets/robots/baxter/obj_meshes/wrist/W2/W2_1.obj +robosuite/models/assets/robots/baxter/obj_meshes/wrist/W2/W2_2.obj +robosuite/models/assets/robots/gr1/robot.xml +robosuite/models/assets/robots/gr1/meshes/base.STL +robosuite/models/assets/robots/gr1/meshes/head_pitch.STL +robosuite/models/assets/robots/gr1/meshes/head_roll.STL +robosuite/models/assets/robots/gr1/meshes/head_yaw.STL +robosuite/models/assets/robots/gr1/meshes/l_foot_pitch.STL +robosuite/models/assets/robots/gr1/meshes/l_foot_roll.STL +robosuite/models/assets/robots/gr1/meshes/l_hand_pitch.STL +robosuite/models/assets/robots/gr1/meshes/l_hand_roll.STL +robosuite/models/assets/robots/gr1/meshes/l_hand_yaw.STL +robosuite/models/assets/robots/gr1/meshes/l_lower_arm_pitch.STL +robosuite/models/assets/robots/gr1/meshes/l_shank_pitch.STL +robosuite/models/assets/robots/gr1/meshes/l_thigh_pitch.STL +robosuite/models/assets/robots/gr1/meshes/l_thigh_roll.STL +robosuite/models/assets/robots/gr1/meshes/l_thigh_yaw.STL +robosuite/models/assets/robots/gr1/meshes/l_upper_arm_pitch.STL +robosuite/models/assets/robots/gr1/meshes/l_upper_arm_roll.STL +robosuite/models/assets/robots/gr1/meshes/l_upper_arm_yaw.STL 
+robosuite/models/assets/robots/gr1/meshes/r_foot_pitch.STL +robosuite/models/assets/robots/gr1/meshes/r_foot_roll.STL +robosuite/models/assets/robots/gr1/meshes/r_hand_pitch.STL +robosuite/models/assets/robots/gr1/meshes/r_hand_roll.STL +robosuite/models/assets/robots/gr1/meshes/r_hand_yaw.STL +robosuite/models/assets/robots/gr1/meshes/r_lower_arm_pitch.STL +robosuite/models/assets/robots/gr1/meshes/r_shank_pitch.STL +robosuite/models/assets/robots/gr1/meshes/r_thigh_pitch.STL +robosuite/models/assets/robots/gr1/meshes/r_thigh_roll.STL +robosuite/models/assets/robots/gr1/meshes/r_thigh_yaw.STL +robosuite/models/assets/robots/gr1/meshes/r_upper_arm_pitch.STL +robosuite/models/assets/robots/gr1/meshes/r_upper_arm_roll.STL +robosuite/models/assets/robots/gr1/meshes/r_upper_arm_yaw.STL +robosuite/models/assets/robots/gr1/meshes/torso.STL +robosuite/models/assets/robots/gr1/meshes/waist_pitch.STL +robosuite/models/assets/robots/gr1/meshes/waist_roll.STL +robosuite/models/assets/robots/gr1/meshes/waist_yaw.STL +robosuite/models/assets/robots/iiwa/robot.xml +robosuite/models/assets/robots/iiwa/meshes/link_0.obj +robosuite/models/assets/robots/iiwa/meshes/link_0.stl +robosuite/models/assets/robots/iiwa/meshes/link_0_vis.mtl +robosuite/models/assets/robots/iiwa/meshes/link_0_vis.obj +robosuite/models/assets/robots/iiwa/meshes/link_0_vis.stl +robosuite/models/assets/robots/iiwa/meshes/link_1.obj +robosuite/models/assets/robots/iiwa/meshes/link_1.stl +robosuite/models/assets/robots/iiwa/meshes/link_1_vis.mtl +robosuite/models/assets/robots/iiwa/meshes/link_1_vis.obj +robosuite/models/assets/robots/iiwa/meshes/link_1_vis.stl +robosuite/models/assets/robots/iiwa/meshes/link_2.obj +robosuite/models/assets/robots/iiwa/meshes/link_2.stl +robosuite/models/assets/robots/iiwa/meshes/link_2_vis.mtl +robosuite/models/assets/robots/iiwa/meshes/link_2_vis.obj +robosuite/models/assets/robots/iiwa/meshes/link_2_vis.stl +robosuite/models/assets/robots/iiwa/meshes/link_3.obj 
+robosuite/models/assets/robots/iiwa/meshes/link_3.stl +robosuite/models/assets/robots/iiwa/meshes/link_3_vis.mtl +robosuite/models/assets/robots/iiwa/meshes/link_3_vis.obj +robosuite/models/assets/robots/iiwa/meshes/link_3_vis.stl +robosuite/models/assets/robots/iiwa/meshes/link_4.obj +robosuite/models/assets/robots/iiwa/meshes/link_4.stl +robosuite/models/assets/robots/iiwa/meshes/link_4_vis.mtl +robosuite/models/assets/robots/iiwa/meshes/link_4_vis.obj +robosuite/models/assets/robots/iiwa/meshes/link_4_vis.stl +robosuite/models/assets/robots/iiwa/meshes/link_5.obj +robosuite/models/assets/robots/iiwa/meshes/link_5.stl +robosuite/models/assets/robots/iiwa/meshes/link_5_vis.mtl +robosuite/models/assets/robots/iiwa/meshes/link_5_vis.obj +robosuite/models/assets/robots/iiwa/meshes/link_5_vis.stl +robosuite/models/assets/robots/iiwa/meshes/link_6.obj +robosuite/models/assets/robots/iiwa/meshes/link_6.stl +robosuite/models/assets/robots/iiwa/meshes/link_6_vis.mtl +robosuite/models/assets/robots/iiwa/meshes/link_6_vis.obj +robosuite/models/assets/robots/iiwa/meshes/link_6_vis.stl +robosuite/models/assets/robots/iiwa/meshes/link_7.obj +robosuite/models/assets/robots/iiwa/meshes/link_7.stl +robosuite/models/assets/robots/iiwa/meshes/link_7_vis.mtl +robosuite/models/assets/robots/iiwa/meshes/link_7_vis.obj +robosuite/models/assets/robots/iiwa/meshes/link_7_vis.stl +robosuite/models/assets/robots/iiwa/meshes/pedestal.mtl +robosuite/models/assets/robots/jaco/robot.xml +robosuite/models/assets/robots/jaco/meshes/arm_half_1.mtl +robosuite/models/assets/robots/jaco/meshes/arm_half_1.obj +robosuite/models/assets/robots/jaco/meshes/arm_half_1.stl +robosuite/models/assets/robots/jaco/meshes/arm_half_2.mtl +robosuite/models/assets/robots/jaco/meshes/arm_half_2.obj +robosuite/models/assets/robots/jaco/meshes/arm_half_2.stl +robosuite/models/assets/robots/jaco/meshes/base.mtl +robosuite/models/assets/robots/jaco/meshes/base.obj +robosuite/models/assets/robots/jaco/meshes/base.stl 
+robosuite/models/assets/robots/jaco/meshes/forearm.mtl +robosuite/models/assets/robots/jaco/meshes/forearm.obj +robosuite/models/assets/robots/jaco/meshes/forearm.stl +robosuite/models/assets/robots/jaco/meshes/pedestal.mtl +robosuite/models/assets/robots/jaco/meshes/ring_big.mtl +robosuite/models/assets/robots/jaco/meshes/ring_big.obj +robosuite/models/assets/robots/jaco/meshes/ring_big.stl +robosuite/models/assets/robots/jaco/meshes/ring_small.mtl +robosuite/models/assets/robots/jaco/meshes/ring_small.obj +robosuite/models/assets/robots/jaco/meshes/ring_small.stl +robosuite/models/assets/robots/jaco/meshes/shoulder.mtl +robosuite/models/assets/robots/jaco/meshes/shoulder.obj +robosuite/models/assets/robots/jaco/meshes/shoulder.stl +robosuite/models/assets/robots/jaco/meshes/wrist_spherical_1.mtl +robosuite/models/assets/robots/jaco/meshes/wrist_spherical_1.obj +robosuite/models/assets/robots/jaco/meshes/wrist_spherical_1.stl +robosuite/models/assets/robots/jaco/meshes/wrist_spherical_2.mtl +robosuite/models/assets/robots/jaco/meshes/wrist_spherical_2.obj +robosuite/models/assets/robots/jaco/meshes/wrist_spherical_2.stl +robosuite/models/assets/robots/kinova3/robot.xml +robosuite/models/assets/robots/kinova3/meshes/base_link.mtl +robosuite/models/assets/robots/kinova3/meshes/base_link.obj +robosuite/models/assets/robots/kinova3/meshes/base_link.stl +robosuite/models/assets/robots/kinova3/meshes/bracelet_no_vision_link.mtl +robosuite/models/assets/robots/kinova3/meshes/bracelet_no_vision_link.obj +robosuite/models/assets/robots/kinova3/meshes/bracelet_no_vision_link.stl +robosuite/models/assets/robots/kinova3/meshes/bracelet_with_vision_link.mtl +robosuite/models/assets/robots/kinova3/meshes/bracelet_with_vision_link.obj +robosuite/models/assets/robots/kinova3/meshes/bracelet_with_vision_link.stl +robosuite/models/assets/robots/kinova3/meshes/end_effector_link.mtl +robosuite/models/assets/robots/kinova3/meshes/end_effector_link.obj 
+robosuite/models/assets/robots/kinova3/meshes/end_effector_link.stl +robosuite/models/assets/robots/kinova3/meshes/forearm_link.mtl +robosuite/models/assets/robots/kinova3/meshes/forearm_link.obj +robosuite/models/assets/robots/kinova3/meshes/forearm_link.stl +robosuite/models/assets/robots/kinova3/meshes/half_arm_1_link.mtl +robosuite/models/assets/robots/kinova3/meshes/half_arm_1_link.obj +robosuite/models/assets/robots/kinova3/meshes/half_arm_1_link.stl +robosuite/models/assets/robots/kinova3/meshes/half_arm_2_link.mtl +robosuite/models/assets/robots/kinova3/meshes/half_arm_2_link.obj +robosuite/models/assets/robots/kinova3/meshes/half_arm_2_link.stl +robosuite/models/assets/robots/kinova3/meshes/pedestal.mtl +robosuite/models/assets/robots/kinova3/meshes/shoulder_link.mtl +robosuite/models/assets/robots/kinova3/meshes/shoulder_link.obj +robosuite/models/assets/robots/kinova3/meshes/shoulder_link.stl +robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_1_link.mtl +robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_1_link.obj +robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_1_link.stl +robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_2_link.mtl +robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_2_link.obj +robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_2_link.stl +robosuite/models/assets/robots/panda/robot.xml +robosuite/models/assets/robots/panda/meshes/finger.stl +robosuite/models/assets/robots/panda/meshes/hand.stl +robosuite/models/assets/robots/panda/meshes/link0.stl +robosuite/models/assets/robots/panda/meshes/link1.stl +robosuite/models/assets/robots/panda/meshes/link2.stl +robosuite/models/assets/robots/panda/meshes/link3.stl +robosuite/models/assets/robots/panda/meshes/link4.stl +robosuite/models/assets/robots/panda/meshes/link5.stl +robosuite/models/assets/robots/panda/meshes/link6.stl +robosuite/models/assets/robots/panda/meshes/link7.stl 
+robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_0.obj +robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_1.obj +robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_10.obj +robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_11.obj +robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_2.obj +robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_3.obj +robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_4.obj +robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_5.obj +robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_6.obj +robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_7.obj +robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_8.obj +robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_9.obj +robosuite/models/assets/robots/panda/obj_meshes/link1_vis/link1_vis.obj +robosuite/models/assets/robots/panda/obj_meshes/link2_vis/link2_vis.obj +robosuite/models/assets/robots/panda/obj_meshes/link3_vis/link3_vis_0.obj +robosuite/models/assets/robots/panda/obj_meshes/link3_vis/link3_vis_1.obj +robosuite/models/assets/robots/panda/obj_meshes/link3_vis/link3_vis_2.obj +robosuite/models/assets/robots/panda/obj_meshes/link3_vis/link3_vis_3.obj +robosuite/models/assets/robots/panda/obj_meshes/link4_vis/link4_vis_0.obj +robosuite/models/assets/robots/panda/obj_meshes/link4_vis/link4_vis_1.obj +robosuite/models/assets/robots/panda/obj_meshes/link4_vis/link4_vis_2.obj +robosuite/models/assets/robots/panda/obj_meshes/link4_vis/link4_vis_3.obj +robosuite/models/assets/robots/panda/obj_meshes/link5_vis/link5_vis_0.obj +robosuite/models/assets/robots/panda/obj_meshes/link5_vis/link5_vis_1.obj +robosuite/models/assets/robots/panda/obj_meshes/link5_vis/link5_vis_2.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_0.obj 
+robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_1.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_10.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_11.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_12.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_13.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_14.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_15.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_16.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_2.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_3.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_4.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_5.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_6.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_7.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_8.obj +robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_9.obj +robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_0.obj +robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_1.obj +robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_2.obj +robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_3.obj +robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_4.obj +robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_5.obj +robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_6.obj +robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_7.obj +robosuite/models/assets/robots/sawyer/robot.xml +robosuite/models/assets/robots/sawyer/obj_meshes/base.mtl +robosuite/models/assets/robots/sawyer/obj_meshes/base.obj 
+robosuite/models/assets/robots/sawyer/obj_meshes/head.mtl +robosuite/models/assets/robots/sawyer/obj_meshes/head.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l0.mtl +robosuite/models/assets/robots/sawyer/obj_meshes/l0.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l1.mtl +robosuite/models/assets/robots/sawyer/obj_meshes/l1.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l2.mtl +robosuite/models/assets/robots/sawyer/obj_meshes/l2.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l3.mtl +robosuite/models/assets/robots/sawyer/obj_meshes/l3.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l4.mtl +robosuite/models/assets/robots/sawyer/obj_meshes/l4.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l5.mtl +robosuite/models/assets/robots/sawyer/obj_meshes/l5.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l6.mtl +robosuite/models/assets/robots/sawyer/obj_meshes/l6.obj +robosuite/models/assets/robots/sawyer/obj_meshes/base/base_0.obj +robosuite/models/assets/robots/sawyer/obj_meshes/base/base_1.obj +robosuite/models/assets/robots/sawyer/obj_meshes/head/head_0.obj +robosuite/models/assets/robots/sawyer/obj_meshes/head/head_1.obj +robosuite/models/assets/robots/sawyer/obj_meshes/head/head_2.obj +robosuite/models/assets/robots/sawyer/obj_meshes/head/head_3.obj +robosuite/models/assets/robots/sawyer/obj_meshes/head/head_4.obj +robosuite/models/assets/robots/sawyer/obj_meshes/head/head_5.obj +robosuite/models/assets/robots/sawyer/obj_meshes/head/head_6.obj +robosuite/models/assets/robots/sawyer/obj_meshes/head/head_7.obj +robosuite/models/assets/robots/sawyer/obj_meshes/head/head_8.obj +robosuite/models/assets/robots/sawyer/obj_meshes/head/head_9.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_0.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_1.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_2.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_3.obj 
+robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_4.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_5.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_6.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l1/l1_0.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l1/l1_1.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l1/l1_2.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l2/l2_0.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l2/l2_1.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l2/l2_2.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l2/l2_3.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l2/l2_4.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l3/l3_0.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l3/l3_1.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l3/l3_2.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l3/l3_3.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_0.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_1.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_2.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_3.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_4.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_5.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_6.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_7.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l5/l5_0.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l5/l5_1.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l5/l5_2.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l5/l5_3.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l5/l5_4.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_0.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_1.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_2.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_3.obj 
+robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_4.obj +robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_5.obj +robosuite/models/assets/robots/spot/robot.xml +robosuite/models/assets/robots/spot/meshes/body_0.obj +robosuite/models/assets/robots/spot/meshes/body_1.obj +robosuite/models/assets/robots/spot/meshes/body_collision.obj +robosuite/models/assets/robots/spot/meshes/front_jaw.obj +robosuite/models/assets/robots/spot/meshes/front_left_hip.obj +robosuite/models/assets/robots/spot/meshes/front_left_lower_leg.obj +robosuite/models/assets/robots/spot/meshes/front_left_upper_leg_0.obj +robosuite/models/assets/robots/spot/meshes/front_left_upper_leg_1.obj +robosuite/models/assets/robots/spot/meshes/front_right_hip.obj +robosuite/models/assets/robots/spot/meshes/front_right_lower_leg.obj +robosuite/models/assets/robots/spot/meshes/front_right_upper_leg_0.obj +robosuite/models/assets/robots/spot/meshes/front_right_upper_leg_1.obj +robosuite/models/assets/robots/spot/meshes/jaw_tooth.obj +robosuite/models/assets/robots/spot/meshes/left_finger.obj +robosuite/models/assets/robots/spot/meshes/left_hinge.obj +robosuite/models/assets/robots/spot/meshes/left_lower_leg_collision.obj +robosuite/models/assets/robots/spot/meshes/left_tooth.obj +robosuite/models/assets/robots/spot/meshes/left_upper_leg_collision.obj +robosuite/models/assets/robots/spot/meshes/middle_jaw.obj +robosuite/models/assets/robots/spot/meshes/rear_left_hip.obj +robosuite/models/assets/robots/spot/meshes/rear_left_lower_leg.obj +robosuite/models/assets/robots/spot/meshes/rear_left_upper_leg_0.obj +robosuite/models/assets/robots/spot/meshes/rear_left_upper_leg_1.obj +robosuite/models/assets/robots/spot/meshes/rear_right_hip.obj +robosuite/models/assets/robots/spot/meshes/rear_right_lower_leg.obj +robosuite/models/assets/robots/spot/meshes/rear_right_upper_leg_0.obj +robosuite/models/assets/robots/spot/meshes/rear_right_upper_leg_1.obj +robosuite/models/assets/robots/spot/meshes/right_finger.obj 
+robosuite/models/assets/robots/spot/meshes/right_hinge.obj +robosuite/models/assets/robots/spot/meshes/right_lower_leg_collision.obj +robosuite/models/assets/robots/spot/meshes/right_tooth.obj +robosuite/models/assets/robots/spot/meshes/right_upper_leg_collision.obj +robosuite/models/assets/robots/spot_arm/robot.xml +robosuite/models/assets/robots/spot_arm/meshes/arm_link_el0.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_el0_coll.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_el1_0.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_el1_1.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_el1_lip.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_el1_main.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_hr0.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_hr0_coll.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh0.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh0_base.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh0_left_motor.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh0_right_motor.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh1_0.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh1_1.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_wr0.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_wr0_0.obj +robosuite/models/assets/robots/spot_arm/meshes/arm_link_wr0_1.obj +robosuite/models/assets/robots/tiago/robot.xml +robosuite/models/assets/robots/tiago/meshes/arm/arm_1.mtl +robosuite/models/assets/robots/tiago/meshes/arm/arm_1.stl +robosuite/models/assets/robots/tiago/meshes/arm/arm_2.mtl +robosuite/models/assets/robots/tiago/meshes/arm/arm_2.stl +robosuite/models/assets/robots/tiago/meshes/arm/arm_3.mtl +robosuite/models/assets/robots/tiago/meshes/arm/arm_3.stl +robosuite/models/assets/robots/tiago/meshes/arm/arm_4.mtl 
+robosuite/models/assets/robots/tiago/meshes/arm/arm_4.stl +robosuite/models/assets/robots/tiago/meshes/arm/arm_4_collision.stl +robosuite/models/assets/robots/tiago/meshes/arm/arm_5-wrist-2010.stl +robosuite/models/assets/robots/tiago/meshes/arm/arm_5-wrist-2017.stl +robosuite/models/assets/robots/tiago/meshes/arm/arm_6-wrist-2010.stl +robosuite/models/assets/robots/tiago/meshes/arm/arm_6-wrist-2017.mtl +robosuite/models/assets/robots/tiago/meshes/arm/arm_6-wrist-2017.stl +robosuite/models/assets/robots/tiago/meshes/base/base.stl +robosuite/models/assets/robots/tiago/meshes/base/base_antena_link.stl +robosuite/models/assets/robots/tiago/meshes/base/base_collision.stl +robosuite/models/assets/robots/tiago/meshes/base/base_dock_link.stl +robosuite/models/assets/robots/tiago/meshes/base/base_link.stl +robosuite/models/assets/robots/tiago/meshes/base/base_ring.stl +robosuite/models/assets/robots/tiago/meshes/base/high_resolution/base.stl +robosuite/models/assets/robots/tiago/meshes/base/high_resolution/base_collision.stl +robosuite/models/assets/robots/tiago/meshes/grippers/finger_abd_collision.stl +robosuite/models/assets/robots/tiago/meshes/grippers/finger_flex_collision.stl +robosuite/models/assets/robots/tiago/meshes/grippers/finger_flex_tip_collision.stl +robosuite/models/assets/robots/tiago/meshes/grippers/gripper_finger_link.mtl +robosuite/models/assets/robots/tiago/meshes/grippers/gripper_finger_link.stl +robosuite/models/assets/robots/tiago/meshes/grippers/gripper_finger_link_collision.stl +robosuite/models/assets/robots/tiago/meshes/grippers/gripper_finger_link_mirror_x_y.stl +robosuite/models/assets/robots/tiago/meshes/grippers/gripper_link.mtl +robosuite/models/assets/robots/tiago/meshes/grippers/gripper_link.stl +robosuite/models/assets/robots/tiago/meshes/grippers/gripper_link_collision.stl +robosuite/models/assets/robots/tiago/meshes/grippers/palm.stl +robosuite/models/assets/robots/tiago/meshes/grippers/palm_collision.stl 
+robosuite/models/assets/robots/tiago/meshes/grippers/thumb_abd_collision.stl +robosuite/models/assets/robots/tiago/meshes/head/head_1.stl +robosuite/models/assets/robots/tiago/meshes/head/head_2.stl +robosuite/models/assets/robots/tiago/meshes/torso/torso_fix.stl +robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_collision_core.stl +robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_collision_shoulder.stl +robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_dual_arm.stl +robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_dual_arm_with_screen.stl +robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_with_arm.stl +robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_without_arm.stl +robosuite/models/assets/robots/tiago/meshes/wheels/caster_1.mtl +robosuite/models/assets/robots/tiago/meshes/wheels/caster_1.stl +robosuite/models/assets/robots/tiago/meshes/wheels/caster_2.mtl +robosuite/models/assets/robots/tiago/meshes/wheels/caster_2.stl +robosuite/models/assets/robots/tiago/meshes/wheels/suspension_front_link.mtl +robosuite/models/assets/robots/tiago/meshes/wheels/suspension_front_link.stl +robosuite/models/assets/robots/tiago/meshes/wheels/suspension_front_link_mirror_y.stl +robosuite/models/assets/robots/tiago/meshes/wheels/suspension_rear_link.mtl +robosuite/models/assets/robots/tiago/meshes/wheels/suspension_rear_link.stl +robosuite/models/assets/robots/tiago/meshes/wheels/wheel.mtl +robosuite/models/assets/robots/tiago/meshes/wheels/wheel.stl +robosuite/models/assets/robots/tiago/meshes/wheels/wheel_link.mtl +robosuite/models/assets/robots/tiago/meshes/wheels/wheel_link.stl +robosuite/models/assets/robots/tiago/meshes/wheels/wheel_link_mirror_z.stl +robosuite/models/assets/robots/tiago/meshes/wheels/high_resolution/caster_1.stl +robosuite/models/assets/robots/tiago/meshes/wheels/high_resolution/caster_2.stl +robosuite/models/assets/robots/tiago/meshes/wheels/high_resolution/wheel.stl 
+robosuite/models/assets/robots/ur5e/robot.xml +robosuite/models/assets/robots/ur5e/meshes/base.dae +robosuite/models/assets/robots/ur5e/meshes/base.stl +robosuite/models/assets/robots/ur5e/meshes/base_vis.mtl +robosuite/models/assets/robots/ur5e/meshes/base_vis.obj +robosuite/models/assets/robots/ur5e/meshes/base_vis.stl +robosuite/models/assets/robots/ur5e/meshes/forearm.dae +robosuite/models/assets/robots/ur5e/meshes/forearm.stl +robosuite/models/assets/robots/ur5e/meshes/forearm_vis.mtl +robosuite/models/assets/robots/ur5e/meshes/forearm_vis.obj +robosuite/models/assets/robots/ur5e/meshes/forearm_vis.stl +robosuite/models/assets/robots/ur5e/meshes/pedestal.dae +robosuite/models/assets/robots/ur5e/meshes/pedestal.mtl +robosuite/models/assets/robots/ur5e/meshes/pedestal.obj +robosuite/models/assets/robots/ur5e/meshes/shoulder.dae +robosuite/models/assets/robots/ur5e/meshes/shoulder.stl +robosuite/models/assets/robots/ur5e/meshes/shoulder_vis.mtl +robosuite/models/assets/robots/ur5e/meshes/shoulder_vis.obj +robosuite/models/assets/robots/ur5e/meshes/shoulder_vis.stl +robosuite/models/assets/robots/ur5e/meshes/upperarm.dae +robosuite/models/assets/robots/ur5e/meshes/upperarm.stl +robosuite/models/assets/robots/ur5e/meshes/upperarm_vis.mtl +robosuite/models/assets/robots/ur5e/meshes/upperarm_vis.obj +robosuite/models/assets/robots/ur5e/meshes/upperarm_vis.stl +robosuite/models/assets/robots/ur5e/meshes/wrist1.dae +robosuite/models/assets/robots/ur5e/meshes/wrist1.stl +robosuite/models/assets/robots/ur5e/meshes/wrist1_vis.mtl +robosuite/models/assets/robots/ur5e/meshes/wrist1_vis.obj +robosuite/models/assets/robots/ur5e/meshes/wrist1_vis.stl +robosuite/models/assets/robots/ur5e/meshes/wrist2.dae +robosuite/models/assets/robots/ur5e/meshes/wrist2.stl +robosuite/models/assets/robots/ur5e/meshes/wrist2_vis.mtl +robosuite/models/assets/robots/ur5e/meshes/wrist2_vis.obj +robosuite/models/assets/robots/ur5e/meshes/wrist2_vis.stl 
+robosuite/models/assets/robots/ur5e/meshes/wrist3.dae +robosuite/models/assets/robots/ur5e/meshes/wrist3.stl +robosuite/models/assets/robots/ur5e/meshes/wrist3_vis.mtl +robosuite/models/assets/robots/ur5e/meshes/wrist3_vis.obj +robosuite/models/assets/robots/ur5e/meshes/wrist3_vis.stl +robosuite/models/assets/robots/ur5e/obj_meshes/base_vis/base_vis_0.obj +robosuite/models/assets/robots/ur5e/obj_meshes/base_vis/base_vis_1.obj +robosuite/models/assets/robots/ur5e/obj_meshes/forearm_vis/forearm_vis_0.obj +robosuite/models/assets/robots/ur5e/obj_meshes/forearm_vis/forearm_vis_1.obj +robosuite/models/assets/robots/ur5e/obj_meshes/forearm_vis/forearm_vis_2.obj +robosuite/models/assets/robots/ur5e/obj_meshes/forearm_vis/forearm_vis_3.obj +robosuite/models/assets/robots/ur5e/obj_meshes/shoulder_vis/shoulder_vis_0.obj +robosuite/models/assets/robots/ur5e/obj_meshes/shoulder_vis/shoulder_vis_1.obj +robosuite/models/assets/robots/ur5e/obj_meshes/shoulder_vis/shoulder_vis_2.obj +robosuite/models/assets/robots/ur5e/obj_meshes/upperarm_vis/upperarm_vis_0.obj +robosuite/models/assets/robots/ur5e/obj_meshes/upperarm_vis/upperarm_vis_1.obj +robosuite/models/assets/robots/ur5e/obj_meshes/upperarm_vis/upperarm_vis_2.obj +robosuite/models/assets/robots/ur5e/obj_meshes/upperarm_vis/upperarm_vis_3.obj +robosuite/models/assets/robots/ur5e/obj_meshes/wrist1_vis/wrist1_vis_0.obj +robosuite/models/assets/robots/ur5e/obj_meshes/wrist1_vis/wrist1_vis_1.obj +robosuite/models/assets/robots/ur5e/obj_meshes/wrist1_vis/wrist1_vis_2.obj +robosuite/models/assets/robots/ur5e/obj_meshes/wrist2_vis/wrist2_vis_0.obj +robosuite/models/assets/robots/ur5e/obj_meshes/wrist2_vis/wrist2_vis_1.obj +robosuite/models/assets/robots/ur5e/obj_meshes/wrist2_vis/wrist2_vis_2.obj +robosuite/models/assets/robots/ur5e/obj_meshes/wrist3_vis/wrist3_vis.obj +robosuite/models/assets/robots/xarm7/robot.xml +robosuite/models/assets/robots/xarm7/assets/base_link.stl +robosuite/models/assets/robots/xarm7/assets/end_tool.stl 
+robosuite/models/assets/robots/xarm7/assets/left_finger.stl +robosuite/models/assets/robots/xarm7/assets/left_inner_knuckle.stl +robosuite/models/assets/robots/xarm7/assets/left_outer_knuckle.stl +robosuite/models/assets/robots/xarm7/assets/link1.stl +robosuite/models/assets/robots/xarm7/assets/link2.stl +robosuite/models/assets/robots/xarm7/assets/link3.stl +robosuite/models/assets/robots/xarm7/assets/link4.stl +robosuite/models/assets/robots/xarm7/assets/link5.stl +robosuite/models/assets/robots/xarm7/assets/link6.stl +robosuite/models/assets/robots/xarm7/assets/link7.stl +robosuite/models/assets/robots/xarm7/assets/link_base.stl +robosuite/models/assets/robots/xarm7/assets/right_finger.stl +robosuite/models/assets/robots/xarm7/assets/right_inner_knuckle.stl +robosuite/models/assets/robots/xarm7/assets/right_outer_knuckle.stl +robosuite/models/assets/robots/xarm7/meshes/base_link.stl +robosuite/models/assets/robots/xarm7/meshes/end_tool.stl +robosuite/models/assets/robots/xarm7/meshes/left_finger.stl +robosuite/models/assets/robots/xarm7/meshes/left_inner_knuckle.stl +robosuite/models/assets/robots/xarm7/meshes/left_outer_knuckle.stl +robosuite/models/assets/robots/xarm7/meshes/link1.stl +robosuite/models/assets/robots/xarm7/meshes/link2.stl +robosuite/models/assets/robots/xarm7/meshes/link3.stl +robosuite/models/assets/robots/xarm7/meshes/link4.stl +robosuite/models/assets/robots/xarm7/meshes/link5.stl +robosuite/models/assets/robots/xarm7/meshes/link6.stl +robosuite/models/assets/robots/xarm7/meshes/link7.stl +robosuite/models/assets/robots/xarm7/meshes/link_base.stl +robosuite/models/assets/robots/xarm7/meshes/right_finger.stl +robosuite/models/assets/robots/xarm7/meshes/right_inner_knuckle.stl +robosuite/models/assets/robots/xarm7/meshes/right_outer_knuckle.stl +robosuite/models/assets/textures/blue-wood.png +robosuite/models/assets/textures/brass-ambra.png +robosuite/models/assets/textures/bread.png +robosuite/models/assets/textures/can.png 
+robosuite/models/assets/textures/ceramic.png +robosuite/models/assets/textures/cereal.png +robosuite/models/assets/textures/clay.png +robosuite/models/assets/textures/cream-plaster.png +robosuite/models/assets/textures/dark-wood.png +robosuite/models/assets/textures/dirt.png +robosuite/models/assets/textures/glass.png +robosuite/models/assets/textures/gray-felt.png +robosuite/models/assets/textures/gray-plaster.png +robosuite/models/assets/textures/gray-woodgrain.png +robosuite/models/assets/textures/green-wood.png +robosuite/models/assets/textures/lemon.png +robosuite/models/assets/textures/light-gray-floor-tile.png +robosuite/models/assets/textures/light-gray-plaster.png +robosuite/models/assets/textures/light-wood.png +robosuite/models/assets/textures/metal.png +robosuite/models/assets/textures/pink-plaster.png +robosuite/models/assets/textures/red-wood.png +robosuite/models/assets/textures/soda.png +robosuite/models/assets/textures/steel-brushed.png +robosuite/models/assets/textures/steel-scratched.png +robosuite/models/assets/textures/white-bricks.png +robosuite/models/assets/textures/white-plaster.png +robosuite/models/assets/textures/wood-tiles.png +robosuite/models/assets/textures/wood-varnished-panels.png +robosuite/models/assets/textures/yellow-plaster.png +robosuite/models/bases/__init__.py +robosuite/models/bases/floating_legged_base.py +robosuite/models/bases/leg_base_model.py +robosuite/models/bases/mobile_base_model.py +robosuite/models/bases/mount_model.py +robosuite/models/bases/no_actuation_base.py +robosuite/models/bases/null_base.py +robosuite/models/bases/null_base_model.py +robosuite/models/bases/null_mobile_base.py +robosuite/models/bases/null_mount.py +robosuite/models/bases/omron_mobile_base.py +robosuite/models/bases/rethink_minimal_mount.py +robosuite/models/bases/rethink_mount.py +robosuite/models/bases/robot_base_factory.py +robosuite/models/bases/robot_base_model.py +robosuite/models/bases/spot_base.py 
+robosuite/models/grippers/__init__.py +robosuite/models/grippers/bd_gripper.py +robosuite/models/grippers/fourier_hands.py +robosuite/models/grippers/gripper_factory.py +robosuite/models/grippers/gripper_model.py +robosuite/models/grippers/gripper_tester.py +robosuite/models/grippers/inspire_hands.py +robosuite/models/grippers/jaco_three_finger_gripper.py +robosuite/models/grippers/null_gripper.py +robosuite/models/grippers/panda_gripper.py +robosuite/models/grippers/rethink_gripper.py +robosuite/models/grippers/robotiq_140_gripper.py +robosuite/models/grippers/robotiq_85_gripper.py +robosuite/models/grippers/robotiq_three_finger_gripper.py +robosuite/models/grippers/wiping_gripper.py +robosuite/models/grippers/xarm7_gripper.py +robosuite/models/objects/__init__.py +robosuite/models/objects/generated_objects.py +robosuite/models/objects/object_groups.py +robosuite/models/objects/objects.py +robosuite/models/objects/xml_objects.py +robosuite/models/objects/composite/__init__.py +robosuite/models/objects/composite/bin.py +robosuite/models/objects/composite/cone.py +robosuite/models/objects/composite/hammer.py +robosuite/models/objects/composite/hollow_cylinder.py +robosuite/models/objects/composite/hook_frame.py +robosuite/models/objects/composite/lid.py +robosuite/models/objects/composite/pot_with_handles.py +robosuite/models/objects/composite/stand_with_mount.py +robosuite/models/objects/composite_body/__init__.py +robosuite/models/objects/composite_body/hinged_box.py +robosuite/models/objects/composite_body/ratcheting_wrench.py +robosuite/models/objects/group/__init__.py +robosuite/models/objects/group/transport.py +robosuite/models/objects/primitive/__init__.py +robosuite/models/objects/primitive/ball.py +robosuite/models/objects/primitive/box.py +robosuite/models/objects/primitive/capsule.py +robosuite/models/objects/primitive/cylinder.py +robosuite/models/robots/__init__.py +robosuite/models/robots/compositional.py +robosuite/models/robots/robot_model.py 
+robosuite/models/robots/manipulators/__init__.py +robosuite/models/robots/manipulators/baxter_robot.py +robosuite/models/robots/manipulators/gr1_robot.py +robosuite/models/robots/manipulators/humanoid_model.py +robosuite/models/robots/manipulators/humanoid_upperbody_model.py +robosuite/models/robots/manipulators/iiwa_robot.py +robosuite/models/robots/manipulators/jaco_robot.py +robosuite/models/robots/manipulators/kinova3_robot.py +robosuite/models/robots/manipulators/legged_manipulator_model.py +robosuite/models/robots/manipulators/manipulator_model.py +robosuite/models/robots/manipulators/panda_robot.py +robosuite/models/robots/manipulators/sawyer_robot.py +robosuite/models/robots/manipulators/spot_arm.py +robosuite/models/robots/manipulators/tiago_robot.py +robosuite/models/robots/manipulators/ur5e_robot.py +robosuite/models/robots/manipulators/xarm7_robot.py +robosuite/models/tasks/__init__.py +robosuite/models/tasks/manipulation_task.py +robosuite/models/tasks/task.py +robosuite/renderers/__init__.py +robosuite/renderers/base.py +robosuite/renderers/base_parser.py +robosuite/renderers/context/__init__.py +robosuite/renderers/context/egl_context.py +robosuite/renderers/context/glfw_context.py +robosuite/renderers/context/osmesa_context.py +robosuite/renderers/viewer/__init__.py +robosuite/renderers/viewer/mjviewer_renderer.py +robosuite/renderers/viewer/opencv_renderer.py +robosuite/robots/__init__.py +robosuite/robots/fixed_base_robot.py +robosuite/robots/legged_robot.py +robosuite/robots/mobile_robot.py +robosuite/robots/robot.py +robosuite/robots/wheeled_robot.py +robosuite/scripts/browse_mjcf_model.py +robosuite/scripts/check_custom_robot_model.py +robosuite/scripts/collect_human_demonstrations.py +robosuite/scripts/compile_mjcf_model.py +robosuite/scripts/make_reset_video.py +robosuite/scripts/playback_demonstrations_from_hdf5.py +robosuite/scripts/print_robosuite_info.py +robosuite/scripts/print_robot_action_info.py 
+robosuite/scripts/render_dataset_with_omniverse.py +robosuite/scripts/setup_macros.py +robosuite/scripts/tune_camera.py +robosuite/scripts/tune_joints.py +robosuite/scripts/internal/view_robot_initialization.py +robosuite/utils/__init__.py +robosuite/utils/binding_utils.py +robosuite/utils/buffers.py +robosuite/utils/camera_utils.py +robosuite/utils/control_utils.py +robosuite/utils/errors.py +robosuite/utils/ik_utils.py +robosuite/utils/input_utils.py +robosuite/utils/log_utils.py +robosuite/utils/mjcf_utils.py +robosuite/utils/mjmod.py +robosuite/utils/numba.py +robosuite/utils/observables.py +robosuite/utils/placement_samplers.py +robosuite/utils/robot_composition_utils.py +robosuite/utils/robot_utils.py +robosuite/utils/sim_utils.py +robosuite/utils/traj_utils.py +robosuite/utils/transform_utils.py +robosuite/wrappers/__init__.py +robosuite/wrappers/data_collection_wrapper.py +robosuite/wrappers/demo_sampler_wrapper.py +robosuite/wrappers/domain_randomization_wrapper.py +robosuite/wrappers/gym_wrapper.py +robosuite/wrappers/visualization_wrapper.py +robosuite/wrappers/wrapper.py \ No newline at end of file diff --git a/robosuite.egg-info/dependency_links.txt b/robosuite.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/robosuite.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/robosuite.egg-info/eager_resources.txt b/robosuite.egg-info/eager_resources.txt new file mode 100644 index 0000000000000000000000000000000000000000..72e8ffc0db8aad71a934dd11e5968bd5109e54b4 --- /dev/null +++ b/robosuite.egg-info/eager_resources.txt @@ -0,0 +1 @@ +* diff --git a/robosuite.egg-info/requires.txt b/robosuite.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..3dc599553ed060790f563b217035003ad085ac41 --- /dev/null +++ b/robosuite.egg-info/requires.txt @@ -0,0 +1,14 @@ +numpy>=1.13.3 +numba>=0.49.1 +scipy>=1.2.3 
+mujoco>=3.3.0 +qpsolvers[quadprog]>=4.3.1 +Pillow +opencv-python +pynput +termcolor +pytest +tqdm + +[mink] +mink==0.0.5 diff --git a/robosuite.egg-info/top_level.txt b/robosuite.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..e28915e0b67aa41f881df7e7a2021478486043d9 --- /dev/null +++ b/robosuite.egg-info/top_level.txt @@ -0,0 +1 @@ +robosuite diff --git a/tactile_tasks/__init__.py b/tactile_tasks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..38bf389e45491a54d04fa734dd1a2524c22f40de --- /dev/null +++ b/tactile_tasks/__init__.py @@ -0,0 +1,2 @@ +from tactile_tasks.uskin_sensor import USkinSensor +from tactile_tasks.motion_planner import MotionPlanner diff --git a/tactile_tasks/__pycache__/__init__.cpython-310.pyc b/tactile_tasks/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eafd09c0264f558084d496aae73a616a65dfd63d Binary files /dev/null and b/tactile_tasks/__pycache__/__init__.cpython-310.pyc differ diff --git a/tactile_tasks/__pycache__/collect_data.cpython-310.pyc b/tactile_tasks/__pycache__/collect_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f6cbcacba88ea020da90dafc5b8613b26faa729 Binary files /dev/null and b/tactile_tasks/__pycache__/collect_data.cpython-310.pyc differ diff --git a/tactile_tasks/__pycache__/motion_planner.cpython-310.pyc b/tactile_tasks/__pycache__/motion_planner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0774d241f203842d26ebb75b8023ce1523f3a92b Binary files /dev/null and b/tactile_tasks/__pycache__/motion_planner.cpython-310.pyc differ diff --git a/tactile_tasks/__pycache__/sawyer_ik.cpython-310.pyc b/tactile_tasks/__pycache__/sawyer_ik.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98b3fa721832da1cc36b2846f1b4754ce3f8b992 Binary files /dev/null and 
#!/usr/bin/env python3
"""
Data collection for contact-rich manipulation with tactile sensing.
Fixed version: improved alignment loops, better z calculation, stable success check.
"""

import os
import sys
import argparse
from datetime import datetime

import numpy as np
import h5py

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import robosuite
from tactile_tasks.uskin_sensor import USkinSensor


# ---- OSC_POSE helpers (7D action: [Δx, Δy, Δz, Δrx, Δry, Δrz, gripper]) ----

def get_eef_pos(env):
    """Return a copy of the current end-effector site position in world frame.

    NOTE(review): hard-codes the "right" arm's EEF site — confirm this is
    correct for any bimanual robot configuration.
    """
    return env.sim.data.site_xpos[env.robots[0].eef_site_id["right"]].copy()


def get_eef_z_axis(env):
    """Get EEF z-axis in world frame."""
    # site_xmat is a flat 9-vector; columns of the 3x3 are the site frame axes.
    mat = env.sim.data.site_xmat[env.robots[0].eef_site_id["right"]].reshape(3, 3)
    return mat[:, 2].copy()


def get_ori_correction(env, ori_gain=1.0):
    """
    Compute axis-angle delta to rotate EEF z-axis toward [0,0,-1] (vertical down).
    Returns 3D orientation delta for OSC_POSE action.
    With goal_update_mode='achieved', delta is applied relative to current orientation.
    """
    z_axis = get_eef_z_axis(env)
    target_z = np.array([0.0, 0.0, -1.0])
    # Rotation axis is the cross product; its norm is sin(angle) between axes.
    cross = np.cross(z_axis, target_z)
    sin_a = np.linalg.norm(cross)
    if sin_a < 1e-6:
        return np.zeros(3)  # already aligned
    axis = cross / sin_a
    cos_a = np.dot(z_axis, target_z)
    # atan2(sin, cos) is numerically stable for angles near 0 and pi.
    angle = np.arctan2(sin_a, cos_a)
    return np.clip(axis * angle * ori_gain, -1, 1)


def move_action(env, target_pos, gain=5.0, gripper=0.0, ori_gain=0.0):
    """7D action: position delta + orientation delta + gripper.

    The position delta is a proportional term (gain * error) clipped to the
    controller's [-1, 1] action range; ori correction is only applied when
    ori_gain > 0.
    """
    delta = target_pos - get_eef_pos(env)
    delta = np.clip(delta * gain, -1, 1)
    ori = get_ori_correction(env, ori_gain) if ori_gain > 0 else np.zeros(3)
    return np.concatenate([delta, ori, [gripper]])


def at_target(env, target_pos, threshold=0.01):
    """True when the EEF is within `threshold` meters of `target_pos`."""
    return np.linalg.norm(get_eef_pos(env) - target_pos) < threshold


def grip_action(env, gripper=1.0, ori_gain=0.0):
    """7D action with zero translation: gripper command plus optional ori fix."""
    ori = get_ori_correction(env, ori_gain) if ori_gain > 0 else np.zeros(3)
    return np.concatenate([[0, 0, 0], ori, [gripper]])


def is_upright(env, body_id, tol=0.15):
    """True if the body's local z-axis is near world vertical (either sign).

    `tol` bounds |1 - |z_world.z||; abs() means an upside-down body also
    counts as "upright" — presumably intentional for symmetric objects.
    """
    import mujoco
    quat = env.sim.data.body_xquat[body_id].copy()
    mat = np.zeros(9)
    mujoco.mju_quat2Mat(mat, quat)
    z_axis = mat.reshape(3, 3)[:, 2]
    return abs(z_axis[2]) > (1.0 - tol)


# ---- Core alignment primitive ----
# Key fix: continuous feedback loop that drives OBJECT xy to TARGET xy
# instead of one-shot correction. Runs until converged or max_steps.

def align_object_to_xy(env, sensor, recorder, get_obj_xy_fn, target_xy,
                       gripper=1.0, xy_tol=0.003, max_steps=80, gain=8.0,
                       ori_gain=0.0):
    """
    Continuously correct EEF so that the held object's xy matches target_xy.
    ori_gain>0 applies vertical orientation correction (OSC_POSE).
    """
    for _ in range(max_steps):
        if recorder.done:
            break
        obj_xy = get_obj_xy_fn()
        eef = get_eef_pos(env)
        err = target_xy - obj_xy
        if np.linalg.norm(err) < xy_tol:
            break
        # Command the EEF toward where the OBJECT needs to go; per-step xy
        # offset is capped at 5 cm to keep motion smooth.
        target = eef.copy()
        target[:2] += np.clip(err * gain, -0.05, 0.05)
        action = move_action(env, target, gain=5.0, gripper=gripper, ori_gain=ori_gain)
        recorder.step(env, sensor, action)

    # Hold position briefly so the arm (and held object) settles before the
    # caller takes its next measurement.
    eef_hold = get_eef_pos(env).copy()
    recorder.run_for(env, sensor,
                     lambda: move_action(env, eef_hold, gain=5.0, gripper=gripper,
                                         ori_gain=ori_gain),
                     steps=8)


def descend_with_alignment(env, sensor, recorder, get_obj_xy_fn, target_xy,
                           target_z, gripper=1.0, z_tol=0.005,
                           xy_gain=8.0, z_gain=5.0, max_steps=80,
                           ori_gain=0.0):
    """
    Descend to target_z while keeping object aligned over target_xy.
    ori_gain>0 applies vertical orientation correction.

    xy error is measured on the OBJECT, z error on the EEF; both corrections
    are clipped per step (5 cm xy, 15 cm z) before being sent as one action.
    """
    for _ in range(max_steps):
        if recorder.done:
            break
        obj_xy = get_obj_xy_fn()
        eef = get_eef_pos(env)
        xy_err = target_xy - obj_xy
        z_err = target_z - eef[2]

        target = eef.copy()
        target[:2] += np.clip(xy_err * xy_gain, -0.05, 0.05)
        target[2] += np.clip(z_err * z_gain, -0.15, 0.15)

        action = move_action(env, target, gain=5.0, gripper=gripper, ori_gain=ori_gain)
        recorder.step(env, sensor, action)

        # Converged only when BOTH height and lateral alignment are satisfied.
        if abs(z_err) < z_tol and np.linalg.norm(xy_err) < 0.005:
            break


# ---- Task policies ----

def run_precision_grasp(env, sensor, recorder):
    """Scripted policy: approach from above, grasp, and lift a single object.

    NOTE(review): `obj_pos` is captured once at the start — assumes the
    object does not move before the gripper reaches it.
    """
    obj_pos = env.sim.data.body_xpos[env.obj_body_id].copy()

    # Pre-grasp waypoint 8 cm above the object, gripper open (-1).
    above = obj_pos.copy(); above[2] += 0.08
    recorder.run_until(env, sensor, lambda: move_action(env, above, gain=5.0, gripper=-1),
                       done_fn=lambda: at_target(env, above, 0.01), max_steps=80)

    recorder.run_until(env, sensor, lambda: move_action(env, obj_pos, gain=5.0, gripper=-1),
                       done_fn=lambda: at_target(env, obj_pos, 0.01), max_steps=60)

    # Close gripper and let contact forces settle.
    recorder.run_for(env, sensor, lambda: grip_action(env, 1.0), steps=15)

    lift_pos = get_eef_pos(env).copy(); lift_pos[2] += 0.12
    recorder.run_until(env, sensor, lambda: move_action(env, lift_pos, gain=5.0, gripper=1.0),
                       done_fn=lambda: at_target(env, lift_pos, 0.01), max_steps=60)


def grasp_object(env, sensor, recorder, obj_body_id, obj_geoms,
                 descend_gain=3.0, grasp_steps=30, retry_dz=-0.012):
    """
    General-purpose grasp primitive.
    Returns measured (eef_pos - obj_pos) offset after successful grasp,
    so callers can use it for accurate downstream z calculations.
    Tries multiple z heights if first attempt fails.

    NOTE(review): if all three attempts fail, the gripper is left OPEN and
    the returned offset is measured anyway — callers cannot distinguish
    failure from success from the return value alone.
    """
    obj_pos = env.sim.data.body_xpos[obj_body_id].copy()

    # Move above object
    above = obj_pos.copy(); above[2] += 0.12
    recorder.run_until(env, sensor, lambda: move_action(env, above, gain=5.0, gripper=-1),
                       done_fn=lambda: at_target(env, above, 0.01), max_steps=80)

    # Try grasping at progressively lower z positions
    # Start at obj center, try up to 3 heights spaced 12mm apart
    for dz in [0.0, retry_dz, retry_dz * 2]:
        grasp_target = obj_pos.copy(); grasp_target[2] += dz
        # Lambdas bind the current target via default args to avoid the
        # late-binding closure pitfall.
        recorder.run_until(env, sensor,
                           lambda t=grasp_target: move_action(env, t, gain=descend_gain, gripper=-1),
                           done_fn=lambda t=grasp_target: at_target(env, t, 0.006),
                           max_steps=80)
        recorder.run_for(env, sensor, lambda: grip_action(env, 1.0), steps=grasp_steps)
        if env._check_grasp(gripper=env.robots[0].gripper, object_geoms=obj_geoms):
            break
        # Re-open for next retry
        recorder.run_for(env, sensor, lambda: grip_action(env, -1.0), steps=10)

    # Measure actual offset EEF→obj immediately after grasp
    eef_after = get_eef_pos(env).copy()
    obj_after = env.sim.data.body_xpos[obj_body_id].copy()
    eef_to_obj_offset = eef_after - obj_after  # typically [~0, ~0, +z] (EEF above obj center)

    return eef_to_obj_offset


def run_peg_insertion(env, sensor, recorder):
    """Scripted policy: grasp a peg, align it over a hole, and insert it."""
    # ── Phase 1: Grasp peg ──────────────────────────────────────────────────
    eef_to_peg = grasp_object(
        env, sensor, recorder,
        obj_body_id=env.peg_body_id,
        obj_geoms=env.peg,
        descend_gain=3.0,
        grasp_steps=30,
        retry_dz=-0.012,
    )

    # ── Phase 2: Lift high ──────────────────────────────────────────────────
    lift_pos = get_eef_pos(env).copy(); lift_pos[2] += 0.22
    recorder.run_until(env, sensor, lambda: move_action(env, lift_pos, gain=5.0, gripper=1.0),
                       done_fn=lambda: at_target(env, lift_pos, 0.01), max_steps=100)

    # ── Phase 3: Move to above hole ──────────────────────────────────────
    hole_pos = env.sim.data.body_xpos[env.hole_body_id].copy()
    hole_xy = hole_pos[:2].copy()

    # Translate laterally at the current height.
    above_hole = np.array([hole_xy[0], hole_xy[1], get_eef_pos(env)[2]])
    recorder.run_until(env, sensor,
                       lambda: move_action(env, above_hole, gain=4.0, gripper=1.0),
                       done_fn=lambda: at_target(env, above_hole, 0.01), max_steps=120)

    # ── Phase 4: Position align + gradual orientation correction ────────────
    # First: precise xy alignment (no ori)
    align_object_to_xy(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.peg_body_id][:2].copy(),
        target_xy=hole_xy,
        gripper=1.0, xy_tol=0.002, max_steps=80, gain=10.0, ori_gain=0.0,
    )

    # Gradual ori ramp with continuous position correction
    # (ori_gain ramps linearly 0 → 1.5 over the first 150 steps so the wrist
    # rotates slowly while xy alignment is continuously maintained).
    for step in range(200):
        if recorder.done:
            break
        peg_xy = env.sim.data.body_xpos[env.peg_body_id][:2].copy()
        eef = get_eef_pos(env)
        target = eef.copy()
        target[:2] += (hole_xy - peg_xy) * 3.0
        og = min(1.5, step / 150.0 * 1.5)
        action = move_action(env, target, gain=10.0, gripper=1.0, ori_gain=og)
        recorder.step(env, sensor, action)

    # Re-measure offset with final orientation
    eef_settled = get_eef_pos(env).copy()
    peg_settled = env.sim.data.body_xpos[env.peg_body_id].copy()
    eef_to_peg = eef_settled - peg_settled

    # ── Phase 5: Descend to just above hole ─────────────────────────────────
    # NOTE(review): formula assumes peg_body_id is at the peg's CENTER and
    # env.peg_height is the full offset from center to tip — confirm against
    # the env definition.
    hole_top_z = hole_pos[2] + env.hole_height * 0.5
    target_eef_z = hole_top_z + 0.005 + eef_to_peg[2] + env.peg_height

    descend_with_alignment(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.peg_body_id][:2].copy(),
        target_xy=hole_xy,
        target_z=target_eef_z,
        gripper=1.0, z_tol=0.005, xy_gain=10.0, z_gain=4.0,
        max_steps=120, ori_gain=0.5,
    )

    # ── Phase 6: Insertion — push down with xy + ori corrections ────────────
    for _ in range(150):
        if recorder.done:
            break
        peg_xy = env.sim.data.body_xpos[env.peg_body_id][:2].copy()
        xy_err = hole_xy - peg_xy
        ori = get_ori_correction(env, ori_gain=0.5)
        # Constant slow downward push with high-gain lateral servoing.
        action = np.zeros(7)
        action[:2] = np.clip(xy_err * 15.0, -1, 1)
        action[2] = -0.15
        action[3:6] = ori
        action[6] = 1.0
        recorder.step(env, sensor, action)
        if env._check_success():
            break

    # ── Phase 7: Release and retreat ─────────────────────────────────────────
    recorder.run_for(env, sensor, lambda: grip_action(env, -1.0), steps=12)
    retreat = get_eef_pos(env).copy(); retreat[2] += 0.10
    recorder.run_until(env, sensor, lambda: move_action(env, retreat, gain=4.0, gripper=-1),
                       done_fn=lambda: at_target(env, retreat, 0.01), max_steps=50)


def run_gentle_stack(env, sensor, recorder):
    """Scripted policy: stack a box gently on top of a can, using tactile
    feedback to stop the final descent on contact."""
    # ── Phase 1: Grasp box, measure real EEF→box offset ──────────────────────
    # box is flat (half-h ~0.032), grasp at center height
    eef_to_box = grasp_object(
        env, sensor, recorder,
        obj_body_id=env.box_body_id,
        obj_geoms=env.stack_box,
        descend_gain=3.0,
        grasp_steps=30,
        retry_dz=-0.010,
    )

    # ── Phase 2: Lift ─────────────────────────────────────────────────────────
    lift_pos = get_eef_pos(env).copy(); lift_pos[2] += 0.14
    recorder.run_until(env, sensor, lambda: move_action(env, lift_pos, gain=5.0, gripper=1.0),
                       done_fn=lambda: at_target(env, lift_pos, 0.01), max_steps=80)

    eef_hold = get_eef_pos(env).copy()
    recorder.run_for(env, sensor,
                     lambda: move_action(env, eef_hold, gain=8.0, gripper=1.0),
                     steps=12)

    # Re-measure offset after settling (box may shift in grip)
    eef_settled = get_eef_pos(env).copy()
    box_settled = env.sim.data.body_xpos[env.box_body_id].copy()
    eef_to_box = eef_settled - box_settled  # [~0, ~0, +z]: EEF is above box center

    # ── Phase 3: Align box xy over can ───────────────────────────────────────
    can_pos = env.sim.data.body_xpos[env.can_body_id].copy()
    can_xy = can_pos[:2].copy()

    align_object_to_xy(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.box_body_id][:2].copy(),
        target_xy=can_xy,
        gripper=1.0,
        xy_tol=0.005,
        max_steps=100,
        gain=8.0,
    )

    # ── Phase 4: Descend to just above can top ────────────────────────────────
    # Key formula:
    #   box_bottom_z = eef_z - eef_to_box[2] - box_half_h
    #   want box_bottom_z = can_top_z + small_clearance
    #   → target_eef_z = can_top_z + clearance + eef_to_box[2] + box_half_h
    can_top_z = can_pos[2] + 0.04  # CylinderObject size=[0.022, 0.04] → half_h=0.04
    # Read actual box half-height from sim geom (BoxObject is randomized, .size unreliable)
    box_geom_names = [n for n in env.sim.model.geom_names
                      if env.stack_box.root_body.lower() in n.lower()]
    if box_geom_names:
        gid = env.sim.model.geom_name2id(box_geom_names[0])
        box_half_h = env.sim.model.geom_size[gid][2]  # z half-extent
    else:
        box_half_h = 0.032  # fallback: midpoint of size_min/size_max z
    target_eef_z = can_top_z + 0.010 + eef_to_box[2] + box_half_h

    descend_with_alignment(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.box_body_id][:2].copy(),
        target_xy=can_xy,
        target_z=target_eef_z,
        gripper=1.0,
        z_tol=0.006,
        xy_gain=6.0,
        z_gain=3.0,
        max_steps=120,
    )

    # ── Phase 5: Gentle final push until contact ──────────────────────────────
    for _ in range(80):
        if recorder.done:
            break
        box_xy = env.sim.data.body_xpos[env.box_body_id][:2].copy()
        xy_err = can_xy - box_xy
        action = np.zeros(7)
        action[:2] = np.clip(xy_err * 8.0, -0.3, 0.3)
        action[2] = -0.004  # very slow descent
        action[6] = 1.0  # gripper closed
        recorder.step(env, sensor, action)

        # Stop on tactile contact
        # NOTE(review): threshold 1.0 is in the sensor's force units — confirm
        # calibration against USkinSensor.get_force_magnitudes().
        mags = sensor.get_force_magnitudes()
        avg_force = (mags["left_finger"].mean() + mags["right_finger"].mean()) / 2.0
        if avg_force > 1.0:
            break

    # ── Phase 6: Hold, release, retreat ──────────────────────────────────────
    eef_hold = get_eef_pos(env).copy()
    recorder.run_for(env, sensor,
                     lambda: move_action(env, eef_hold, gain=8.0, gripper=1.0),
                     steps=20)

    recorder.run_for(env, sensor, lambda: grip_action(env, -1.0), steps=15)

    # Straight-up retreat, slow to avoid disturbing box
    retreat = eef_hold.copy(); retreat[2] += 0.10
    recorder.run_until(env, sensor,
                       lambda: move_action(env, retreat, gain=2.0, gripper=-1),
                       done_fn=lambda: at_target(env, retreat, 0.015), max_steps=60)

    # Wait for box to fully settle (success check happens here)
    recorder.run_for(env, sensor,
                     lambda: move_action(env, retreat, gain=2.0, gripper=-1),
                     steps=35)


# ---- Data recording ----

class EpisodeRecorder:
    def __init__(self, env, sensor):
        self.env = env
        self.sensor = sensor
        self.data = {
            "agentview_image": [], "eye_in_hand_image": [],
            "agentview_depth": [], "eye_in_hand_depth": [],
            "tactile_left": [], "tactile_right": [],
            "joint_pos": [], "joint_vel": [],
            "eef_pos": [], "eef_quat": [],
            "gripper_qpos": [],
            "actions": [], "rewards": [], "success": [],
        }
        self.step_count = 0
        self.done = False

    def step(self, env, sensor, action):
        if self.done:
            return None, 0, True, {}
        obs, reward, done, info = env.step(action)
        self.done = done

        for _ in range(USkinSensor.FREQ_MULTIPLIER):
            td = sensor.update()
            self.data["tactile_left"].append(td["left_finger"].copy())
            self.data["tactile_right"].append(td["right_finger"].copy())

        if "agentview_image" in obs:
            self.data["agentview_image"].append(obs["agentview_image"])
        if "robot0_eye_in_hand_image" in obs:
            self.data["eye_in_hand_image"].append(obs["robot0_eye_in_hand_image"])
        if "agentview_depth" in obs:
            self.data["agentview_depth"].append(obs["agentview_depth"])
        if "robot0_eye_in_hand_depth" in obs:
            self.data["eye_in_hand_depth"].append(obs["robot0_eye_in_hand_depth"])

        robot
= env.robots[0] + self.data["joint_pos"].append(np.array(env.sim.data.qpos[robot._ref_joint_pos_indexes])) + self.data["joint_vel"].append(np.array(env.sim.data.qvel[robot._ref_joint_vel_indexes])) + self.data["eef_pos"].append(get_eef_pos(env)) + + eef_mat = env.sim.data.site_xmat[robot.eef_site_id["right"]].reshape(3, 3) + import mujoco + quat = np.zeros(4) + mujoco.mju_mat2Quat(quat, eef_mat.flatten()) + self.data["eef_quat"].append(quat) + + gripper_idx = robot._ref_gripper_joint_pos_indexes.get("right", []) + if gripper_idx: + self.data["gripper_qpos"].append(np.array(env.sim.data.qpos[gripper_idx])) + else: + self.data["gripper_qpos"].append(np.array([action[-1]])) + + self.data["actions"].append(action) + self.data["rewards"].append(reward) + self.data["success"].append(env._check_success()) + self.step_count += 1 + return obs, reward, done, info + + def run_for(self, env, sensor, action_fn, steps): + for _ in range(steps): + if self.done: + break + self.step(env, sensor, action_fn()) + + def run_until(self, env, sensor, action_fn, done_fn, max_steps): + for _ in range(max_steps): + if self.done: + break + self.step(env, sensor, action_fn()) + if done_fn(): + break + + def finalize(self): + for key in self.data: + if len(self.data[key]) > 0: + self.data[key] = np.array(self.data[key]) + else: + self.data[key] = np.array([]) + self.data["task_success"] = bool(any(self.data["success"])) if len(self.data["success"]) > 0 else False + self.data["n_steps"] = self.step_count + return self.data + + +# ---- Task configs ---- + +TASK_CONFIGS = { + "precision_grasp": { + "env_class": "PrecisionGrasp", + "run_fn": run_precision_grasp, + "horizon": 300, + "controller": "OSC_POSE", + }, + "peg_insertion": { + "env_class": "PegInsertion", + "run_fn": run_peg_insertion, + "horizon": 600, + "controller": "OSC_POSE", + }, + "gentle_stack": { + "env_class": "GentleStack", + "run_fn": run_gentle_stack, + "horizon": 400, + "controller": "OSC_POSE", + }, +} + + +def 
create_env(task_name, has_renderer=False, camera_names=None): + from tactile_tasks.envs.precision_grasp import PrecisionGrasp + from tactile_tasks.envs.peg_insertion import PegInsertion + from tactile_tasks.envs.gentle_stack import GentleStack + + env_classes = { + "PrecisionGrasp": PrecisionGrasp, + "PegInsertion": PegInsertion, + "GentleStack": GentleStack, + } + + config = TASK_CONFIGS[task_name] + EnvClass = env_classes[config["env_class"]] + + if camera_names is None: + camera_names = ["agentview", "robot0_eye_in_hand"] + + controller_configs = { + "type": "BASIC", + "body_parts": { + "right": { + "type": "OSC_POSE", + "input_max": 1, + "input_min": -1, + "output_max": [0.15, 0.15, 0.15, 0.5, 0.5, 0.5], + "output_min": [-0.15, -0.15, -0.15, -0.5, -0.5, -0.5], + "kp": 200, + "damping_ratio": 1, + "impedance_mode": "fixed", + "kp_limits": [0, 400], + "damping_ratio_limits": [0, 10], + "position_limits": None, + "uncouple_pos_ori": True, + "input_type": "delta", + "input_ref_frame": "base", + "interpolation": None, + "ramp_ratio": 0.2, + "gripper": {"type": "GRIP"}, + } + }, + } + + env = EnvClass( + robots="Sawyer", + gripper_types="Robotiq85Gripper", + controller_configs=controller_configs, + has_renderer=has_renderer, + has_offscreen_renderer=True, + use_camera_obs=True, + use_object_obs=True, + control_freq=20, + horizon=config["horizon"], + camera_names=camera_names, + camera_heights=256, + camera_widths=256, + camera_depths=True, + reward_shaping=True, + renderer="mjviewer", + ) + return env + + +def save_episode_hdf5(filepath, episode_data, task_name): + """Save a single episode to its own HDF5 file.""" + os.makedirs(os.path.dirname(filepath), exist_ok=True) + with h5py.File(filepath, "w") as f: + meta = f.create_group("metadata") + meta.attrs["task"] = task_name + meta.attrs["robot"] = "Sawyer" + meta.attrs["gripper"] = "Robotiq85" + meta.attrs["tactile_sensor"] = "uSkin_4x4" + meta.attrs["controller"] = TASK_CONFIGS.get(task_name, {}).get("controller", 
"OSC_POSE") + meta.attrs["control_freq"] = 20 + meta.attrs["tactile_freq"] = 100 + meta.attrs["camera_freq"] = 20 + meta.attrs["created"] = datetime.now().isoformat() + + f.attrs["success"] = bool(episode_data["task_success"]) + f.attrs["n_steps"] = int(episode_data["n_steps"]) + + for key, value in episode_data.items(): + if isinstance(value, np.ndarray) and value.size > 0: + if "image" in key or "depth" in key: + f.create_dataset(key, data=value, compression="gzip", compression_opts=4) + else: + f.create_dataset(key, data=value) + elif isinstance(value, (bool, int, float)): + f.attrs[key] = value + + +def collect_task_data(task_name, n_episodes=1, save_dir="./tactile_data", visualize=False, + max_attempts=500): + """Collect n_episodes SUCCESSFUL episodes. Failed episodes are discarded.""" + print(f"\n{'='*60}") + print(f"Task: {task_name} | Target: {n_episodes} successful episodes") + print(f"{'='*60}\n") + + task_dir = os.path.join(save_dir, task_name) + os.makedirs(task_dir, exist_ok=True) + config = TASK_CONFIGS[task_name] + env = create_env(task_name, has_renderer=visualize) + successes = 0 + attempts = 0 + + while successes < n_episodes and attempts < max_attempts: + attempts += 1 + print(f"Attempt {attempts} | Saved: {successes}/{n_episodes}") + obs = env.reset() + for _ in range(80): + obs, _, _, _ = env.step(np.zeros(env.action_dim)) + + sensor = USkinSensor(env.sim, gripper_prefix="gripper0_right_", noise_std=0.005) + recorder = EpisodeRecorder(env, sensor) + config["run_fn"](env, sensor, recorder) + episode_data = recorder.finalize() + + success = episode_data["task_success"] + print(f" Steps: {episode_data['n_steps']}, " + f"Success: {success}, " + f"Reward: {episode_data['rewards'].sum():.1f}") + + if success: + filepath = os.path.join(task_dir, f"episode_{successes:02d}.hdf5") + save_episode_hdf5(filepath, episode_data, task_name) + successes += 1 + print(f" -> Saved as episode_{successes-1:02d}.hdf5") + else: + print(f" -> Discarded (failed)") + + 
print(f"\nDone: {successes}/{n_episodes} successful episodes in {attempts} attempts") + env.close() + return task_dir + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--task", type=str, default="precision_grasp", + choices=list(TASK_CONFIGS.keys()) + ["all"]) + parser.add_argument("--n_episodes", type=int, default=1) + parser.add_argument("--save_dir", type=str, default="./tactile_data") + parser.add_argument("--visualize", action="store_true") + args = parser.parse_args() + + tasks = list(TASK_CONFIGS.keys()) if args.task == "all" else [args.task] + for task in tasks: + collect_task_data(task, args.n_episodes, args.save_dir, args.visualize) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/tactile_tasks/convert_for_act.py b/tactile_tasks/convert_for_act.py new file mode 100644 index 0000000000000000000000000000000000000000..5b52019124144049076f1ba5d46baf985de722d8 --- /dev/null +++ b/tactile_tasks/convert_for_act.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +""" +Convert tactile_data HDF5 episodes to ACT training format. 
"""
Input:  tactile_data/{task}/episode_XX.hdf5 (our format)
Output: policy/ACT/processed_data/sim-{task}/{config}-{num}/episode_{i}.hdf5

ACT expects per-episode HDF5 with:
    /action                          [T, action_dim]
    /observations/qpos               [T, state_dim]
    /observations/images/{cam_name}  [T, H, W, 3]
"""

import os
import sys
import json
import argparse
import numpy as np

# h5py is required by the conversion functions but not by the pure
# config-file helper, so tolerate its absence at import time.
try:
    import h5py
except ImportError:
    h5py = None


def convert_episode(src_path, dst_path, camera_names):
    """Convert a single episode from our format to ACT format.

    Builds an 8-D state (7 arm joints + 1 normalized gripper) and pads the
    7-D OSC_POSE action to 8-D so state/action share one dim (ACT uses the
    same dim for both).
    """
    with h5py.File(src_path, "r") as src:
        T = src["actions"].shape[0]

        # State: joint_pos(7) + normalized gripper(1) = 8D
        joint_pos = src["joint_pos"][:]        # [T, 7]
        gripper_qpos = src["gripper_qpos"][:]  # [T, G]; column 0 is the finger joint
        # Normalize first gripper joint: [0, 0.8] → [0, 1]
        # NOTE(review): assumes Robotiq85 finger_joint range is [0, 0.8] — confirm
        gripper_norm = gripper_qpos[:, 0:1] / 0.8
        qpos = np.concatenate([joint_pos, gripper_norm], axis=1).astype(np.float32)  # [T, 8]

        # Actions: 7D OSC_POSE → pad with a zero column to 8D
        actions_7d = src["actions"][:].astype(np.float32)  # [T, 7]
        actions = np.concatenate([actions_7d, np.zeros((T, 1), dtype=np.float32)], axis=1)

        # Camera name mapping: our names → ACT cam names
        cam_map = {
            "agentview": "agentview_image",
            "eye_in_hand": "eye_in_hand_image",
        }

        # Guard: os.makedirs("") raises when dst_path has no directory part.
        parent = os.path.dirname(dst_path)
        if parent:
            os.makedirs(parent, exist_ok=True)
        with h5py.File(dst_path, "w") as dst:
            dst.create_dataset("/action", data=actions)
            obs = dst.create_group("/observations")
            obs.create_dataset("qpos", data=qpos)
            img_group = obs.create_group("images")
            for cam_name in camera_names:
                src_key = cam_map.get(cam_name, cam_name + "_image")
                if src_key in src:
                    img_group.create_dataset(cam_name, data=src[src_key][:])
                else:
                    print(f" Warning: camera '{src_key}' not found in {src_path}")


def convert_task(task_name, data_dir, output_dir, config_name="default",
                 camera_names=None):
    """Convert all episodes for a task.

    Returns a dict with ``dataset_dir``, ``num_episodes``, ``episode_len``
    (max over episodes) and ``camera_names`` for the ACT task config.
    """
    if camera_names is None:
        camera_names = ["agentview", "eye_in_hand"]

    task_dir = os.path.join(data_dir, task_name)
    episodes = sorted(f for f in os.listdir(task_dir) if f.endswith(".hdf5"))
    num_episodes = len(episodes)
    if num_episodes == 0:
        # An empty directory would otherwise silently yield episode_len=0.
        print(f" Warning: no .hdf5 episodes found in {task_dir}")

    out_dir = os.path.join(output_dir, f"sim-{task_name}", f"{config_name}-{num_episodes}")
    os.makedirs(out_dir, exist_ok=True)

    max_episode_len = 0
    for i, ep_file in enumerate(episodes):
        src_path = os.path.join(task_dir, ep_file)
        dst_path = os.path.join(out_dir, f"episode_{i}.hdf5")
        convert_episode(src_path, dst_path, camera_names)

        with h5py.File(dst_path, "r") as f:
            ep_len = f["/action"].shape[0]
        max_episode_len = max(max_episode_len, ep_len)

        print(f"  [{i+1}/{num_episodes}] {ep_file} → episode_{i}.hdf5 ({ep_len} steps)")

    print(f"\nConverted {num_episodes} episodes to {out_dir}")
    print(f"Max episode length: {max_episode_len}")

    return {
        "dataset_dir": out_dir,
        "num_episodes": num_episodes,
        "episode_len": max_episode_len,
        "camera_names": camera_names,
    }


def update_task_configs(config_path, task_name, task_info, config_name="default"):
    """Update SIM_TASK_CONFIGS.json with a new task entry (merging, not clobbering).

    Parameters
    ----------
    config_path : str
        Path to SIM_TASK_CONFIGS.json; its parent is treated as the ACT dir.
    task_name, config_name : str
        Used to build the key ``sim-{task}-{config}-{num}``.
    task_info : dict
        Output of ``convert_task``.
    """
    if os.path.exists(config_path):
        with open(config_path, "r", encoding="utf-8") as f:
            content = f.read().strip()
        configs = json.loads(content) if content else {}
    else:
        configs = {}

    key = f"sim-{task_name}-{config_name}-{task_info['num_episodes']}"
    # Store the path relative to the ACT directory (where training runs).
    act_dir = os.path.dirname(config_path)
    rel_dir = os.path.relpath(task_info["dataset_dir"], act_dir)
    configs[key] = {
        "dataset_dir": "./" + rel_dir,
        "num_episodes": task_info["num_episodes"],
        "episode_len": task_info["episode_len"],
        "camera_names": task_info["camera_names"],
    }

    with open(config_path, "w", encoding="utf-8") as f:
        json.dump(configs, f, indent=4)
    print(f"Updated {config_path} with task '{key}'")


def main():
    parser = argparse.ArgumentParser(description="Convert tactile data to ACT format")
parser.add_argument("--data_dir", default="./tactile_data", + help="Source data directory") + parser.add_argument("--output_dir", default="./policy/ACT/processed_data", + help="Output directory for ACT data") + parser.add_argument("--task", default="all", + help="Task name or 'all'") + parser.add_argument("--config_name", default="default") + args = parser.parse_args() + + tasks = ["precision_grasp", "peg_insertion", "gentle_stack"] if args.task == "all" else [args.task] + + config_path = os.path.join(os.path.dirname(args.output_dir), "SIM_TASK_CONFIGS.json") + + for task in tasks: + task_dir = os.path.join(args.data_dir, task) + if not os.path.exists(task_dir): + print(f"Skipping {task}: no data at {task_dir}") + continue + + print(f"\n{'='*50}") + print(f"Converting {task}") + print(f"{'='*50}") + + info = convert_task(task, args.data_dir, args.output_dir, args.config_name) + update_task_configs(config_path, task, info, args.config_name) + + print("\nDone! To train ACT:") + print(f" cd policy/ACT") + print(f" python imitate_episodes.py \\") + print(f" --ckpt_dir ./checkpoints/{{task}}_act \\") + print(f" --policy_class ACT \\") + print(f" --task_name sim-{{task}}-default-{{num}} \\") + print(f" --batch_size 8 --num_epochs 2000 --lr 1e-4 \\") + print(f" --kl_weight 10 --chunk_size 16 \\") + print(f" --hidden_dim 256 --dim_feedforward 2048 \\") + print(f" --state_dim 8 --save_freq 100") + + +if __name__ == "__main__": + main() diff --git a/tactile_tasks/convert_for_dp.py b/tactile_tasks/convert_for_dp.py new file mode 100644 index 0000000000000000000000000000000000000000..7b0faaedfc12f6e3b3bf8ebff72dba608df6b8bd --- /dev/null +++ b/tactile_tasks/convert_for_dp.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 +""" +Convert tactile_data HDF5 episodes to Diffusion Policy (DP) Zarr format. 
"""
Input:  tactile_data/{task}/episode_XX.hdf5 (our format)
Output: policy/DP/data/{task}-{config}-{num}.zarr

DP expects a Zarr archive with:
    data/head_camera   [N, 3, H, W] uint8 NCHW
    data/state         [N, state_dim] float32
    data/action        [N, action_dim] float32
    meta/episode_ends  [num_episodes] int64
"""

import os
import sys
import argparse
import numpy as np
import h5py

try:
    import zarr
except ImportError:
    print("Error: zarr not installed. Run: pip install zarr")
    sys.exit(1)


def convert_task(task_name, data_dir, output_dir, config_name="default"):
    """Convert all episodes for a task to a single Zarr archive.

    Returns the path of the written ``.zarr`` directory.

    Raises
    ------
    ValueError
        If the task directory contains no episodes (previously this
        surfaced as a cryptic ``np.concatenate`` error).
    """
    task_dir = os.path.join(data_dir, task_name)
    episodes = sorted(f for f in os.listdir(task_dir) if f.endswith(".hdf5"))
    num_episodes = len(episodes)
    if num_episodes == 0:
        raise ValueError(f"No .hdf5 episodes found in {task_dir}")

    all_images = []
    all_states = []
    all_actions = []
    episode_ends = []
    total_steps = 0

    for i, ep_file in enumerate(episodes):
        src_path = os.path.join(task_dir, ep_file)
        with h5py.File(src_path, "r") as f:
            T = f["actions"].shape[0]

            # Image: agentview as head_camera, HWC uint8
            images = f["agentview_image"][:]  # [T, H, W, 3]

            # State: joint_pos(7) + normalized gripper(1) = 8D
            joint_pos = f["joint_pos"][:]        # [T, 7]
            gripper_qpos = f["gripper_qpos"][:]
            # NOTE(review): assumes finger_joint range [0, 0.8] — confirm
            gripper_norm = gripper_qpos[:, 0:1] / 0.8
            state = np.concatenate([joint_pos, gripper_norm], axis=1).astype(np.float32)

            # Action: 7D OSC_POSE
            actions = f["actions"][:].astype(np.float32)

            # DP uses (T-1) transitions: state[:-1] → action[1:] shift
            # But for simplicity and to match the original code, keep them aligned
            all_images.append(images)
            all_states.append(state)
            all_actions.append(actions)

        total_steps += T
        episode_ends.append(total_steps)

        print(f"  [{i+1}/{num_episodes}] {ep_file}: {T} steps")

    # Stack all episodes, then NHWC → NCHW for DP's image pipeline.
    all_images = np.concatenate(all_images, axis=0)    # [N, H, W, 3]
    all_states = np.concatenate(all_states, axis=0)    # [N, 8]
    all_actions = np.concatenate(all_actions, axis=0)  # [N, 7]
    episode_ends = np.array(episode_ends, dtype=np.int64)
    all_images = np.moveaxis(all_images, -1, 1)        # [N, 3, H, W]

    print(f"\nTotal: {total_steps} steps from {num_episodes} episodes")
    print(f"Images: {all_images.shape}, States: {all_states.shape}, Actions: {all_actions.shape}")

    # Save as Zarr. NOTE(review): this is the zarr v2 API (zarr.Blosc,
    # create_dataset); zarr>=3 moved compressors to numcodecs — confirm on upgrade.
    save_path = os.path.join(output_dir, f"{task_name}-{config_name}-{num_episodes}.zarr")
    if os.path.exists(save_path):
        import shutil
        shutil.rmtree(save_path)

    os.makedirs(output_dir, exist_ok=True)
    zarr_root = zarr.group(save_path)
    zarr_data = zarr_root.create_group("data")
    zarr_meta = zarr_root.create_group("meta")

    compressor = zarr.Blosc(cname="zstd", clevel=3, shuffle=1)

    zarr_data.create_dataset("head_camera", data=all_images,
                             chunks=(100, *all_images.shape[1:]),
                             overwrite=True, compressor=compressor)
    zarr_data.create_dataset("state", data=all_states,
                             chunks=(100, all_states.shape[1]),
                             dtype="float32", overwrite=True, compressor=compressor)
    zarr_data.create_dataset("action", data=all_actions,
                             chunks=(100, all_actions.shape[1]),
                             dtype="float32", overwrite=True, compressor=compressor)
    zarr_meta.create_dataset("episode_ends", data=episode_ends,
                             dtype="int64", overwrite=True, compressor=compressor)

    print(f"Saved to {save_path}")
    return save_path


def main():
    parser = argparse.ArgumentParser(description="Convert tactile data to DP Zarr format")
    parser.add_argument("--data_dir", default="./tactile_data",
                        help="Source data directory")
    parser.add_argument("--output_dir", default="./policy/DP/data",
                        help="Output directory for Zarr files")
    parser.add_argument("--task", default="all",
                        help="Task name or 'all'")
    parser.add_argument("--config_name", default="default")
    args = parser.parse_args()

    tasks = ["precision_grasp", "peg_insertion", "gentle_stack"] if args.task == "all" else [args.task]

    for task in tasks:
        task_dir = os.path.join(args.data_dir, task)
        if not os.path.exists(task_dir):
            print(f"Skipping {task}: no data at {task_dir}")
            continue

        print(f"\n{'='*50}")
        print(f"Converting {task}")
        print(f"{'='*50}")

        zarr_path = convert_task(task, args.data_dir, args.output_dir, args.config_name)
task_dir = os.path.join(args.data_dir, task) + if not os.path.exists(task_dir): + print(f"Skipping {task}: no data at {task_dir}") + continue + + print(f"\n{'='*50}") + print(f"Converting {task}") + print(f"{'='*50}") + + zarr_path = convert_task(task, args.data_dir, args.output_dir, args.config_name) + + print("\nDone! To train DP:") + print(f" cd policy/DP") + print(f" python train.py --config-name=robot_dp_tactile.yaml \\") + print(f" task.name={{task}} \\") + print(f" task.dataset.zarr_path=data/{{task}}-default-{{num}}.zarr \\") + print(f" training.seed=0 training.device=cuda:0") + + +if __name__ == "__main__": + main() diff --git a/tactile_tasks/envs/__init__.py b/tactile_tasks/envs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce574cee0dad23e6b44034bd84efe399fa4f6b68 --- /dev/null +++ b/tactile_tasks/envs/__init__.py @@ -0,0 +1,3 @@ +from tactile_tasks.envs.precision_grasp import PrecisionGrasp +from tactile_tasks.envs.peg_insertion import PegInsertion +from tactile_tasks.envs.gentle_stack import GentleStack diff --git a/tactile_tasks/envs/__pycache__/__init__.cpython-310.pyc b/tactile_tasks/envs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cab987bde445c15b8b073888c349b49d47c8b3c Binary files /dev/null and b/tactile_tasks/envs/__pycache__/__init__.cpython-310.pyc differ diff --git a/tactile_tasks/envs/__pycache__/contact_slide.cpython-310.pyc b/tactile_tasks/envs/__pycache__/contact_slide.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1493743dba8f5e1539c8ff1b32974d348c111529 Binary files /dev/null and b/tactile_tasks/envs/__pycache__/contact_slide.cpython-310.pyc differ diff --git a/tactile_tasks/envs/__pycache__/gentle_stack.cpython-310.pyc b/tactile_tasks/envs/__pycache__/gentle_stack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a3d7f13d9adfe59843459dbc22e36603aa3e404 Binary files 
/dev/null and b/tactile_tasks/envs/__pycache__/gentle_stack.cpython-310.pyc differ diff --git a/tactile_tasks/envs/__pycache__/peg_insertion.cpython-310.pyc b/tactile_tasks/envs/__pycache__/peg_insertion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6ce8d8fd42b4ef0785743b433948bc01c4d251b Binary files /dev/null and b/tactile_tasks/envs/__pycache__/peg_insertion.cpython-310.pyc differ diff --git a/tactile_tasks/envs/__pycache__/precision_grasp.cpython-310.pyc b/tactile_tasks/envs/__pycache__/precision_grasp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..558bcf80423b5e023aa78ac4a26c1181e657db1a Binary files /dev/null and b/tactile_tasks/envs/__pycache__/precision_grasp.cpython-310.pyc differ diff --git a/tactile_tasks/envs/__pycache__/precision_place.cpython-310.pyc b/tactile_tasks/envs/__pycache__/precision_place.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7816fa987a9dde75ad01db57f766006bb039af7 Binary files /dev/null and b/tactile_tasks/envs/__pycache__/precision_place.cpython-310.pyc differ diff --git a/tactile_tasks/envs/__pycache__/surface_push.cpython-310.pyc b/tactile_tasks/envs/__pycache__/surface_push.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b164904fae9f2cd61313e66ae28e15efa9631e6e Binary files /dev/null and b/tactile_tasks/envs/__pycache__/surface_push.cpython-310.pyc differ diff --git a/tactile_tasks/envs/gentle_stack.py b/tactile_tasks/envs/gentle_stack.py new file mode 100644 index 0000000000000000000000000000000000000000..b72b0bcf173163ae4789e4d5c82f3420996731de --- /dev/null +++ b/tactile_tasks/envs/gentle_stack.py @@ -0,0 +1,324 @@ +""" +Task 4: Gentle Stack + +Contact-rich task: Pick up a small box and gently place it on top of a can. +Requires force-controlled placement to avoid knocking over the can. 
+ +Objects: CanObject (base), BoxObject (to be stacked) +Success: Box stacked on top of can stably +Tactile role: Detect contact during placement, control placement force +""" + +from collections import OrderedDict + +import numpy as np + +from robosuite.environments.manipulation.manipulation_env import ManipulationEnv +from robosuite.models.arenas import TableArena +from robosuite.models.objects import BoxObject, CanObject, CylinderObject +from robosuite.models.tasks import ManipulationTask +from robosuite.utils.mjcf_utils import CustomMaterial +from robosuite.utils.observables import Observable, sensor +from robosuite.utils.placement_samplers import UniformRandomSampler, SequentialCompositeSampler +from robosuite.utils.transform_utils import convert_quat + + +class GentleStack(ManipulationEnv): + """ + Gentle stacking task: pick up a box and place it on top of a can. + + The robot must grasp a box, lift it, move it above the can, and gently + lower it onto the can's top surface. Tactile feedback is used to detect + initial contact and control the placement force. 
+ """ + + def __init__( + self, + robots, + env_configuration="default", + controller_configs=None, + gripper_types="Robotiq85Gripper", + base_types="default", + initialization_noise="default", + table_full_size=(0.8, 0.8, 0.05), + table_friction=(1.0, 5e-3, 1e-4), + use_camera_obs=True, + use_object_obs=True, + reward_scale=1.0, + reward_shaping=True, + placement_initializer=None, + has_renderer=False, + has_offscreen_renderer=True, + render_camera="frontview", + render_collision_mesh=False, + render_visual_mesh=True, + render_gpu_device_id=-1, + control_freq=20, + lite_physics=True, + horizon=600, + ignore_done=False, + hard_reset=True, + camera_names="agentview", + camera_heights=256, + camera_widths=256, + camera_depths=False, + camera_segmentations=None, + renderer="mjviewer", + renderer_config=None, + seed=None, + ): + self.table_full_size = table_full_size + self.table_friction = table_friction + self.table_offset = np.array((0, 0, 0.8)) + self.reward_scale = reward_scale + self.reward_shaping = reward_shaping + self.use_object_obs = use_object_obs + self.placement_initializer = placement_initializer + + super().__init__( + robots=robots, + env_configuration=env_configuration, + controller_configs=controller_configs, + base_types=base_types, + gripper_types=gripper_types, + initialization_noise=initialization_noise, + use_camera_obs=use_camera_obs, + has_renderer=has_renderer, + has_offscreen_renderer=has_offscreen_renderer, + render_camera=render_camera, + render_collision_mesh=render_collision_mesh, + render_visual_mesh=render_visual_mesh, + render_gpu_device_id=render_gpu_device_id, + control_freq=control_freq, + lite_physics=lite_physics, + horizon=horizon, + ignore_done=ignore_done, + hard_reset=hard_reset, + camera_names=camera_names, + camera_heights=camera_heights, + camera_widths=camera_widths, + camera_depths=camera_depths, + camera_segmentations=camera_segmentations, + renderer=renderer, + renderer_config=renderer_config, + seed=seed, + ) + + def 
reward(self, action=None): + reward = 0.0 + box_pos = self.sim.data.body_xpos[self.box_body_id] + can_pos = self.sim.data.body_xpos[self.can_body_id] + + if self._check_success(): + reward = 3.0 + elif self.reward_shaping: + # Reaching: gripper to box + dist_to_box = self._gripper_to_target( + gripper=self.robots[0].gripper, + target=self.stack_box.root_body, + target_type="body", + return_distance=True, + ) + reaching_reward = 0.5 * (1 - np.tanh(10.0 * dist_to_box)) + reward += reaching_reward + + # Grasping reward + grasped = self._check_grasp(gripper=self.robots[0].gripper, object_geoms=self.stack_box) + if grasped: + reward += 0.5 + + # Lifting reward + table_height = self.table_offset[2] + lift = max(0, box_pos[2] - table_height - 0.02) + lift_reward = min(0.5, lift * 5) + reward += lift_reward + + # Hover over can reward: xy distance from box to above can + xy_dist = np.linalg.norm(box_pos[:2] - can_pos[:2]) + hover_reward = 0.5 * (1 - np.tanh(10.0 * xy_dist)) + reward += hover_reward + + # Stacking proximity reward (box above can and close) + if xy_dist < 0.03 and box_pos[2] > can_pos[2]: + stack_dist = abs(box_pos[2] - can_pos[2] - 0.06) # can height ~ 0.06 + stack_reward = 1.0 * (1 - np.tanh(20.0 * stack_dist)) + reward += stack_reward + + if self.reward_scale is not None: + reward *= self.reward_scale / 3.0 + return reward + + def _load_model(self): + super()._load_model() + + xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0]) + self.robots[0].robot_model.set_base_xpos(xpos) + + mujoco_arena = TableArena( + table_full_size=self.table_full_size, + table_friction=self.table_friction, + table_offset=self.table_offset, + ) + mujoco_arena.set_origin([0, 0, 0]) + + # Box to pick up and stack + redwood = CustomMaterial( + texture="WoodRed", + tex_name="redwood", + mat_name="redwood_mat", + tex_attrib={"type": "cube"}, + mat_attrib={"texrepeat": "1 1", "specular": "0.4", "shininess": "0.1"}, + ) + self.stack_box = BoxObject( + 
name="stack_box", + size_min=[0.025, 0.025, 0.03], + size_max=[0.03, 0.03, 0.035], + rgba=[1, 0, 0, 1], + material=redwood, + rng=self.rng, + ) + + # Small cylinder as base — box must balance precisely (adds difficulty) + self.base_can = CylinderObject( + name="base_can", + size=[0.022, 0.04], # radius=2.2cm, half-height=4cm (8cm tall) + rgba=[0.6, 0.6, 0.6, 1], + density=2000, + ) + + self.placement_initializer = SequentialCompositeSampler(name="ObjectSampler") + self.placement_initializer.append_sampler( + UniformRandomSampler( + name="BoxSampler", + mujoco_objects=self.stack_box, + x_range=[-0.03, 0.02], + y_range=[-0.02, 0.02], + rotation=None, + ensure_object_boundary_in_range=False, + ensure_valid_placement=True, + reference_pos=self.table_offset, + z_offset=0.01, + rng=self.rng, + ) + ) + self.placement_initializer.append_sampler( + UniformRandomSampler( + name="CanSampler", + mujoco_objects=self.base_can, + x_range=[0.05, 0.09], + y_range=[-0.02, 0.02], + rotation=None, + ensure_object_boundary_in_range=False, + ensure_valid_placement=True, + reference_pos=self.table_offset, + z_offset=0.01, + rng=self.rng, + ) + ) + + self.model = ManipulationTask( + mujoco_arena=mujoco_arena, + mujoco_robots=[robot.robot_model for robot in self.robots], + mujoco_objects=[self.stack_box, self.base_can], + ) + + def _setup_references(self): + super()._setup_references() + self.box_body_id = self.sim.model.body_name2id(self.stack_box.root_body) + self.can_body_id = self.sim.model.body_name2id(self.base_can.root_body) + + def _setup_observables(self): + observables = super()._setup_observables() + + if self.use_object_obs: + modality = "object" + + @sensor(modality=modality) + def box_pos(obs_cache): + return np.array(self.sim.data.body_xpos[self.box_body_id]) + + @sensor(modality=modality) + def box_quat(obs_cache): + return convert_quat(np.array(self.sim.data.body_xquat[self.box_body_id]), to="xyzw") + + @sensor(modality=modality) + def can_pos(obs_cache): + return 
np.array(self.sim.data.body_xpos[self.can_body_id]) + + @sensor(modality=modality) + def can_quat(obs_cache): + return convert_quat(np.array(self.sim.data.body_xquat[self.can_body_id]), to="xyzw") + + @sensor(modality=modality) + def box_to_can(obs_cache): + if "box_pos" in obs_cache and "can_pos" in obs_cache: + return obs_cache["can_pos"] - obs_cache["box_pos"] + return np.zeros(3) + + sensors = [box_pos, box_quat, can_pos, can_quat, box_to_can] + arm_prefixes = self._get_arm_prefixes(self.robots[0], include_robot_name=False) + full_prefixes = self._get_arm_prefixes(self.robots[0]) + + sensors += [ + self._get_obj_eef_sensor(full_pf, "box_pos", f"{arm_pf}gripper_to_box_pos", modality) + for arm_pf, full_pf in zip(arm_prefixes, full_prefixes) + ] + names = [s.__name__ for s in sensors] + + for name, s in zip(names, sensors): + observables[name] = Observable( + name=name, sensor=s, sampling_rate=self.control_freq, + ) + + return observables + + def _reset_internal(self): + super()._reset_internal() + if not self.deterministic_reset: + object_placements = self.placement_initializer.sample() + for obj_pos, obj_quat, obj in object_placements.values(): + self.sim.data.set_joint_qpos( + obj.joints[0], np.concatenate([np.array(obj_pos), np.array(obj_quat)]) + ) + + def _check_success(self): + box_pos = self.sim.data.body_xpos[self.box_body_id] + can_pos = self.sim.data.body_xpos[self.can_body_id] + + # Box should be above the can and horizontally centered + # can radius=0.022, box half-size~0.027, so xy tolerance = 0.04 is reasonable + xy_dist = np.linalg.norm(box_pos[:2] - can_pos[:2]) + z_diff = box_pos[2] - can_pos[2] + # can half-height=0.04, so can top is at can_pos[2]+0.04 + # box center should be above can top: z_diff > 0.04, with some slack + stacked = xy_dist < 0.04 and 0.03 < z_diff < 0.18 + + # Box must be roughly flat (z-axis pointing up), relaxed tolerance + box_quat = self.sim.data.body_xquat[self.box_body_id] + import mujoco + mat = np.zeros(9) + 
mujoco.mju_quat2Mat(mat, box_quat) + z_axis = mat.reshape(3, 3)[:, 2] + box_flat = abs(z_axis[2]) > 0.85 # relaxed from 0.9 + + # Box translational velocity should be low (stable placement) + if self.stack_box.joints: + joint_name = self.stack_box.joints[0] + jid = self.sim.model.joint_name2id(joint_name) + qvel_addr = self.sim.model.jnt_dofadr[jid] + box_vel = np.linalg.norm(self.sim.data.qvel[qvel_addr: qvel_addr + 3]) # only translational + else: + box_vel = 0.0 + box_stable = box_vel < 0.1 + + # Gripper released + not_grasped = not self._check_grasp( + gripper=self.robots[0].gripper, object_geoms=self.stack_box + ) + + return stacked and box_flat and box_stable and not_grasped + + def visualize(self, vis_settings): + super().visualize(vis_settings=vis_settings) + if vis_settings["grippers"]: + self._visualize_gripper_to_target(gripper=self.robots[0].gripper, target=self.stack_box) \ No newline at end of file diff --git a/tactile_tasks/envs/peg_insertion.py b/tactile_tasks/envs/peg_insertion.py new file mode 100644 index 0000000000000000000000000000000000000000..5935974daaf4a794210a23727d832e35af7a47ed --- /dev/null +++ b/tactile_tasks/envs/peg_insertion.py @@ -0,0 +1,309 @@ +""" +Task: Peg Insertion (Fixed) + +Key fixes: +- hole inner_radius = 0.023 (was 0.028) → 3mm clearance, alignment matters +- success = peg inside hole AND peg velocity near zero (stable insertion) +- hole placed realistically for Sawyer workspace +- peg placement closer to robot to avoid reach issues +""" + +import numpy as np + +from robosuite.environments.manipulation.manipulation_env import ManipulationEnv +from robosuite.models.arenas import TableArena +from robosuite.models.objects import CylinderObject +from robosuite.models.objects.composite import HollowCylinderObject +from robosuite.models.tasks import ManipulationTask +from robosuite.utils.observables import Observable, sensor +from robosuite.utils.placement_samplers import UniformRandomSampler, SequentialCompositeSampler +from 
robosuite.utils.transform_utils import convert_quat + + +class PegInsertion(ManipulationEnv): + """ + Peg-in-hole insertion task. + + Success = peg xy within hole, peg descended into hole, peg stationary. + """ + + def __init__( + self, + robots, + env_configuration="default", + controller_configs=None, + gripper_types="Robotiq85Gripper", + base_types="default", + initialization_noise="default", + peg_radius=0.02, + peg_height=0.04, + hole_outer_radius=0.038, + hole_inner_radius=0.023, # 3mm clearance — requires <3° tilt, objects must be close to base + hole_height=0.05, + table_full_size=(0.8, 0.8, 0.05), + table_friction=(1.0, 5e-3, 1e-4), + use_camera_obs=True, + use_object_obs=True, + reward_scale=1.0, + reward_shaping=True, + placement_initializer=None, + has_renderer=False, + has_offscreen_renderer=True, + render_camera="frontview", + render_collision_mesh=False, + render_visual_mesh=True, + render_gpu_device_id=-1, + control_freq=20, + lite_physics=True, + horizon=600, + ignore_done=False, + hard_reset=True, + camera_names="agentview", + camera_heights=256, + camera_widths=256, + camera_depths=False, + camera_segmentations=None, + renderer="mjviewer", + renderer_config=None, + seed=None, + ): + self.peg_radius = peg_radius + self.peg_height = peg_height + self.hole_outer_radius = hole_outer_radius + self.hole_inner_radius = hole_inner_radius + self.hole_height = hole_height + self.table_full_size = table_full_size + self.table_friction = table_friction + self.table_offset = np.array((0, 0, 0.8)) + self.reward_scale = reward_scale + self.reward_shaping = reward_shaping + self.use_object_obs = use_object_obs + self.placement_initializer = placement_initializer + + self._success_steps = 0 + self._required_success_steps = 5 + + super().__init__( + robots=robots, + env_configuration=env_configuration, + controller_configs=controller_configs, + base_types=base_types, + gripper_types=gripper_types, + initialization_noise=initialization_noise, + 
use_camera_obs=use_camera_obs, + has_renderer=has_renderer, + has_offscreen_renderer=has_offscreen_renderer, + render_camera=render_camera, + render_collision_mesh=render_collision_mesh, + render_visual_mesh=render_visual_mesh, + render_gpu_device_id=render_gpu_device_id, + control_freq=control_freq, + lite_physics=lite_physics, + horizon=horizon, + ignore_done=ignore_done, + hard_reset=hard_reset, + camera_names=camera_names, + camera_heights=camera_heights, + camera_widths=camera_widths, + camera_depths=camera_depths, + camera_segmentations=camera_segmentations, + renderer=renderer, + renderer_config=renderer_config, + seed=seed, + ) + + def _reset_internal(self): + super()._reset_internal() + self._success_steps = 0 + + if not self.deterministic_reset: + object_placements = self.placement_initializer.sample() + for obj_pos, obj_quat, obj in object_placements.values(): + if obj.joints: + self.sim.data.set_joint_qpos( + obj.joints[0], + np.concatenate([np.array(obj_pos), np.array(obj_quat)]) + ) + else: + body_id = self.sim.model.body_name2id(obj.root_body) + self.sim.model.body_pos[body_id] = obj_pos + self.sim.model.body_quat[body_id] = obj_quat + + def reward(self, action=None): + reward = 0.0 + if self._check_success(): + reward = 3.0 + elif self.reward_shaping: + peg_pos = self.sim.data.body_xpos[self.peg_body_id] + hole_pos = self.sim.data.body_xpos[self.hole_body_id] + + dist_to_peg = self._gripper_to_target( + gripper=self.robots[0].gripper, + target=self.peg.root_body, + target_type="body", + return_distance=True, + ) + reward += 0.5 * (1 - np.tanh(10.0 * dist_to_peg)) + + grasped = self._check_grasp( + gripper=self.robots[0].gripper, object_geoms=self.peg) + if grasped: + reward += 0.5 + + xy_dist = np.linalg.norm(peg_pos[:2] - hole_pos[:2]) + reward += 0.5 * (1 - np.tanh(20.0 * xy_dist)) + + height_diff = peg_pos[2] - hole_pos[2] + if height_diff < self.peg_height and xy_dist < self.hole_inner_radius: + insertion = 1.5 * (1 - np.clip(height_diff / 
self.peg_height, 0, 1)) + reward += insertion + + if self.reward_scale is not None: + reward *= self.reward_scale / 3.0 + return reward + + def _load_model(self): + super()._load_model() + + xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0]) + self.robots[0].robot_model.set_base_xpos(xpos) + + mujoco_arena = TableArena( + table_full_size=self.table_full_size, + table_friction=self.table_friction, + table_offset=self.table_offset, + ) + mujoco_arena.set_origin([0, 0, 0]) + + self.peg = CylinderObject( + name="peg", + size=[self.peg_radius, self.peg_height], + rgba=[0.2, 0.6, 1.0, 1], + density=500, + ) + + self.hole = HollowCylinderObject( + name="hole", + outer_radius=self.hole_outer_radius, + inner_radius=self.hole_inner_radius, + height=self.hole_height, + ngeoms=16, # more geoms = smoother hole wall + rgba=[0.6, 0.6, 0.6, 1], + density=50000, + ) + + self.placement_initializer = SequentialCompositeSampler(name="ObjectSampler") + # Peg: close to robot base, small randomization + self.placement_initializer.append_sampler( + UniformRandomSampler( + name="PegSampler", + mujoco_objects=self.peg, + x_range=[-0.06, -0.02], + y_range=[-0.02, 0.02], + rotation=None, + ensure_object_boundary_in_range=False, + ensure_valid_placement=True, + reference_pos=self.table_offset, + z_offset=0.01, + rng=self.rng, + ) + ) + # Hole: close to robot base to minimize EEF tilt + self.placement_initializer.append_sampler( + UniformRandomSampler( + name="HoleSampler", + mujoco_objects=self.hole, + x_range=[0.04, 0.08], + y_range=[-0.02, 0.02], + rotation=0, + ensure_object_boundary_in_range=False, + ensure_valid_placement=True, + reference_pos=self.table_offset, + z_offset=0.001, + rng=self.rng, + ) + ) + + self.model = ManipulationTask( + mujoco_arena=mujoco_arena, + mujoco_robots=[robot.robot_model for robot in self.robots], + mujoco_objects=[self.peg, self.hole], + ) + + def _setup_references(self): + super()._setup_references() + self.peg_body_id = 
self.sim.model.body_name2id(self.peg.root_body) + self.hole_body_id = self.sim.model.body_name2id(self.hole.root_body) + + if self.peg.joints: + self.peg_joint_id = self.sim.model.joint_name2id(self.peg.joints[0]) + else: + self.peg_joint_id = None + + def _setup_observables(self): + observables = super()._setup_observables() + + if self.use_object_obs: + modality = "object" + + @sensor(modality=modality) + def peg_pos(obs_cache): + return np.array(self.sim.data.body_xpos[self.peg_body_id]) + + @sensor(modality=modality) + def peg_quat(obs_cache): + return convert_quat( + np.array(self.sim.data.body_xquat[self.peg_body_id]), to="xyzw") + + @sensor(modality=modality) + def hole_pos(obs_cache): + return np.array(self.sim.data.body_xpos[self.hole_body_id]) + + sensors = [peg_pos, peg_quat, hole_pos] + arm_prefixes = self._get_arm_prefixes(self.robots[0], include_robot_name=False) + full_prefixes = self._get_arm_prefixes(self.robots[0]) + sensors += [ + self._get_obj_eef_sensor(full_pf, "peg_pos", + f"{arm_pf}gripper_to_peg_pos", modality) + for arm_pf, full_pf in zip(arm_prefixes, full_prefixes) + ] + + for s in sensors: + observables[s.__name__] = Observable( + name=s.__name__, sensor=s, sampling_rate=self.control_freq) + + return observables + + def _check_success(self): + peg_pos = self.sim.data.body_xpos[self.peg_body_id] + hole_pos = self.sim.data.body_xpos[self.hole_body_id] + + # 1. xy alignment: peg center within hole inner radius + xy_dist = np.linalg.norm(peg_pos[:2] - hole_pos[:2]) + if xy_dist > self.hole_inner_radius * 0.9: + self._success_steps = 0 + return False + + # 2. z: peg has descended into hole + # hole top = hole_pos[2] + hole_height/2 + hole_top_z = hole_pos[2] + self.hole_height * 0.5 + if peg_pos[2] > hole_top_z - 0.01: + self._success_steps = 0 + return False + + # 3. 
Peg translational velocity should be low + if self.peg_joint_id is not None: + qvel_addr = self.sim.model.jnt_dofadr[self.peg_joint_id] + peg_vel = self.sim.data.qvel[qvel_addr: qvel_addr + 3] # translational only + if np.linalg.norm(peg_vel) > 0.1: + self._success_steps = 0 + return False + + self._success_steps += 1 + return self._success_steps >= self._required_success_steps + + def visualize(self, vis_settings): + super().visualize(vis_settings=vis_settings) + if vis_settings["grippers"]: + self._visualize_gripper_to_target( + gripper=self.robots[0].gripper, target=self.peg) \ No newline at end of file diff --git a/tactile_tasks/envs/precision_grasp.py b/tactile_tasks/envs/precision_grasp.py new file mode 100644 index 0000000000000000000000000000000000000000..c0de4e9512180cba0875da314c275e9e607276e0 --- /dev/null +++ b/tactile_tasks/envs/precision_grasp.py @@ -0,0 +1,241 @@ +""" +Task 1: Precision Grasp + +Contact-rich task: Grasp fragile objects (bottle, can, milk carton) with controlled force. +The robot must apply enough force to hold the object but not crush it. + +Objects: BottleObject, CanObject, MilkObject (randomly selected per episode) +Success: Object lifted above threshold height +Tactile role: Monitor grip force to maintain stable grasp without excessive force +""" + +from collections import OrderedDict + +import numpy as np + +from robosuite.environments.manipulation.manipulation_env import ManipulationEnv +from robosuite.models.arenas import TableArena +from robosuite.models.objects import BottleObject, CanObject, MilkObject +from robosuite.models.tasks import ManipulationTask +from robosuite.utils.observables import Observable, sensor +from robosuite.utils.placement_samplers import UniformRandomSampler +from robosuite.utils.transform_utils import convert_quat + + +class PrecisionGrasp(ManipulationEnv): + """ + Precision grasping task: pick up fragile objects with controlled grip force. 
+ + The robot must grasp an object from the table and lift it to a target height. + Different objects require different grasp strategies due to varying shapes. + + Args: + robots (str): Robot specification (e.g., "Sawyer") + object_type (str or None): "bottle", "can", or "milk". None = random each episode. + lift_height (float): Height above table for successful lift (meters) + """ + + def __init__( + self, + robots, + env_configuration="default", + controller_configs=None, + gripper_types="Robotiq85Gripper", + base_types="default", + initialization_noise="default", + object_type=None, + lift_height=0.10, + table_full_size=(0.8, 0.8, 0.05), + table_friction=(1.0, 5e-3, 1e-4), + use_camera_obs=True, + use_object_obs=True, + reward_scale=1.0, + reward_shaping=True, + placement_initializer=None, + has_renderer=False, + has_offscreen_renderer=True, + render_camera="frontview", + render_collision_mesh=False, + render_visual_mesh=True, + render_gpu_device_id=-1, + control_freq=20, + lite_physics=True, + horizon=500, + ignore_done=False, + hard_reset=True, + camera_names="agentview", + camera_heights=256, + camera_widths=256, + camera_depths=False, + camera_segmentations=None, + renderer="mjviewer", + renderer_config=None, + seed=None, + ): + self.object_type = object_type + self.lift_height = lift_height + self.table_full_size = table_full_size + self.table_friction = table_friction + self.table_offset = np.array((0, 0, 0.8)) + self.reward_scale = reward_scale + self.reward_shaping = reward_shaping + self.use_object_obs = use_object_obs + self.placement_initializer = placement_initializer + + # Will be set in _load_model + self._current_object_type = None + self.obj = None + + super().__init__( + robots=robots, + env_configuration=env_configuration, + controller_configs=controller_configs, + base_types=base_types, + gripper_types=gripper_types, + initialization_noise=initialization_noise, + use_camera_obs=use_camera_obs, + has_renderer=has_renderer, + 
has_offscreen_renderer=has_offscreen_renderer, + render_camera=render_camera, + render_collision_mesh=render_collision_mesh, + render_visual_mesh=render_visual_mesh, + render_gpu_device_id=render_gpu_device_id, + control_freq=control_freq, + lite_physics=lite_physics, + horizon=horizon, + ignore_done=ignore_done, + hard_reset=hard_reset, + camera_names=camera_names, + camera_heights=camera_heights, + camera_widths=camera_widths, + camera_depths=camera_depths, + camera_segmentations=camera_segmentations, + renderer=renderer, + renderer_config=renderer_config, + seed=seed, + ) + + def reward(self, action=None): + reward = 0.0 + if self._check_success(): + reward = 2.25 + elif self.reward_shaping: + dist = self._gripper_to_target( + gripper=self.robots[0].gripper, + target=self.obj.root_body, + target_type="body", + return_distance=True, + ) + reaching_reward = 1 - np.tanh(10.0 * dist) + reward += reaching_reward + if self._check_grasp(gripper=self.robots[0].gripper, object_geoms=self.obj): + reward += 0.25 + # Additional reward for lifting + obj_height = self.sim.data.body_xpos[self.obj_body_id][2] + table_height = self.table_offset[2] + lift_reward = min(1.0, max(0, (obj_height - table_height - 0.01)) / self.lift_height) + reward += lift_reward + + if self.reward_scale is not None: + reward *= self.reward_scale / 2.25 + return reward + + def _load_model(self): + super()._load_model() + + xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0]) + self.robots[0].robot_model.set_base_xpos(xpos) + + mujoco_arena = TableArena( + table_full_size=self.table_full_size, + table_friction=self.table_friction, + table_offset=self.table_offset, + ) + mujoco_arena.set_origin([0, 0, 0]) + + # Select object type + obj_choices = { + "bottle": BottleObject, + "can": CanObject, + "milk": MilkObject, + } + + if self.object_type is not None: + self._current_object_type = self.object_type + else: + self._current_object_type = 
self.rng.choice(list(obj_choices.keys())) + + self.obj = obj_choices[self._current_object_type](name="target_obj") + + self.placement_initializer = UniformRandomSampler( + name="ObjectSampler", + mujoco_objects=self.obj, + x_range=[-0.03, 0.03], + y_range=[-0.03, 0.03], + rotation=None, + ensure_object_boundary_in_range=False, + ensure_valid_placement=True, + reference_pos=self.table_offset, + z_offset=0.01, + rng=self.rng, + ) + + self.model = ManipulationTask( + mujoco_arena=mujoco_arena, + mujoco_robots=[robot.robot_model for robot in self.robots], + mujoco_objects=self.obj, + ) + + def _setup_references(self): + super()._setup_references() + self.obj_body_id = self.sim.model.body_name2id(self.obj.root_body) + + def _setup_observables(self): + observables = super()._setup_observables() + + if self.use_object_obs: + modality = "object" + + @sensor(modality=modality) + def obj_pos(obs_cache): + return np.array(self.sim.data.body_xpos[self.obj_body_id]) + + @sensor(modality=modality) + def obj_quat(obs_cache): + return convert_quat(np.array(self.sim.data.body_xquat[self.obj_body_id]), to="xyzw") + + sensors = [obj_pos, obj_quat] + arm_prefixes = self._get_arm_prefixes(self.robots[0], include_robot_name=False) + full_prefixes = self._get_arm_prefixes(self.robots[0]) + + sensors += [ + self._get_obj_eef_sensor(full_pf, "obj_pos", f"{arm_pf}gripper_to_obj_pos", modality) + for arm_pf, full_pf in zip(arm_prefixes, full_prefixes) + ] + names = [s.__name__ for s in sensors] + + for name, s in zip(names, sensors): + observables[name] = Observable( + name=name, sensor=s, sampling_rate=self.control_freq, + ) + + return observables + + def _reset_internal(self): + super()._reset_internal() + if not self.deterministic_reset: + object_placements = self.placement_initializer.sample() + for obj_pos, obj_quat, obj in object_placements.values(): + self.sim.data.set_joint_qpos( + obj.joints[0], np.concatenate([np.array(obj_pos), np.array(obj_quat)]) + ) + + def 
_check_success(self): + obj_height = self.sim.data.body_xpos[self.obj_body_id][2] + table_height = self.table_offset[2] + return obj_height > table_height + self.lift_height + + def visualize(self, vis_settings): + super().visualize(vis_settings=vis_settings) + if vis_settings["grippers"]: + self._visualize_gripper_to_target(gripper=self.robots[0].gripper, target=self.obj) diff --git a/tactile_tasks/motion_planner.py b/tactile_tasks/motion_planner.py new file mode 100644 index 0000000000000000000000000000000000000000..83ff5ddecc3db201613084506c19ebea851add25 --- /dev/null +++ b/tactile_tasks/motion_planner.py @@ -0,0 +1,1021 @@ +""" +Motion Planner for Sawyer + Robotiq85 with tactile-aware control. + +Provides: +- RRT-Connect path planning in joint space +- Min-jerk (quintic) trajectory interpolation for smooth motion +- Task-space waypoint planning via IK_POSE / OSC_POSE interface +- Force-controlled grasping with tactile feedback +- Contact-phase planning for insertion and pushing + +Works with robosuite's IK_POSE or OSC_POSE controller: outputs 7D actions +[dx, dy, dz, dax, day, daz, gripper] at each control step. +""" + +import numpy as np +from copy import deepcopy + +import mujoco + + +class MinJerkTrajectory: + """ + Minimum-jerk (quintic polynomial) trajectory between two points. + Guarantees zero velocity and acceleration at start/end. + """ + + def __init__(self, start, end, duration, dt): + self.start = np.array(start, dtype=np.float64) + self.end = np.array(end, dtype=np.float64) + self.duration = max(duration, dt) + self.dt = dt + self.t = 0.0 + + def step(self): + """ + Get next point on the trajectory. 
+ + Returns: + tuple: (position, velocity, done) + """ + self.t += self.dt + tau = np.clip(self.t / self.duration, 0.0, 1.0) + + # Min-jerk profile: x(t) = x0 + (xf - x0) * (10*tau^3 - 15*tau^4 + 6*tau^5) + s = 10.0 * tau**3 - 15.0 * tau**4 + 6.0 * tau**5 + ds = (30.0 * tau**2 - 60.0 * tau**3 + 30.0 * tau**4) / self.duration + + pos = self.start + (self.end - self.start) * s + vel = (self.end - self.start) * ds + + done = tau >= 1.0 + return pos, vel, done + + def reset(self, start=None, end=None, duration=None): + self.t = 0.0 + if start is not None: + self.start = np.array(start, dtype=np.float64) + if end is not None: + self.end = np.array(end, dtype=np.float64) + if duration is not None: + self.duration = max(duration, self.dt) + + +class RRTNode: + """Node in an RRT tree.""" + + def __init__(self, config, parent=None): + self.config = np.array(config) + self.parent = parent + + +class RRTConnectPlanner: + """ + RRT-Connect planner in joint space. + + Uses bidirectional RRT to find collision-free paths between joint configurations. + Collision checking is done via MuJoCo. + """ + + def __init__(self, sim, joint_ids, joint_limits, step_size=0.1, max_iter=2000): + self.sim = sim + self.joint_ids = joint_ids + self.joint_limits = np.array(joint_limits) # (n_joints, 2) -> [lower, upper] + self.step_size = step_size + self.max_iter = max_iter + self.ndof = len(joint_ids) + + def plan(self, start, goal): + """ + Plan a collision-free path from start to goal in joint space. 
+ + Args: + start (np.ndarray): start joint configuration + goal (np.ndarray): goal joint configuration + + Returns: + list[np.ndarray] or None: list of waypoints, or None if planning fails + """ + tree_a = [RRTNode(start)] + tree_b = [RRTNode(goal)] + + for i in range(self.max_iter): + # Sample random config + q_rand = self._random_config() + + # Extend tree_a toward q_rand + near_a = self._nearest(tree_a, q_rand) + q_new_a = self._steer(near_a.config, q_rand) + + if self._collision_free(near_a.config, q_new_a): + node_a = RRTNode(q_new_a, parent=near_a) + tree_a.append(node_a) + + # Try to connect tree_b to q_new_a + near_b = self._nearest(tree_b, q_new_a) + q_connect = self._steer(near_b.config, q_new_a) + + if self._collision_free(near_b.config, q_connect): + node_b = RRTNode(q_connect, parent=near_b) + tree_b.append(node_b) + + if np.linalg.norm(q_connect - q_new_a) < self.step_size: + # Trees connected - extract path + path = self._extract_path(node_a, node_b) + return self._shortcut(path) + + # Swap trees + tree_a, tree_b = tree_b, tree_a + + return None # Planning failed + + def _random_config(self): + return self.joint_limits[:, 0] + np.random.random(self.ndof) * ( + self.joint_limits[:, 1] - self.joint_limits[:, 0] + ) + + def _nearest(self, tree, config): + dists = [np.linalg.norm(n.config - config) for n in tree] + return tree[np.argmin(dists)] + + def _steer(self, from_config, to_config): + diff = to_config - from_config + dist = np.linalg.norm(diff) + if dist <= self.step_size: + return to_config.copy() + return from_config + diff / dist * self.step_size + + def _collision_free(self, q1, q2, n_checks=5): + """Check if linear path between q1 and q2 is collision-free.""" + for alpha in np.linspace(0, 1, n_checks): + q = q1 + alpha * (q2 - q1) + if self._in_collision(q): + return False + return True + + def _in_collision(self, q): + """Check collision at configuration q using MuJoCo.""" + # Save current state + old_qpos = self.sim.data.qpos.copy() + 
old_qvel = self.sim.data.qvel.copy() + + # Set joint positions + for i, jid in enumerate(self.joint_ids): + self.sim.data.qpos[jid] = q[i] + + # Forward kinematics + mujoco.mj_forward(self.sim.model._model, self.sim.data._data) + + # Check for contacts between robot and environment (excluding gripper-object contacts) + in_collision = False + for i in range(self.sim.data._data.ncon): + contact = self.sim.data._data.contact[i] + if contact.dist < -0.001: # penetration threshold + in_collision = True + break + + # Restore state + self.sim.data.qpos[:] = old_qpos + self.sim.data.qvel[:] = old_qvel + mujoco.mj_forward(self.sim.model._model, self.sim.data._data) + + return in_collision + + def _extract_path(self, node_a, node_b): + """Extract path from two connected tree nodes.""" + # Path from start to node_a + path_a = [] + n = node_a + while n is not None: + path_a.append(n.config) + n = n.parent + path_a.reverse() + + # Path from node_b to goal + path_b = [] + n = node_b + while n is not None: + path_b.append(n.config) + n = n.parent + + return path_a + path_b + + def _shortcut(self, path, n_attempts=50): + """Shortcut the path by trying to connect random pairs of waypoints.""" + if len(path) <= 2: + return path + path = list(path) + for _ in range(n_attempts): + if len(path) <= 2: + break + i = np.random.randint(0, len(path) - 2) + j = np.random.randint(i + 2, len(path)) + if self._collision_free(path[i], path[j], n_checks=10): + path = path[: i + 1] + path[j:] + return path + + +class MotionPlanner: + """ + High-level motion planner for contact-rich manipulation tasks. + + Uses OSC_POSE controller interface: outputs [dx, dy, dz, dax, day, daz, gripper]. + Provides task-level motion primitives with tactile feedback integration. 
+ + Args: + env: robosuite environment instance + tactile_sensor: USkinSensor instance (optional, for force-controlled grasping) + pos_gain (float): proportional gain for position tracking + ori_gain (float): proportional gain for orientation tracking + pos_threshold (float): position error threshold for "reached" (meters) + ori_threshold (float): orientation error threshold (radians) + """ + + def __init__(self, env, tactile_sensor=None, pos_gain=3.0, ori_gain=1.5, + pos_threshold=0.005, ori_threshold=0.1): + self.env = env + self.tactile = tactile_sensor + self.pos_gain = pos_gain + self.ori_gain = ori_gain + self.pos_threshold = pos_threshold + self.ori_threshold = ori_threshold + + self.dt = 1.0 / env.control_freq + + # Motion state + self._trajectory = None + self._phase = "idle" + self._gripper_action = -1.0 # -1 = open, 1 = closed + self._target_pos = None + self._target_quat = None + + # Save initial downward orientation for vertical control + self._initial_quat = self.get_eef_quat() + + # Force control parameters + self._target_grip_force = 0.0 + self._grip_force_kp = 0.5 + self._grip_force_ki = 0.05 + self._grip_force_integral = 0.0 + + def _compute_pos_action(self, pos_error): + """Compute position action with delta capping for stable convergence. + + OSC_POSE delta mode: goal = current_eef + delta * output_max. + If delta > error, the goal overshoots the target causing oscillation. + We cap delta so it never exceeds 70% of the error magnitude. + For large errors (action saturated at 1.0), no cap needed. 
+ """ + error_norm = np.linalg.norm(pos_error) + raw_action = pos_error * self.pos_gain + + # Cap: delta = action * output_max should not exceed 0.7 * error + # So action < 0.7 * error / output_max + if error_norm > 0.001: + output_max = 0.15 + max_action_mag = 0.7 * error_norm / output_max + raw_mag = np.linalg.norm(raw_action) + if raw_mag > max_action_mag: + raw_action = raw_action * (max_action_mag / raw_mag) + + return np.clip(raw_action, -1.0, 1.0) + + def get_eef_pos(self): + """Get current end-effector position.""" + return np.array(self.env.sim.data.site_xpos[self.env.robots[0].eef_site_id["right"]]) + + def get_eef_quat(self): + """Get current end-effector quaternion (wxyz).""" + eef_mat = np.array( + self.env.sim.data.site_xmat[self.env.robots[0].eef_site_id["right"]] + ).reshape(3, 3) + quat = np.zeros(4) + mujoco.mju_mat2Quat(quat, eef_mat.flatten()) + return quat + + def get_eef_mat(self): + """Get current end-effector rotation matrix.""" + return np.array( + self.env.sim.data.site_xmat[self.env.robots[0].eef_site_id["right"]] + ).reshape(3, 3) + + # ---- High-level motion primitives ---- + + def move_to(self, target_pos, target_quat=None, duration=1.5, gripper_open=True): + """ + Plan a smooth move to target pose. + + Args: + target_pos (np.ndarray): target (x, y, z) + target_quat (np.ndarray): target quaternion (wxyz), None = keep current + duration (float): motion duration in seconds + gripper_open (bool): whether gripper should be open + """ + start_pos = self.get_eef_pos() + if target_quat is None: + target_quat = self.get_eef_quat() + + self._target_pos = np.array(target_pos) + self._target_quat = np.array(target_quat) + self._trajectory = MinJerkTrajectory(start_pos, target_pos, duration, self.dt) + self._phase = "moving" + self._move_steps = 0 + self._gripper_action = -1.0 if gripper_open else 1.0 + + def approach(self, target_pos, approach_height=0.08, duration=1.5): + """ + Plan safe approach: ascend, move laterally, then descend. 
+ Three-phase motion prevents the open gripper from knocking objects. + + Args: + target_pos (np.ndarray): final target (x, y, z) + approach_height (float): height above target to approach from + duration (float): total duration + """ + above_pos = target_pos.copy() + above_pos[2] += approach_height + + # Safe height: above both current EEF and target, so lateral move is safe + safe_z = max(above_pos[2], self.get_eef_pos()[2]) + 0.02 + ascend_pos = self.get_eef_pos().copy() + ascend_pos[2] = safe_z + lateral_pos = above_pos.copy() + lateral_pos[2] = safe_z + + self._waypoints = [ + {"pos": ascend_pos, "duration": duration * 0.15, "gripper_open": True}, + {"pos": lateral_pos, "duration": duration * 0.30, "gripper_open": True}, + {"pos": above_pos, "duration": duration * 0.20, "gripper_open": True}, + {"pos": target_pos, "duration": duration * 0.35, "gripper_open": True}, + ] + self._current_wp_idx = 0 + self._start_next_waypoint() + self._phase = "waypoints" + + def grasp(self, target_force=2.0, timeout=1.0, min_close_time=0.5): + """ + Close gripper with tactile force control. + + Args: + target_force (float): target normal grip force (N) + timeout (float): maximum time to close gripper + min_close_time (float): minimum time to keep closing before force check + """ + self._target_grip_force = target_force + self._grip_force_integral = 0.0 + self._phase = "grasping" + self._grasp_timer = 0.0 + self._grasp_timeout = timeout + self._min_close_time = min_close_time + self._gripper_action = 1.0 # start closing + + def lift(self, height=0.15, duration=1.5): + """Lift object to specified height above current position.""" + current_pos = self.get_eef_pos() + target_pos = current_pos.copy() + target_pos[2] += height + self.move_to(target_pos, duration=duration, gripper_open=False) + + def place(self, target_pos, approach_height=0.05, duration=2.0): + """ + Place object at target position: descend, open gripper, retreat. 
+ + Args: + target_pos (np.ndarray): placement position + approach_height (float): retreat height + duration (float): total duration + """ + above_pos = target_pos.copy() + above_pos[2] += approach_height + + self._waypoints = [ + {"pos": above_pos, "duration": duration * 0.3, "gripper_open": False}, + {"pos": target_pos, "duration": duration * 0.3, "gripper_open": False}, + {"pos": target_pos, "duration": 0.5, "gripper_open": True, "min_hold": 0.4}, # release and hold open + {"pos": above_pos, "duration": duration * 0.3, "gripper_open": True}, # retreat + ] + self._current_wp_idx = 0 + self._start_next_waypoint() + self._phase = "waypoints" + + def place_tactile(self, target_pos, descent_speed=0.03, force_threshold=0.5, + max_descent=0.12): + """ + Lower object onto a surface using tactile contact detection. + Descends until tactile sensors detect contact, then releases. + + Args: + target_pos (np.ndarray): approximate surface xy position (z is auto-detected) + descent_speed (float): descent velocity in m/s + force_threshold (float): tactile force threshold for contact detection (N) + max_descent (float): max descent distance before giving up + """ + current_pos = self.get_eef_pos() + # Move above target first + above_pos = np.array([target_pos[0], target_pos[1], current_pos[2]]) + + self._waypoints = [ + {"pos": above_pos, "duration": 2.0, "gripper_open": False, "threshold": 0.008}, + ] + self._current_wp_idx = 0 + self._start_next_waypoint() + self._phase = "waypoints" + + # Store descent parameters for after waypoint completion + self._pending_descent = { + "speed": descent_speed, + "force_threshold": force_threshold, + "max_descent": max_descent, + "target_xy": target_pos[:2].copy(), + } + + def _start_tactile_descent(self): + """Begin tactile-guided descent phase.""" + params = self._pending_descent + self._phase = "tactile_descent" + self._descent_start_z = self.get_eef_pos()[2] + self._descent_speed = params["speed"] + self._descent_force_threshold = 
params["force_threshold"] + self._descent_max = params["max_descent"] + self._descent_target_xy = params["target_xy"] + self._descent_contact_detected = False + self._descent_release_timer = 0.0 + self._gripper_action = 1.0 # keep closed during descent + self._pending_descent = None + + def _tactile_descent_action(self): + """Generate action for tactile-guided descent and release.""" + current_pos = self.get_eef_pos() + + if not self._descent_contact_detected: + # Descend slowly while maintaining xy position + xy_error = self._descent_target_xy - current_pos[:2] + xy_err_norm = np.linalg.norm(xy_error) + xy_blend = min(1.0, xy_err_norm / 0.04) + xy_gain = self.pos_gain * (0.25 + 0.75 * xy_blend) + pos_action = np.zeros(3) + pos_action[:2] = np.clip(xy_error * xy_gain, -0.5, 0.5) + pos_action[2] = -self._descent_speed # descend + + ori_action = np.zeros(3) + if self._target_quat is not None: + ori_action = np.clip( + self._compute_ori_error(self._target_quat) * self.ori_gain, -1.0, 1.0 + ) + + action = np.concatenate([pos_action, ori_action, [1.0]]) # keep grasping + + # Check tactile for contact + if self.tactile is not None: + mags = self.tactile.get_force_magnitudes() + avg_force = (mags["left_finger"].mean() + mags["right_finger"].mean()) / 2.0 + if avg_force > self._descent_force_threshold: + self._descent_contact_detected = True + self._descent_release_timer = 0.0 + return action, False + + # Check max descent + descended = self._descent_start_z - current_pos[2] + if descended > self._descent_max: + self._descent_contact_detected = True # give up and release + self._descent_release_timer = 0.0 + + return action, False + else: + # Contact detected — open gripper and retreat + self._descent_release_timer += self.dt + + if self._descent_release_timer < 0.3: + # Open gripper + action = np.array([0, 0, 0, 0, 0, 0, -1.0]) + return action, False + elif self._descent_release_timer < 0.8: + # Retreat upward + action = np.array([0, 0, 0.1, 0, 0, 0, -1.0]) + return 
action, False + else: + self._phase = "idle" + return np.array([0, 0, 0, 0, 0, 0, -1.0]), True + + def place_gentle(self, target_pos, lower_height=0.02, duration=2.0): + """ + Gently lower an object onto a target, then release. + Descends slowly to just above the target, opens gripper, then retreats. + + Args: + target_pos (np.ndarray): surface position to place on + lower_height (float): height above target to release from + duration (float): total duration + """ + above_pos = target_pos.copy() + above_pos[2] += 0.06 # approach from 6cm above + + release_pos = target_pos.copy() + release_pos[2] += lower_height # release just above surface + + retreat_pos = target_pos.copy() + retreat_pos[2] += 0.08 + + self._waypoints = [ + {"pos": above_pos, "duration": duration * 0.25, "gripper_open": False}, + {"pos": release_pos, "duration": duration * 0.35, "gripper_open": False}, # slow descent + {"pos": release_pos, "duration": 0.3, "gripper_open": True, "min_hold": 0.25}, # open gripper + {"pos": retreat_pos, "duration": duration * 0.25, "gripper_open": True}, # retreat + ] + self._current_wp_idx = 0 + self._start_next_waypoint() + self._phase = "waypoints" + + def slide_to(self, target_xy, slide_z, duration=3.0): + """ + Slide a grasped object horizontally at a fixed z height. + Keeps the object on the table by maintaining low z. + + Args: + target_xy (np.ndarray): target (x, y) position + slide_z (float): z height to maintain during slide (should be object-on-table height) + duration (float): total duration + """ + current_pos = self.get_eef_pos() + target_pos = np.array([target_xy[0], target_xy[1], slide_z]) + + self._waypoints = [ + {"pos": target_pos, "duration": duration, "gripper_open": False}, + ] + self._current_wp_idx = 0 + self._start_next_waypoint() + self._phase = "waypoints" + + def push(self, start_pos, end_pos, push_height=0.02, duration=2.0): + """ + Push an object from start_pos to end_pos along the table surface. 
+ + Args: + start_pos (np.ndarray): push start position (behind the object) + end_pos (np.ndarray): push end position + push_height (float): height above table for pushing + duration (float): push duration + """ + approach_pos = start_pos.copy() + approach_pos[2] += 0.08 + + push_start = start_pos.copy() + push_start[2] = start_pos[2] + push_height + + push_end = end_pos.copy() + push_end[2] = end_pos[2] + push_height + + retreat_pos = end_pos.copy() + retreat_pos[2] += 0.08 + + self._waypoints = [ + {"pos": approach_pos, "duration": duration * 0.2, "gripper_open": False}, + {"pos": push_start, "duration": duration * 0.15, "gripper_open": False}, + {"pos": push_end, "duration": duration * 0.5, "gripper_open": False}, + {"pos": retreat_pos, "duration": duration * 0.15, "gripper_open": False}, + ] + self._current_wp_idx = 0 + self._start_next_waypoint() + self._phase = "waypoints" + + def insert(self, hole_pos, pre_insert_height=0.04, insert_depth=0.03, + duration=3.0, compliance_gain=0.3): + """ + Insert a grasped object into a hole using compliant motion. 
+ + Args: + hole_pos (np.ndarray): position of hole opening + pre_insert_height (float): height above hole to align + insert_depth (float): how deep to insert + duration (float): total duration + compliance_gain (float): compliance for lateral correction + """ + align_pos = hole_pos.copy() + align_pos[2] += pre_insert_height + + insert_pos = hole_pos.copy() + insert_pos[2] -= insert_depth + + self._waypoints = [ + {"pos": align_pos, "duration": duration * 0.35, "gripper_open": False}, + {"pos": hole_pos, "duration": duration * 0.3, "gripper_open": False, + "compliant": True, "compliance_gain": compliance_gain}, + {"pos": insert_pos, "duration": duration * 0.35, "gripper_open": False, + "compliant": True, "compliance_gain": compliance_gain}, + ] + self._current_wp_idx = 0 + self._start_next_waypoint() + self._phase = "waypoints" + self._compliance_gain = compliance_gain + + def insert_and_release(self, hole_pos, pre_insert_height=0.04, insert_depth=0.03, + duration=3.0, compliance_gain=0.3): + """ + Insert a grasped object into a hole, then release and retreat. 
+ + Args: + hole_pos (np.ndarray): position of hole opening + pre_insert_height (float): height above hole to align + insert_depth (float): how deep to insert + duration (float): total duration + compliance_gain (float): compliance for lateral correction + """ + align_pos = hole_pos.copy() + align_pos[2] += pre_insert_height + + insert_pos = hole_pos.copy() + insert_pos[2] -= insert_depth + + retreat_pos = hole_pos.copy() + retreat_pos[2] += pre_insert_height + 0.05 + + self._waypoints = [ + {"pos": align_pos, "duration": duration * 0.25, "gripper_open": False}, + {"pos": hole_pos, "duration": duration * 0.25, "gripper_open": False, + "compliant": True, "compliance_gain": compliance_gain}, + {"pos": insert_pos, "duration": duration * 0.25, "gripper_open": False, + "compliant": True, "compliance_gain": compliance_gain}, + {"pos": insert_pos, "duration": 0.3, "gripper_open": True, "min_hold": 0.2}, # release + {"pos": retreat_pos, "duration": duration * 0.15, "gripper_open": True}, # retreat + ] + self._current_wp_idx = 0 + self._start_next_waypoint() + self._phase = "waypoints" + self._compliance_gain = compliance_gain + + def insert_spiral(self, hole_pos, rim_eef_z, insert_depth=0.03, + max_search_radius=0.025, spiral_speed=8.0, + duration=10.0): + """ + Spiral search insertion for peg-in-hole under position uncertainty. + + Two-phase approach: + 1. Descend to rim level using proportional control + 2. 
Spiral at rim level — when peg drops into hole opening, insert and release + + Args: + hole_pos (np.ndarray): estimated hole center position (body center) + rim_eef_z (float): expected EEF z when peg rests on rim top + insert_depth (float): how deep to push after finding hole + max_search_radius (float): maximum spiral radius + spiral_speed (float): angular speed in rad/s + duration (float): total timeout for spiral search phase + """ + # Move above the rim first + above_pos = hole_pos.copy() + above_pos[2] = rim_eef_z + 0.03 # 3cm above expected rim contact + + self._waypoints = [ + {"pos": above_pos, "duration": 2.0, "gripper_open": False}, + ] + self._current_wp_idx = 0 + self._start_next_waypoint() + self._phase = "waypoints" + + self._pending_spiral = { + "hole_xy": hole_pos[:2].copy(), + "hole_z": hole_pos[2], + "rim_eef_z": rim_eef_z, + "max_radius": max_search_radius, + "spiral_speed": spiral_speed, + "insert_depth": insert_depth, + "duration": duration, + } + + def _start_spiral_insert(self): + """Begin spiral search phase after waypoint alignment.""" + params = self._pending_spiral + self._phase = "spiral_insert" + self._spiral = { + "center": params["hole_xy"], + "hole_z": params["hole_z"], + "rim_eef_z": params["rim_eef_z"], + "max_radius": params["max_radius"], + "speed": params["spiral_speed"], + "insert_depth": params["insert_depth"], + "timeout": params["duration"], + "timer": 0.0, + "state": "descending", # descending -> searching -> inserting -> releasing + "search_timer": 0.0, + "release_timer": 0.0, + "z_history": [], + } + self._pending_spiral = None + + def _spiral_insert_action(self): + """Generate action for two-phase spiral search insertion.""" + s = self._spiral + current_pos = self.get_eef_pos() + s["timer"] += self.dt + + ori_action = np.zeros(3) + if self._target_quat is not None: + ori_action = np.clip( + self._compute_ori_error(self._target_quat) * self.ori_gain, -1.0, 1.0 + ) + + if s["state"] == "descending": + # Descend to rim 
level using proportional control with gain scheduling + target_z = s["rim_eef_z"] + z_error = target_z - current_pos[2] + full_error = np.append(s["center"] - current_pos[:2], z_error) + pos_action = self._compute_pos_action(full_error) + action = np.concatenate([pos_action, ori_action, [1.0]]) + + # Switch to searching when close to target or timeout + if abs(z_error) < 0.01 or s["timer"] > 3.0: + s["state"] = "searching" + s["search_timer"] = 0.0 + + return action, False + + elif s["state"] == "searching": + s["search_timer"] += self.dt + + if s["timer"] > s["timeout"]: + self._phase = "idle" + return np.zeros(7), True + + # Spiral pattern centered on hole + angle = s["speed"] * s["search_timer"] + t_frac = min(1.0, s["search_timer"] / (s["timeout"] * 0.35)) + radius = s["max_radius"] * t_frac + + target_xy = s["center"] + radius * np.array([np.cos(angle), np.sin(angle)]) + xy_error = target_xy - current_pos[:2] + + # Use moderate gain for spiral tracking (not too aggressive) + xy_gain = min(self.pos_gain, 10.0) + pos_action = np.zeros(3) + pos_action[:2] = np.clip(xy_error * xy_gain, -0.8, 0.8) + # Gentle downward bias: press against rim surface + target_z = s["rim_eef_z"] - 0.005 + z_error = target_z - current_pos[2] + z_blend = min(1.0, abs(z_error) / 0.04) + z_gain = self.pos_gain * (0.25 + 0.75 * z_blend) + pos_action[2] = np.clip(z_error * z_gain, -0.5, 0.5) + + action = np.concatenate([pos_action, ori_action, [1.0]]) + + # Detect hole entry: rapid z drop + near hole center + s["z_history"].append(current_pos[2]) + if len(s["z_history"]) > 5: + z_vel = (s["z_history"][-5] - s["z_history"][-1]) / (5 * self.dt) + xy_dist = np.linalg.norm(current_pos[:2] - s["center"]) + if z_vel > 0.03 and xy_dist < s["max_radius"] * 1.2: + s["state"] = "inserting" + s["center"] = current_pos[:2].copy() + + return action, False + + elif s["state"] == "inserting": + # Push straight down into hole with gain scheduling + target_z = s["hole_z"] + z_error = target_z - 
current_pos[2] + full_error = np.append(s["center"] - current_pos[:2], z_error) + pos_action = self._compute_pos_action(full_error) + + action = np.concatenate([pos_action, ori_action, [1.0]]) + + if abs(z_error) < 0.015 or s["timer"] > s["timeout"]: + s["state"] = "releasing" + s["release_timer"] = 0.0 + + return action, False + + elif s["state"] == "releasing": + s["release_timer"] += self.dt + if s["release_timer"] < 0.3: + return np.array([0, 0, 0, 0, 0, 0, -1.0]), False + elif s["release_timer"] < 0.8: + return np.array([0, 0, 0.1, 0, 0, 0, -1.0]), False + else: + self._phase = "idle" + return np.array([0, 0, 0, 0, 0, 0, -1.0]), True + + return np.zeros(7), True + + def release(self, hold_time=0.5): + """Open gripper and hold for specified time.""" + self._phase = "releasing" + self._release_timer = 0.0 + self._release_timeout = hold_time + self._gripper_action = -1.0 # open + + # ---- Action generation ---- + + def get_action(self): + """ + Compute the next OSC_POSE action based on current motion plan. 
+ + Returns: + tuple: (action (np.ndarray shape (7,)), done (bool)) + action = [dx, dy, dz, dax, day, daz, gripper] + """ + if self._phase == "idle": + return np.zeros(7), True + + if self._phase == "grasping": + return self._grasping_action() + + if self._phase == "releasing": + return self._releasing_action() + + if self._phase == "tactile_descent": + return self._tactile_descent_action() + + if self._phase == "spiral_insert": + return self._spiral_insert_action() + + if self._phase == "waypoints": + action, done = self._waypoint_action() + # Check if waypoints done and pending descent + if done and hasattr(self, '_pending_descent') and self._pending_descent is not None: + self._start_tactile_descent() + return action, False + if done and hasattr(self, '_pending_spiral') and self._pending_spiral is not None: + self._start_spiral_insert() + return action, False + return action, done + + if self._phase == "moving": + return self._moving_action() + + return np.zeros(7), True + + def _moving_action(self): + """Generate action for simple move-to-target. + + Uses direct proportional control toward the final target position. + The trajectory is only used for timing (phase completion) and to + provide velocity-limiting via a blending factor for smoothness. + """ + _, _, traj_done = self._trajectory.step() + + # Direct proportional control toward FINAL target (not trajectory point) + current_pos = self.get_eef_pos() + pos_error = self._target_pos - current_pos + + ori_action = np.zeros(3) + if self._target_quat is not None: + ori_action = self._compute_ori_error(self._target_quat) + + pos_action = self._compute_pos_action(pos_error) + ori_action = np.clip(ori_action * self.ori_gain, -1.0, 1.0) + + action = np.concatenate([pos_action, ori_action, [self._gripper_action]]) + + # Done when close to target or trajectory time + buffer exceeded. 
+ dist = np.linalg.norm(pos_error) + self._move_steps = getattr(self, "_move_steps", 0) + 1 + max_move_steps = int(self._trajectory.duration / self.dt) + 40 # 2s extra + + if dist < 0.015 or self._move_steps > max_move_steps: + self._phase = "idle" + return action, True + + return action, False + + def _releasing_action(self): + """Generate action for timed gripper release.""" + self._release_timer += self.dt + action = np.array([0, 0, 0, 0, 0, 0, -1.0]) + if self._release_timer >= self._release_timeout: + self._phase = "idle" + return action, True + return action, False + + def _grasping_action(self): + """Generate action for force-controlled grasping.""" + self._grasp_timer += self.dt + min_close_time = getattr(self, "_min_close_time", 0.5) + + # Always command gripper to close during grasp phase + self._gripper_action = 1.0 + + if self.tactile is not None and self._grasp_timer >= min_close_time: + # Only check force after gripper has had time to close + mags = self.tactile.get_force_magnitudes() + avg_force = (mags["left_finger"].mean() + mags["right_finger"].mean()) / 2.0 + + # Done if force is within range or timeout + force_error = self._target_grip_force - avg_force + force_ok = abs(force_error) < 0.5 * self._target_grip_force + if force_ok or self._grasp_timer >= self._grasp_timeout: + self._phase = "idle" + return np.array([0, 0, 0, 0, 0, 0, 1.0]), True + elif self._grasp_timer >= self._grasp_timeout: + # Timeout fallback + self._phase = "idle" + return np.array([0, 0, 0, 0, 0, 0, 1.0]), True + + action = np.array([0, 0, 0, 0, 0, 0, 1.0]) + return action, False + + def _waypoint_action(self): + """Generate action for multi-waypoint trajectory. + + Uses direct proportional control toward each waypoint's target. + Transitions to the next waypoint once close enough OR time is up. 
+ """ + _, _, traj_done = self._trajectory.step() + + wp = self._waypoints[self._current_wp_idx] + wp_target = np.array(wp["pos"]) + + current_pos = self.get_eef_pos() + pos_error = wp_target - current_pos + + # Compliance correction using tactile (for insertion) + if wp.get("compliant", False) and self.tactile is not None: + compliance_gain = wp.get("compliance_gain", 0.3) + mags = self.tactile.get_force_magnitudes() + left_total = mags["left_finger"].sum() + right_total = mags["right_finger"].sum() + force_diff = left_total - right_total + pos_error[1] += compliance_gain * force_diff * 0.001 + + pos_action = self._compute_pos_action(pos_error) + + ori_action = np.zeros(3) + if self._target_quat is not None: + ori_action = np.clip( + self._compute_ori_error(self._target_quat) * self.ori_gain, -1.0, 1.0 + ) + + self._gripper_action = -1.0 if wp.get("gripper_open", True) else 1.0 + action = np.concatenate([pos_action, ori_action, [self._gripper_action]]) + + # Advance to next waypoint when close to target (per-waypoint threshold). 
+ dist = np.linalg.norm(pos_error) + self._wp_steps = getattr(self, "_wp_steps", 0) + 1 + max_wp_steps = int(wp["duration"] / self.dt) + 40 # 2s extra + min_hold = wp.get("min_hold", 0) + elapsed = self._wp_steps * self.dt + threshold = wp.get("threshold", 0.015) # 1.5cm default + converged = dist < threshold and elapsed >= min_hold + timed_out = self._wp_steps > max_wp_steps + + if converged or timed_out: + self._current_wp_idx += 1 + if self._current_wp_idx >= len(self._waypoints): + self._phase = "idle" + return action, True + else: + self._start_next_waypoint() + + return action, False + + def _start_next_waypoint(self): + """Initialize trajectory for next waypoint.""" + wp = self._waypoints[self._current_wp_idx] + start_pos = self.get_eef_pos() + self._trajectory = MinJerkTrajectory( + start_pos, wp["pos"], wp["duration"], self.dt + ) + self._wp_steps = 0 # reset step counter for this waypoint + # Always maintain vertical orientation (initial downward pose) + self._target_quat = self._initial_quat.copy() + + def _compute_ori_error(self, target_quat): + """Compute orientation error as angle-axis.""" + current_mat = self.get_eef_mat() + target_mat = np.zeros(9) + mujoco.mju_quat2Mat(target_mat, target_quat) + target_mat = target_mat.reshape(3, 3) + + error_mat = target_mat @ current_mat.T + # Extract angle-axis from rotation matrix + trace = np.clip((np.trace(error_mat) - 1.0) / 2.0, -1.0, 1.0) + angle = np.arccos(trace) + if angle < 1e-6: + return np.zeros(3) + + axis = np.array([ + error_mat[2, 1] - error_mat[1, 2], + error_mat[0, 2] - error_mat[2, 0], + error_mat[1, 0] - error_mat[0, 1], + ]) + norm = np.linalg.norm(axis) + if norm < 1e-6: + return np.zeros(3) + axis /= norm + + return axis * angle + + @property + def is_done(self): + return self._phase == "idle" + + @property + def phase(self): + return self._phase + + def reset(self): + """Reset planner state.""" + self._trajectory = None + self._phase = "idle" + self._gripper_action = -1.0 + 
self._target_pos = None + self._target_quat = None + self._grip_force_integral = 0.0 diff --git a/tactile_tasks/run_collection.py b/tactile_tasks/run_collection.py new file mode 100644 index 0000000000000000000000000000000000000000..b8cd61b92b4541d1436de5fe3ffa037ec77a8200 --- /dev/null +++ b/tactile_tasks/run_collection.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +""" +Batch data collection for tactile manipulation tasks. + +Collects episodes with randomized object placements, saves per-episode HDF5 files, +and generates per-episode MP4 videos with tactile overlays. + +File structure: + tactile_data/ + precision_grasp/ + episode_00.hdf5 + episode_01.hdf5 + ... + peg_insertion/ + episode_00.hdf5 + ... + gentle_stack/ + episode_00.hdf5 + ... + videos/ + precision_grasp/ + precision_grasp_ep00.mp4 + ... + +Usage: + # Collect all tasks, 50 episodes each + python tactile_tasks/run_collection.py --n_episodes 50 + + # Collect single task + python tactile_tasks/run_collection.py --task peg_insertion --n_episodes 50 + + # With visualization window (slower) + python tactile_tasks/run_collection.py --task gentle_stack --n_episodes 10 --visualize + + # Custom output directory + python tactile_tasks/run_collection.py --save_dir ./my_data --n_episodes 20 + + # Skip video generation (faster) + python tactile_tasks/run_collection.py --n_episodes 50 --no_video +""" + +import os +import sys +import argparse +import time + +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from tactile_tasks.collect_data import collect_task_data, TASK_CONFIGS +from tactile_tasks.visualize_data import generate_video + + +def generate_all_episode_videos(task_dir, n_episodes, video_dir, task_name): + """Generate videos for all episodes in a task directory.""" + os.makedirs(video_dir, exist_ok=True) + for ep_idx in range(n_episodes): + data_file = os.path.join(task_dir, f"episode_{ep_idx:02d}.hdf5") + video_path = os.path.join(video_dir, 
f"{task_name}_ep{ep_idx:02d}.mp4") + if os.path.exists(video_path): + continue # skip already generated + if not os.path.exists(data_file): + print(f" Warning: {data_file} not found, skipping video") + continue + try: + generate_video(data_file, output_path=video_path) + except Exception as e: + print(f" Warning: video for episode {ep_idx} failed: {e}") + + +def main(): + parser = argparse.ArgumentParser(description="Collect tactile manipulation data") + parser.add_argument("--task", type=str, default="all", + choices=list(TASK_CONFIGS.keys()) + ["all"], + help="Task to collect (default: all)") + parser.add_argument("--n_episodes", type=int, default=50, + help="Number of episodes per task (default: 50)") + parser.add_argument("--save_dir", type=str, default="./tactile_data", + help="Output directory (default: ./tactile_data)") + parser.add_argument("--visualize", action="store_true", + help="Show renderer window during collection") + parser.add_argument("--no_video", action="store_true", + help="Skip video generation") + args = parser.parse_args() + + tasks = list(TASK_CONFIGS.keys()) if args.task == "all" else [args.task] + total_start = time.time() + + for task in tasks: + print(f"\n{'='*60}") + print(f" Collecting: {task} | {args.n_episodes} episodes") + print(f"{'='*60}") + + t0 = time.time() + task_dir = collect_task_data( + task, + n_episodes=args.n_episodes, + save_dir=args.save_dir, + visualize=args.visualize, + ) + elapsed = time.time() - t0 + print(f" Collection time: {elapsed:.0f}s ({elapsed/args.n_episodes:.1f}s/ep)") + + if not args.no_video: + print(f"\n Generating videos...") + video_dir = os.path.join(args.save_dir, "videos", task) + generate_all_episode_videos(task_dir, args.n_episodes, video_dir, task) + print(f" Videos saved to: {video_dir}/") + + total_elapsed = time.time() - total_start + print(f"\n{'='*60}") + print(f" Done! 
Total time: {total_elapsed:.0f}s") + print(f" Data saved to: {args.save_dir}/") + print(f"{'='*60}") + + +if __name__ == "__main__": + main() diff --git a/tactile_tasks/sawyer_ik.py b/tactile_tasks/sawyer_ik.py new file mode 100644 index 0000000000000000000000000000000000000000..c7603bcbc39cd106245904da996dd86723303324 --- /dev/null +++ b/tactile_tasks/sawyer_ik.py @@ -0,0 +1,102 @@ +""" +Mink-based IK solver for Sawyer + Robotiq85 in robosuite. + +Computes joint angles for target EEF poses (position + orientation), +enabling simultaneous position and orientation control without the +iterative align-straighten cycles needed by OSC_POSE. +""" + +import numpy as np +import mink + + +def get_vertical_rotation(): + """Rotation matrix for gripper pointing straight down: z=[0,0,-1].""" + return np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=float) + + +class SawyerIKSolver: + """Differential IK solver for Sawyer arm using mink.""" + + def __init__(self, env, position_cost=1.0, orientation_cost=1.0, posture_cost=1e-3): + self.env = env + self.robot = env.robots[0] + self.model = env.sim.model._model + self.data = env.sim.data._data + self.joint_ids = list(self.robot._ref_joint_pos_indexes) # [0..6] + + # EEF site name + import mujoco + eef_site_id = self.robot.eef_site_id["right"] + self.eef_name = mujoco.mj_id2name( + self.model, mujoco.mjtObj.mjOBJ_SITE, eef_site_id + ) + + # Mink configuration and tasks + self.config = mink.Configuration(self.model) + self.eef_task = mink.FrameTask( + frame_name=self.eef_name, + frame_type="site", + position_cost=position_cost, + orientation_cost=orientation_cost, + lm_damping=1.0, + ) + self.posture_task = mink.PostureTask( + model=self.model, cost=posture_cost + ) + self.tasks = [self.eef_task, self.posture_task] + + def solve(self, target_pos, target_rot=None, max_iters=100, pos_tol=1e-4, ori_tol=1e-4): + """ + Solve IK for target EEF pose. 
+ + Args: + target_pos: [x, y, z] target position + target_rot: 3x3 rotation matrix (default: vertical down) + max_iters: max IK iterations + pos_tol: position convergence threshold + ori_tol: orientation convergence threshold + + Returns: + target_qpos: (7,) joint angles, or None if failed + info: dict with pos_err, ori_err, converged + """ + if target_rot is None: + target_rot = get_vertical_rotation() + + # Build SE3 target + target_mat = np.eye(4) + target_mat[:3, 3] = target_pos + target_mat[:3, :3] = target_rot + target_se3 = mink.SE3.from_matrix(target_mat) + self.eef_task.set_target(target_se3) + + # Init from current qpos + current_q = self.data.qpos.copy() + self.config.update(current_q) + self.posture_task.set_target(current_q) + + dt = 0.01 + for i in range(max_iters): + vel = mink.solve_ik(self.config, self.tasks, dt, "quadprog", 1e-5) + self.config.integrate_inplace(vel, dt) + err = self.eef_task.compute_error(self.config) + pos_err = np.linalg.norm(err[:3]) + ori_err = np.linalg.norm(err[3:]) + if pos_err < pos_tol and ori_err < ori_tol: + break + + converged = pos_err < pos_tol and ori_err < ori_tol + target_qpos = self.config.q[self.joint_ids].copy() + + return target_qpos, { + "pos_err": pos_err, + "ori_err": ori_err, + "converged": converged, + "iters": i + 1, + } + + def get_eef_tilt(self): + """Get current EEF tilt from vertical (degrees).""" + eef_mat = self.data.site_xmat[self.robot.eef_site_id["right"]].reshape(3, 3) + return np.degrees(np.arccos(np.clip(-eef_mat[2, 2], -1, 1))) diff --git a/tactile_tasks/uskin_sensor.py b/tactile_tasks/uskin_sensor.py new file mode 100644 index 0000000000000000000000000000000000000000..4ba19a90c3ca6bec5609f1563e4a40bb921e80d7 --- /dev/null +++ b/tactile_tasks/uskin_sensor.py @@ -0,0 +1,215 @@ +""" +uSkin Tactile Sensor Simulation for Robotiq 85 Gripper. + +Reads 3D contact forces from MuJoCo and maps them to a 4x4 taxel grid +defined as sites in the gripper XML. 
Each taxel reports (fx, fy, fz) in +the finger body's local frame. + +The taxel sites are defined in robotiq_gripper_85.xml as: + taxel_{l|r}_r{row}c{col} (e.g., taxel_l_r0c0, taxel_r_r3c3) + +Reference: Tomo et al., "Covering a Robot Fingertip with uSkin", IEEE RA-L 2018. +""" + +import numpy as np +import mujoco + + +class USkinSensor: + """ + Simulates uSkin tactile sensors on both fingerpads of a Robotiq 85 gripper. + + Reads taxel positions from XML-defined sites, maps MuJoCo contact forces + to the nearest taxel. Each finger has a 4x4 grid outputting 3D force vectors. + + Args: + sim: robosuite MjSim instance + gripper_prefix (str): naming prefix (e.g., "gripper0_right_") + noise_std (float): Gaussian measurement noise std (N) + force_range (tuple): clipping range per axis (N) + influence_radius (float): max distance for contact-to-taxel mapping (m) + """ + + TAXEL_ROWS = 4 + TAXEL_COLS = 4 + FREQ_MULTIPLIER = 5 # tactile freq = 5 * control freq + + def __init__(self, sim, gripper_prefix="gripper0_right_", + noise_std=0.02, force_range=(-50.0, 50.0), + influence_radius=0.005): + self.sim = sim + self.prefix = gripper_prefix + self.noise_std = noise_std + self.force_range = force_range + self.influence_radius = influence_radius + + # Resolve taxel site IDs from XML + self._site_ids = {"left": np.zeros((4, 4), dtype=int), + "right": np.zeros((4, 4), dtype=int)} + for side, letter in [("left", "l"), ("right", "r")]: + for row in range(4): + for col in range(4): + name = f"{self.prefix}taxel_{letter}_r{row}c{col}" + self._site_ids[side][row, col] = sim.model.site_name2id(name) + + # Resolve finger geom IDs for contact detection + self._geom_ids = {} + for side, names in [ + ("left", ["left_fingerpad_collision", "left_fingertip_collision", + "left_inner_finger_collision"]), + ("right", ["right_fingerpad_collision", "right_fingertip_collision", + "right_inner_finger_collision"]), + ]: + self._geom_ids[side] = [] + for gname in names: + try: + 
self._geom_ids[side].append( + sim.model.geom_name2id(f"{self.prefix}{gname}")) + except Exception: + pass + + # Resolve finger body IDs + self._body_ids = {} + for side, bname in [("left", "left_inner_finger"), + ("right", "right_inner_finger")]: + self._body_ids[side] = sim.model.body_name2id( + f"{self.prefix}{bname}") + + # Internal state + self._left_forces = np.zeros((4, 4, 3)) + self._right_forces = np.zeros((4, 4, 3)) + self._tactile_buffer = [] + + def _get_taxel_world_pos(self, side): + """Get world positions of all taxels from site data. Shape (4, 4, 3).""" + positions = np.zeros((4, 4, 3)) + for r in range(4): + for c in range(4): + positions[r, c] = self.sim.data.site_xpos[ + self._site_ids[side][r, c]] + return positions + + def update(self): + """ + Read MuJoCo contacts, map forces to nearest taxel sites. + + Returns: + dict with "left_finger", "right_finger" (4,4,3), + "total_left", "total_right" (3,) + """ + self._left_forces = np.zeros((4, 4, 3)) + self._right_forces = np.zeros((4, 4, 3)) + + model_raw = self.sim.model._model + data_raw = self.sim.data._data + + # Cache taxel world positions for this step + taxel_pos = { + "left": self._get_taxel_world_pos("left"), + "right": self._get_taxel_world_pos("right"), + } + + for i in range(data_raw.ncon): + contact = data_raw.contact[i] + g1, g2 = contact.geom1, contact.geom2 + + # Which finger is involved? 
+ finger_side = None + other_geom = None + for side, gids in self._geom_ids.items(): + if g1 in gids: + finger_side = side + other_geom = g2 + break + elif g2 in gids: + finger_side = side + other_geom = g1 + break + + if finger_side is None: + continue + + # Skip gripper self-contacts + other_side = "right" if finger_side == "left" else "left" + if other_geom in self._geom_ids.get(other_side, []): + continue + + # Get 3D contact force in contact frame + c_force = np.zeros(6) + mujoco.mj_contactForce(model_raw, data_raw, i, c_force) + + # Transform to world frame + contact_frame = contact.frame.reshape(3, 3) + force_world = contact_frame.T @ c_force[:3] + + # Transform to finger body local frame + body_id = self._body_ids[finger_side] + body_rot = self.sim.data.body_xmat[body_id].reshape(3, 3) + force_local = body_rot.T @ force_world + + # Find nearest taxel by world-frame distance + contact_pos = contact.pos + tp = taxel_pos[finger_side] + dists = np.linalg.norm( + tp.reshape(-1, 3) - contact_pos, axis=1) + min_idx = np.argmin(dists) + + if dists[min_idx] < self.influence_radius: + row, col = divmod(min_idx, 4) + if finger_side == "left": + self._left_forces[row, col] += force_local + else: + self._right_forces[row, col] += force_local + + # Add sensor noise + if self.noise_std > 0: + self._left_forces += np.random.normal( + 0, self.noise_std, self._left_forces.shape) + self._right_forces += np.random.normal( + 0, self.noise_std, self._right_forces.shape) + + # Clip to sensor range + self._left_forces = np.clip( + self._left_forces, self.force_range[0], self.force_range[1]) + self._right_forces = np.clip( + self._right_forces, self.force_range[0], self.force_range[1]) + + result = { + "left_finger": self._left_forces.copy(), + "right_finger": self._right_forces.copy(), + "total_left": self._left_forces.sum(axis=(0, 1)), + "total_right": self._right_forces.sum(axis=(0, 1)), + } + self._tactile_buffer.append(result) + return result + + def get_observation(self): + 
"""Flat array for policy input. Shape (96,).""" + return np.concatenate([ + self._left_forces.flatten(), + self._right_forces.flatten(), + ]) + + def get_force_magnitudes(self): + """Per-taxel force magnitudes. Returns dict of (4,4) arrays.""" + return { + "left_finger": np.linalg.norm(self._left_forces, axis=2), + "right_finger": np.linalg.norm(self._right_forces, axis=2), + } + + def get_taxel_world_positions(self, side): + """Get world positions of taxels. Returns (4, 4, 3).""" + return self._get_taxel_world_pos(side) + + def get_buffer(self, clear=True): + """Get accumulated high-frequency readings.""" + buf = list(self._tactile_buffer) + if clear: + self._tactile_buffer = [] + return buf + + def reset(self): + """Reset sensor state.""" + self._left_forces = np.zeros((4, 4, 3)) + self._right_forces = np.zeros((4, 4, 3)) + self._tactile_buffer = [] diff --git a/tactile_tasks/visualize_data.py b/tactile_tasks/visualize_data.py new file mode 100644 index 0000000000000000000000000000000000000000..76cd4aaabbac9fb147f365a6a4cc3070c6a1e8f7 --- /dev/null +++ b/tactile_tasks/visualize_data.py @@ -0,0 +1,628 @@ +#!/usr/bin/env python3 +""" +Visualization script for collected tactile manipulation data. + +Displays: +1. Camera images (agentview + eye_in_hand) - synchronized with control freq +2. Real-time tactile force distribution heatmaps (4x4 per finger) +3. 
def visualize_offline(data_file, episode_idx=0, playback_speed=1.0):
    """
    Visualize saved episode data from an HDF5 file.

    Shows camera images, per-finger 4x4 tactile heatmaps, and a force
    time series side by side, replayed at the recording's control rate.

    Args:
        data_file (str): path to a per-episode HDF5 file (data at root level)
        episode_idx (int): kept for CLI compatibility; per-episode files hold
            a single episode so it is not used for indexing
        playback_speed (float): >1.0 plays faster, <1.0 slower
    """
    import matplotlib.pyplot as plt
    from matplotlib.gridspec import GridSpec

    with h5py.File(data_file, "r") as f:
        meta = f["metadata"]
        print(f"Task: {meta.attrs['task']}")
        print(f"Robot: {meta.attrs['robot']}, Gripper: {meta.attrs['gripper']}")
        print(f"Tactile sensor: {meta.attrs['tactile_sensor']}")
        print(f"Control freq: {meta.attrs['control_freq']} Hz, "
              f"Tactile freq: {meta.attrs['tactile_freq']} Hz")

        # Per-episode HDF5: data at root level
        ep = f
        print(f"\nEpisode: "
              f"steps={ep.attrs['n_steps']}, "
              f"success={ep.attrs['success']}")

        # Load everything into memory so the file can be replayed freely.
        agentview = ep["agentview_image"][:] if "agentview_image" in ep else None
        eye_in_hand = ep["eye_in_hand_image"][:] if "eye_in_hand_image" in ep else None
        tactile_left = ep["tactile_left"][:] if "tactile_left" in ep else None
        tactile_right = ep["tactile_right"][:] if "tactile_right" in ep else None
        rewards = ep["rewards"][:] if "rewards" in ep else None

        # FIX: require BOTH tactile arrays before indexing them. The original
        # gated only on tactile_left and then indexed tactile_right, which
        # crashes when only one of the two datasets exists.
        have_tactile = tactile_left is not None and tactile_right is not None

        n_frames = len(agentview) if agentview is not None else 0
        tactile_ratio = 5  # tactile_freq / control_freq

        if n_frames == 0:
            print("No image data to visualize.")
            return

        print(f"Frames: {n_frames}, "
              f"Tactile samples: {len(tactile_left) if tactile_left is not None else 0}")

        # Setup figure
        fig = plt.figure(figsize=(16, 10))
        gs = GridSpec(3, 4, figure=fig, hspace=0.35, wspace=0.3)

        # Camera views
        ax_agent = fig.add_subplot(gs[0:2, 0:2])
        ax_agent.set_title("AgentView Camera", fontsize=12, fontweight="bold")
        ax_agent.axis("off")

        ax_hand = fig.add_subplot(gs[0:2, 2:4])
        ax_hand.set_title("Eye-in-Hand Camera", fontsize=12, fontweight="bold")
        ax_hand.axis("off")

        # Tactile heatmaps
        ax_tleft = fig.add_subplot(gs[2, 0])
        ax_tleft.set_title("Left Finger Tactile", fontsize=10, fontweight="bold")

        ax_tright = fig.add_subplot(gs[2, 1])
        ax_tright.set_title("Right Finger Tactile", fontsize=10, fontweight="bold")

        # Force time series
        ax_force = fig.add_subplot(gs[2, 2:4])
        ax_force.set_title("Tactile Force Magnitude", fontsize=10, fontweight="bold")
        ax_force.set_xlabel("Step")
        ax_force.set_ylabel("Force (N)")

        # Precompute force magnitudes for time series
        if have_tactile:
            left_mag_all = np.linalg.norm(tactile_left, axis=-1).mean(axis=(1, 2))
            right_mag_all = np.linalg.norm(tactile_right, axis=-1).mean(axis=(1, 2))
        else:
            left_mag_all = np.zeros(1)
            right_mag_all = np.zeros(1)

        # Shared color normalization; floor of 0.1 avoids a degenerate scale
        # when the episode never makes contact.
        vmax = max(left_mag_all.max(), right_mag_all.max(), 0.1)

        # Initial display
        img_agent = ax_agent.imshow(agentview[0] if agentview is not None else np.zeros((256, 256, 3), dtype=np.uint8))
        img_hand = ax_hand.imshow(eye_in_hand[0] if eye_in_hand is not None else np.zeros((256, 256, 3), dtype=np.uint8))

        # Tactile heatmaps
        if have_tactile:
            left_mag = np.linalg.norm(tactile_left[0], axis=-1)
            right_mag = np.linalg.norm(tactile_right[0], axis=-1)
        else:
            left_mag = np.zeros((4, 4))
            right_mag = np.zeros((4, 4))

        hm_left = ax_tleft.imshow(left_mag, cmap="hot", vmin=0, vmax=vmax,
                                  interpolation="nearest", aspect="equal")
        hm_right = ax_tright.imshow(right_mag, cmap="hot", vmin=0, vmax=vmax,
                                    interpolation="nearest", aspect="equal")
        plt.colorbar(hm_left, ax=ax_tleft, fraction=0.046)
        plt.colorbar(hm_right, ax=ax_tright, fraction=0.046)

        # Force value text annotations on heatmaps
        left_texts = []
        right_texts = []
        for ri in range(4):
            for ci in range(4):
                val = left_mag[ri, ci]
                color = "white" if val > vmax * 0.5 else "black"
                t = ax_tleft.text(ci, ri, f"{val:.1f}", ha="center", va="center",
                                  fontsize=6, color=color, fontweight="bold")
                left_texts.append(t)
                val = right_mag[ri, ci]
                color = "white" if val > vmax * 0.5 else "black"
                t = ax_tright.text(ci, ri, f"{val:.1f}", ha="center", va="center",
                                   fontsize=6, color=color, fontweight="bold")
                right_texts.append(t)

        # Add taxel grid labels
        for ax in [ax_tleft, ax_tright]:
            ax.set_xticks(range(4))
            ax.set_yticks(range(4))
            ax.set_xticklabels([f"c{i}" for i in range(4)], fontsize=7)
            ax.set_yticklabels([f"r{i}" for i in range(4)], fontsize=7)

        # Force time series
        line_left, = ax_force.plot([], [], "b-", label="Left finger", linewidth=1)
        line_right, = ax_force.plot([], [], "r-", label="Right finger", linewidth=1)
        ax_force.legend(fontsize=8)
        ax_force.set_xlim(0, len(left_mag_all))
        ax_force.set_ylim(0, vmax * 1.1)
        vline = ax_force.axvline(x=0, color="gray", linestyle="--", alpha=0.5)

        # Plot full force series
        line_left.set_data(range(len(left_mag_all)), left_mag_all)
        line_right.set_data(range(len(right_mag_all)), right_mag_all)

        fig.suptitle("Tactile Manipulation Data Viewer", fontsize=14, fontweight="bold")

        # Animation: one wall-clock delay per control step.
        delay = (1.0 / 20.0) / playback_speed  # 20 Hz control freq

        plt.ion()
        plt.show()

        try:
            for frame_idx in range(n_frames):
                # Update camera images
                if agentview is not None:
                    img_agent.set_data(agentview[frame_idx])
                if eye_in_hand is not None:
                    img_hand.set_data(eye_in_hand[frame_idx])

                # Update tactile heatmaps (show the last sub-sample for this frame)
                if have_tactile:
                    t_idx = min(frame_idx * tactile_ratio + tactile_ratio - 1, len(tactile_left) - 1)
                    left_mag = np.linalg.norm(tactile_left[t_idx], axis=-1)
                    right_mag = np.linalg.norm(tactile_right[t_idx], axis=-1)
                    hm_left.set_data(left_mag)
                    hm_right.set_data(right_mag)
                    # Update force value annotations
                    for ri in range(4):
                        for ci in range(4):
                            idx = ri * 4 + ci
                            lv = left_mag[ri, ci]
                            left_texts[idx].set_text(f"{lv:.1f}")
                            left_texts[idx].set_color("white" if lv > vmax * 0.5 else "black")
                            rv = right_mag[ri, ci]
                            right_texts[idx].set_text(f"{rv:.1f}")
                            right_texts[idx].set_color("white" if rv > vmax * 0.5 else "black")

                # Update time marker
                vline.set_xdata([frame_idx * tactile_ratio])

                # Update title with step info
                reward_str = f", Reward: {rewards[frame_idx]:.3f}" if rewards is not None else ""
                fig.suptitle(
                    f"Step {frame_idx}/{n_frames}{reward_str}",
                    fontsize=14, fontweight="bold"
                )

                fig.canvas.draw_idle()
                fig.canvas.flush_events()
                plt.pause(delay)

        except KeyboardInterrupt:
            pass

        plt.ioff()
        print("\nVisualization complete. Close the window to exit.")
        plt.show()
+ """ + import matplotlib + matplotlib.use("TkAgg") + import matplotlib.pyplot as plt + from matplotlib.gridspec import GridSpec + + from tactile_tasks.uskin_sensor import USkinSensor + from tactile_tasks.motion_planner import MotionPlanner + from tactile_tasks.collect_data import create_env, TASK_CONFIGS, collect_episode + + config = TASK_CONFIGS[task_name] + + # Create env with renderer + env = create_env(task_name, has_renderer=True) + obs = env.reset() + + tactile = USkinSensor(env.sim, gripper_prefix="gripper0_right_", noise_std=0.02) + planner = MotionPlanner(env, tactile_sensor=tactile) + + # Setup tactile visualization figure + fig, axes = plt.subplots(1, 3, figsize=(14, 4)) + + ax_left = axes[0] + ax_left.set_title("Left Finger Tactile (4x4)", fontweight="bold") + hm_left = ax_left.imshow(np.zeros((4, 4)), cmap="hot", vmin=0, vmax=2.0, + interpolation="nearest", aspect="equal") + plt.colorbar(hm_left, ax=ax_left, label="Force (N)") + + ax_right = axes[1] + ax_right.set_title("Right Finger Tactile (4x4)", fontweight="bold") + hm_right = ax_right.imshow(np.zeros((4, 4)), cmap="hot", vmin=0, vmax=2.0, + interpolation="nearest", aspect="equal") + plt.colorbar(hm_right, ax=ax_right, label="Force (N)") + + ax_force = axes[2] + ax_force.set_title("Force History", fontweight="bold") + ax_force.set_xlabel("Step") + ax_force.set_ylabel("Avg Force (N)") + left_history = [] + right_history = [] + line_l, = ax_force.plot([], [], "b-", label="Left") + line_r, = ax_force.plot([], [], "r-", label="Right") + ax_force.legend() + ax_force.set_ylim(0, 3) + + for ax in [ax_left, ax_right]: + ax.set_xticks(range(4)) + ax.set_yticks(range(4)) + + plt.ion() + plt.tight_layout() + plt.show() + + # Run collection with live visualization + plan_fn = config["plan_fn"] + phases = plan_fn(planner, env) + current_phase_idx = 0 + phase_name, phase_init = phases[current_phase_idx] + phase_init() + print(f"Phase: {phase_name}") + + step = 0 + try: + while step < config["horizon"]: + 
action, phase_done = planner.get_action() + + # Update tactile + for _ in range(USkinSensor.FREQ_MULTIPLIER): + tactile_data = tactile.update() + + # Step environment + obs, reward, done, info = env.step(action) + + # Render + env.render() + + # Update tactile visualization + mags = tactile.get_force_magnitudes() + hm_left.set_data(mags["left_finger"]) + hm_right.set_data(mags["right_finger"]) + + # Update vmax dynamically + cur_max = max(mags["left_finger"].max(), mags["right_finger"].max(), 0.5) + hm_left.set_clim(0, cur_max) + hm_right.set_clim(0, cur_max) + + # Update force history + left_history.append(mags["left_finger"].mean()) + right_history.append(mags["right_finger"].mean()) + line_l.set_data(range(len(left_history)), left_history) + line_r.set_data(range(len(right_history)), right_history) + ax_force.set_xlim(0, max(len(left_history), 10)) + ax_force.set_ylim(0, max(max(left_history + [0.5]), max(right_history + [0.5])) * 1.2) + + fig.suptitle(f"Step {step} | Phase: {phase_name} | Reward: {reward:.3f}", fontweight="bold") + fig.canvas.draw_idle() + fig.canvas.flush_events() + + step += 1 + + if phase_done: + current_phase_idx += 1 + if current_phase_idx < len(phases): + phase_name, phase_init = phases[current_phase_idx] + phase_init() + print(f"Phase: {phase_name}") + else: + print("All phases complete!") + break + + if done: + break + + except KeyboardInterrupt: + pass + + print(f"Episode done. Steps: {step}, Success: {env._check_success()}") + plt.ioff() + plt.show() + env.close() + + +def generate_video(data_file, episode_idx=None, output_path=None, fps=20, show_tactile=True): + """ + Generate MP4 video from a per-episode HDF5 file using ffmpeg. 
def generate_video(data_file, episode_idx=None, output_path=None, fps=20, show_tactile=True):
    """
    Generate MP4 video from a per-episode HDF5 file using ffmpeg.

    Args:
        data_file (str): path to per-episode HDF5 file
        episode_idx: deprecated; when given it only tags the default filename
        output_path (str): output video path (default: auto-generated)
        fps (int): frames per second
        show_tactile (bool): whether to include tactile heatmaps
    """
    import subprocess
    import tempfile
    import shutil

    try:
        import matplotlib
        matplotlib.use("Agg")  # non-interactive backend for rendering
        import matplotlib.pyplot as plt
        from matplotlib.gridspec import GridSpec
    except ImportError:
        print("matplotlib is required for video generation")
        return

    # Check ffmpeg availability
    if shutil.which("ffmpeg") is None:
        print("ERROR: ffmpeg not found. Install it: sudo apt install ffmpeg")
        return

    with h5py.File(data_file, "r") as f:
        meta = f["metadata"]
        task_name = meta.attrs["task"]
        # Per-episode HDF5: data at root level
        success = f.attrs.get("success", False)
        n_steps = f.attrs.get("n_steps", 0)

        agentview = f["agentview_image"][:] if "agentview_image" in f else None
        eye_in_hand = f["eye_in_hand_image"][:] if "eye_in_hand_image" in f else None
        tactile_left = f["tactile_left"][:] if "tactile_left" in f else None
        tactile_right = f["tactile_right"][:] if "tactile_right" in f else None
        rewards = f["rewards"][:] if "rewards" in f else None

    if agentview is None or len(agentview) == 0:
        print("No image data to render.")
        return

    n_frames = len(agentview)
    tactile_ratio = 5  # tactile_freq / control_freq

    if output_path is None:
        base = os.path.splitext(data_file)[0]
        # FIX: episode_idx defaults to None and formatting None with ":04d"
        # raises TypeError. Only add the episode tag when one was provided.
        ep_tag = f"_ep{episode_idx:04d}" if episode_idx is not None else ""
        output_path = f"{base}{ep_tag}.mp4"

    print(f"Generating video: {output_path}")
    print(f"  Task: {task_name}, Episode: {episode_idx}, Steps: {n_steps}, Success: {success}")
    print(f"  Frames: {n_frames}, FPS: {fps}")

    # Precompute tactile force magnitudes for color scale.
    # FIX: also require tactile_right — the original only checked
    # tactile_left and then took the norm of tactile_right.
    if show_tactile and tactile_left is not None and tactile_right is not None:
        left_mag_all = np.linalg.norm(tactile_left, axis=-1)
        right_mag_all = np.linalg.norm(tactile_right, axis=-1)
        vmax = max(left_mag_all.max(), right_mag_all.max(), 0.1)
        left_avg = left_mag_all.mean(axis=(1, 2))
        right_avg = right_mag_all.mean(axis=(1, 2))
    else:
        show_tactile = False

    # Determine figure layout
    if show_tactile:
        fig = plt.figure(figsize=(12, 8), dpi=100)
        gs = GridSpec(3, 4, figure=fig, hspace=0.3, wspace=0.3)
        ax_agent = fig.add_subplot(gs[0:2, 0:2])
        ax_hand = fig.add_subplot(gs[0:2, 2:4])
        ax_tleft = fig.add_subplot(gs[2, 0])
        ax_tright = fig.add_subplot(gs[2, 1])
        ax_force = fig.add_subplot(gs[2, 2:4])
    else:
        fig, axes = plt.subplots(1, 2, figsize=(10, 5), dpi=100)
        ax_agent, ax_hand = axes

    # Use a temp directory for frames
    tmpdir = tempfile.mkdtemp(prefix="tactile_video_")

    try:
        for frame_idx in range(n_frames):
            # Clear and redraw each frame
            if show_tactile:
                for ax in [ax_agent, ax_hand, ax_tleft, ax_tright, ax_force]:
                    ax.clear()
            else:
                ax_agent.clear()
                ax_hand.clear()

            # Camera images
            ax_agent.imshow(agentview[frame_idx])
            ax_agent.set_title("AgentView", fontsize=10, fontweight="bold")
            ax_agent.axis("off")

            if eye_in_hand is not None and len(eye_in_hand) > frame_idx:
                ax_hand.imshow(eye_in_hand[frame_idx])
                ax_hand.set_title("Eye-in-Hand", fontsize=10, fontweight="bold")
                ax_hand.axis("off")

            if show_tactile:
                # Show the last tactile sub-sample belonging to this frame.
                t_idx = min(frame_idx * tactile_ratio + tactile_ratio - 1, len(tactile_left) - 1)
                left_mag = np.linalg.norm(tactile_left[t_idx], axis=-1)
                right_mag = np.linalg.norm(tactile_right[t_idx], axis=-1)

                ax_tleft.imshow(left_mag, cmap="hot", vmin=0, vmax=vmax, interpolation="nearest")
                ax_tleft.set_title("Left Finger", fontsize=9)
                ax_tleft.set_xticks(range(4))
                ax_tleft.set_yticks(range(4))
                # Annotate each taxel with force value
                for ri in range(4):
                    for ci in range(4):
                        val = left_mag[ri, ci]
                        color = "white" if val > vmax * 0.5 else "black"
                        ax_tleft.text(ci, ri, f"{val:.1f}", ha="center", va="center",
                                      fontsize=6, color=color, fontweight="bold")

                ax_tright.imshow(right_mag, cmap="hot", vmin=0, vmax=vmax, interpolation="nearest")
                ax_tright.set_title("Right Finger", fontsize=9)
                ax_tright.set_xticks(range(4))
                ax_tright.set_yticks(range(4))
                # Annotate each taxel with force value
                for ri in range(4):
                    for ci in range(4):
                        val = right_mag[ri, ci]
                        color = "white" if val > vmax * 0.5 else "black"
                        ax_tright.text(ci, ri, f"{val:.1f}", ha="center", va="center",
                                       fontsize=6, color=color, fontweight="bold")

                # Force time series up to current frame
                t_end = min((frame_idx + 1) * tactile_ratio, len(left_avg))
                ax_force.plot(range(t_end), left_avg[:t_end], "b-", label="Left", linewidth=1)
                ax_force.plot(range(t_end), right_avg[:t_end], "r-", label="Right", linewidth=1)
                ax_force.axvline(x=t_end - 1, color="gray", linestyle="--", alpha=0.5)
                ax_force.set_xlim(0, len(left_avg))
                ax_force.set_ylim(0, vmax * 1.1)
                ax_force.set_xlabel("Tactile Sample", fontsize=8)
                ax_force.set_ylabel("Force (N)", fontsize=8)
                ax_force.legend(fontsize=7, loc="upper right")
                ax_force.set_title("Tactile Force", fontsize=9)

            # Title
            reward_str = f" R={rewards[frame_idx]:.2f}" if rewards is not None else ""
            status = "SUCCESS" if success else "RUNNING"
            fig.suptitle(
                f"{task_name} | Step {frame_idx+1}/{n_frames}{reward_str} | {status}",
                fontsize=12, fontweight="bold"
            )

            frame_path = os.path.join(tmpdir, f"frame_{frame_idx:06d}.png")
            fig.savefig(frame_path, bbox_inches="tight", pad_inches=0.1)

            if frame_idx % 50 == 0:
                print(f"  Rendered frame {frame_idx+1}/{n_frames}")

        plt.close(fig)

        # Combine frames with ffmpeg
        print("  Encoding video with ffmpeg...")
        ffmpeg_cmd = [
            "ffmpeg", "-y",
            "-framerate", str(fps),
            "-i", os.path.join(tmpdir, "frame_%06d.png"),
            "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",  # ensure even dimensions for H.264
            "-c:v", "libx264",
            "-pix_fmt", "yuv420p",
            "-crf", "23",
            "-preset", "medium",
            output_path,
        ]
        result = subprocess.run(ffmpeg_cmd, capture_output=True, text=True)
        if result.returncode != 0:
            print(f"  ffmpeg error: {result.stderr[:500]}")
        else:
            file_size = os.path.getsize(output_path) / 1024
            print(f"  Video saved: {output_path} ({file_size:.0f} KB)")

    finally:
        # Always remove the rendered PNG frames, even on error/interrupt.
        shutil.rmtree(tmpdir, ignore_errors=True)
print(f" ffmpeg error: {result.stderr[:500]}") + else: + file_size = os.path.getsize(output_path) / 1024 + print(f" Video saved: {output_path} ({file_size:.0f} KB)") + + finally: + shutil.rmtree(tmpdir, ignore_errors=True) + + +def generate_all_videos(data_dir, output_dir=None, fps=20, show_tactile=True): + """Generate videos for all episodes in all HDF5 files in a directory.""" + if output_dir is None: + output_dir = os.path.join(data_dir, "videos") + os.makedirs(output_dir, exist_ok=True) + + hdf5_files = sorted([f for f in os.listdir(data_dir) if f.endswith(".hdf5")]) + if not hdf5_files: + print(f"No HDF5 files found in {data_dir}") + return + + for hdf5_file in hdf5_files: + filepath = os.path.join(data_dir, hdf5_file) + with h5py.File(filepath, "r") as f: + episodes = sorted([k for k in f.keys() if k.startswith("episode_")]) + + task_name = os.path.splitext(hdf5_file)[0].replace("_data", "") + for ep_name in episodes: + ep_idx = int(ep_name.split("_")[1]) + output_path = os.path.join(output_dir, f"{task_name}_{ep_name}.mp4") + generate_video(filepath, ep_idx, output_path, fps, show_tactile) + + print(f"\nAll videos saved to: {output_dir}") + + +def print_data_summary(data_file): + """Print summary of saved data file.""" + with h5py.File(data_file, "r") as f: + print(f"\nData file: {data_file}") + print(f"{'='*50}") + + if "metadata" in f: + meta = f["metadata"] + for key in meta.attrs: + print(f" {key}: {meta.attrs[key]}") + + episodes = [k for k in f.keys() if k.startswith("episode_")] + print(f"\nEpisodes: {len(episodes)}") + + for ep_name in sorted(episodes): + ep = f[ep_name] + print(f"\n {ep_name}:") + print(f" Success: {ep.attrs.get('success', 'N/A')}") + print(f" Steps: {ep.attrs.get('n_steps', 'N/A')}") + for key in ep: + shape = ep[key].shape + dtype = ep[key].dtype + print(f" {key}: shape={shape}, dtype={dtype}") + + +def main(): + parser = argparse.ArgumentParser(description="Visualize tactile manipulation data") + 
parser.add_argument("--data_file", type=str, default=None, + help="HDF5 data file to visualize") + parser.add_argument("--episode", type=int, default=0, + help="Episode index to visualize") + parser.add_argument("--speed", type=float, default=1.0, + help="Playback speed multiplier") + parser.add_argument("--task", type=str, default="precision_grasp", + help="Task name for live visualization") + parser.add_argument("--live", action="store_true", + help="Live visualization during collection") + parser.add_argument("--summary", action="store_true", + help="Print data file summary") + parser.add_argument("--video", action="store_true", + help="Generate MP4 video from saved data") + parser.add_argument("--video_all", type=str, default=None, + help="Generate videos for all episodes in data directory") + parser.add_argument("--output", type=str, default=None, + help="Output video file path") + parser.add_argument("--fps", type=int, default=20, + help="Video frames per second") + parser.add_argument("--no_tactile", action="store_true", + help="Exclude tactile data from video") + args = parser.parse_args() + + if args.video_all: + generate_all_videos(args.video_all, fps=args.fps, show_tactile=not args.no_tactile) + elif args.video and args.data_file: + generate_video(args.data_file, args.episode, args.output, args.fps, not args.no_tactile) + elif args.summary and args.data_file: + print_data_summary(args.data_file) + elif args.live: + visualize_live(args.task) + elif args.data_file: + visualize_offline(args.data_file, args.episode, args.speed) + else: + print("Usage:") + print(" Offline viz: python visualize_data.py --data_file data.hdf5 --episode 0") + print(" Generate video: python visualize_data.py --data_file data.hdf5 --episode 0 --video") + print(" All videos: python visualize_data.py --video_all ./tactile_data/") + print(" Summary: python visualize_data.py --data_file data.hdf5 --summary") + print(" Live viz: python visualize_data.py --task precision_grasp --live") + 
+ +if __name__ == "__main__": + main() diff --git a/tests/test_environments/test_action_playback.py b/tests/test_environments/test_action_playback.py new file mode 100644 index 0000000000000000000000000000000000000000..cf82ee48b1813eb91b582c1baf29d615325f1c7e --- /dev/null +++ b/tests/test_environments/test_action_playback.py @@ -0,0 +1,76 @@ +""" +Test script for recording a sequence of random actions and playing them back +""" + +import argparse +import json +import os +import random + +import h5py +import numpy as np + +import robosuite +from robosuite.controllers import load_composite_controller_config + + +def test_playback(): + # set seeds + random.seed(0) + np.random.seed(0) + + env = robosuite.make( + "Lift", + robots=["Panda"], + controller_configs=load_composite_controller_config(controller="BASIC"), + has_renderer=False, + has_offscreen_renderer=False, + ignore_done=True, + use_camera_obs=False, + reward_shaping=True, + control_freq=20, + ) + env.reset() + + # task instance + task_xml = env.sim.model.get_xml() + task_init_state = np.array(env.sim.get_state().flatten()) + + # trick for ensuring that we can play MuJoCo demonstrations back + # deterministically by using the recorded actions open loop + env.reset_from_xml_string(task_xml) + env.sim.reset() + env.sim.set_state_from_flattened(task_init_state) + env.sim.forward() + + # random actions to play + n_actions = 100 + actions = 0.1 * np.random.uniform(low=-1.0, high=1.0, size=(n_actions, env.action_spec[0].shape[0])) + + # play actions + print("playing random actions...") + states = [task_init_state] + for i in range(n_actions): + env.step(actions[i]) + states.append(np.array(env.sim.get_state().flatten())) + + # try playback + print("attempting playback...") + env.reset() + env.reset_from_xml_string(task_xml) + env.sim.reset() + env.sim.set_state_from_flattened(task_init_state) + env.sim.forward() + + for i in range(n_actions): + env.step(actions[i]) + state_playback = env.sim.get_state().flatten() + 
assert np.all(np.equal(states[i + 1], state_playback)) + + env.close() + print("test passed!") + + +if __name__ == "__main__": + + test_playback() diff --git a/tests/test_grippers/test_all_grippers.py b/tests/test_grippers/test_all_grippers.py new file mode 100644 index 0000000000000000000000000000000000000000..d0c589f7e38785463abf6fdc98e4f485e49c2b74 --- /dev/null +++ b/tests/test_grippers/test_all_grippers.py @@ -0,0 +1,31 @@ +""" +Tests the basic interface of all grippers. + +This runs some basic sanity checks on the environment, namely, checking that: + - Verifies that the gripper's action, init_qpos exist and are valid + +Obviously, if an environment crashes during runtime, that is considered a failure as well. +""" + +from robosuite.models.grippers import GRIPPER_MAPPING + + +def test_all_gripper(): + for name, gripper in GRIPPER_MAPPING.items(): + # Test all grippers except the null gripper + if name not in {None, "WipingGripper"}: + print("Testing {}...".format(name)) + _test_gripper(gripper()) + + +def _test_gripper(gripper): + action = gripper.format_action([1] * gripper.dof) + assert action is not None + + assert gripper.init_qpos is not None + assert len(gripper.init_qpos) == len(gripper.joints) + + +if __name__ == "__main__": + test_all_gripper() + print("Gripper tests completed.") diff --git a/tests/test_grippers/test_jaco_threefinger.py b/tests/test_grippers/test_jaco_threefinger.py new file mode 100644 index 0000000000000000000000000000000000000000..d64f6ba8bb6910e852fa6505840bb2a1fcc2cc1e --- /dev/null +++ b/tests/test_grippers/test_jaco_threefinger.py @@ -0,0 +1,26 @@ +from robosuite.models.grippers import GripperTester, JacoThreeFingerGripper + + +def test_robotiq(): + robotiq_tester(False) + + +def robotiq_tester(render, total_iters=1, test_y=True): + gripper = JacoThreeFingerGripper() + tester = GripperTester( + gripper=gripper, + pos="0 0 0.3", + quat="0 0 1 0", + gripper_low_pos=0.01, + gripper_high_pos=0.1, + box_size=[0.025] * 3, + 
def test_panda_gripper():
    """Headless smoke test of the Panda gripper grab loop."""
    panda_gripper_tester(False)


def panda_gripper_tester(render, total_iters=1, test_y=True):
    """Drive a PandaGripper through the GripperTester grab/lift loop.

    Args:
        render (bool): show the simulation on screen
        total_iters (int): number of grab iterations to run
        test_y (bool): also test lifting along the y axis
    """
    tester = GripperTester(
        gripper=PandaGripper(),
        pos="0 0 0.3",
        quat="0 0 1 0",
        gripper_low_pos=-0.10,
        gripper_high_pos=0.01,
        render=render,
    )
    tester.start_simulation()
    tester.loop(total_iters=total_iters, test_y=test_y)
    tester.close()
def test_robotiq():
    """Headless smoke test of the Robotiq 140 gripper grab loop."""
    robotiq_tester(False)


def robotiq_tester(render, total_iters=1, test_y=True):
    """Drive a Robotiq140Gripper through the GripperTester grab/lift loop.

    Args:
        render (bool): show the simulation on screen
        total_iters (int): number of grab iterations to run
        test_y (bool): also test lifting along the y axis
    """
    setup = dict(
        gripper=Robotiq140Gripper(),
        pos="0 0 0.3",
        quat="0 0 1 0",
        gripper_low_pos=0.02,
        gripper_high_pos=0.1,
        box_size=[0.025] * 3,
        render=render,
    )
    tester = GripperTester(**setup)
    tester.start_simulation()
    tester.loop(total_iters=total_iters, test_y=test_y)
    tester.close()
def test_robotiq_three_finger():
    """Headless smoke test of the Robotiq three-finger gripper grab loop."""
    robotiq_three_finger_tester(False)


def robotiq_three_finger_tester(render, total_iters=1, test_y=True):
    """Drive a RobotiqThreeFingerGripper through the GripperTester grab/lift
    loop, using a slightly larger and lighter box than the default.

    Args:
        render (bool): show the simulation on screen
        total_iters (int): number of grab iterations to run
        test_y (bool): also test lifting along the y axis
    """
    setup = dict(
        gripper=RobotiqThreeFingerGripper(),
        pos="0 0 0.3",
        quat="0 0 1 0",
        gripper_low_pos=-0.02,
        gripper_high_pos=0.1,
        box_size=[0.035] * 3,
        box_density=500,
        render=render,
    )
    tester = GripperTester(**setup)
    tester.start_simulation()
    tester.loop(total_iters=total_iters, test_y=test_y)
    tester.close()
+""" +from robosuite.robots import ROBOT_CLASS_MAPPING, FixedBaseRobot, LeggedRobot, WheeledRobot + + +def test_all_robots(): + for name, robot in ROBOT_CLASS_MAPPING.items(): + print(f"Testing {name}") + if robot not in [FixedBaseRobot, WheeledRobot, LeggedRobot]: + raise ValueError(f"Invalid robot type: {robot}") + else: + _test_contact_geoms(robot(name)) + + +def _test_contact_geoms(robot): + robot.load_model() + contact_geoms = robot.robot_model._contact_geoms + for geom in contact_geoms: + assert isinstance(geom, str), f"The geom {geom} is of type {type(geom)}, but should be {type('placeholder')}" + + +if __name__ == "__main__": + # test_single_arm_robots() + test_all_robots() + print("Robot tests completed.") diff --git a/tests/test_robots/test_composite_robots.py b/tests/test_robots/test_composite_robots.py new file mode 100644 index 0000000000000000000000000000000000000000..c72892598b17c2f282c098ebfde67968fa19bf0f --- /dev/null +++ b/tests/test_robots/test_composite_robots.py @@ -0,0 +1,100 @@ +""" +Script to test composite robots: + +$ pytest -s tests/test_robots/test_composite_robots.py +""" +import logging +from typing import Dict, List, Union + +import numpy as np +import pytest + +import robosuite as suite +import robosuite.utils.robot_composition_utils as cu +from robosuite.controllers import load_composite_controller_config +from robosuite.models.grippers import GRIPPER_MAPPING +from robosuite.models.robots import is_robosuite_robot +from robosuite.utils.log_utils import ROBOSUITE_DEFAULT_LOGGER + +ROBOSUITE_DEFAULT_LOGGER.setLevel(logging.ERROR) + +TEST_ROBOTS = ["Baxter", "IIWA", "Jaco", "Kinova3", "Panda", "Sawyer", "UR5e", "Tiago", "SpotArm", "GR1"] +TEST_BASES = [ + "RethinkMount", + "RethinkMinimalMount", + "NullMount", + "OmronMobileBase", + "NullMobileBase", + "NoActuationBase", + "Spot", + "SpotFloating", +] + +# If you would like to visualize the scene during testing, +# set render to True and increase env_steps to a larger value. 
def create_and_test_env(
    env: str,
    robots: Union[str, List[str]],
    controller_config: Dict,
    render: bool = True,
    env_steps: int = 20,
):
    """
    Build a robosuite environment and run a short random-action rollout as a
    smoke test.

    Args:
        env: environment name (e.g. "Lift")
        robots: robot name or list of robot names
        controller_config: composite controller configuration dict
        render: whether to render on screen during the rollout
        env_steps: number of random actions to execute
    """
    config = {
        "env_name": env,
        "robots": robots,
        "controller_configs": controller_config,
    }

    # FIX: do not shadow the `env` (name) parameter with the env object.
    sim_env = suite.make(
        **config,
        has_renderer=render,
        has_offscreen_renderer=False,
        ignore_done=True,
        use_camera_obs=False,
        reward_shaping=True,
        control_freq=20,
    )
    sim_env.reset()
    low, high = sim_env.action_spec
    # Clip the action bounds to [-1, 1] so unbounded specs still sample sanely.
    low = np.clip(low, -1, 1)
    high = np.clip(high, -1, 1)

    # Runs a few steps of the simulation as a sanity check
    for _ in range(env_steps):
        action = np.random.uniform(low, high)
        sim_env.step(action)
        if render:
            sim_env.render()

    sim_env.close()


@pytest.mark.parametrize("robot", TEST_ROBOTS)
@pytest.mark.parametrize("base", TEST_BASES)
def test_composite_robot_base_combinations(robot, base):
    """Smoke-test each robot/base combination assembled as a composite robot."""
    if is_robosuite_robot(robot):
        if robot in ["Tiago", "GR1", "SpotArm"]:
            # FIX: message typo ("since it we typically" -> "since we typically")
            pytest.skip(f"Skipping {robot} for now since we typically do not attach it to another base.")
        elif base in ["NullMobileBase", "NoActuationBase", "Spot", "SpotFloating"]:
            # FIX: message typo ("comopsite" -> "composite")
            pytest.skip(f"Skipping {base} for now since composite robots do not use {base}.")
        else:
            cu.create_composite_robot(name="CompositeRobot", robot=robot, base=base, grippers="RethinkGripper")
            controller_config = load_composite_controller_config(controller="BASIC", robot="CompositeRobot")
            create_and_test_env(env="Lift", robots="CompositeRobot", controller_config=controller_config, render=False)


@pytest.mark.parametrize("robot", TEST_ROBOTS)
@pytest.mark.parametrize("gripper", GRIPPER_MAPPING.keys())
def test_composite_robot_gripper_combinations(robot, gripper):
    """Smoke-test each robot/gripper combination with an appropriate base."""
    if is_robosuite_robot(robot):
        # Pick a base compatible with the robot's mounting style.
        if robot in ["Tiago"]:
            base = "NullMobileBase"
        elif robot == "GR1":
            base = "NoActuationBase"
        else:
            base = "RethinkMount"

        cu.create_composite_robot(name="CompositeRobot", robot=robot, base=base, grippers=gripper)
        controller_config = load_composite_controller_config(controller="BASIC", robot="CompositeRobot")
        create_and_test_env(env="Lift", robots="CompositeRobot", controller_config=controller_config, render=False)