Fxxkrobotics committed on
Commit
5a6766e
·
verified ·
1 Parent(s): a15d286

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. docs/_static/css/theme.css +327 -0
  2. docs/_static/js/custom.js +3 -0
  3. docs/_static/theme_overrides.css +13 -0
  4. docs/algorithms/benchmarking.md +18 -0
  5. docs/algorithms/demonstrations.md +65 -0
  6. docs/algorithms/sim2real.md +155 -0
  7. docs/modeling/arena.rst +67 -0
  8. docs/modeling/mujoco_model.rst +38 -0
  9. docs/modeling/object_model.rst +48 -0
  10. docs/modeling/robot_model.rst +72 -0
  11. docs/modeling/task.rst +14 -0
  12. docs/modules/controllers.rst +281 -0
  13. docs/modules/devices.md +89 -0
  14. docs/modules/environments.md +265 -0
  15. docs/modules/objects.md +158 -0
  16. docs/modules/overview.md +10 -0
  17. docs/modules/renderers.md +74 -0
  18. docs/modules/robots.rst +210 -0
  19. docs/modules/sensors.md +33 -0
  20. docs/source/robosuite.controllers.interpolators.rst +29 -0
  21. docs/source/robosuite.controllers.parts.gripper.rst +29 -0
  22. docs/source/robosuite.controllers.parts.rst +40 -0
  23. docs/source/robosuite.controllers.rst +19 -0
  24. docs/source/robosuite.environments.rst +37 -0
  25. docs/source/robosuite.models.arenas.rst +69 -0
  26. docs/source/robosuite.models.bases.rst +117 -0
  27. docs/source/robosuite.models.objects.composite_body.rst +29 -0
  28. docs/source/robosuite.models.objects.primitive.rst +45 -0
  29. docs/source/robosuite.models.objects.rst +56 -0
  30. docs/source/robosuite.models.rst +42 -0
  31. docs/source/robosuite.wrappers.rst +61 -0
  32. docs/tutorials/add_controller.md +55 -0
  33. docs/tutorials/add_environment.md +65 -0
  34. robosuite.egg-info/PKG-INFO +85 -0
  35. robosuite.egg-info/SOURCES.txt +1183 -0
  36. robosuite.egg-info/dependency_links.txt +1 -0
  37. robosuite.egg-info/eager_resources.txt +1 -0
  38. robosuite.egg-info/requires.txt +14 -0
  39. robosuite.egg-info/top_level.txt +1 -0
  40. tactile_tasks/__init__.py +2 -0
  41. tactile_tasks/__pycache__/__init__.cpython-310.pyc +0 -0
  42. tactile_tasks/__pycache__/collect_data.cpython-310.pyc +0 -0
  43. tactile_tasks/__pycache__/motion_planner.cpython-310.pyc +0 -0
  44. tactile_tasks/__pycache__/sawyer_ik.cpython-310.pyc +0 -0
  45. tactile_tasks/__pycache__/uskin_sensor.cpython-310.pyc +0 -0
  46. tactile_tasks/__pycache__/visualize_data.cpython-310.pyc +0 -0
  47. tactile_tasks/collect_data.py +644 -0
  48. tactile_tasks/convert_for_act.py +160 -0
  49. tactile_tasks/convert_for_dp.py +143 -0
  50. tactile_tasks/envs/__init__.py +3 -0
docs/_static/css/theme.css ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* From https://github.com/rusty1s/pytorch_geometric */
2
+
3
+ .wy-side-nav-search {
4
+ background: rgb(143,144,147);
5
+ }
6
+
7
+ .wy-side-nav-search > div.version {
8
+ color: black;
9
+ }
10
+
11
+
12
+ .wy-nav-content-wrap {
13
+ background: inherit;
14
+ }
15
+
16
+ .wy-side-nav-search input[type="text"] {
17
+ border: none;
18
+ box-shadow: none;
19
+ background: white;
20
+ border-radius: 0;
21
+ font-size: 100%;
22
+ }
23
+
24
+ .wy-menu-vertical li.current a,
25
+ .wy-menu-vertical li.toctree-l1.current > a {
26
+ border: none;
27
+ }
28
+
29
+ .ethical-rtd > div.ethical-sidebar,
30
+ .ethical-rtd > div.ethical-footer {
31
+ display: none !important;
32
+ }
33
+
34
+ h1 {
35
+ /* text-transform: uppercase; */
36
+ font-family: inherit;
37
+ font-weight: 200;
38
+ }
39
+
40
+ h2,
41
+ .rst-content .toctree-wrapper p.caption {
42
+ font-family: inherit;
43
+ font-weight: 200;
44
+ }
45
+
46
+ .rst-content a:visited {
47
+ color: #3091d1;
48
+ }
49
+
50
+ /* Begin code */
51
+ .rst-content pre.literal-block,
52
+ .rst-content div[class^="highlight"] {
53
+ border: none;
54
+ }
55
+
56
+ .rst-content pre.literal-block,
57
+ .rst-content div[class^="highlight"] pre,
58
+ .rst-content .linenodiv pre {
59
+ font-size: 80%;
60
+ }
61
+
62
+ .highlight {
63
+ background: #f6f8fa;
64
+ border-radius: 6px;
65
+ }
66
+
67
+ .highlight .kn,
68
+ .highlight .k {
69
+ color: #d73a49;
70
+ }
71
+
72
+ .highlight .nn {
73
+ color: inherit;
74
+ font-weight: inherit;
75
+ }
76
+
77
+ .highlight .nc {
78
+ color: #e36209;
79
+ font-weight: inherit;
80
+ }
81
+
82
+ .highlight .fm,
83
+ .highlight .nd,
84
+ .highlight .nf,
85
+ .highlight .nb {
86
+ color: #6f42c1;
87
+ }
88
+
89
+ .highlight .bp,
90
+ .highlight .n {
91
+ color: inherit;
92
+ }
93
+
94
+ .highlight .kc,
95
+ .highlight .s1,
96
+ .highlight .s2,
97
+ .highlight .mi,
98
+ .highlight .mf,
99
+ .highlight .bp,
100
+ .highlight .bn,
101
+ .highlight .ow {
102
+ color: #005cc5;
103
+ font-weight: inherit;
104
+ }
105
+
106
+ .highlight .c1 {
107
+ color: #6a737d;
108
+ }
109
+
110
+ .rst-content code.xref {
111
+ padding: .2em .4em;
112
+ background: rgba(27,31,35,.05);
113
+ border-radius: 6px;
114
+ border: none;
115
+ }
116
+ /* End code */
117
+
118
+ .rst-content dl:not(.docutils) dt,
119
+ .rst-content dl:not(.docutils) dl dt {
120
+ background: rgb(243,244,247);
121
+ }
122
+
123
+ .rst-content dl:not(.docutils) dt.field-odd,
124
+ .rst-content dl:not(.docutils) dt.field-odd {
125
+ text-transform: uppercase;
126
+ background: inherit;
127
+ border: none;
128
+ padding: 6px 0;
129
+ }
130
+
131
+ .rst-content dl:not(.docutils) .property {
132
+ text-transform: uppercase;
133
+ font-style: normal;
134
+ padding-right: 12px;
135
+ }
136
+
137
+ em.sig-param span.n:first-child, em.sig-param span.n:nth-child(2) {
138
+ font-style: normal;
139
+ }
140
+
141
+ em.sig-param span.n:nth-child(3),
142
+ em.sig-param span.n:nth-child(3) a {
143
+ color: inherit;
144
+ font-weight: normal;
145
+ font-style: normal;
146
+ }
147
+
148
+ em.sig-param span.default_value {
149
+ font-family: SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;
150
+ font-style: normal;
151
+ font-size: 90%;
152
+ }
153
+
154
+ .sig-paren {
155
+ padding: 0 4px;
156
+ }
157
+
158
+ .wy-table-responsive table td,
159
+ .wy-table-responsive table th {
160
+ white-space: normal;
161
+ }
162
+
163
+ .wy-table-bordered-all,
164
+ .rst-content table.docutils {
165
+ border: none;
166
+ }
167
+
168
+ .wy-table-bordered-all td,
169
+ .rst-content table.docutils td {
170
+ border: none;
171
+ }
172
+
173
+ .wy-table-odd td,
174
+ .wy-table-striped tr:nth-child(2n-1) td,
175
+ .rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td {
176
+ background: rgb(243,244,247);
177
+ }
178
+
179
+ .wy-table td,
180
+ .rst-content table.docutils td,
181
+ .rst-content table.field-list td,
182
+ .wy-table th,
183
+ .rst-content table.docutils th,
184
+ .rst-content table.field-list th {
185
+ padding: 16px;
186
+ }
187
+ /*
188
+ .admonition {
189
+ content: '\f12a';
190
+ font-family: FontAwesome;
191
+ } */
192
+
193
+ .admonition.note, div.admonition.note {
194
+ border-color: rgba(var(--pst-color-admonition-note),1);
195
+ }
196
+
197
+ .admonition.note>.admonition-title:before, div.admonition.note>.admonition-title:before {
198
+ color: rgba(var(--pst-color-admonition-note),1);
199
+ content: '\f12a'!important;
200
+ /* content: var(--pst-icon-admonition-note); */
201
+ }
202
+
203
+ .admonition.question>.admonition-title:before, div.admonition.question>.admonition-title:before {
204
+ color: rgba(var(--pst-color-admonition-note),1);
205
+ content: '\003f'!important;
206
+ /* content: var(--pst-icon-admonition-note); */
207
+ }
208
+
209
+ .admonition.explanation>.admonition-title:before, div.admonition.explanation>.admonition-title:before {
210
+ color: rgba(var(--pst-color-admonition-note),1);
211
+ content: '\f02d'!important;
212
+ /* content: var(--pst-icon-admonition-note); */
213
+ }
214
+
215
+ .card {
216
+ /* Add shadows to create the "card" effect */
217
+ box-shadow: 0 4px 8px 0 rgba(0,0,0,0.2);
218
+ transition: 0.3s;
219
+ border-radius: 5px; /* 5px rounded corners */
220
+ width: 100%;
221
+ padding-bottom: 10px;
222
+ }
223
+
224
+ /* On mouse-over, add a deeper shadow */
225
+ .card:hover {
226
+ box-shadow: 0 8px 16px 0 rgba(0,0,0,0.2);
227
+
228
+ }
229
+
230
+ /* Add some padding inside the card container */
231
+ .container {
232
+ padding: 2px 16px;
233
+ }
234
+
235
+ .row:after {
236
+ content: "";
237
+ display: table;
238
+ clear: both;
239
+ }
240
+
241
+ .column {
242
+ float: left;
243
+ width: 50%;
244
+ padding: 20px 10px;
245
+ }
246
+
247
+ .box{
248
+ display: none;
249
+ width: 100%;
250
+ }
251
+
252
+ a:hover + .box,.box:hover{
253
+ display: block;
254
+ position: absolute;
255
+ z-index: 100;
256
+ border-radius: 50px!important;
257
+ margin-left: 60px;
258
+ margin-top: 0px;
259
+ }
260
+
261
+ a:hover + .card:hover{
262
+ display: block;
263
+ position: absolute;
264
+ z-index: 100;
265
+ border-radius: 50px!important;
266
+ margin-left: 60px;
267
+ margin-top: 0px;
268
+ }
269
+
270
+ a.reference.external {
271
+ color: #6695B0!important;
272
+ }
273
+
274
+ #p1 a {
275
+ color: #E98D64!important;
276
+ }
277
+
278
+
279
+ #frame { zoom: 0.75; -moz-transform: scale(0.75); -moz-transform-origin: 0 0; }
280
+
281
+ /* Google Fonts */
282
+ @import url(https://fonts.googleapis.com/css?family=Anonymous+Pro);
283
+
284
+ /* Global */
285
+
286
+ #typewriter body{
287
+ height: calc(100vh - 8em);
288
+ padding: 4em;
289
+ color: rgba(255,255,255,.75);
290
+ font-family: 'Anonymous Pro', monospace;
291
+ background-color: rgb(25,25,25);
292
+ }
293
+ #typewriter .line-1{
294
+ position: relative;
295
+ top: 50%;
296
+ width: 24em;
297
+ margin: 0 auto;
298
+ border-right: 2px solid rgba(255,255,255,.75);
299
+ font-size: 180%;
300
+ text-align: center;
301
+ white-space: nowrap;
302
+ overflow: hidden;
303
+ transform: translateY(-50%);
304
+ }
305
+
306
+ /* Animation */
307
+ .anim-typewriter{
308
+ animation: typewriter 4s steps(44) 1s 1 normal both,
309
+ blinkTextCursor 500ms steps(44) infinite normal;
310
+ }
311
+ @keyframes typewriter{
312
+ from{width: 0;}
313
+ to{width: 24em;}
314
+ }
315
+ @keyframes blinkTextCursor{
316
+ from{border-right-color: rgba(255,255,255,.75);}
317
+ to{border-right-color: transparent;}
318
+ }
319
+
320
+
321
+ .trimmed-cover {
322
+ object-fit: cover;
323
+ width: 120%;
324
+ height: 177px;
325
+ object-position: center 40%;
326
+ margin-right: -100px;
327
+ }
docs/_static/js/custom.js ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ $(document).ready(function () {
2
+ $('a[href^="http://"], a[href^="https://"]').not('a[class*=internal]').attr('target', '_blank');
3
+ });
docs/_static/theme_overrides.css ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* override table width restrictions */
2
+ @media screen and (min-width: 767px) {
3
+
4
+ .wy-table-responsive table td {
5
+ /* !important prevents the common CSS stylesheets from overriding
6
+ this as on RTD they are loaded after this stylesheet */
7
+ white-space: normal !important;
8
+ }
9
+
10
+ .wy-table-responsive {
11
+ overflow: visible !important;
12
+ }
13
+ }
docs/algorithms/benchmarking.md ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Benchmarking
2
+
3
+ Benchmarking results of standard policy learning algorithms.
4
+
5
+ ## v1.0
6
+
7
+ We provide a standardized set of benchmarking experiments as baselines for future experiments. Specifically, we test [Soft Actor-Critic](https://arxiv.org/abs/1812.05905), a state of the art model-free RL algorithm, on a select combination of tasks (all) using a combination of proprioceptive and object-specific observations, robots (`Panda`, `Sawyer`), and controllers (`OSC_POSE`, `JOINT_VELOCITY`). Our experiments were implemented and executed in an extended version of [rlkit](https://github.com/vitchyr/rlkit), a popular PyTorch-based RL framework and algorithm library. For ease of replicability, we have released our official benchmarking results on a [benchmark repository](https://github.com/ARISE-Initiative/robosuite-benchmark).
8
+
9
+ ![benchmarking_results](../images/benchmarking/benchmarking_results.png)
10
+
11
+ All agents were trained for 500 epochs with 500 steps per episode, and utilize the same standardized algorithm hyperparameters (see our benchmarking repo above for exact parameter values). The agents receive the low-dimensional physical states as input to the policy. These experiments ran on 2 CPUs and 12G VRAM and no GPU, each taking about two days to complete. We normalize the per-step rewards to 1.0 such that the maximum possible per-episode return is 500. Above, we show the per-task experiments conducted, with each experiment's training curve showing the evaluation return mean's average and standard deviation over five random seeds.
12
+
13
+ We select two of the easiest environments, **Block Lifting** and **Door Opening**, for an ablation study between the operational space controllers (`OSC_POSE`) and the joint velocity controllers (`JOINT_VELOCITY`). We observe that the choice of controllers alone has an evident impact on the efficiency of learning. Both robots learn to solve the tasks faster with the operational space controllers, which we hypothesize is credited to the accelerated exploration in task space; this highlights the potential of this impedance-based controller to improve task performance on robotic tasks that were previously limited by their action space parameterization. The SAC algorithm is able to solve three of the nine environments, including **Block Lifting**, **Door Opening**, and **Two Arm Peg-In-Hole**, while making slow progress in the other environments, which requires intelligent exploration in longer task horizons. For future experiments, we recommend using the nine environments with the Panda robot and the operational space controller, i.e., the blue curves of Panda (OSC) in our benchmarking figure above, for standardized and fair comparisons.
14
+
15
+ ## v0.3
16
+
17
+ - Please see the [Surreal](http://svl.stanford.edu/assets/papers/fan2018corl.pdf) paper for benchmarking results. Code to reproduce results available [here](https://github.com/SurrealAI/surreal).
18
+ - For imitation learning results on [RoboTurk](https://roboturk.stanford.edu/) datasets please see the original [RoboTurk](https://arxiv.org/abs/1811.02790) paper and also the [IRIS](https://arxiv.org/abs/1911.05321) paper.
docs/algorithms/demonstrations.md ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Human Demonstrations
2
+
3
+ ## Collecting Human Demonstrations
4
+
5
+ We provide teleoperation utilities that allow users to control the robots with input devices, such as the keyboard, [SpaceMouse](https://www.3dconnexion.com/spacemouse_compact/en/), [DualSense](https://www.playstation.com/en-us/accessories/dualsense-wireless-controller/) and mujoco-gui. Such functionality allows us to collect a dataset of human demonstrations for learning. We provide an example script to illustrate how to collect demonstrations. Our [collect_human_demonstrations](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/scripts/collect_human_demonstrations.py) script takes the following arguments:
6
+
7
+ - `directory:` path to a folder for where to store the pickle file of collected demonstrations
8
+ - `environment:` name of the environment you would like to collect the demonstrations for
9
+ - `device:` either "keyboard" or "spacemouse" or "dualsense" or "mjgui"
10
+ - `renderer:` Mujoco's builtin interactive viewer (mjviewer) or OpenCV viewer (mujoco)
11
+ - `camera:` Pass multiple camera names to enable multiple views. Note that the "mujoco" renderer must be enabled when using multiple views, while "mjviewer" is not supported.
12
+
13
+ See the [devices page](https://robosuite.ai/docs/modules/devices.html) for details on how to use the devices.
14
+
15
+ ## Replaying Human Demonstrations
16
+
17
+ We have included an example script that illustrates how demonstrations can be loaded and played back. Our [playback_demonstrations_from_hdf5](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/scripts/playback_demonstrations_from_hdf5.py) script selects demonstration episodes at random from a demonstration pickle file and replays them.
18
+
19
+
20
+ ## Existing Datasets
21
+
22
+ We have included some sample demonstrations for each task at `models/assets/demonstrations`.
23
+
24
+
25
+ ## Structure of collected demonstrations
26
+
27
+ Every set of demonstrations is collected as a `demo.hdf5` file. The `demo.hdf5` file is structured as follows.
28
+
29
+ - data (group)
30
+
31
+ - date (attribute) - date of collection
32
+
33
+ - time (attribute) - time of collection
34
+
35
+ - repository_version (attribute) - repository version used during collection
36
+
37
+ - env (attribute) - environment name on which demos were collected
38
+
39
+ - demo1 (group) - group for the first demonstration (every demonstration has a group)
40
+
41
+ - model_file (attribute) - the xml string corresponding to the MJCF mujoco model
42
+
43
+ - states (dataset) - flattened mujoco states, ordered by time
44
+
45
+ - actions (dataset) - environment actions, ordered by time
46
+
47
+ - demo2 (group) - group for the second demonstration
48
+
49
+ ...
50
+
51
+ (and so on)
52
+
53
+ The reason for storing mujoco states instead of raw observations is to make it easy to retrieve different kinds of observations in a postprocessing step. This also saves disk space (image datasets are much larger).
54
+
55
+
56
+ ## Using Demonstrations for Learning
57
+
58
+ The [robomimic](https://arise-initiative.github.io/robomimic-web/) framework makes it easy to train policies using your own [datasets collected with robosuite](https://arise-initiative.github.io/robomimic-web/docs/introduction/datasets.html#robosuite-hdf5-datasets). The framework also contains many useful examples for how to integrate hdf5 datasets into your own learning pipeline.
59
+
60
+ The robosuite repository also has some utilities for using the demonstrations to alter the start state distribution of training episodes for learning RL policies - this has proved effective in [several](https://arxiv.org/abs/1802.09564) [prior](https://arxiv.org/abs/1807.06919) [works](https://arxiv.org/abs/1804.02717). For example, we provide a generic utility for setting various types of learning curriculums which dictate how to sample from demonstration episodes when doing an environment reset. For more information see the `DemoSamplerWrapper` class.
61
+
62
+ ## Warnings
63
+ We have verified that deterministic action playback works specifically when playing back demonstrations on the *same machine* that the demonstrations were originally collected upon. However, this means that deterministic action playback is NOT guaranteed (in fact, very unlikely) to work across platforms or even across different machines using the same OS.
64
+
65
+ While action playback trajectories are quite similar even if not completely identical to the original collected state trajectories, they do tend to drift over time, and should not be relied upon to accurately replicate demonstrations. Instead, we recommend directly setting states to reproduce the collected trajectories, as shown in [playback_demonstrations_from_hdf5](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/scripts/playback_demonstrations_from_hdf5.py).
docs/algorithms/sim2real.md ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Sim-to-Real Transfer
2
+ This page covers the randomization techniques to narrow the reality gap of our robotics simulation. These techniques, which concern [visual observations](#visuals), [system dynamics](#dynamics), and [sensors](#sensors), are employed to improve the efficacy of transferring our simulation-trained models to the real world.
3
+
4
+
5
+ ## Visuals
6
+
7
+ It is well shown that randomizing the visuals in simulation can play an important role in sim2real applications. **robosuite** provides various `Modder` classes to control different aspects of the visual environment. This includes:
8
+
9
+ - `CameraModder`: Modder for controlling camera parameters, including FOV and pose
10
+ - `TextureModder`: Modder for controlling visual objects' appearances, including texture and material properties
11
+ - `LightingModder`: Modder for controlling lighting parameters, including light source properties and pose
12
+
13
+ Each of these Modders can be used by the user to directly override default simulation settings, or to randomize their respective properties mid-sim. We provide [demo_domain_randomization.py](../demos.html#domain-randomization) to showcase all of these modders being applied to randomize an environment during every sim step.
14
+
15
+
16
+ ## Dynamics
17
+
18
+ In order to achieve reasonable runtime speeds, many physics simulation platforms often must simplify the underlying physics model. Mujoco is no different, and as a result, many parameters such as friction, damping, and contact constraints do not fully capture real-world dynamics.
19
+
20
+ To better compensate for this, **robosuite** provides the `DynamicsModder` class, which can control individual dynamics parameters for each model within an environment. These parameters are sorted by element group, and briefly described below (for more information, please see [Mujoco XML Reference](http://www.mujoco.org/book/XMLreference.html)):
21
+
22
+ #### Opt (Global) Parameters
23
+ - `density`: Density of the medium (i.e.: air)
24
+ - `viscosity`: Viscosity of the medium (i.e.: air)
25
+
26
+ #### Body Parameters
27
+ - `position`: (x, y, z) Position of the body relative to its parent body
28
+ - `quaternion`: (qw, qx, qy, qz) Quaternion of the body relative to its parent body
29
+ - `inertia`: (ixx, iyy, izz) diagonal components of the inertia matrix associated with this body
30
+ - `mass`: mass of the body
31
+
32
+ #### Geom Parameters
33
+ - `friction`: (sliding, torsional, rolling) friction values for this geom
34
+ - `solref`: (timeconst, dampratio) contact solver values for this geom
35
+ - `solimp`: (dmin, dmax, width, midpoint, power) contact solver impedance values for this geom
36
+
37
+ #### Joint parameters
38
+ - `stiffness`: Stiffness for this joint
39
+ - `frictionloss`: Friction loss associated with this joint
40
+ - `damping`: Damping value for this joint
41
+ - `armature`: Gear inertia for this joint
42
+
43
+ This `DynamicsModder` follows the same basic API as the other `Modder` classes, and allows per-parameter and per-group randomization enabling. Apart from randomization, this modder can also be instantiated to selectively modify values at runtime. A brief example is given below:
44
+
45
+ ```python
46
+ import robosuite as suite
47
+ from robosuite.utils.mjmod import DynamicsModder
48
+ import numpy as np
49
+
50
+ # Create environment and modder
51
+ env = suite.make("Lift", robots="Panda")
52
+ modder = DynamicsModder(sim=env.sim, random_state=np.random.RandomState(5))
53
+
54
+ # Define function for easy printing
55
+ cube_body_id = env.sim.model.body_name2id(env.cube.root_body)
56
+ cube_geom_ids = [env.sim.model.geom_name2id(geom) for geom in env.cube.contact_geoms]
57
+
58
+ def print_params():
59
+ print(f"cube mass: {env.sim.model.body_mass[cube_body_id]}")
60
+ print(f"cube frictions: {env.sim.model.geom_friction[cube_geom_ids]}")
61
+ print()
62
+
63
+ # Print out initial parameter values
64
+ print("INITIAL VALUES")
65
+ print_params()
66
+
67
+ # Modify the cube's properties
68
+ modder.mod(env.cube.root_body, "mass", 5.0) # make the cube really heavy
69
+ for geom_name in env.cube.contact_geoms:
70
+ modder.mod(geom_name, "friction", [2.0, 0.2, 0.04]) # greatly increase the friction
71
+ modder.update() # make sure the changes propagate in sim
72
+
73
+ # Print out modified parameter values
74
+ print("MODIFIED VALUES")
75
+ print_params()
76
+
77
+ # We can also restore defaults (original values) at any time
78
+ modder.restore_defaults()
79
+
80
+ # Print out restored initial parameter values
81
+ print("RESTORED VALUES")
82
+ print_params()
83
+ ```
84
+
85
+ Running [demo_domain_randomization.py](../demos.html#domain-randomization) is another method for demo'ing (albeit an extreme example of) this functionality.
86
+
87
+ Note that the modder already has some sanity checks in place to prevent presumably undesired / non-sensical behavior, such as adding damping / frictionloss to a free joint or setting a non-zero stiffness value to a joint that is normally non-stiff to begin with.
88
+
89
+
90
+ ## Sensors
91
+
92
+ By default, Mujoco sensors are deterministic and delay-free, which is often an unrealistic assumption to make in the real world. To better close this domain gap, **robosuite** provides a realistic, customizable interface via the [Observable](../source/robosuite.utils.html#module-robosuite.utils.observables) class API. Observables model realistic sensor sampling, in which ground truth data is sampled (`sensor`), passed through a corrupting function (`corrupter`), and then finally passed through a filtering function (`filter`). Moreover, each observable has its own `sampling_rate` and `delayer` function which simulates sensor delay. While default values are used to instantiate each observable during environment creation, each of these components can be modified by the user at runtime using `env.modify_observable(...)` . Moreover, each observable is assigned a modality, and are grouped together in the returned observation dictionary during the `env.step()` call. For example, if an environment consists of camera observations and a single robot's proprioceptive observations, the observation dict structure might look as follows:
93
+
94
+ ```python
95
+ {
96
+ "frontview_image": np.array(...), # this has modality "image"
97
+ "frontview_depth": np.array(...), # this has modality "image"
98
+ "robot0_joint_pos": np.array(...), # this has modality "robot0_proprio"
99
+ "robot0_gripper_pos": np.array(...), # this has modality "robot0_proprio"
100
+ "image-state": np.array(...), # this is a concatenation of all image observations
101
+ "robot0_proprio-state": np.array(...), # this is a concatenation of all robot0_proprio observations
102
+ }
103
+ ```
104
+
105
+ Note that for memory efficiency the `image-state` is not returned by default (this can be toggled in `robosuite/macros.py`).
106
+
107
+ We showcase how the `Observable` functionality can be used to model sensor corruption and delay via [demo_sensor_corruption.py](../demos.html#sensor-realism). We also highlight that each of the `sensor`, `corrupter`, and `filter` functions can be arbitrarily specified to suit the end-user's usage. For example, a common use case for these observables is to keep track of sampled values from a sensor operating at a higher frequency than the environment step (control) frequency. In this case, the `filter` function can be leveraged to keep track of the real-time sensor values as they're being sampled. We provide a minimal script showcasing this ability below:
108
+
109
+ ```python
110
+ import robosuite as suite
111
+ import numpy as np
112
+ from robosuite.utils.buffers import RingBuffer
113
+
114
+ # Create env instance
115
+ control_freq = 10
116
+ env = suite.make("Lift", robots="Panda", has_offscreen_renderer=False, use_camera_obs=False, control_freq=control_freq)
117
+
118
+ # Define a ringbuffer to store joint position values
119
+ buffer = RingBuffer(dim=env.robots[0].robot_model.dof, length=10)
120
+
121
+ # Create a function that we'll use as the "filter" for the joint position Observable
122
+ # This is a pass-through operation, but we record the value every time it gets called
123
+ # As per the Observables API, this should take in an arbitrary numeric and return the same type / shape
124
+ def filter_fcn(corrupted_value):
125
+ # Record the inputted value
126
+ buffer.push(corrupted_value)
127
+ # Return this value (no-op performed)
128
+ return corrupted_value
129
+
130
+ # Now, let's enable the joint position Observable with this filter function
131
+ env.modify_observable(
132
+ observable_name="robot0_joint_pos",
133
+ attribute="filter",
134
+ modifier=filter_fcn,
135
+ )
136
+
137
+ # Let's also increase the sampling rate to showcase the Observable's ability to update multiple times per env step
138
+ obs_sampling_freq = control_freq * 4
139
+ env.modify_observable(
140
+ observable_name="robot0_joint_pos",
141
+ attribute="sampling_rate",
142
+ modifier=obs_sampling_freq,
143
+ )
144
+
145
+ # Take a single environment step with positive joint velocity actions
146
+ action = np.ones(env.robots[0].robot_model.dof) * 1.0
147
+ env.step(action)
148
+
149
+ # Now we can analyze what values were recorded
150
+ np.set_printoptions(precision=2)
151
+ print(f"\nPolicy Frequency: {control_freq}, Observable Sampling Frequency: {obs_sampling_freq}")
152
+ print(f"Number of recorded samples after 1 policy step: {buffer._size}\n")
153
+ for i in range(buffer._size):
154
+ print(f"Recorded value {i}: {buffer.buf[i]}")
155
+ ```
docs/modeling/arena.rst ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Arena
2
+ =====
3
+
4
+ The ``Arena`` class serves as a base model for building the simulation environment.
5
+ By default, this includes a ground plane and visual walls, and child classes extend this
6
+ to additionally include other objects, e.g., a table or bins.
7
+
8
+ Base Arena
9
+ ----------
10
+
11
+ .. autoclass:: robosuite.models.arenas.arena.Arena
12
+
13
+ .. automethod:: __init__
14
+ .. automethod:: set_origin
15
+ .. automethod:: set_camera
16
+
17
+ Empty Arena
18
+ -----------
19
+
20
+ .. autoclass:: robosuite.models.arenas.empty_arena.EmptyArena
21
+
22
+ .. automethod:: __init__
23
+
24
+ Bins Arena
25
+ ----------
26
+
27
+ .. autoclass:: robosuite.models.arenas.bins_arena.BinsArena
28
+
29
+ .. automethod:: __init__
30
+ .. automethod:: configure_location
31
+
32
+ Pegs Arena
33
+ ----------
34
+
35
+ .. autoclass:: robosuite.models.arenas.pegs_arena.PegsArena
36
+
37
+ .. automethod:: __init__
38
+
39
+ Table Arena
40
+ -----------
41
+
42
+ .. autoclass:: robosuite.models.arenas.table_arena.TableArena
43
+
44
+ .. automethod:: __init__
45
+ .. automethod:: configure_location
46
+ .. autoproperty:: table_top_abs
47
+
48
+ Wipe Arena
49
+ ----------
50
+
51
+ .. autoclass:: robosuite.models.arenas.wipe_arena.WipeArena
52
+
53
+ .. automethod:: __init__
54
+ .. automethod:: configure_location
55
+ .. automethod:: reset_arena
56
+ .. automethod:: sample_start_pos
57
+ .. automethod:: sample_path_pos
58
+
59
+ MultiTable Arena
60
+ ----------------
61
+
62
+ .. autoclass:: robosuite.models.arenas.multi_table_arena.MultiTableArena
63
+
64
+ .. automethod:: __init__
65
+ .. automethod:: _add_table
66
+ .. automethod:: configure_location
67
+ .. automethod:: _postprocess_arena
docs/modeling/mujoco_model.rst ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Mujoco Model
2
+ ============
3
+
4
+ The ``MujocoModel`` class is the foundational class from which all other model classes extend from in robosuite. This class represents a standardized API for all models used in simulation and is the core modeling component that other model classes build upon. The ``MujocoXMLModel`` is an extension of this class that represents models based on an XML file.
5
+
6
+ Base Mujoco Model
7
+ -----------------
8
+
9
+ .. autoclass:: robosuite.models.base.MujocoModel
10
+
11
+ .. automethod:: correct_naming
12
+ .. automethod:: set_sites_visibility
13
+ .. automethod:: exclude_from_prefixing
14
+ .. autoproperty:: name
15
+ .. autoproperty:: naming_prefix
16
+ .. autoproperty:: root_body
17
+ .. autoproperty:: bodies
18
+ .. autoproperty:: joints
19
+ .. autoproperty:: actuators
20
+ .. autoproperty:: sites
21
+ .. autoproperty:: sensors
22
+ .. autoproperty:: contact_geoms
23
+ .. autoproperty:: visual_geoms
24
+ .. autoproperty:: important_geoms
25
+ .. autoproperty:: important_sites
26
+ .. autoproperty:: important_sensors
27
+ .. autoproperty:: bottom_offset
28
+ .. autoproperty:: top_offset
29
+ .. autoproperty:: horizontal_radius
30
+
31
+
32
+ XML Mujoco Model
33
+ ----------------
34
+
35
+ .. autoclass:: robosuite.models.base.MujocoXMLModel
36
+
37
+ .. autoproperty:: base_offset
38
+ .. autoproperty:: contact_geom_rgba
docs/modeling/object_model.rst ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Object Model
2
+ ============
3
+
4
+ The ``MujocoObject`` class serves as a catch-all base class that is used to capture individual simulation objects to
5
+ instantiate within a given simulation. This is done in one of two ways via extended classes -- the ``MujocoXMLObject``
6
+ reads in information from a corresponding object XML file, whereas the ``MujocoGeneratedObject`` procedurally generates a
7
+ custom object using a suite of utility mj modeling functions. In conjunction with the ``RobotModel``, and
8
+ ``Arena`` classes, these classes serve as the basis for forming the higher level ``Task`` class which is used to
9
+ ultimately generate the ``MjSim`` simulation object.
10
+
11
+ Base Object Model
12
+ -----------------
13
+
14
+ .. autoclass:: robosuite.models.objects.objects.MujocoObject
15
+
16
+ .. automethod:: __init__
17
+ .. automethod:: merge_assets
18
+ .. automethod:: get_obj
19
+ .. automethod:: exclude_from_prefixing
20
+ .. automethod:: _get_object_subtree
21
+ .. automethod:: _get_object_properties
22
+ .. autoproperty:: important_geoms
23
+ .. autoproperty:: important_sites
24
+ .. autoproperty:: important_sensors
25
+ .. autoproperty:: get_site_attrib_template
26
+ .. autoproperty:: get_joint_attrib_template
27
+
28
+
29
+ XML Object Model
30
+ ----------------
31
+
32
+ .. autoclass:: robosuite.models.objects.objects.MujocoXMLObject
33
+
34
+ .. automethod:: __init__
35
+ .. automethod:: _duplicate_visual_from_collision
36
+ .. automethod:: _get_geoms
37
+
38
+
39
+ Generated Object Model
40
+ ----------------------
41
+
42
+ .. autoclass:: robosuite.models.objects.objects.MujocoGeneratedObject
43
+
44
+ .. automethod:: __init__
45
+ .. automethod:: sanity_check
46
+ .. automethod:: get_collision_attrib_template
47
+ .. automethod:: get_visual_attrib_template
48
+ .. automethod:: append_material
docs/modeling/robot_model.rst ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Robot Model
2
+ ===========
3
+
4
+ Robot Model
5
+ -----------
6
+ The ``RobotModel`` class serves as a direct intermediary class that reads in information from a corresponding robot XML
7
+ file and also contains relevant hard-coded information from that XML. This represents an arbitrary robot optionally equipped with a mount via the ``RobotBaseModel`` class and is the core modeling component of the higher-level ``Robot`` class used in simulation.
8
+
9
+ .. autoclass:: robosuite.models.robots.robot_model.RobotModel
10
+
11
+ .. automethod:: set_base_xpos
12
+ .. automethod:: set_base_ori
13
+ .. automethod:: set_joint_attribute
14
+ .. automethod:: add_base
15
+ .. automethod:: add_mount
16
+ .. automethod:: add_mobile_base
17
+ .. automethod:: add_leg_base
18
+ .. autoproperty:: dof
19
+ .. autoproperty:: default_base
20
+ .. autoproperty:: default_controller_config
21
+ .. autoproperty:: init_qpos
22
+ .. autoproperty:: base_xpos_offset
23
+ .. autoproperty:: _horizontal_radius
24
+ .. autoproperty:: _important_sites
25
+ .. autoproperty:: _important_geoms
26
+ .. autoproperty:: _important_sensors
27
+
28
+
29
+ Manipulator Model
30
+ -----------------
31
+ The ``ManipulatorModel`` class extends from the base ``RobotModel`` class, and represents an armed, mounted robot with an optional gripper attached to its end effector. In conjunction with the corresponding ``GripperModel`` class and ``RobotBaseModel`` class, this serves as the core modeling component of the higher-level ``Manipulator`` class used in simulation.
32
+
33
+ .. autoclass:: robosuite.models.robots.manipulators.manipulator_model.ManipulatorModel
34
+
35
+ .. automethod:: add_gripper
36
+ .. autoproperty:: default_gripper
37
+ .. autoproperty:: arm_type
38
+ .. autoproperty:: base_xpos_offset
39
+ .. autoproperty:: eef_name
40
+ .. autoproperty:: _important_sites
41
+
42
+
43
+ Gripper Model
44
+ -------------
45
+ The ``GripperModel`` class serves as a direct intermediary class that reads in information from a corresponding gripper XML file and also contains relevant hard-coded information from that XML. In conjunction with the ``ManipulatorModel`` class, this serves as the core modeling component of the higher-level `Manipulator` class used in simulation.
46
+
47
+ .. autoclass:: robosuite.models.grippers.gripper_model.GripperModel
48
+
49
+ .. automethod:: format_action
50
+ .. autoproperty:: speed
51
+ .. autoproperty:: dof
52
+ .. autoproperty:: init_qpos
53
+ .. autoproperty:: _important_sites
54
+ .. autoproperty:: _important_geoms
55
+ .. autoproperty:: _important_sensors
56
+
57
+
58
+ Robot Base Model
59
+ ----------------
60
+
61
+ The ``RobotBaseModel`` class represents the base of the robot. User can use ``add_base`` method in the ``RobotModel`` class to add a base model to the robot.
62
+
63
+ There are mainly three types of base models: ``MountModel``, ``MobileBaseModel``, and ``LegBaseModel``.
64
+
65
+ .. autoclass:: robosuite.models.bases.robot_base_model.RobotBaseModel
66
+
67
+ .. autoproperty:: top_offset
68
+ .. autoproperty:: horizontal_radius
69
+ .. autoproperty:: naming_prefix
70
+ .. autoproperty:: _important_sites
71
+ .. autoproperty:: _important_geoms
72
+ .. autoproperty:: _important_sensors
docs/modeling/task.rst ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Task
2
+ =====
3
+
4
+ The ``Task`` class is responsible for integrating a given ``Arena``, ``RobotModel``, and set of ``MujocoObjects`` into a single element tree that is then parsed and converted into an ``MjSim`` object.
5
+
6
+ Base Task
7
+ ---------
8
+
9
+ .. autoclass:: robosuite.models.tasks.task.Task
10
+
11
+ .. automethod:: __init__
12
+ .. automethod:: merge_robot
13
+ .. automethod:: merge_arena
14
+ .. automethod:: merge_objects
docs/modules/controllers.rst ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Controllers
2
+ ==============
3
+
4
+ Composite Controllers
5
+ ---------------------
6
+
7
+ Robosuite's composite controllers assume that a robot consists of multiple "body parts", such as arms, torso, head, base, and legs, and that each body part has
8
+ a "body part" controller (e.g., ``OSC_POSE``, ``JOINT_POSITION``). The composite controller orchestrates these body part controllers.
9
+ Composite controllers are controllers that are composed of multiple body-part controllers.
10
+ They are used to control the entire robot, including all of its parts.
11
+
12
+ When an action vector is commanded to the robot, the action will be split into multiple body-part actions, each of which will be sent to the corresponding body-part
13
+ controller. To understand the action split, use the function ``robosuite.robots.robot.print_action_info()``.
14
+ To create the action easily, we also provide a helper function ``robosuite.robots.robot.create_action_vector()`` which takes the action dictionary as
15
+ inputs and returns the action vector with correct dimensions. For controller actions whose input dimensions do not match the robot's degrees of freedom,
16
+ you need to write your own ``create_action_vector()`` function inside the custom composite controller so that the robot's function can retrieve the information properly.
17
+
18
+ **Basic**
19
+ *********
20
+
21
+ The "Basic" composite controller consists of individual part controllers that operate independently to control various parts of the robot, such as arms, torso, head, base, and legs.
22
+ Each part can be assigned a specific controller type (e.g., ``OSC_POSE``, ``JOINT_POSITION``) depending on the desired control behavior for that part.
23
+ For example, arms may use ``OSC_POSE`` for precise end-effector control, while the base may use JOINT_VELOCITY for movement across the ground.
24
+
25
+
26
+ **WholeBodyIK**
27
+ ***************
28
+
29
+ The "WholeBodyIK" composite controller takes in end effector targets, and converts them into joint angle targets for the corresponding body parts' joints.
30
+
31
+
32
+ **Third-party Controllers**
33
+ ***************************
34
+
35
+ Third-party controllers integrate custom or external control algorithms into robosuite. Examples include https://github.com/kevinzakka/mink. We provide
36
+ an example of adding a third-party controller in https://robosuite.ai/docs/tutorials/add_controller.html.
37
+
38
+
39
+ Workflow of Loading Configs
40
+ ****************************
41
+ Loading configs for composite controllers is critical for selecting the correct control mode with well-tuned parameters. We provide a list of default controller configs for the composite controllers, and also support easy specification of your custom controller config file. A config file is defined in a json file.
42
+
43
+ An example of the controller config file is shown below (many parameters are omitted in `...` for brevity):
44
+
45
+ .. toggle::
46
+
47
+ Example for defining BASIC controller.
48
+
49
+ .. code-block:: json
50
+
51
+ {
52
+ "type": "BASIC",
53
+ "body_parts": {
54
+ "arms": {
55
+ "right": {
56
+ "type": "OSC_POSE",
57
+ "input_max": 1,
58
+ "input_min": -1,
59
+ "output_max": [0.05, 0.05, 0.05, 0.5, 0.5, 0.5],
60
+ "output_min": [-0.05, -0.05, -0.05, -0.5, -0.5, -0.5],
61
+ "kp": 150,
62
+ ...
63
+ },
64
+ "left": {
65
+ "type": "OSC_POSE",
66
+ ...
67
+ }
68
+ },
69
+ "torso": {
70
+ "type" : "JOINT_POSITION",
71
+ ...
72
+ },
73
+ "head": {
74
+ "type" : "JOINT_POSITION",
75
+ ...
76
+ },
77
+ "base": {
78
+ "type": "JOINT_VELOCITY",
79
+ ...
80
+ },
81
+ "legs": {
82
+ "type": "JOINT_POSITION",
83
+ ...
84
+ }
85
+ }
86
+ }
87
+
88
+
89
+
90
+ Part Controllers
91
+ ------------------
92
+
93
+ Part controllers are used to determine the type of high-level control over a given robot arm. While all arms are directly controlled via their joint torques, the inputted action space for a given environment can vary depending on the type of desired control. Our controller options include ``OSC_POSE``, ``OSC_POSITION``, ``JOINT_POSITION``, ``JOINT_VELOCITY``, and ``JOINT_TORQUE``.
94
+
95
+ For ``OSC_POSE``, ``OSC_POSITION``, and ``JOINT_POSITION``, we include three variants that determine the action space. The most common variant is to use a predefined and constant set of impedance parameters; in that case, the action space only includes the desired pose, position, or joint configuration. We also include the option to specify either the stiffness values (and the damping will be automatically set to values that lead to a critically damped system), or all impedance parameters, both stiffness and damping, as part of the action at each step. These two variants lead to extended action spaces that can control the stiffness and damping behavior of the controller in a variable manner, providing full control to the policy/solution over the contact and dynamic behavior of the robot.
96
+
97
+ When using any position-based control (``OSC``, ``IK``, or ``JOINT_POSITION`` controllers), input actions are, by default,
98
+ interpreted as delta values from the current state.
99
+
100
+ When using any end-effector pose controller (``IK``, ``OSC_POSE``), delta rotations from the current end-effector orientation
101
+ in the form of axis-angle coordinates ``(ax, ay, az)``, where the direction represents the axis and the magnitude
102
+ represents the angle (in radians). Note that for ``OSC``, the rotation axes are taken relative to the global world
103
+ coordinate frame, whereas for ``IK``, the rotation axes are taken relative to the end-effector origin, NOT the global world coordinate frame!
104
+
105
+ During runtime, the execution of the controllers is as follows. Controllers receive a desired configuration (reference value) and output joint torques that try to minimize the error between the current configuration and the desired one. Policies and solutions provide these desired configurations, elements of some action space, at what we call simulated policy frequency (:math:`f_{p}`), e.g., 20Hz or 30Hz. **robosuite** will execute several iterations composed of a controller execution and a simulation step at simulation frequency, :math:`f_s` (:math:`f_s = N\cdot f_p`), using the same reference signal until a new action is provided by the policy/solution. In these iterations, while the desired configuration is kept constant, the current state of the robot is changing, and thus, the error.
106
+
107
+ In the following we summarize the options, variables, and the control laws (equations) that convert desired values from the policy/solution and current robot states into executable joint torques to minimize the difference.
108
+
109
+ Joint Space Control - Torque
110
+ *********************************
111
+ Controller Type: ``JOINT_TORQUE``
112
+
113
+ Action Dimensions (not including gripper): ``n`` (number of joints)
114
+
115
+ Since our controllers transform the desired values from the policies/solutions into joint torques, if these values are already joint torques, there is a one-to-one mapping between the reference value from the policy/solution and the output value from the joint torque controller at each step: :math:`\tau = \tau_d`.
116
+
117
+ .. math::
118
+ \begin{equation}
119
+ \tau = \tau_d
120
+ \end{equation}
121
+
122
+ Joint Space Control - Velocity
123
+ *********************************
124
+ Controller Type: ``JOINT_VELOCITY``
125
+
126
+ Action Dimensions (not including gripper): ``n`` (number of joints)
127
+
128
+ To control joint velocities, we create a proportional (P) control law between the desired value provided by the policy/solution (interpreted as desired velocity of each joint) and the current joint velocity of the robot. This control law, parameterized by a proportional constant, :math:`k_p`, generates joint torques to execute at each simulation step:
129
+
130
+ .. math::
131
+ \tau = k_p (\dot{q}_d - \dot{q})
132
+
133
+
134
+ Joint Space Control - Position with Fixed Impedance
135
+ ********************************************************
136
+ Controller Type: ``JOINT_POSITION``
137
+
138
+ Impedance: fixed
139
+
140
+ Action Dimensions (not including gripper): ``n`` (number of joints)
141
+
142
+ In joint position control, we create a proportional-derivative (PD) control law between the desired value provided by the policy/solution (interpreted as desired configuration for each joint) and the current joint positions of the robot. The control law that generates the joint torques to execute is parameterized by proportional and derivative gains, :math:`k_p` and :math:`k_d`, and defined as
143
+
144
+ .. math::
145
+ \begin{equation}
146
+ \tau = \Lambda \left[k_p \Delta_q - k_d\dot{q}\right]
147
+ \end{equation}
148
+
149
+ where :math:`\Delta_q = q_d - q` is the difference between current and desired joint configurations, and :math:`\Lambda` is the inertia matrix, that we use to scale the error to remove the dynamic effects of the mechanism. The stiffness and damping parameters, :math:`k_p` and :math:`k_d`, are determined in construction and kept fixed.
150
+
151
+ Joint Space Control - Position with Variable Stiffness
152
+ ***********************************************************
153
+ Controller Type: ``JOINT_POSITION``
154
+
155
+ Impedance: variable_kp
156
+
157
+ Action Dimensions (not including gripper): ``2n`` (number of joints)
158
+
159
+ The control law is the same as for fixed impedance but, in this controller, :math:`k_p` can be determined by the policy/solution at each policy step.
160
+
161
+ Joint Space Control - Position with Variable Impedance
162
+ ***********************************************************
163
+ Controller Type: ``JOINT_POSITION``
164
+
165
+ Impedance: variable
166
+
167
+ Action Dimensions (not including gripper): ``3n`` (number of joints)
168
+
169
+ Again, the control law is the same in the two previous control types, but now both the stiffness and damping parameters, :math:`k_p` and :math:`k_d`, are controllable by the policy/solution and can be changed at each step.
170
+
171
+ Operational Space Control - Pose with Fixed Impedance
172
+ **********************************************************
173
+ Controller Type: ``OSC_POSE``
174
+
175
+ Impedance: fixed
176
+
177
+ Action Dimensions (not including gripper): ``6``
178
+
179
+ In the ``OSC_POSE`` controller, the desired value is the 6D pose (position and orientation) of a controlled frame. We follow the formalism from `[Khatib87] <https://ieeexplore.ieee.org/document/1087068>`_. Our control frame is always the ``eef_site`` defined in the `Gripper Model <https://robosuite.ai/docs/modeling/robot_model.html#gripper-model>`_, placed at the end of the last link for robots without gripper or between the fingers for robots with gripper. The operational space control framework (OSC) computes the necessary joint torques to minimize the error between the desired and the current pose of the ``eef_site`` with the minimal kinematic energy.
180
+
181
+ Given a desired pose :math:`\mathbf{x}_{\mathit{des}}` and the current end-effector pose, we first compute the end-effector acceleration that would help minimize the error between both, assuming a PD (proportional-derivative) control schema to improve convergence and stability. For that, we first decompose :math:`\mathbf{x}_{\mathit{des}}` into a desired position, :math:`p_d \in \mathbb{R}^3`, and a desired orientation, :math:`R_d \in \mathbb{SO}(3)`. The end-effector acceleration to minimize the error should increase with the difference between desired end-effector pose and current pose, :math:`p` and :math:`R` (proportional term), and decrease with the current end-effector velocity, :math:`v` and :math:`\omega` (derivative term).
182
+
183
+ We then compute the robot actuation (joint torques) to achieve the desired end-effector space accelerations leveraging the kinematic and dynamic models of the robot with the dynamically-consistent operational space formulation in `[Khatib1995a] <https://journals.sagepub.com/doi/10.1177/027836499501400103>`_. First, we compute the wrenches at the end-effector that corresponds to the desired accelerations, :math:`{f}\in\mathbb{R}^{6}`.
184
+ Then, we map the wrenches in end-effector space :math:`{f}` to joint torque commands with the end-effector Jacobian at the current joint configuration :math:`J=J(q)`: :math:`\tau = J^T{f}`.
185
+
186
+ Thus, the function that maps end-effector space position and orientation to low-level robot commands is (:math:`\textrm{ee} = \textrm{\it end-effector space}`):
187
+
188
+ .. math::
189
+
190
+ \begin{equation}
191
+ \begin{aligned}
192
+ \tau &= J_p[\Lambda_p[k_p^p (p_d - p) - k_v^p v]] + J_R[\Lambda_R\left[k_p^R(R_d \ominus R) - k_d^R \omega \right]]
193
+ \end{aligned}
194
+ \end{equation}
195
+
196
+ where :math:`\Lambda_p` and :math:`\Lambda_R` are the parts corresponding to position and orientation in :math:`\Lambda \in \mathbb{R}^{6\times6}`, the inertial matrix in the end-effector frame that decouples the end-effector motions, :math:`J_p` and :math:`J_R` are the position and orientation parts of the end-effector Jacobian, and :math:`\ominus` corresponds to the subtraction in :math:`\mathbb{SO}(3)`. The difference between current and desired position (:math:`\Delta_p= p_d - p`) and between current and desired orientation (:math:`\Delta_R = R_d \ominus R`) can be used as alternative policy action space, :math:`\mathcal{A}`. :math:`k_p^p`, :math:`k_v^p`, :math:`k_p^R`, and :math:`k_d^R` are vectors of proportional and derivative gains for position and orientation (parameters :math:`\kappa`), respectively, set once at initialization and kept fixed.
197
+
198
+ Operational Space Control - Pose with Variable Stiffness
199
+ *************************************************************
200
+ Controller Type: ``OSC_POSE``
201
+
202
+ Impedance: variable_kp
203
+
204
+ Action Dimensions (not including gripper): ``12``
205
+
206
+ The control law is the same as ``OSC_POSE`` but, in this case, the stiffness of the controller, :math:`k_p`, is part of the action space and can be controlled and changed at each time step by the policy/solution. The damping parameters, :math:`k_d`, are set to maintain the critically damped behavior of the controller.
207
+
208
+ Operational Space Control - Pose with Variable Impedance
209
+ *********************************************************
210
+ Controller Type: ``OSC_POSE``
211
+
212
+ Impedance: variable
213
+
214
+ Action Dimensions (not including gripper): ``18``
215
+
216
+ The control law is the same as in the two previous controllers, but now both the stiffness and the damping, :math:`k_p` and :math:`k_d`, are part of the action space and can be controlled and changed at each time step by the policy/solution.
217
+
218
+
219
+ Configurations
220
+ ---------------
221
+
222
+ The `config directory <https://github.com/ARISE-Initiative/robosuite/tree/master/robosuite/controllers/config>`_ provides a set of default configuration files that hold default examples of parameters relevant to individual controllers. Note that when creating your controller config templates of a certain type of controller, the listed parameters in the default example are required and should be specified accordingly.
223
+
224
+ Note: Each robot has its own default controller configuration which is called by default unless a different controller config is called.
225
+
226
+ Below, a brief overview and description of each subset of controller parameters are shown:
227
+
228
+ Controller Settings
229
+ ********************
230
+ * ``type``: Type of controller to control. Can be ``OSC_POSE``, ``OSC_POSITION``, ``IK_POSE``, ``JOINT_POSITION``, ``JOINT_VELOCITY``, or ``JOINT_TORQUE``
231
+ * ``interpolation``: If not ``null``, specified type of interpolation to use between desired actions. Currently only ``linear`` is supported.
232
+ * ``ramp_ratio``: If using ``linear`` interpolation, specifies the proportion of allotted timesteps (value from [0, 1]) over which to execute the interpolated commands.
233
+ * ``{...}_limits``: Limits for that specific controller. E.g.: for a ``JOINT_POSITION``, the relevant limits are its joint positions, ``qpos_limits`` . Can be either a 2-element list (same min/max limits across entire relevant space), or a list of lists (specific limits for each component)
234
+ * ``ik_{pos, ori}_limit``: Only applicable for IK controller. Limits the magnitude of the desired relative change in position / orientation.
235
+ * ``{input,output}_{min,max}``: Scaling ranges for mapping action space inputs into controller inputs. Setting these limits will automatically clip the action space input to be within the ``input_{min,max}`` before mapping the requested value into the specified ``output_{min,max}`` range. Can be either a scalar (same limits across entire action space), or a list (specific limits for each action component)
236
+ * ``kp``: Where relevant, specifies the proportional gain for the controller. Can be either be a scalar (same value for all controller dimensions), or a list (specific values for each dimension)
237
+ * ``damping_ratio``: Where relevant, specifies the damping ratio constant for the controller.
238
+ * ``impedance_mode``: For impedance-based controllers (``OSC_*``, ``JOINT_POSITION``), determines the impedance mode for the controller, i.e. the nature of the impedance parameters. It can be ``fixed``, ``variable``, or ``variable_kp`` (kd is adjusted to provide critically damped behavior).
239
+ * ``kp_limits, damping_ratio_limits``: Only relevant if ``impedance_mode`` is set to ``variable`` or ``variable_kp``. Sets the limits for the resulting action space for variable impedance gains.
240
+ * ``control_delta``: Only relevant for ``OSC_POSE`` or ``OSC_POSITION`` controllers. ``true`` interprets input actions as delta values from the current robot end-effector position. Otherwise, assumed to be absolute (global) values
241
+ * ``uncouple_pos_ori``: Only relevant for ``OSC_POSE``. ``true`` decouples the desired position and orientation torques when executing the controller
242
+
243
+ Loading a Controller
244
+ ---------------------
245
+ By default, users will use the ``load_composite_controller_config()`` method to create a controller configuration.
246
+
247
+ Using a Default Controller Configuration
248
+ *****************************************
249
+ Any controller can be used with its default configuration, and can be easily loaded into a given environment by calling its name as shown below (where ``controller`` is one of acceptable controller ``type`` strings):
250
+
251
+ .. code-block:: python
252
+
253
+ import robosuite as suite
254
+ from robosuite import load_composite_controller_config
255
+
256
+ # Load the desired controller config with default Basic controller
257
+ config = load_composite_controller_config(controller="BASIC")
258
+
259
+ # Create environment
260
+ env = suite.make("Lift", robots="Panda", controller_configs=config, ... )
261
+
262
+
263
+ Using a Custom Controller Configuration
264
+ ****************************************
265
+ A custom controller configuration can also be used by simply creating a new config (``.json``) file with the relevant parameters as specified above. All robosuite environments have an optional ``controller_configs`` argument that can be used to pass in specific controller settings. Note that this is expected to be a ``dict``, so the new configuration must be read in and parsed as a ``dict`` before passing it during the environment ``robosuite.make(...)`` call. A brief example script showing how to import a custom controller configuration is shown below.
266
+
267
+
268
+ .. code-block:: python
269
+
270
+ import robosuite as suite
271
+ from robosuite import load_composite_controller_config
272
+
273
+ # Path to config file
274
+ controller_fpath = "/your/custom/config/filepath/here/filename.json"
275
+
276
+ # Import the file as a dict
277
+ config = load_composite_controller_config(controller=controller_fpath)
278
+
279
+ # Create environment
280
+ env = suite.make("Lift", robots="Panda", controller_configs=config, ... )
281
+
docs/modules/devices.md ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # I/O Devices
2
+
3
+ Devices are used to read user input and teleoperate simulated robots in real-time. This is achieved by using a keyboard, a [SpaceMouse](https://www.3dconnexion.com/spacemouse_compact/en/) or a [DualSense](https://www.playstation.com/en-us/accessories/dualsense-wireless-controller/) joystick, whose teleoperation capabilities can be demonstrated with the [demo_device_control.py](../demos.html#teleoperation) script. More generally, we support any interface that implements the [Device](../simulation/device) abstract base class. In order to support your own custom device, simply subclass this base class and implement the required methods.
4
+
5
+ ## Keyboard
6
+
7
+ We support keyboard input through the OpenCV2 window created by the mujoco renderer.
8
+
9
+ **Keyboard controls**
10
+
11
+ Note that the rendering window must be active for these commands to work.
12
+
13
+ | Keys | Command |
14
+ | :------------------ | :----------------------------------------- |
15
+ | Ctrl+q | reset simulation |
16
+ | spacebar | toggle gripper (open/close) |
17
+ | up-right-down-left | move horizontally in x-y plane |
18
+ | .-; | move vertically |
19
+ | o-p | rotate (yaw) |
20
+ | y-h | rotate (pitch) |
21
+ | e-r | rotate (roll) |
22
+ | b | toggle arm/base mode (if applicable) |
23
+ | s | switch active arm (if multi-armed robot) |
24
+ | = | switch active robot (if multi-robot env) |
25
+ | Ctrl+C | quit |
26
+
27
+
28
+ ## 3Dconnexion SpaceMouse
29
+
30
+ We support the use of a [SpaceMouse](https://www.3dconnexion.com/spacemouse_compact/en/) as well.
31
+
32
+ **3Dconnexion SpaceMouse controls**
33
+
34
+ | Control | Command |
35
+ | :------------------------ | :------------------------------------ |
36
+ | Right button | reset simulation |
37
+ | Left button (hold) | close gripper |
38
+ | Move mouse laterally | move arm horizontally in x-y plane |
39
+ | Move mouse vertically | move arm vertically |
40
+ | Twist mouse about an axis | rotate arm about a corresponding axis |
41
+ | b | toggle arm/base mode (if applicable) |
42
+ | s | switch active arm (if multi-armed robot) |
43
+ | = | switch active robot (if multi-robot environment) |
44
+ | Ctrl+C (keyboard) | quit |
45
+
46
+ ## Sony DualSense
47
+
48
+ We support the use of a [Sony DualSense](https://www.playstation.com/en-us/accessories/dualsense-wireless-controller/) as well.
49
+
50
+ **Sony DualSense controls**
51
+
52
+ | Control | Command |
53
+ | :--------------------------- | :------------------------------------ |
54
+ | Square button | reset simulation |
55
+ | Circle button (hold) | close gripper |
56
+ | Move LX/LY Stick | move arm horizontally in x-y plane |
57
+ | Press L2 Trigger with or without L1 button | move arm vertically |
58
+ | Move RX/RY Stick | rotate arm about x/y axis (roll/pitch) |
59
+ | Press R2 Trigger with or without R1 button | rotate arm about z axis (yaw) |
60
+ | Triangle button | toggle arm/base mode (if applicable) |
61
+ | Left/Right Direction Pad | switch active arm (if multi-armed robot) |
62
+ | Up/Down Direction Pad | switch active robot (if multi-robot environment) |
63
+ | Ctrl+C (keyboard) | quit |
64
+
65
+ ## Mujoco GUI Device
66
+
67
+ To use the Mujoco GUI device for teleoperation, follow these steps:
68
+
69
+ 1. Set renderer as `"mjviewer"`. For example:
70
+
71
+ ```python
72
+ env = suite.make(
73
+ **options,
74
+ renderer="mjviewer",
75
+ has_renderer=True,
76
+ has_offscreen_renderer=False,
77
+ ignore_done=True,
78
+ use_camera_obs=False,
79
+ )
80
+ ```
81
+
82
+ Note: if using Mac, please use `mjpython` instead of `python`. For example:
83
+
84
+ ```mjpython robosuite/scripts/collect_human_demonstrations.py --environment Lift --robots Panda --device mjgui --camera frontview --controller WHOLE_BODY_IK```
85
+
86
+ 2. Double click on a mocap body to select a body to drag, then:
87
+
88
+ On Linux: `Ctrl` + right click to drag the body's position. `Ctrl` + left click to control the body's orientation.
89
+ On Mac: `fn` + `Ctrl` + right click.
docs/modules/environments.md ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Environments
2
+
3
+ Environments are the main **robosuite** API objects that external code will interact with. Each environment corresponds to a robot manipulation task and provides a standard interface for an agent to interact with the environment. While **robosuite** can support environments from different robotic domains, the current release focuses on manipulation environments.
4
+
5
+ Next, we will describe how to create an environment, how to interact with an environment, and how each environment creates a simulated task in the MuJoCo physics engine. We will use the `TwoArmLift` environment as a running example for each section.
6
+
7
+ ## Making an Environment
8
+
9
+ Environments are created by calling `robosuite.make` with the name of the task and with a set of arguments that configure environment properties. We provide a few examples of different use cases below.
10
+
11
+ ```python
12
+ import robosuite
13
+ from robosuite.controllers import load_composite_controller_config
14
+
15
+ # BASIC controller: arms controlled using OSC, mobile base (if present) using JOINT_VELOCITY, other parts controlled using JOINT_POSITION
16
+ controller_config = load_composite_controller_config(controller="BASIC")
17
+
18
+ # create an environment to visualize on-screen
19
+ env = robosuite.make(
20
+ "TwoArmLift",
21
+ robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot
22
+ gripper_types="default", # use default grippers per robot arm
23
+ controller_configs=controller_config, # arms controlled via OSC, other parts via JOINT_POSITION/JOINT_VELOCITY
24
+ env_configuration="opposed", # (two-arm envs only) arms face each other
25
+ has_renderer=True, # on-screen rendering
26
+ render_camera="frontview", # visualize the "frontview" camera
27
+ has_offscreen_renderer=False, # no off-screen rendering
28
+ control_freq=20, # 20 hz control for applied actions
29
+ horizon=200, # each episode terminates after 200 steps
30
+ use_object_obs=False, # no observations needed
31
+ use_camera_obs=False, # no observations needed
32
+ )
33
+
34
+ # create an environment for policy learning from low-dimensional observations
35
+ env = robosuite.make(
36
+ "TwoArmLift",
37
+ robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot
38
+ gripper_types="default", # use default grippers per robot arm
39
+ controller_configs=controller_config, # arms controlled via OSC, other parts via JOINT_POSITION/JOINT_VELOCITY
40
+ env_configuration="opposed", # (two-arm envs only) arms face each other
41
+ has_renderer=False, # no on-screen rendering
42
+ has_offscreen_renderer=False, # no off-screen rendering
43
+ control_freq=20, # 20 hz control for applied actions
44
+ horizon=200, # each episode terminates after 200 steps
45
+ use_object_obs=True, # provide object observations to agent
46
+ use_camera_obs=False, # don't provide image observations to agent
47
+ reward_shaping=True, # use a dense reward signal for learning
48
+ )
49
+
50
+ # create an environment for policy learning from pixels
51
+ env = robosuite.make(
52
+ "TwoArmLift",
53
+ robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot
54
+ gripper_types="default", # use default grippers per robot arm
55
+ controller_configs=controller_config, # arms controlled via OSC, other parts via JOINT_POSITION/JOINT_VELOCITY
56
+ env_configuration="opposed", # (two-arm envs only) arms face each other
57
+ has_renderer=False, # no on-screen rendering
58
+ has_offscreen_renderer=True, # off-screen rendering needed for image obs
59
+ control_freq=20, # 20 hz control for applied actions
60
+ horizon=200, # each episode terminates after 200 steps
61
+ use_object_obs=False, # don't provide object observations to agent
62
+ use_camera_obs=True, # provide image observations to agent
63
+ camera_names="agentview", # use "agentview" camera for observations
64
+ camera_heights=84, # image height
65
+ camera_widths=84, # image width
66
+ reward_shaping=True, # use a dense reward signal for learning
67
+ )
68
+ ```
69
+
70
+ ### Modular Design
71
+
72
+ We provide a few additional details on a few keyword arguments below to highlight the modular structure of creating **robosuite** environments, and how easy it is to configure different environment features.
73
+
74
+ - `robots` : this argument can be used to easily instantiate tasks with different robot arms. For example, we could change the task to use two "Jaco" robots by passing `robots=["Jaco", "Jaco"]`. Once the environment is initialized, these robots (as captured by the [Robot](../simulation/robot.html#robot) class) can be accessed via the `robots` array attribute within the environment, i.e.: `env.robots[i]` for the `ith` robot arm in the environment.
75
+ - `gripper_types` : this argument can be used to easily swap out different grippers for each robot arm. For example, suppose we want to swap the default grippers for the arms in the example above. We could just pass `gripper_types=["PandaGripper", "RethinkGripper"]` to achieve this. Note that a single type can also be used to automatically broadcast the same gripper type across all arms.
76
+ - `controller_configs` : this argument can be used to easily replace the action space for each robot. For example, if we would like to control the robot using IK instead of OSC, we could use `load_composite_controller_config(controller="WHOLE_BODY_IK")` in the example above.
77
+ - `env_configuration` : this argument is mainly used for two-arm tasks to easily configure how the robots are oriented with respect to one another. For example, in the `TwoArmLift` environment, we could pass `env_configuration="parallel"` instead so that the robot arms are located next to each other, instead of opposite each other.
78
+ - `placement_initializer` : this argument is optional, but can be used to specify a custom `ObjectPositionSampler` to override the default start state distribution for Mujoco objects. Samplers are responsible for sampling a set of valid, non-colliding placements for all of the objects in the scene at the start of each episode (e.g. when `env.reset()` is called).
79
+
80
+ ## Interacting with an Environment
81
+
82
+ ### Policy Loop
83
+
84
+ ```python
85
+ # this example assumes an env has already been created, and performs one agent rollout
86
+ import numpy as np
87
+
88
+ def get_policy_action(obs):
89
+ # a trained policy could be used here, but we choose a random action
90
+ low, high = env.action_spec
91
+ return np.random.uniform(low, high)
92
+
93
+ # reset the environment to prepare for a rollout
94
+ obs = env.reset()
95
+
96
+ done = False
97
+ ret = 0.
98
+ while not done:
99
+ action = get_policy_action(obs) # use observation to decide on an action
100
+ obs, reward, done, _ = env.step(action) # play action
101
+ ret += reward
102
+ print("rollout completed with return {}".format(ret))
103
+ ```
104
+
105
+ ### Observations
106
+
107
+ **robosuite** observations are dictionaries that include key-value pairs per modality. This makes it easy for agents to work with modalities of different shapes (for example, flat proprioception observations, and pixel observations). Note that any observation entry ending with `*-state` represents a concatenation of all individual observations that belong to `*` modality. Below, we list commonly used observation keys.
108
+
109
+ - `robot0_proprio-state`, `robot1_proprio-state` : proprioception observations for each robot arm. This includes the arm joint positions (encoded using `sin` and `cos`), arm joint velocities, end effector pose, gripper finger positions, and gripper finger velocities. The shape for this modality is flat (e.g. `(N,)`).
110
+ - `object-state` : task-specific object observations. For example, the `TwoArmLift` environment provides the pose of the pot, the position of each handle, and the relative position of each robot gripper with respect to each handle. The shape for this modality is flat (e.g. `(N,)`).
111
+ - `{camera_name}_image` : image observations for camera with name `camera_name`. The shape for this modality is `(H, W, 3)` where `H` and `W` are the height and width of the image respectively. By default, the returned image convention is mujoco's native `opengl` ("flipped"). This can alternatively be set to `opencv` convention (unflipped) via the `IMAGE_CONVENTION` macro in `macros.py`.
112
+ - `{camera_name}_depth` : depth image observations for camera with name `camera_name`. The shape for this modality is `(H, W)` where `H` and `W` are the height and width of the image respectively. By default, the returned image convention is mujoco's native `opengl` ("flipped"). This can alternatively be set to `opencv` convention (unflipped) via the `IMAGE_CONVENTION` macro in `macros.py`.
113
+ - `image-state` : (optional) stacked image observations. Note that this is disabled by default, and can be toggled via the `CONCATENATE_IMAGES` macro in `macros.py`.
114
+
115
+ ### Rewards and Termination
116
+
117
+ Each environment implements a reward function in the `reward` method of each environment class. The reward can either be a binary success or failure reward (nonzero if the current state is a task completion state) or a dense, shaped reward that is crafted to be (mostly) non-negative and non-decreasing along trajectories that solve the task. The reward function that is used is determined by the `reward_shaping` argument. The binary success check that is used to compute the sparse reward is implemented in the `_check_success` method of each environment class.
118
+
119
+ Importantly, **robosuite** environments do not terminate if a success criterion is reached, but always continue for a fixed number of timesteps, determined by the `horizon` argument. This is a standard design decision for reinforcement learning in robot manipulation domains.
120
+
121
+ We provide an example via the reward function and success criteria for `TwoArmLift` below. Note that for simplicity, we provide function aliases instead of actual implementation details so that the logic remains easy to follow:
122
+
123
+ For the success criteria, we simply want to check if the pot is successfully lifted above a certain height threshold over the table, and return `True` or `False` accordingly.
124
+
125
+ ```python
126
+ def _check_success(self):
127
+ pot_height = get_pot_height()
128
+ table_height = get_table_height()
129
+ return pot_height > table_height + 0.10
130
+ ```
131
+
132
+ The reward function is a bit more involved. First, we initialize our reward variable to 0 and grab relevant sensory data from the environment, checking to see if the pot is tilted or not.
133
+ ```python
134
+ def reward(self, action=None):
135
+ reward = 0
136
+ pot_tilt = get_pot_tilt()
137
+
138
+ # check if the pot is tilted more than 30 degrees
139
+ cos_30 = np.cos(np.pi / 6)
140
+ direction_coef = 1 if pot_tilt >= cos_30 else 0
141
+ ```
142
+
143
+ Next, we first check to see if we have completed the task (the pot being lifted above the table and not overly tilted), and if so, apply the un-normalized reward.
144
+ ```python
145
+ if self._check_success():
146
+ reward = 3.0 * direction_coef
147
+ ```
148
+
149
+ Otherwise, we'll only provide partial rewards if we're using reward shaping, and calculate the appropriate reward.
150
+ ```python
151
+ elif self.reward_shaping:
152
+
153
+ # lifting reward (smooth value between [0, 1.5])
154
+ pot_height = get_pot_height()
155
+ r_lift = min(max(pot_height - 0.05, 0), 0.15)
156
+ reward += 10. * direction_coef * r_lift
157
+
158
+ # reaching reward (smooth value between [0, 1])
159
+ left_hand_handle_distance = get_left_distance()
160
+ right_hand_handle_distance = get_right_distance()
161
+ reward += 0.5 * (1 - np.tanh(10.0 * left_hand_handle_distance))
162
+ reward += 0.5 * (1 - np.tanh(10.0 * right_hand_handle_distance))
163
+
164
+ # grasping reward (discrete values between [0, 0.5])
165
+ left_hand_handle_contact = is_left_contact()
166
+ right_hand_handle_contact = is_right_contact()
167
+ if left_hand_handle_contact:
168
+ reward += 0.25
169
+ if right_hand_handle_contact:
170
+ reward += 0.5
171
+ ```
172
+
173
+ Lastly, we need to normalize our reward and then re-scale its value to `reward_scale` if it is specified before finally returning the calculated reward.
174
+ ```python
175
+ if self.reward_scale is not None:
176
+ reward *= self.reward_scale / 3.0
177
+
178
+ return reward
179
+ ```
180
+
181
+ ## Task Models
182
+
183
+ Every environment owns its own `MJCF` model that sets up the MuJoCo physics simulation by loading the robots, the workspace, and the objects into the simulator appropriately. This MuJoCo simulation model is programmatically instantiated in the `_load_model` function of each environment, by creating an instance of the `Task` class.
184
+
185
+ Each `Task` class instance owns an `Arena` model, a list of `RobotModel` instances, and a list of `ObjectModel` instances. These are **robosuite** classes that introduce a useful abstraction in order to make designing scenes in MuJoCo easy. Every `Arena` is based off of an xml that defines the workspace (for example, table or bins) and camera locations. Every `RobotModel` is a MuJoCo model representing an arbitrary robot (for `ManipulationModel`s, these represent armed robots, e.g. Sawyer, Panda, etc.). Every `ObjectModel` corresponds to a physical object loaded into the simulation (e.g. cube, pot with handles, etc.).
186
+
187
+ ## Task Descriptions
188
+
189
+ While **robosuite** can support environments from different robotic domains, the current release focuses on manipulation environments (`ManipulationEnv`). We provide a brief description of each environment below. For benchmarking results on these standardized environments, please check out the [Benchmarking](../algorithms/benchmarking) page.
190
+
191
+ ### Single-Arm Tasks
192
+
193
+ #### Block Lifting
194
+
195
+ ![env_lift](../images/env_lift.png)
196
+
197
+ - **Scene Description**: A cube is placed on the tabletop in front of a single robot arm.
198
+ - **Goal**: The robot arm must lift the cube above a certain height.
199
+ - **Start State Distribution**: The cube location is randomized at the beginning of each episode.
200
+
201
+ #### Block Stacking
202
+
203
+ ![env_stack](../images/env_stack.png)
204
+
205
+ - **Scene Description**: Two cubes are placed on the tabletop in front of a single robot arm.
206
+ - **Goal**: The robot must place one cube on top of the other cube.
207
+ - **Start State Distribution**: The cube locations are randomized at the beginning of each episode.
208
+
209
+ #### Pick-and-Place
210
+
211
+ ![env_pick_place](../images/env_pick_place.png)
212
+
213
+ - **Scene Description**: Four objects are placed in a bin in front of a single robot arm. There are four containers next to the bin.
214
+ - **Goal**: The robot must place each object into its corresponding container. This task also has easier single-object variants.
215
+ - **Start State Distribution**: The object locations are randomized at the beginning of each episode.
216
+
217
+ #### Nut Assembly
218
+
219
+ ![env_nut_assembly](../images/env_nut_assembly.png)
220
+
221
+ - **Scene Description**: Two colored pegs (one square and one round) are mounted on the tabletop, and two colored nuts (one square and one round) are placed on the table in front of a single robot arm.
222
+ - **Goal**: The robot must fit the square nut onto the square peg and the round nut onto the round peg. This task also has easier single nut-and-peg variants.
223
+ - **Start State Distribution**: The nut locations are randomized at the beginning of each episode.
224
+
225
+ #### Door Opening
226
+
227
+ ![env_door](../images/env_door.png)
228
+
229
+ - **Scene Description**: A door with a handle is mounted in free space in front of a single robot arm.
230
+ - **Goal**: The robot arm must learn to turn the handle and open the door.
231
+ - **Start State Distribution**: The door location is randomized at the beginning of each episode.
232
+
233
+ #### Table Wiping
234
+
235
+ ![env_door](../images/env_wipe.png)
236
+
237
+ - **Scene Description**: A table with a whiteboard surface and some markings is placed in front of a single robot arm, which has a whiteboard eraser mounted on its hand.
238
+ - **Goal**: The robot arm must learn to wipe the whiteboard surface and clean all of the markings.
239
+ - **Start State Distribution**: The whiteboard markings are randomized at the beginning of each episode.
240
+
241
+ ### Two-Arm Tasks
242
+
243
+ #### Two Arm Lifting
244
+
245
+ ![env_two_arm_lift](../images/env_two_arm_lift.png)
246
+
247
+ - **Scene Description**: A large pot with two handles is placed on a table top. Two robot arms are placed on the same side of the table or on opposite ends of the table.
248
+ - **Goal**: The two robot arms must each grab a handle and lift the pot together, above a certain height, while keeping the pot level.
249
+ - **Start State Distribution**: The pot location is randomized at the beginning of each episode.
250
+
251
+ #### Two Arm Peg-In-Hole
252
+
253
+ ![env_two_arm_peg_in_hole](../images/env_two_arm_peg_in_hole.png)
254
+
255
+ - **Scene Description**: Two robot arms are placed either next to each other or opposite each other. One robot arm holds a board with a square hole in the center, and the other robot arm holds a long peg.
256
+ - **Goal**: The two robot arms must coordinate to insert the peg into the hole.
257
+ - **Start State Distribution**: The initial arm configurations are randomized at the beginning of each episode.
258
+
259
+ #### Two Arm Handover
260
+
261
+ ![env_two_arm_handover](../images/env_two_arm_handover.png)
262
+
263
+ - **Scene Description**: A hammer is placed on a narrow table. Two robot arms are placed on the same side of the table or on opposite ends of the table.
264
+ - **Goal**: The two robot arms must coordinate so that the arm closer to the hammer picks it up and hands it to the other arm.
265
+ - **Start State Distribution**: The hammer location and size is randomized at the beginning of each episode.
docs/modules/objects.md ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Objects
2
+
3
+ ## How to create a custom object
4
+ Objects, such as boxes and cans, are essential to building manipulation environments. We designed the [MujocoObject](../source/robosuite.models.objects.html#robosuite.models.objects.objects.MujocoObject) interfaces to standardize and simplify the procedure for importing 3D models into the scene or procedurally generate new objects. MuJoCo defines models via the [MJCF](http://www.mujoco.org/book/modeling.html) XML format. These MJCF files can either be stored as XML files on disk and loaded into simulator, or be created on-the-fly by code prior to simulation. Based on these two mechanisms of how MJCF models are created, we offer two main ways of creating your own object:
5
+
6
+ * Define an object in an MJCF XML file;
7
+ * Use procedural generation APIs to dynamically create an MJCF model.
8
+
9
+ ## The MujocoObject class
10
+ ```python
11
+ class MujocoObject(MujocoModel):
12
+ def __init__(...):
13
+
14
+ ...
15
+
16
+ # Attributes that should be filled in within the subclass
17
+ self._name = None
18
+ self._obj = None
19
+
20
+ # Attributes that are auto-filled by _get_object_properties call
21
+ self._root_body = None
22
+ self._bodies = None
23
+ self._joints = None
24
+ self._actuators = None
25
+ self._sites = None
26
+ self._contact_geoms = None
27
+ self._visual_geoms = None
28
+ ```
29
+ `MujocoObject` is the base class of all objects. One must note that it is not a subclass of `MujocoXML`, but does extend from the unifying `MujocoModel` class from which all simulation models (including robots, grippers, etc.) should extend. All of the attributes shown above prepended with a `_` are intended to be private variables and not accessed by external objects. Instead, any of these properties can be accessed via its public version, without the `_` (e.g.: to access all the object's joints, call `obj.joints` instead of `obj._joints`). This is because all public attributes are automatically post-processed from their private counterparts and have naming prefixes appended to them.
30
+
31
+ The XML of an object is generated once during initialization via the `_get_object_subtree` call, after which any external object can extract a reference to this XML via the `get_obj` call.
32
+ ```python
33
+ def _get_object_subtree(self):
34
+ pass
35
+
36
+ def get_obj(self):
37
+ pass
38
+ ```
39
+
40
+ Additionally, objects are usually placed relatively. For example, we want to put an object on a table or place a cube on top of another. Instance methods `get_bottom_offset`, `get_top_offset`, `get_horizontal_radius` provide the necessary information to place objects properly.
41
+ ```python
42
+ def get_bottom_offset(self):
43
+ pass
44
+
45
+ def get_top_offset(self):
46
+ pass
47
+
48
+ def get_horizontal_radius(self):
49
+ pass
50
+ ```
51
+ This allows us to do things like the following.
52
+ ```python
53
+ table_top = np.array([0, 1, 0])
54
+ bottom_offset = obj.get_bottom_offset()
55
+ pos = table_top - bottom_offset # pos + bottom_offset = table_top
56
+ obj_xml = obj.get_obj().set("pos", array_to_string(pos)) # Set the top-level body of this object
57
+ ```
58
+
59
+ ## Creating a XMLObject
60
+ One can use MuJoCo MJCF XML to generate an object, either as a composition of primitive [geoms](http://mujoco.org/book/modeling.html#geom) or imported from STL files of triangulated [meshes](http://www.mujoco.org/book/modeling.html#mesh). An example is `robosuite.models.objects.xml_objects.BreadObject`. Its [python definition](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/models/objects/xml_objects.py#L49) is short. Note that all `MujocoXMLObject` classes require both a `fname` and `name` argument, the former of which specifies the filepath to the raw XML file and the latter of which specifies the in-sim name of the object instantiated. The optional `joints` argument can also specify a custom set of joints to apply to the given object (defaults to "default", which is a single free joint). This joint argument determines the DOF of the object as a whole and does not interfere with the joints already in the object. Additionally, the type of object created can be specified via the `obj_type` argument, and must be one of (`'collision'`, `'visual'`, or `'all'`). Lastly, setting `duplicate_collision_geoms` makes sure that all collision geoms automatically have an associated visual geom as well. Generally, the normal use case is to define a single class corresponding to a specific XML file, as shown below:
61
+ ```python
62
+ class BreadObject(MujocoXMLObject):
63
+ def __init__(self, name):
64
+ super().__init__(xml_path_completion("objects/bread.xml"),
65
+ name=name, joints=[dict(type="free", damping="0.0005")],
66
+ obj_type="all", duplicate_collision_geoms=True)
67
+ ```
68
+
69
+ In the corresponding XML file, a few key definitions must be present. The top-level, un-named body must contain, as immediate children, (a) the actual object body (or bodies; the top-level **must** be named `object`) and (b) three site tags named `bottom_site`, `top_site`, and `horizontal_radius_site`, whose `pos` values must be specified. The example for the `BreadObject`, [bread.xml](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/models/assets/objects/bread.xml), is shown below:
70
+ ```xml
71
+ <mujoco model="bread">
72
+ <asset>
73
+ <mesh file="meshes/bread.stl" name="bread_mesh" scale="0.8 0.8 0.8"/>
74
+ <texture file="../textures/bread.png" type="2d" name="tex-bread" />
75
+ <material name="bread" reflectance="0.7" texrepeat="15 15" texture="tex-bread" texuniform="true"/>
76
+ </asset>
77
+ <worldbody>
78
+ <body>
79
+ <body name="object">
80
+ <geom pos="0 0 0" mesh="bread_mesh" type="mesh" solimp="0.998 0.998 0.001" solref="0.001 1" density="50" friction="0.95 0.3 0.1" material="bread" group="0" condim="4"/>
81
+ </body>
82
+ <site rgba="0 0 0 0" size="0.005" pos="0 0 -0.045" name="bottom_site"/>
83
+ <site rgba="0 0 0 0" size="0.005" pos="0 0 0.03" name="top_site"/>
84
+ <site rgba="0 0 0 0" size="0.005" pos="0.03 0.03 0" name="horizontal_radius_site"/>
85
+ </body>
86
+ </worldbody>
87
+ </mujoco>
88
+ ```
89
+ Concretely,
90
+ * `_get_object_subtree` looks for the object body (or bodies) as defined by all nested bodies beginning with the `object`-named body tag.
91
+ * `bottom_site` should be the bottom of the object, i.e. contact point with the surface it is placed on.
92
+ * `top_site` should be the top of the object, i.e. contact point if something is placed on it.
93
+ * `horizontal_radius_site` can be any point on a circle in the x-y plane that does not intersect the object. This allows us to place multiple objects without having them collide into one another.
94
+
95
+ ## Creating a procedurally generated object
96
+ Procedurally generated objects have been used in [several](https://arxiv.org/abs/1802.09564) [recent](https://arxiv.org/abs/1806.09266) [works](https://arxiv.org/abs/1709.07857) to train control policies with improved robustness and generalization. Here you can programmatically generate an MJCF XML of an object from scratch using `xml.etree.ElementTree`, and compose an object of multiple geom primitives. The base class for this type of object is `MujocoGeneratedObject`.
97
+ **robosuite** natively supports all Mujoco primitive objects with procedurally-generated `PrimitiveObject` classes (`BoxObject`, `BallObject`, `CapsuleObject`, and `CylinderObject`).
98
+
99
+ Additionally, **robosuite** supports custom, complex objects that can be defined by collections of primitive geoms (the [CompositeObject](../source/robosuite.models.objects.html#robosuite.models.objects.generated_objects.CompositeObject) class) or even other objects (the [CompositeBodyObject](../source/robosuite.models.objects.html#robosuite.models.objects.generated_objects.CompositeBodyObject) class). The APIs for each of these classes have been standardized for ease of usage, and interested readers should consult the docstrings for each of these classes, as well as provided examples of each class ([HammerObject](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/models/objects/composite/hammer.py#L10), [HingedBoxObject](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/models/objects/composite_body/hinged_box.py#L8)).
100
+
101
+ It should also be noted that all of the above classes extending from the `MujocoGeneratedObject` class automatically support custom texture definitions on a per-geom level, where specific texture images can be mapped to individual geoms. The above `HammerObject` showcases an example applying custom textures to different geoms of the resulting object.
102
+
103
+ ## Placing Objects
104
+
105
+ Object locations are initialized on every environment reset using instances of the [ObjectPositionSampler](../source/robosuite.utils.html#robosuite.utils.placement_samplers.ObjectPositionSampler) class. Object samplers use the `bottom_site` and `top_site` sites of each object in order to place objects on top of other objects, and the `horizontal_radius_site` site in order to ensure that objects do not collide with one another. The most basic sampler is the [UniformRandomSampler](../source/robosuite.utils.html#robosuite.utils.placement_samplers.UniformRandomSampler) class - this just uses rejection sampling to place objects randomly. As an example, consider the following code snippet from the `__init__` method of the `Lift` environment class.
106
+
107
+ ```python
108
+ self.placement_initializer = UniformRandomSampler(
109
+ name="ObjectSampler",
110
+ mujoco_objects=self.cube,
111
+ x_range=[-0.03, 0.03],
112
+ y_range=[-0.03, 0.03],
113
+ rotation_axis='z',
114
+ rotation=None,
115
+ ensure_object_boundary_in_range=False,
116
+ ensure_valid_placement=True,
117
+ reference_pos=self.table_offset,
118
+ z_offset=0.01,
119
+ )
120
+ ```
121
+
122
+ This will sample the `self.cube`'s object location uniformly at random in a box of size `0.03` (`x_range`, `y_range`) with random (`rotation`) z-rotation (`rotation_axis`), and with an offset of `0.01` (`z_offset`) above the table surface location (`reference_pos`). The sampler will also make sure that the entire object boundary falls within the sampling box size (`ensure_object_boundary_in_range`) and does not collide with any placed objects (`ensure_valid_placement`).
123
+
124
+ Another common sampler is the [SequentialCompositeSampler](../source/robosuite.utils.html#robosuite.utils.placement_samplers.SequentialCompositeSampler), which is useful for composing multiple arbitrary placement samplers together. As an example, consider the following code snippet from the `__init__` method of the `NutAssembly` environment class.
125
+
126
+ ```python
127
+ # Establish named references to each nut object
128
+ nut_names = ("SquareNut", "RoundNut")
129
+
130
+ # Initialize the top-level sampler
131
+ self.placement_initializer = SequentialCompositeSampler(name="ObjectSampler")
132
+
133
+ # Create individual samplers per nut
134
+ for nut_name, default_y_range in zip(nut_names, ([0.11, 0.225], [-0.225, -0.11])):
135
+ self.placement_initializer.append_sampler(
136
+ sampler=UniformRandomSampler(
137
+ name=f"{nut_name}Sampler",
138
+ x_range=[-0.115, -0.11],
139
+ y_range=default_y_range,
140
+ rotation=None,
141
+ rotation_axis='z',
142
+ ensure_object_boundary_in_range=False,
143
+ ensure_valid_placement=True,
144
+ reference_pos=self.table_offset,
145
+ z_offset=0.02,
146
+ )
147
+ )
148
+
149
+ # No objects have been assigned to any samplers yet, so we do that now
150
+ for i, (nut_cls, nut_name) in enumerate(zip(
151
+ (SquareNutObject, RoundNutObject),
152
+ nut_names,
153
+ )):
154
+ nut = nut_cls(name=nut_name)
155
+ self.placement_initializer.add_objects_to_sampler(sampler_name=f"{nut_name}Sampler", mujoco_objects=nut)
156
+ ```
157
+
158
+ The code snippet above results in two `UniformRandomSampler` instances being used to place the nuts onto the table surface - one for each type of nut. Notice this also allows the nuts to be initialized in separate regions of the table, and with arbitrary sampling settings. The `SequentialCompositeSampler` makes it easy to compose multiple placement initializers together and assign objects to each sub-sampler in a modular way.
docs/modules/overview.md ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Overview
2
+
3
+ ![module_overview_diagram](../images/module_overview.png)
4
+
5
+ Our framework offers two main categories of APIs: 1) **Modeling APIs** for defining simulation environments in a modular and programmatic fashion, and 2) **Simulation APIs** for interfacing with external inputs such as from a **Policy** or an **I/O Device**. A **Simulation Model** specified by the Modeling APIs is instantiated by the **MuJoCo Engine** to create a simulation runtime, called **Environment**. The Environment generates observations through the **Sensors**, such as cameras and proprioception, and receives action commands from policies or devices through the **Controllers** of the **Robots**. The diagram above illustrates the key components in our framework and their relationships.
6
+
7
+ A simulation model is defined by a [Task](../modeling/task) object, which encapsulates three essential constituents of robotic simulation: [Robot Model](../modeling/robot_model)s, [Object Model](../modeling/object_model)s, and [Arena](../modeling/arena). A task may contain one or more robots, zero to many objects, and a single arena. The Robot Model loads models of robots and optionally other models as well; for example, the [Manipulator](../modeling/robot_model.html#manipulator-model) robot model class also loads corresponding [Gripper Model](../modeling/robot_model.html#gripper-model)s from XML files. The Object Model can be either loaded from 3D object assets or procedurally generated with programmatic APIs. The Arena defines the workspace of the robot, including the environment fixtures, such as a tabletop, and their placements. The task class combines these constituents into a single XML object in MuJoCo's [MJCF modeling language](http://www.mujoco.org/book/XMLreference.html). This MJCF object is passed to the MuJoCo engine through the [mujoco](https://mujoco.readthedocs.io/en/latest/python.html) library to instantiate the [MjModel](https://mujoco.readthedocs.io/en/latest/APIreference.html?highlight=MjModel#mjmodel) object for simulation runtime.
8
+
9
+ The [Environment](environments) object provides [OpenAI Gym](https://gym.openai.com/)-style APIs for external inputs to interface with the simulation. External inputs correspond to the action commands used to control the [Robots](robots) and any actuators it owns (for example, in the case of a manipulator robot the arm joints and gripper), where the kinematic component of the action spaces are specific to the [Controllers](controllers) used by the robots. For instance, for joint-space controllers of a robot manipulator arm, the action space corresponds to the number of joints of the robot, and for operational space controllers, the action space corresponds to 3D Cartesian movement or 6D pose of the end-effector. These action commands can either be automatically generated by an algorithm (such as a deep neural network policy) or come from [I/O devices](devices) for human teleoperation (such as the keyboard). The controllers of the robots are responsible for interpreting these action commands and transforming them into the low-level torques passing to the underlying physics engine, which performs internal computations to determine the next state of the simulation. [Sensors](./sensors) retrieve information from the MjSim object and generate observations as the physical signals that the robots receive as response to their actions. Our framework supports multimodal sensing modalities, such as RGB-D cameras, force-torque measurements, and proprioceptive data, and also provides modular APIs to model realistic sensor dynamics. In addition to these sensory data, environments also provide additional information about the task progress and success conditions, including reward functions (for reinforcement learning) and other meta-data. For more information, please check out the descriptions of the individual components in this section.
10
+
docs/modules/renderers.md ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Renderers
2
+
3
+ [Renderers](../source/robosuite.renderers) are used to visualize the simulation and can be used either in on-screen mode or headless (off-screen) mode. Renderers are also responsible for generating image-based observations that are returned from a given environment, and compute virtual images of the environment based on the properties defined in the cameras.
4
+
5
+ Currently, the following ground-truth vision modalities are supported by the MuJoCo renderer:
6
+
7
+ - **RGB**: Standard 3-channel color frames with values in range `[0, 255]`. This is set during environment construction with the `use_camera_obs` argument.
8
+ - **Depth**: 1-channel frame with normalized values in range `[0, 1]`. This is set during environment construction with the `camera_depths` argument.
9
+ - **Segmentation**: 1-channel frames with pixel values corresponding to integer IDs for various objects. Segmentation can
10
+ occur by class, instance, or geom, and is set during environment construction with the `camera_segmentations` argument.
11
+
12
+ **robosuite** presents the following rendering options:
13
+
14
+ <!-- ![Comparison of renderer options](../images/renderers/renderers.png "Comparison of renderer options") -->
15
+
16
+ ## MuJoCo Default Renderer
17
+
18
+ MuJoCo exposes users to an OpenGL context supported by [mujoco](https://mujoco.readthedocs.io/en/latest/python.html#rendering). Based on [OpenGL](https://www.opengl.org/), our assets and environment definitions have been tuned to look good with this renderer. The rendered frames can be displayed in a window with [OpenCV's imshow](https://pythonexamples.org/python-opencv-imshow/).
19
+
20
+ ![MuJoCo rendering](../images/gr1_cereal_mujoco.png "MuJoCo Default Renderer")
21
+
22
+ ## Isaac Rendering
23
+
24
+ Users are also able to render using photorealistic methods through Isaac Sim. Specifically, users are able to choose between two rendering modes: ray tracing and path tracing. For more information about Isaac Sim rendering options, please visit [here](https://docs.omniverse.nvidia.com/materials-and-rendering/latest/rtx-renderer.html). Isaac renderers are only available to those who are running on a Linux or Windows machine.
25
+
26
+ To install Isaac on your local system, please follow the instructions listed [here](https://isaac-sim.github.io/IsaacLab/main/source/setup/installation/pip_installation.html). Make sure to follow instructions to install both Isaac Sim and Isaac Lab.
27
+
28
+ ### Ray tracing
29
+ ![Ray tracing](../images/gr1_cereal_ray_tracing.png "Ray tracing")
30
+
31
+ Ray tracing can be performed in real time. We are currently working on enhancing the rendering pipeline to support an online viewer with ray tracing capabilities.
32
+
33
+ ### Path tracing
34
+ ![Path tracing](../images/gr1_cereal_path_tracing.png "Path tracing")
35
+
36
+ Path tracing typically offers higher quality and is ideal for offline learning. If you have the time to collect data and plan to train algorithms using offline data, we recommend using path tracing for its photorealistic results.
37
+
38
+ ### Basic usage
39
+
40
+ Once all dependencies for Isaac rendering have been installed, users can run the `robosuite/scripts/render_dataset_with_omniverse.py` script to render previously collected demonstrations using either ray tracing or path tracing. Below we highlight the arguments that can be passed into the script.
41
+
42
+ - **dataset**: Path to hdf5 dataset with the demonstrations to render.
43
+ - **ds_format**: Dataset format (options include `robosuite` and `robomimic` depending on if the dataset was collected using robosuite or robomimic, respectively).
44
+ - **episode**: Episode/demonstration to render. If no episode is provided, all demonstrations will be rendered.
45
+ - **output_directory**: Directory to store outputs from Isaac rendering and USD generation.
46
+ - **cameras**: List of cameras to render images. Cameras must be defined in robosuite.
47
+ - **width**: Width of the rendered output.
48
+ - **height**: Height of the rendered output.
49
+ - **renderer**: Renderer mode to use (options include `RayTracedLighting` or `PathTracing`).
50
+ - **save_video**: Whether to save the outputs renderings as a video.
51
+ - **online**: Enables online rendering and will not save the USD for future rendering offline.
52
+ - **skip_frames**: Renders every nth frame.
53
+ - **hide_sites**: Hides all sites in the scene.
54
+ - **reload_model**: Reloads the model from the Mujoco XML file.
55
+ - **keep_models**: List of names of models to keep from the original Mujoco XML file.
56
+ - **rgb**: Render with the RGB modality. If no other modality is selected, we default to rendering with RGB.
57
+ - **normals**: Render with normals.
58
+ - **semantic_segmentation**: Render with semantic segmentation.
59
+
60
+ Here is an example command to render a video of a demonstration using ray tracing with the RGB and normal modalities.
61
+
62
+ ```bash
63
+ $ python robosuite/scripts/render_dataset_with_omniverse.py --dataset /path/to/demo.hdf5 --ds_format robosuite --episode 1 --camera agentview frontview --width 1920 --height 1080 --renderer RayTracedLighting --save_video --hide_sites --rgb --normals
64
+ ```
65
+
66
+ ### Rendering Speed
67
+
68
+ Below, we present a table showing the estimated frames per second when using these renderers. Note that the exact speed of rendering might depend on your machine and scene size. Larger scenes may take longer to render. Additionally, changing renderer inputs such as samples per pixel (spp) or max bounces might affect rendering speeds. The values below are estimates using the `Lift` task with an NVIDIA GeForce RTX 4090. We use an spp of 64 when rendering with path tracing.
69
+
70
+ | Renderer | Estimated FPS |
71
+ |----------------|---------------|
72
+ | MuJoCo | 3500 |
73
+ | Ray Tracing | 58 |
74
+ | Path Tracing | 2.8 |
docs/modules/robots.rst ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Robots
2
+ =======
3
+
4
+ .. figure:: ../images/robots_module_v15.png
5
+
6
+ **Robots** are a key component in **robosuite**, and serve as the embodiment of a given agent as well as the central interaction point within an environment and key interface to MuJoCo for the robot-related state and control. **robosuite** captures this level of abstraction with the `Robot <../simulation/robot>`_-based classes, with support for both single-armed and bimanual variations, as well as robots with mobile manipulation capabilities, including both legged and wheeled variants. In turn, the Robot class is centrally defined by a `RobotModel <../modeling/robot_model>`_, `RobotBaseModel <../modeling/robot_model.html#robot-base-model>`_, `GripperModel <../modeling/robot_model.html#gripper-model>`_, and `Controller(s) <../simulation/controller>`_. Subclasses of the ``RobotModel`` class may also include additional models as well; for example, the `ManipulatorModel <../modeling/robot_model.html#manipulator-model>`_ class also includes `GripperModel(s) <../modeling/robot_model.html#gripper-model>`_ (with no gripper being represented by a dummy class).
7
+
8
+ The high-level features of **robosuite**'s robots are described as follows:
9
+
10
+ * **Diverse and Realistic Models**: **robosuite** provides models for 10 commercially-available robots (including the humanoid GR1 Robot), 9 grippers (including the inspire dexterous hand model), 4 bases (including the Omron wheeled mobile base), and 6 body-part controllers, with model properties either taken directly from official product documentation or raw spec sheets. An additional 8 robots, 8 grippers, and 3 bases can be installed separately from the `robosuite-models <https://github.com/ARISE-Initiative/robosuite_models>`_ repository.
11
+
12
+ * **Modularized Support**: Robots are designed to be plug-n-play -- any combinations of robots, models, and controllers can be used, assuming the given environment is intended for the desired robot configuration. Because each robot is assigned a unique ID number, multiple instances of identical robots can be instantiated within the simulation without error.
13
+
14
+ * **Self-Enclosed Abstraction**: For a given task and environment, any information relevant to the specific robot instance can be found within the properties and methods within that instance. This means that each robot is responsible for directly setting its initial state within the simulation at the start of each episode, and also directly controls the robot in simulation via torques outputted by its controller's transformed actions.
15
+
16
+ Usage
17
+ =====
18
+ Below, we discuss the usage and functionality of the robots over the course of its program lifetime.
19
+
20
+ Initialization
21
+ --------------
22
+ During environment creation (``suite.make(...)``), individual robots are both instantiated and initialized. The desired RobotModel, RobotBaseModel, and Controller(s) (where multiple and/or additional models may be specified, e.g. for manipulator bimanual robots) are loaded into each robot, with the models being passed into the environment to compose the final MuJoCo simulation object. Each robot is then set to its initial state.
23
+
24
+ Runtime
25
+ -------
26
+ During a given simulation episode (each ``env.step(...)`` call), the environment will receive a set of actions and distribute them accordingly to each robot, according to their respective action spaces. Each robot then converts these actions into low-level torques via their respective controllers, and directly executes these torques in the simulation. At the conclusion of the environment step, each robot will pass its set of robot-specific observations to the environment, which will then concatenate and append additional task-level observations before passing them as output from the ``env.step(...)`` call.
27
+
28
+ Callables
29
+ ---------
30
+ At any given time, each robot has a set of ``properties`` whose real-time values can be accessed at any time. These include specifications for a given robot, such as its DoF, action dimension, and torque limits, as well as proprioceptive values, such as its joint positions and velocities. Additionally, if the robot is enabled with any sensors, those readings can also be polled as well. A full list of robot properties can be found in the `Robots API <../simulation/robot.html>`_ section.
31
+
32
+ Models
33
+ ======
34
+ **robosuite** is designed to be generalizable to multiple robotic domains. The current release focuses on manipulator robots. For adding new robots, we provide a `rudimentary guide <https://docs.google.com/document/d/1bSUKkpjmbKqWyV5Oc7_4VL4FGKAQZx8aWm_nvlmTVmE/edit?usp=sharing>`_ on how to import raw Robot and Gripper models (based on a URDF source file) into robosuite.
35
+
36
+ Manipulators
37
+ ------------
38
+
39
+ .. list-table::
40
+ :widths: 15 50 35
41
+ :header-rows: 1
42
+
43
+ * - Robot
44
+ - Image
45
+ - Description
46
+ * - **Panda**
47
+ - .. image:: ../images/models/robot_model_Panda_isaac.png
48
+ :width: 90%
49
+ :align: center
50
+ - - **DoF:** 7
51
+ - **Default Gripper:** PandaGripper
52
+ - **Default Base:** RethinkMount
53
+ * - **Sawyer**
54
+ - .. image:: ../images/models/robot_model_Sawyer_isaac.png
55
+ :width: 90%
56
+ :align: center
57
+ - - **DoF:** 7
58
+ - **Default Gripper:** RethinkGripper
59
+ - **Default Base:** RethinkMount
60
+ * - **IIWA**
61
+ - .. image:: ../images/models/robot_model_IIWA_isaac.png
62
+ :width: 90%
63
+ :align: center
64
+ - - **DoF:** 7
65
+ - **Default Gripper:** Robotiq140Gripper
66
+ - **Default Base:** RethinkMount
67
+ * - **Jaco**
68
+ - .. image:: ../images/models/robot_model_Jaco_isaac.png
69
+ :width: 90%
70
+ :align: center
71
+ - - **DoF:** 7
72
+ - **Default Gripper:** JacoThreeFingerGripper
73
+ - **Default Base:** RethinkMount
74
+ * - **Kinova3**
75
+ - .. image:: ../images/models/robot_model_Kinova3_isaac.png
76
+ :width: 90%
77
+ :align: center
78
+ - - **DoF:** 7
79
+ - **Default Gripper:** Robotiq85Gripper
80
+ - **Default Base:** RethinkMount
81
+ * - **UR5e**
82
+ - .. image:: ../images/models/robot_model_UR5e_isaac.png
83
+ :width: 90%
84
+ :align: center
85
+ - - **DoF:** 6
86
+ - **Default Gripper:** Robotiq85Gripper
87
+ - **Default Base:** RethinkMount
88
+ * - **Baxter**
89
+ - .. image:: ../images/models/robot_model_Baxter_isaac.png
90
+ :width: 90%
91
+ :align: center
92
+ - - **DoF:** 14
93
+ - **Default Gripper:** RethinkGripper
94
+ - **Default Base:** RethinkMount
95
+ * - **GR1**
96
+ - .. image:: ../images/models/robot_model_GR1_isaac.png
97
+ :width: 90%
98
+ :align: center
99
+ - - **DoF:** 24
100
+ - **Default Gripper:** InspireHands
101
+ - **Default Base:** NoActuationBase
102
+ - **Variants**: GR1FixedLowerBody, GR1FloatingBody, GR1ArmsOnly
103
+ * - **Spot**
104
+ - .. image:: ../images/models/robot_model_Spot_isaac.png
105
+ :width: 90%
106
+ :align: center
107
+ - - **DoF:** 19
108
+ - **Default Gripper:** BDGripper
109
+ - **Default Base:** Spot
110
+ - **Variants**: SpotWithArmFloating
111
+ * - **Tiago**
112
+ - .. image:: ../images/models/robot_model_Tiago_isaac.png
113
+ :width: 90%
114
+ :align: center
115
+ - - **DoF:** 20
116
+ - **Default Gripper:** Robotiq85Gripper
117
+ - **Default Base:** NullMobileBase
118
+
119
+ Grippers
120
+ --------
121
+
122
+ .. list-table::
123
+ :widths: 20 45 35
124
+ :header-rows: 1
125
+
126
+ * - Gripper
127
+ - Image
128
+ - Description
129
+ * - **BD Gripper**
130
+ - .. image:: ../images/models/bd_gripper.png
131
+ :width: 90%
132
+ :align: center
133
+ - - **DoF:** 1
134
+ * - **Inspire Hands**
135
+ - .. image:: ../images/models/inspire_hands.png
136
+ :width: 90%
137
+ :align: center
138
+ - - **DoF:** 6
139
+ * - **Jaco Three Finger Gripper**
140
+ - .. image:: ../images/models/jaco_gripper.png
141
+ :width: 90%
142
+ :align: center
143
+ - - **DoF:** 1 (3 for dexterous version)
144
+ * - **Panda Gripper**
145
+ - .. image:: ../images/models/panda_gripper.png
146
+ :width: 90%
147
+ :align: center
148
+ - - **DoF:** 1
149
+ * - **Rethink Gripper**
150
+ - .. image:: ../images/models/rethink_gripper.png
151
+ :width: 90%
152
+ :align: center
153
+ - - **DoF:** 1
154
+ * - **Robotiq 85 Gripper**
155
+ - .. image:: ../images/models/robotiq85_gripper.png
156
+ :width: 90%
157
+ :align: center
158
+ - - **DoF:** 1
159
+ * - **Robotiq 140 Gripper**
160
+ - .. image:: ../images/models/robotiq140_gripper.png
161
+ :width: 90%
162
+ :align: center
163
+ - - **DoF:** 1
164
+ * - **Robotiq Three Finger Gripper**
165
+ - .. image:: ../images/models/robotiq_three_gripper.png
166
+ :width: 90%
167
+ :align: center
168
+ - - **DoF:** 1
169
+ * - **Wiping Gripper**
170
+ - .. image:: ../images/models/wiping_gripper.png
171
+ :width: 90%
172
+ :align: center
173
+ - - **DoF:** 0
174
+
175
+ Bases
176
+ -----
177
+
178
+ .. list-table::
179
+ :widths: 20 45 35
180
+ :header-rows: 1
181
+
182
+ * - Base
183
+ - Image
184
+ - Description
185
+ * - **Rethink Mount**
186
+ - .. image:: ../images/models/rethink_base.png
187
+ :width: 90%
188
+ :align: center
189
+ - - **Type:** Fixed
190
+ * - **Rethink Minimal Mount**
191
+ - .. image:: ../images/models/rethink_minimal_base.png
192
+ :width: 90%
193
+ :align: center
194
+ - - **Type:** Fixed
195
+ * - **Omron Mobile Base**
196
+ - .. image:: ../images/models/omron_base.png
197
+ :width: 90%
198
+ :align: center
199
+ - - **Type:** Mobile
200
+ * - **Spot Base**
201
+ - .. image:: ../images/models/spot_base.png
202
+ :width: 90%
203
+ :align: center
204
+ - - **Type:** Legged
205
+
206
+ Create Your Own Robot
207
+ ----------------------
208
+
209
+ As of v1.5, users can create composite robots to match their specification. Specifically, arms, grippers, and bases can be swapped to create new robot configurations. We also provide several other robot models in an external repo. For more information, please refer to `here <https://github.com/ARISE-Initiative/robosuite_models>`_.
210
+
docs/modules/sensors.md ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Sensors
2
+
3
+ Sensors are an important aspect of **robosuite**, and encompass an agent's feedback from interaction with the environment. MuJoCo provides low-level APIs to directly interface with raw simulation data, though we provide a more realistic interface via the `Observable` class API to model obtained sensory information.
4
+
5
+ #### Mujoco-Native Sensors
6
+
7
+ The simulator generates virtual physical signals as response to a robot's interactions. Virtual signals include images, force-torque measurements (from a force-torque sensor like the one included by default in the wrist of all [Gripper models](../modeling/robot_model.html#gripper-model)), pressure signals (e.g. from a sensor on the robot's finger or on the environment), etc. Raw sensor information (except cameras and joint sensors) can be accessed via the function `get_sensor_measurement` provided the name of the sensor.
8
+
9
+ Joint sensors provide information about the state of each robot's joint including position and velocity. In MuJoCo these are not measured by sensors, but resolved and set by the simulator as the result of the actuation forces. Therefore, they are not accessed through the common `get_sensor_measurement` function but as properties of the [Robot simulation API](../simulation/robot), i.e., `_joint_positions` and `_joint_velocities`.
10
+
11
+ Cameras bundle a name to a set of properties to render images of the environment such as the pose and pointing direction, field of view, and resolution. Inheriting from MuJoCo, cameras are defined in the [robot](../modeling/robot_model) and [arena models](../modeling/arena) and can be attached to any body. Images, as they would be generated from the cameras, are not accessed through `get_sensor_measurement` but via the renderer (see below). In a common user pipeline, images are not queried directly; we specify one or several cameras we want to use images from when we create the environment, and the images are generated and appended automatically to the observation dictionary.
12
+
13
+ #### Observables
14
+
15
+ **robosuite** provides a realistic, customizable interface via the [Observable](../source/robosuite.utils.html#robosuite.utils.observables.Observable) class API. Observables model realistic sensor sampling, in which ground truth data is sampled (`sensor`), passed through a corrupting function (`corrupter`), and then finally passed through a filtering function (`filter`). Moreover, each observable has its own `sampling_rate` and `delayer` function which simulates sensor delay. While default values are used to instantiate each observable during environment creation, each of these components can be modified by the user at runtime using `env.modify_observable(...)` . Moreover, each observable is assigned a modality, and are grouped together in the returned observation dictionary during the `env.step()` call. For example, if an environment consists of camera observations (RGB, depth, and instance segmentation) and a single robot's proprioceptive observations, the observation dict structure might look as follows:
16
+
17
+ ```python
18
+ {
19
+ "frontview_image": np.array(...), # this has modality "image"
20
+ "frontview_depth": np.array(...), # this has modality "image"
21
+ "frontview_segmentation_instance": np.array(...), # this has modality "image"
22
+ "robot0_joint_pos": np.array(...), # this has modality "robot0_proprio"
23
+ "robot0_gripper_pos": np.array(...), # this has modality "robot0_proprio"
24
+ "image-state": np.array(...), # this is a concatenation of all image observations
25
+ "robot0_proprio-state": np.array(...), # this is a concatenation of all robot0_proprio observations
26
+ }
27
+ ```
28
+
29
+ For more information on the vision ground-truth sensors supported, please see the [Renderer](./renderers) section.
30
+
31
+ Note that for memory efficiency the `image-state` is not returned by default (this can be toggled in `robosuite/macros.py`).
32
+
33
+ Observables can also be used to model sensor corruption and delay; we refer the reader to the [Sensor Randomization](../algorithms/sim2real.html#sensors) section for additional information.
docs/source/robosuite.controllers.interpolators.rst ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.controllers.interpolators package
2
+ ===========================================
3
+
4
+ Submodules
5
+ ----------
6
+
7
+ robosuite.controllers.interpolators.base\_interpolator module
8
+ -------------------------------------------------------------
9
+
10
+ .. automodule:: robosuite.controllers.interpolators.base_interpolator
11
+ :members:
12
+ :undoc-members:
13
+ :show-inheritance:
14
+
15
+ robosuite.controllers.interpolators.linear\_interpolator module
16
+ ---------------------------------------------------------------
17
+
18
+ .. automodule:: robosuite.controllers.interpolators.linear_interpolator
19
+ :members:
20
+ :undoc-members:
21
+ :show-inheritance:
22
+
23
+ Module contents
24
+ ---------------
25
+
26
+ .. automodule:: robosuite.controllers.interpolators
27
+ :members:
28
+ :undoc-members:
29
+ :show-inheritance:
docs/source/robosuite.controllers.parts.gripper.rst ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.controllers.parts.gripper package
2
+ ===========================================
3
+
4
+ Submodules
5
+ ----------
6
+
7
+ robosuite.controllers.parts.gripper.gripper\_controller module
8
+ --------------------------------------------------------------
9
+
10
+ .. automodule:: robosuite.controllers.parts.gripper.gripper_controller
11
+ :members:
12
+ :undoc-members:
13
+ :show-inheritance:
14
+
15
+ robosuite.controllers.parts.gripper.simple\_grip module
16
+ -------------------------------------------------------
17
+
18
+ .. automodule:: robosuite.controllers.parts.gripper.simple_grip
19
+ :members:
20
+ :undoc-members:
21
+ :show-inheritance:
22
+
23
+ Module contents
24
+ ---------------
25
+
26
+ .. automodule:: robosuite.controllers.parts.gripper
27
+ :members:
28
+ :undoc-members:
29
+ :show-inheritance:
docs/source/robosuite.controllers.parts.rst ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.controllers.parts package
2
+ ===================================
3
+
4
+ Subpackages
5
+ -----------
6
+
7
+ .. toctree::
8
+ :maxdepth: 4
9
+
10
+ robosuite.controllers.parts.arm
11
+ robosuite.controllers.parts.generic
12
+ robosuite.controllers.parts.gripper
13
+ robosuite.controllers.parts.mobile_base
14
+
15
+ Submodules
16
+ ----------
17
+
18
+ robosuite.controllers.parts.controller module
19
+ ---------------------------------------------
20
+
21
+ .. automodule:: robosuite.controllers.parts.controller
22
+ :members:
23
+ :undoc-members:
24
+ :show-inheritance:
25
+
26
+ robosuite.controllers.parts.controller\_factory module
27
+ ------------------------------------------------------
28
+
29
+ .. automodule:: robosuite.controllers.parts.controller_factory
30
+ :members:
31
+ :undoc-members:
32
+ :show-inheritance:
33
+
34
+ Module contents
35
+ ---------------
36
+
37
+ .. automodule:: robosuite.controllers.parts
38
+ :members:
39
+ :undoc-members:
40
+ :show-inheritance:
docs/source/robosuite.controllers.rst ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.controllers package
2
+ =============================
3
+
4
+ Subpackages
5
+ -----------
6
+
7
+ .. toctree::
8
+ :maxdepth: 4
9
+
10
+ robosuite.controllers.composite
11
+ robosuite.controllers.parts
12
+
13
+ Module contents
14
+ ---------------
15
+
16
+ .. automodule:: robosuite.controllers
17
+ :members:
18
+ :undoc-members:
19
+ :show-inheritance:
docs/source/robosuite.environments.rst ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.environments package
2
+ ==============================
3
+
4
+ Subpackages
5
+ -----------
6
+
7
+ .. toctree::
8
+ :maxdepth: 4
9
+
10
+ robosuite.environments.manipulation
11
+
12
+ Submodules
13
+ ----------
14
+
15
+ robosuite.environments.base module
16
+ ----------------------------------
17
+
18
+ .. automodule:: robosuite.environments.base
19
+ :members:
20
+ :undoc-members:
21
+ :show-inheritance:
22
+
23
+ robosuite.environments.robot\_env module
24
+ ----------------------------------------
25
+
26
+ .. automodule:: robosuite.environments.robot_env
27
+ :members:
28
+ :undoc-members:
29
+ :show-inheritance:
30
+
31
+ Module contents
32
+ ---------------
33
+
34
+ .. automodule:: robosuite.environments
35
+ :members:
36
+ :undoc-members:
37
+ :show-inheritance:
docs/source/robosuite.models.arenas.rst ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.models.arenas package
2
+ ===============================
3
+
4
+ Submodules
5
+ ----------
6
+
7
+ robosuite.models.arenas.arena module
8
+ ------------------------------------
9
+
10
+ .. automodule:: robosuite.models.arenas.arena
11
+ :members:
12
+ :undoc-members:
13
+ :show-inheritance:
14
+
15
+ robosuite.models.arenas.bins\_arena module
16
+ ------------------------------------------
17
+
18
+ .. automodule:: robosuite.models.arenas.bins_arena
19
+ :members:
20
+ :undoc-members:
21
+ :show-inheritance:
22
+
23
+ robosuite.models.arenas.empty\_arena module
24
+ -------------------------------------------
25
+
26
+ .. automodule:: robosuite.models.arenas.empty_arena
27
+ :members:
28
+ :undoc-members:
29
+ :show-inheritance:
30
+
31
+ robosuite.models.arenas.multi\_table\_arena module
32
+ --------------------------------------------------
33
+
34
+ .. automodule:: robosuite.models.arenas.multi_table_arena
35
+ :members:
36
+ :undoc-members:
37
+ :show-inheritance:
38
+
39
+ robosuite.models.arenas.pegs\_arena module
40
+ ------------------------------------------
41
+
42
+ .. automodule:: robosuite.models.arenas.pegs_arena
43
+ :members:
44
+ :undoc-members:
45
+ :show-inheritance:
46
+
47
+ robosuite.models.arenas.table\_arena module
48
+ -------------------------------------------
49
+
50
+ .. automodule:: robosuite.models.arenas.table_arena
51
+ :members:
52
+ :undoc-members:
53
+ :show-inheritance:
54
+
55
+ robosuite.models.arenas.wipe\_arena module
56
+ ------------------------------------------
57
+
58
+ .. automodule:: robosuite.models.arenas.wipe_arena
59
+ :members:
60
+ :undoc-members:
61
+ :show-inheritance:
62
+
63
+ Module contents
64
+ ---------------
65
+
66
+ .. automodule:: robosuite.models.arenas
67
+ :members:
68
+ :undoc-members:
69
+ :show-inheritance:
docs/source/robosuite.models.bases.rst ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.models.bases package
2
+ ==============================
3
+
4
+ Submodules
5
+ ----------
6
+
7
+ robosuite.models.bases.floating\_legged\_base module
8
+ ----------------------------------------------------
9
+
10
+ .. automodule:: robosuite.models.bases.floating_legged_base
11
+ :members:
12
+ :undoc-members:
13
+ :show-inheritance:
14
+
15
+ robosuite.models.bases.leg\_base\_model module
16
+ ----------------------------------------------
17
+
18
+ .. automodule:: robosuite.models.bases.leg_base_model
19
+ :members:
20
+ :undoc-members:
21
+ :show-inheritance:
22
+
23
+ robosuite.models.bases.mobile\_base\_model module
24
+ -------------------------------------------------
25
+
26
+ .. automodule:: robosuite.models.bases.mobile_base_model
27
+ :members:
28
+ :undoc-members:
29
+ :show-inheritance:
30
+
31
+ robosuite.models.bases.mount\_model module
32
+ ------------------------------------------
33
+
34
+ .. automodule:: robosuite.models.bases.mount_model
35
+ :members:
36
+ :undoc-members:
37
+ :show-inheritance:
38
+
39
+ robosuite.models.bases.no\_actuation\_base module
40
+ -------------------------------------------------
41
+
42
+ .. automodule:: robosuite.models.bases.no_actuation_base
43
+ :members:
44
+ :undoc-members:
45
+ :show-inheritance:
46
+
47
+ robosuite.models.bases.null\_mobile\_base module
48
+ ------------------------------------------------
49
+
50
+ .. automodule:: robosuite.models.bases.null_mobile_base
51
+ :members:
52
+ :undoc-members:
53
+ :show-inheritance:
54
+
55
+ robosuite.models.bases.null\_mount module
56
+ -----------------------------------------
57
+
58
+ .. automodule:: robosuite.models.bases.null_mount
59
+ :members:
60
+ :undoc-members:
61
+ :show-inheritance:
62
+
63
+ robosuite.models.bases.omron\_mobile\_base module
64
+ -------------------------------------------------
65
+
66
+ .. automodule:: robosuite.models.bases.omron_mobile_base
67
+ :members:
68
+ :undoc-members:
69
+ :show-inheritance:
70
+
71
+ robosuite.models.bases.rethink\_minimal\_mount module
72
+ -----------------------------------------------------
73
+
74
+ .. automodule:: robosuite.models.bases.rethink_minimal_mount
75
+ :members:
76
+ :undoc-members:
77
+ :show-inheritance:
78
+
79
+ robosuite.models.bases.rethink\_mount module
80
+ --------------------------------------------
81
+
82
+ .. automodule:: robosuite.models.bases.rethink_mount
83
+ :members:
84
+ :undoc-members:
85
+ :show-inheritance:
86
+
87
+ robosuite.models.bases.robot\_base\_factory module
88
+ --------------------------------------------------
89
+
90
+ .. automodule:: robosuite.models.bases.robot_base_factory
91
+ :members:
92
+ :undoc-members:
93
+ :show-inheritance:
94
+
95
+ robosuite.models.bases.robot\_base\_model module
96
+ ------------------------------------------------
97
+
98
+ .. automodule:: robosuite.models.bases.robot_base_model
99
+ :members:
100
+ :undoc-members:
101
+ :show-inheritance:
102
+
103
+ robosuite.models.bases.spot\_base module
104
+ ----------------------------------------
105
+
106
+ .. automodule:: robosuite.models.bases.spot_base
107
+ :members:
108
+ :undoc-members:
109
+ :show-inheritance:
110
+
111
+ Module contents
112
+ ---------------
113
+
114
+ .. automodule:: robosuite.models.bases
115
+ :members:
116
+ :undoc-members:
117
+ :show-inheritance:
docs/source/robosuite.models.objects.composite_body.rst ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.models.objects.composite\_body package
2
+ ================================================
3
+
4
+ Submodules
5
+ ----------
6
+
7
+ robosuite.models.objects.composite\_body.hinged\_box module
8
+ -----------------------------------------------------------
9
+
10
+ .. automodule:: robosuite.models.objects.composite_body.hinged_box
11
+ :members:
12
+ :undoc-members:
13
+ :show-inheritance:
14
+
15
+ robosuite.models.objects.composite\_body.ratcheting\_wrench module
16
+ ------------------------------------------------------------------
17
+
18
+ .. automodule:: robosuite.models.objects.composite_body.ratcheting_wrench
19
+ :members:
20
+ :undoc-members:
21
+ :show-inheritance:
22
+
23
+ Module contents
24
+ ---------------
25
+
26
+ .. automodule:: robosuite.models.objects.composite_body
27
+ :members:
28
+ :undoc-members:
29
+ :show-inheritance:
docs/source/robosuite.models.objects.primitive.rst ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.models.objects.primitive package
2
+ ==========================================
3
+
4
+ Submodules
5
+ ----------
6
+
7
+ robosuite.models.objects.primitive.ball module
8
+ ----------------------------------------------
9
+
10
+ .. automodule:: robosuite.models.objects.primitive.ball
11
+ :members:
12
+ :undoc-members:
13
+ :show-inheritance:
14
+
15
+ robosuite.models.objects.primitive.box module
16
+ ---------------------------------------------
17
+
18
+ .. automodule:: robosuite.models.objects.primitive.box
19
+ :members:
20
+ :undoc-members:
21
+ :show-inheritance:
22
+
23
+ robosuite.models.objects.primitive.capsule module
24
+ -------------------------------------------------
25
+
26
+ .. automodule:: robosuite.models.objects.primitive.capsule
27
+ :members:
28
+ :undoc-members:
29
+ :show-inheritance:
30
+
31
+ robosuite.models.objects.primitive.cylinder module
32
+ --------------------------------------------------
33
+
34
+ .. automodule:: robosuite.models.objects.primitive.cylinder
35
+ :members:
36
+ :undoc-members:
37
+ :show-inheritance:
38
+
39
+ Module contents
40
+ ---------------
41
+
42
+ .. automodule:: robosuite.models.objects.primitive
43
+ :members:
44
+ :undoc-members:
45
+ :show-inheritance:
docs/source/robosuite.models.objects.rst ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.models.objects package
2
+ ================================
3
+
4
+ Subpackages
5
+ -----------
6
+
7
+ .. toctree::
8
+ :maxdepth: 4
9
+
10
+ robosuite.models.objects.composite
11
+ robosuite.models.objects.composite_body
12
+ robosuite.models.objects.group
13
+ robosuite.models.objects.primitive
14
+
15
+ Submodules
16
+ ----------
17
+
18
+ robosuite.models.objects.generated\_objects module
19
+ --------------------------------------------------
20
+
21
+ .. automodule:: robosuite.models.objects.generated_objects
22
+ :members:
23
+ :undoc-members:
24
+ :show-inheritance:
25
+
26
+ robosuite.models.objects.object\_groups module
27
+ ----------------------------------------------
28
+
29
+ .. automodule:: robosuite.models.objects.object_groups
30
+ :members:
31
+ :undoc-members:
32
+ :show-inheritance:
33
+
34
+ robosuite.models.objects.objects module
35
+ ---------------------------------------
36
+
37
+ .. automodule:: robosuite.models.objects.objects
38
+ :members:
39
+ :undoc-members:
40
+ :show-inheritance:
41
+
42
+ robosuite.models.objects.xml\_objects module
43
+ --------------------------------------------
44
+
45
+ .. automodule:: robosuite.models.objects.xml_objects
46
+ :members:
47
+ :undoc-members:
48
+ :show-inheritance:
49
+
50
+ Module contents
51
+ ---------------
52
+
53
+ .. automodule:: robosuite.models.objects
54
+ :members:
55
+ :undoc-members:
56
+ :show-inheritance:
docs/source/robosuite.models.rst ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.models package
2
+ ========================
3
+
4
+ Subpackages
5
+ -----------
6
+
7
+ .. toctree::
8
+ :maxdepth: 4
9
+
10
+ robosuite.models.arenas
11
+ robosuite.models.bases
12
+ robosuite.models.grippers
13
+ robosuite.models.objects
14
+ robosuite.models.robots
15
+ robosuite.models.tasks
16
+
17
+ Submodules
18
+ ----------
19
+
20
+ robosuite.models.base module
21
+ ----------------------------
22
+
23
+ .. automodule:: robosuite.models.base
24
+ :members:
25
+ :undoc-members:
26
+ :show-inheritance:
27
+
28
+ robosuite.models.world module
29
+ -----------------------------
30
+
31
+ .. automodule:: robosuite.models.world
32
+ :members:
33
+ :undoc-members:
34
+ :show-inheritance:
35
+
36
+ Module contents
37
+ ---------------
38
+
39
+ .. automodule:: robosuite.models
40
+ :members:
41
+ :undoc-members:
42
+ :show-inheritance:
docs/source/robosuite.wrappers.rst ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ robosuite.wrappers package
2
+ ==========================
3
+
4
+ Submodules
5
+ ----------
6
+
7
+ robosuite.wrappers.data\_collection\_wrapper module
8
+ ---------------------------------------------------
9
+
10
+ .. automodule:: robosuite.wrappers.data_collection_wrapper
11
+ :members:
12
+ :undoc-members:
13
+ :show-inheritance:
14
+
15
+ robosuite.wrappers.demo\_sampler\_wrapper module
16
+ ------------------------------------------------
17
+
18
+ .. automodule:: robosuite.wrappers.demo_sampler_wrapper
19
+ :members:
20
+ :undoc-members:
21
+ :show-inheritance:
22
+
23
+ robosuite.wrappers.domain\_randomization\_wrapper module
24
+ --------------------------------------------------------
25
+
26
+ .. automodule:: robosuite.wrappers.domain_randomization_wrapper
27
+ :members:
28
+ :undoc-members:
29
+ :show-inheritance:
30
+
31
+ robosuite.wrappers.gym\_wrapper module
32
+ --------------------------------------
33
+
34
+ .. automodule:: robosuite.wrappers.gym_wrapper
35
+ :members:
36
+ :undoc-members:
37
+ :show-inheritance:
38
+
39
+ robosuite.wrappers.visualization\_wrapper module
40
+ ------------------------------------------------
41
+
42
+ .. automodule:: robosuite.wrappers.visualization_wrapper
43
+ :members:
44
+ :undoc-members:
45
+ :show-inheritance:
46
+
47
+ robosuite.wrappers.wrapper module
48
+ ---------------------------------
49
+
50
+ .. automodule:: robosuite.wrappers.wrapper
51
+ :members:
52
+ :undoc-members:
53
+ :show-inheritance:
54
+
55
+ Module contents
56
+ ---------------
57
+
58
+ .. automodule:: robosuite.wrappers
59
+ :members:
60
+ :undoc-members:
61
+ :show-inheritance:
docs/tutorials/add_controller.md ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ## Adding Third Party Controllers
3
+
4
+ To use a third-party controller with robosuite, you'll need to:
5
+ 1. Create a new class that subclasses one of the composite controllers in `robosuite/controllers/composite/composite_controller.py`.
6
+ 2. Register the composite controller with the decorator `@register_composite_controller`.
7
+ 3. Implement composite-specific functionality that ultimately provides control input to the underlying `part_controller`s.
8
+ 4. Import the new class so that it gets added to robosuite's `REGISTERED_COMPOSITE_CONTROLLERS_DICT` via the `@register_composite_controller` decorator.
9
+ 5. Provide controller specific configs and the new controller's `type` in a json file.
10
+
11
+ For the new composite controllers subclassing `WholeBody`, you'll mainly need to update `joint_action_policy`.
12
+
13
+ We provide an example of how to use a third-party `WholeBodyMinkIK` composite controller with robosuite, in the `robosuite/examples/third_party_controller/` directory. You can run the `python teleop_mink.py` example script to see a third-party controller in action. Note: to run this specific example, you'll need to `pip install mink`.
14
+
15
+
16
+ Steps 1 and 2:
17
+
18
+ In `robosuite/examples/third_party_controller/mink_controller.py`:
19
+
20
+ ```
21
+ @register_composite_controller
22
+ class WholeBodyMinkIK(WholeBody):
23
+ name = "WHOLE_BODY_MINK_IK"
24
+ ```
25
+
26
+ Step 3:
27
+
28
+ In `robosuite/examples/third_party_controller/mink_controller.py`, add logic specific to the new composite controller:
29
+
30
+ ```
31
+ self.joint_action_policy = IKSolverMink(...)
32
+ ```
33
+
34
+ Step 4:
35
+
36
+ In `teleop_mink.py`, we import:
37
+
38
+ ```
39
+ from robosuite.examples.third_party_controller.mink_controller import WholeBodyMinkIK
40
+ ```
41
+
42
+ Step 5:
43
+
44
+ In `robosuite/examples/third_party_controller/default_mink_ik_gr1.json`, we add configs specific to our new composite controller, and also set the `type` to
45
+ match the `name` specified in `WholeBodyMinkIK`:
46
+
47
+ ```
48
+ {
49
+ "type": "WHOLE_BODY_MINK_IK", # set the correct type
50
+ "composite_controller_specific_configs": {
51
+ ...
52
+ },
53
+ ...
54
+ }
55
+ ```
docs/tutorials/add_environment.md ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Building Your Own Environments
2
+
3
+ **robosuite** offers great flexibility in creating your own environments. A [task](../modeling/task) typically involves the participation of a [robot](../modeling/robot_model) with [grippers](../modeling/robot_model.html#gripper-model) as its end-effectors, an [arena](../modeling/arena) (workspace), and [objects](../modeling/object_model) that the robot interacts with. For a detailed overview of our design architecture, please check out the [Overview](../modules/overview) page in Modules. Our Modeling APIs provide methods of composing these modularized elements into a scene, which can be loaded in MuJoCo for simulation. To build your own environments, we recommend you take a look at the [Environment classes](../simulation/environment) which have used these APIs to define robotics environments and tasks and the [source code](https://github.com/ARISE-Initiative/robosuite/tree/master/robosuite/environments) of our standardized environments. Below we walk through a step-by-step example of building a new tabletop manipulation environment with our APIs.
4
+
5
+ **Step 1: Creating the world.** All mujoco object definitions are housed in an xml. We create a [MujocoWorldBase](../source/robosuite.models) class to do it.
6
+ ```python
7
+ from robosuite.models import MujocoWorldBase
8
+
9
+ world = MujocoWorldBase()
10
+ ```
11
+
12
+ **Step 2: Creating the robot.** The class housing the xml of a robot can be created as follows.
13
+ ```python
14
+ from robosuite.models.robots import Panda
15
+
16
+ mujoco_robot = Panda()
17
+ ```
18
+ We can add a gripper to the robot by creating a gripper instance and calling the add_gripper method on a robot.
19
+ ```python
20
+ from robosuite.models.grippers import gripper_factory
21
+
22
+ gripper = gripper_factory('PandaGripper')
23
+ mujoco_robot.add_gripper(gripper)
24
+ ```
25
+ To add the robot to the world, we place the robot at a desired position and merge it into the world
26
+ ```python
27
+ mujoco_robot.set_base_xpos([0, 0, 0])
28
+ world.merge(mujoco_robot)
29
+ ```
30
+
31
+ **Step 3: Creating the table.** We can initialize the [TableArena](../source/robosuite.models.arenas) instance that creates a table and the floorplane
32
+ ```python
33
+ from robosuite.models.arenas import TableArena
34
+
35
+ mujoco_arena = TableArena()
36
+ mujoco_arena.set_origin([0.8, 0, 0])
37
+ world.merge(mujoco_arena)
38
+ ```
39
+
40
+ **Step 4: Adding the object.** For details of `MujocoObject`, refer to the documentation about [MujocoObject](../modeling/object_model), we can create a ball and add it to the world.
41
+ ```python
42
+ from robosuite.models.objects import BallObject
43
+ from robosuite.utils.mjcf_utils import new_joint
44
+
45
+ sphere = BallObject(
46
+ name="sphere",
47
+ size=[0.04],
48
+ rgba=[0, 0.5, 0.5, 1]).get_obj()
49
+ sphere.set('pos', '1.0 0 1.0')
50
+ world.worldbody.append(sphere)
51
+ ```
52
+
53
+ **Step 5: Running Simulation.** Once we have created the object, we can obtain a `mujoco.MjModel` model by running
54
+ ```python
55
+ model = world.get_model(mode="mujoco")
56
+ ```
57
+ This is an `MjModel` instance that can then be used for simulation. For example,
58
+ ```python
59
+ import mujoco
60
+
61
+ data = mujoco.MjData(model)
62
+ while data.time < 1:
63
+ mujoco.mj_step(model, data)
64
+ ```
65
+
robosuite.egg-info/PKG-INFO ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.4
2
+ Name: robosuite
3
+ Version: 1.5.2
4
+ Summary: robosuite: A Modular Simulation Framework and Benchmark for Robot Learning
5
+ Home-page: https://github.com/ARISE-Initiative/robosuite
6
+ Author: Yuke Zhu
7
+ Author-email: yukez@cs.utexas.edu
8
+ Requires-Python: >=3
9
+ Description-Content-Type: text/markdown
10
+ License-File: LICENSE
11
+ License-File: AUTHORS
12
+ Requires-Dist: numpy>=1.13.3
13
+ Requires-Dist: numba>=0.49.1
14
+ Requires-Dist: scipy>=1.2.3
15
+ Requires-Dist: mujoco>=3.3.0
16
+ Requires-Dist: qpsolvers[quadprog]>=4.3.1
17
+ Requires-Dist: Pillow
18
+ Requires-Dist: opencv-python
19
+ Requires-Dist: pynput
20
+ Requires-Dist: termcolor
21
+ Requires-Dist: pytest
22
+ Requires-Dist: tqdm
23
+ Provides-Extra: mink
24
+ Requires-Dist: mink==0.0.5; extra == "mink"
25
+ Dynamic: author
26
+ Dynamic: author-email
27
+ Dynamic: description
28
+ Dynamic: description-content-type
29
+ Dynamic: home-page
30
+ Dynamic: license-file
31
+ Dynamic: provides-extra
32
+ Dynamic: requires-dist
33
+ Dynamic: requires-python
34
+ Dynamic: summary
35
+
36
+ # robosuite
37
+
38
+
39
+ [**[Homepage]**](https://robosuite.ai/) &ensp; [**[White Paper]**](https://arxiv.org/abs/2009.12293) &ensp; [**[Documentations]**](https://robosuite.ai/docs/overview.html) &ensp; [**[ARISE Initiative]**](https://github.com/ARISE-Initiative)
40
+
41
+ -------
42
+ ## Latest Updates
43
+
44
+ - [10/28/2024] **v1.5**: Added support for diverse robot embodiments (including humanoids), custom robot composition, composite controllers (including whole body controllers), more teleoperation devices, and photo-realistic rendering. [[release notes]](https://github.com/ARISE-Initiative/robosuite/releases/tag/v1.5.0) [[documentation]](http://robosuite.ai/docs/overview.html)
45
+
46
+ - [11/15/2022] **v1.4**: Backend migration to DeepMind's official [MuJoCo Python binding](https://github.com/deepmind/mujoco), robot textures, and bug fixes :robot: [[release notes]](https://github.com/ARISE-Initiative/robosuite/releases/tag/v1.4.0) [[documentation]](http://robosuite.ai/docs/v1.4/)
47
+
48
+ - [10/19/2021] **v1.3**: Ray tracing and physically based rendering tools :sparkles: and access to additional vision modalities 🎥 [[video spotlight]](https://www.youtube.com/watch?v=2xesly6JrQ8) [[release notes]](https://github.com/ARISE-Initiative/robosuite/releases/tag/v1.3) [[documentation]](http://robosuite.ai/docs/v1.3/)
49
+
50
+ - [02/17/2021] **v1.2**: Added observable sensor models :eyes: and dynamics randomization :game_die: [[release notes]](https://github.com/ARISE-Initiative/robosuite/releases/tag/v1.2)
51
+
52
+ - [12/17/2020] **v1.1**: Refactored infrastructure and standardized model classes for much easier environment prototyping :wrench: [[release notes]](https://github.com/ARISE-Initiative/robosuite/releases/tag/v1.1)
53
+
54
+ -------
55
+
56
+ **robosuite** is a simulation framework powered by the [MuJoCo](http://mujoco.org/) physics engine for robot learning. It also offers a suite of benchmark environments for reproducible research. The current release (v1.5) features support for diverse robot embodiments (including humanoids), custom robot composition, composite controllers (including whole body controllers), more teleoperation devices, and photo-realistic rendering. This project is part of the broader [Advancing Robot Intelligence through Simulated Environments (ARISE) Initiative](https://github.com/ARISE-Initiative), with the aim of lowering the barriers of entry for cutting-edge research at the intersection of AI and Robotics.
57
+
58
+ Data-driven algorithms, such as reinforcement learning and imitation learning, provide a powerful and generic tool in robotics. These learning paradigms, fueled by new advances in deep learning, have achieved some exciting successes in a variety of robot control problems. However, the challenges of reproducibility and the limited accessibility of robot hardware (especially during a pandemic) have impaired research progress. The overarching goal of **robosuite** is to provide researchers with:
59
+
60
+ * a standardized set of benchmarking tasks for rigorous evaluation and algorithm development;
61
+ * a modular design that offers great flexibility in designing new robot simulation environments;
62
+ * a high-quality implementation of robot controllers and off-the-shelf learning algorithms to lower the barriers to entry.
63
+
64
+ This framework was originally developed in late 2017 by researchers in [Stanford Vision and Learning Lab](http://svl.stanford.edu) (SVL) as an internal tool for robot learning research. Now, it is actively maintained and used for robotics research projects in SVL, the [UT Robot Perception and Learning Lab](http://rpl.cs.utexas.edu) (RPL) and NVIDIA [Generalist Embodied Agent Research Group](https://research.nvidia.com/labs/gear/) (GEAR). We welcome community contributions to this project. For details, please check out our [contributing guidelines](CONTRIBUTING.md).
65
+
66
+ **Robosuite** offers a modular design of APIs for building new environments, robot embodiments, and robot controllers with procedural generation. We highlight these primary features below:
67
+
68
+ * **standardized tasks**: a set of standardized manipulation tasks of large diversity and varying complexity and RL benchmarking results for reproducible research;
69
+ * **procedural generation**: modular APIs for programmatically creating new environments and new tasks as combinations of robot models, arenas, and parameterized 3D objects. Check out our repo [robosuite_models](https://github.com/ARISE-Initiative/robosuite_models) for extra robot models tailored to robosuite.
70
+ * **robot controllers**: a selection of controller types to command the robots, such as joint-space velocity control, inverse kinematics control, operational space control, and whole body control;
71
+ * **teleoperation devices**: a selection of teleoperation devices including keyboard, spacemouse and MuJoCo viewer drag-drop;
72
+ * **multi-modal sensors**: heterogeneous types of sensory signals, including low-level physical states, RGB cameras, depth maps, and proprioception;
73
+ * **human demonstrations**: utilities for collecting human demonstrations, replaying demonstration datasets, and leveraging demonstration data for learning. Check out our sister project [robomimic](https://arise-initiative.github.io/robomimic-web/);
74
+ * **photorealistic rendering**: integration with advanced graphics tools that provide real-time photorealistic renderings of simulated scenes, including support for NVIDIA Isaac Sim rendering.
75
+
76
+ ## Citation
77
+ Please cite [**robosuite**](https://robosuite.ai) if you use this framework in your publications:
78
+ ```bibtex
79
+ @inproceedings{robosuite2020,
80
+ title={robosuite: A Modular Simulation Framework and Benchmark for Robot Learning},
81
+ author={Yuke Zhu and Josiah Wong and Ajay Mandlekar and Roberto Mart\'{i}n-Mart\'{i}n and Abhishek Joshi and Soroush Nasiriany and Yifeng Zhu and Kevin Lin},
82
+ booktitle={arXiv preprint arXiv:2009.12293},
83
+ year={2020}
84
+ }
85
+ ```
robosuite.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,1183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ AUTHORS
2
+ LICENSE
3
+ MANIFEST.in
4
+ README.md
5
+ pyproject.toml
6
+ setup.py
7
+ robosuite/__init__.py
8
+ robosuite/macros.py
9
+ robosuite.egg-info/PKG-INFO
10
+ robosuite.egg-info/SOURCES.txt
11
+ robosuite.egg-info/dependency_links.txt
12
+ robosuite.egg-info/eager_resources.txt
13
+ robosuite.egg-info/requires.txt
14
+ robosuite.egg-info/top_level.txt
15
+ robosuite/controllers/__init__.py
16
+ robosuite/controllers/composite/__init__.py
17
+ robosuite/controllers/composite/composite_controller.py
18
+ robosuite/controllers/composite/composite_controller_factory.py
19
+ robosuite/controllers/config/default/composite/basic.json
20
+ robosuite/controllers/config/default/composite/hybrid_mobile_base.json
21
+ robosuite/controllers/config/default/composite/whole_body_ik.json
22
+ robosuite/controllers/config/default/composite/whole_body_mink_ik.json
23
+ robosuite/controllers/config/default/parts/ik_pose.json
24
+ robosuite/controllers/config/default/parts/joint_position.json
25
+ robosuite/controllers/config/default/parts/joint_torque.json
26
+ robosuite/controllers/config/default/parts/joint_velocity.json
27
+ robosuite/controllers/config/default/parts/osc_pose.json
28
+ robosuite/controllers/config/default/parts/osc_position.json
29
+ robosuite/controllers/config/robots/default_baxter.json
30
+ robosuite/controllers/config/robots/default_gr1.json
31
+ robosuite/controllers/config/robots/default_gr1_fixed_lower_body.json
32
+ robosuite/controllers/config/robots/default_gr1_floating_body.json
33
+ robosuite/controllers/config/robots/default_iiwa.json
34
+ robosuite/controllers/config/robots/default_kinova3.json
35
+ robosuite/controllers/config/robots/default_panda.json
36
+ robosuite/controllers/config/robots/default_panda_dex.json
37
+ robosuite/controllers/config/robots/default_pandaomron.json
38
+ robosuite/controllers/config/robots/default_pandaomron_whole_body_ik.json
39
+ robosuite/controllers/config/robots/default_sawyer.json
40
+ robosuite/controllers/config/robots/default_spotwitharm.json
41
+ robosuite/controllers/config/robots/default_tiago.json
42
+ robosuite/controllers/config/robots/default_tiago_whole_body_ik.json
43
+ robosuite/controllers/config/robots/default_ur5e.json
44
+ robosuite/controllers/parts/__init__.py
45
+ robosuite/controllers/parts/controller.py
46
+ robosuite/controllers/parts/controller_factory.py
47
+ robosuite/controllers/parts/arm/__init__.py
48
+ robosuite/controllers/parts/arm/ik.py
49
+ robosuite/controllers/parts/arm/osc.py
50
+ robosuite/controllers/parts/generic/__init__.py
51
+ robosuite/controllers/parts/generic/joint_pos.py
52
+ robosuite/controllers/parts/generic/joint_tor.py
53
+ robosuite/controllers/parts/generic/joint_vel.py
54
+ robosuite/controllers/parts/gripper/__init__.py
55
+ robosuite/controllers/parts/gripper/gripper_controller.py
56
+ robosuite/controllers/parts/gripper/simple_grip.py
57
+ robosuite/controllers/parts/mobile_base/__init__.py
58
+ robosuite/controllers/parts/mobile_base/joint_vel.py
59
+ robosuite/controllers/parts/mobile_base/mobile_base_controller.py
60
+ robosuite/demos/demo_collect_and_playback_data.py
61
+ robosuite/demos/demo_composite_robot.py
62
+ robosuite/demos/demo_control.py
63
+ robosuite/demos/demo_device_control.py
64
+ robosuite/demos/demo_domain_randomization.py
65
+ robosuite/demos/demo_gripper_interaction.py
66
+ robosuite/demos/demo_gripper_selection.py
67
+ robosuite/demos/demo_gym_functionality.py
68
+ robosuite/demos/demo_multi_camera.py
69
+ robosuite/demos/demo_random_action.py
70
+ robosuite/demos/demo_renderers.py
71
+ robosuite/demos/demo_segmentation.py
72
+ robosuite/demos/demo_sensor_corruption.py
73
+ robosuite/demos/demo_usd_export.py
74
+ robosuite/demos/demo_video_recording.py
75
+ robosuite/devices/__init__.py
76
+ robosuite/devices/device.py
77
+ robosuite/devices/dualsense.py
78
+ robosuite/devices/keyboard.py
79
+ robosuite/devices/mjgui.py
80
+ robosuite/devices/spacemouse.py
81
+ robosuite/environments/__init__.py
82
+ robosuite/environments/base.py
83
+ robosuite/environments/robot_env.py
84
+ robosuite/environments/manipulation/__init__.py
85
+ robosuite/environments/manipulation/door.py
86
+ robosuite/environments/manipulation/lift.py
87
+ robosuite/environments/manipulation/manipulation_env.py
88
+ robosuite/environments/manipulation/nut_assembly.py
89
+ robosuite/environments/manipulation/pick_place.py
90
+ robosuite/environments/manipulation/stack.py
91
+ robosuite/environments/manipulation/tool_hang.py
92
+ robosuite/environments/manipulation/two_arm_env.py
93
+ robosuite/environments/manipulation/two_arm_handover.py
94
+ robosuite/environments/manipulation/two_arm_lift.py
95
+ robosuite/environments/manipulation/two_arm_peg_in_hole.py
96
+ robosuite/environments/manipulation/two_arm_transport.py
97
+ robosuite/environments/manipulation/wipe.py
98
+ robosuite/models/__init__.py
99
+ robosuite/models/base.py
100
+ robosuite/models/world.py
101
+ robosuite/models/arenas/__init__.py
102
+ robosuite/models/arenas/arena.py
103
+ robosuite/models/arenas/bins_arena.py
104
+ robosuite/models/arenas/empty_arena.py
105
+ robosuite/models/arenas/multi_table_arena.py
106
+ robosuite/models/arenas/pegs_arena.py
107
+ robosuite/models/arenas/table_arena.py
108
+ robosuite/models/arenas/wipe_arena.py
109
+ robosuite/models/assets/base.xml
110
+ robosuite/models/assets/arenas/bins_arena.xml
111
+ robosuite/models/assets/arenas/empty_arena.xml
112
+ robosuite/models/assets/arenas/multi_table_arena.xml
113
+ robosuite/models/assets/arenas/pegs_arena.xml
114
+ robosuite/models/assets/arenas/table_arena.xml
115
+ robosuite/models/assets/bases/floating_legged_base.xml
116
+ robosuite/models/assets/bases/no_actuation_base.xml
117
+ robosuite/models/assets/bases/null_base.xml
118
+ robosuite/models/assets/bases/null_mobile_base.xml
119
+ robosuite/models/assets/bases/null_mount.xml
120
+ robosuite/models/assets/bases/omron_mobile_base.xml
121
+ robosuite/models/assets/bases/rethink_minimal_mount.xml
122
+ robosuite/models/assets/bases/rethink_mount.xml
123
+ robosuite/models/assets/bases/meshes/omron_mobile_base/omron_0.obj
124
+ robosuite/models/assets/bases/meshes/omron_mobile_base/omron_1.obj
125
+ robosuite/models/assets/bases/meshes/omron_mobile_base/omron_2.obj
126
+ robosuite/models/assets/bases/meshes/omron_mobile_base/omron_3.obj
127
+ robosuite/models/assets/bases/meshes/omron_mobile_base/omron_4.obj
128
+ robosuite/models/assets/bases/meshes/omron_mobile_base/omron_5.obj
129
+ robosuite/models/assets/bases/meshes/omron_mobile_base/omron_6.obj
130
+ robosuite/models/assets/bases/meshes/omron_mobile_base/omron_7.obj
131
+ robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_collision.mtl
132
+ robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_collision.obj
133
+ robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_collision.stl
134
+ robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_vis.mtl
135
+ robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_vis.obj
136
+ robosuite/models/assets/bases/meshes/rethink_minimal_mount/pedestal_vis.stl
137
+ robosuite/models/assets/bases/meshes/rethink_mount/pedestal.dae
138
+ robosuite/models/assets/bases/meshes/rethink_mount/pedestal.mtl
139
+ robosuite/models/assets/bases/meshes/rethink_mount/pedestal.obj
140
+ robosuite/models/assets/bases/meshes/rethink_mount/pedestal.stl
141
+ robosuite/models/assets/bullet_data/baxter_description/meshes/base/PEDESTAL.dae
142
+ robosuite/models/assets/bullet_data/baxter_description/meshes/base/PEDESTAL.stl
143
+ robosuite/models/assets/bullet_data/baxter_description/meshes/base/pedestal_link_collision.dae
144
+ robosuite/models/assets/bullet_data/baxter_description/meshes/base/pedestal_link_collision.stl
145
+ robosuite/models/assets/bullet_data/baxter_description/meshes/head/H0.dae
146
+ robosuite/models/assets/bullet_data/baxter_description/meshes/head/H0.stl
147
+ robosuite/models/assets/bullet_data/baxter_description/meshes/head/H1.dae
148
+ robosuite/models/assets/bullet_data/baxter_description/meshes/head/H1.stl
149
+ robosuite/models/assets/bullet_data/baxter_description/meshes/lower_elbow/E1.dae
150
+ robosuite/models/assets/bullet_data/baxter_description/meshes/lower_elbow/E1.stl
151
+ robosuite/models/assets/bullet_data/baxter_description/meshes/lower_forearm/W1.dae
152
+ robosuite/models/assets/bullet_data/baxter_description/meshes/lower_forearm/W1.stl
153
+ robosuite/models/assets/bullet_data/baxter_description/meshes/lower_shoulder/S1.dae
154
+ robosuite/models/assets/bullet_data/baxter_description/meshes/lower_shoulder/S1.stl
155
+ robosuite/models/assets/bullet_data/baxter_description/meshes/torso/base_link.dae
156
+ robosuite/models/assets/bullet_data/baxter_description/meshes/torso/base_link.stl
157
+ robosuite/models/assets/bullet_data/baxter_description/meshes/torso/base_link_collision.dae
158
+ robosuite/models/assets/bullet_data/baxter_description/meshes/torso/base_link_collision.stl
159
+ robosuite/models/assets/bullet_data/baxter_description/meshes/upper_elbow/E0.dae
160
+ robosuite/models/assets/bullet_data/baxter_description/meshes/upper_elbow/E0.stl
161
+ robosuite/models/assets/bullet_data/baxter_description/meshes/upper_forearm/W0.dae
162
+ robosuite/models/assets/bullet_data/baxter_description/meshes/upper_forearm/W0.stl
163
+ robosuite/models/assets/bullet_data/baxter_description/meshes/upper_shoulder/S0.dae
164
+ robosuite/models/assets/bullet_data/baxter_description/meshes/upper_shoulder/S0.stl
165
+ robosuite/models/assets/bullet_data/baxter_description/meshes/wrist/W2.dae
166
+ robosuite/models/assets/bullet_data/baxter_description/meshes/wrist/W2.stl
167
+ robosuite/models/assets/bullet_data/baxter_description/urdf/baxter_arm.urdf
168
+ robosuite/models/assets/bullet_data/panda_description/CMakeLists.txt
169
+ robosuite/models/assets/bullet_data/panda_description/mainpage.dox
170
+ robosuite/models/assets/bullet_data/panda_description/package.xml
171
+ robosuite/models/assets/bullet_data/panda_description/rosdoc.yaml
172
+ robosuite/models/assets/bullet_data/panda_description/meshes/collision/finger.stl
173
+ robosuite/models/assets/bullet_data/panda_description/meshes/collision/hand.stl
174
+ robosuite/models/assets/bullet_data/panda_description/meshes/collision/link0.stl
175
+ robosuite/models/assets/bullet_data/panda_description/meshes/collision/link1.stl
176
+ robosuite/models/assets/bullet_data/panda_description/meshes/collision/link2.stl
177
+ robosuite/models/assets/bullet_data/panda_description/meshes/collision/link3.stl
178
+ robosuite/models/assets/bullet_data/panda_description/meshes/collision/link4.stl
179
+ robosuite/models/assets/bullet_data/panda_description/meshes/collision/link5.stl
180
+ robosuite/models/assets/bullet_data/panda_description/meshes/collision/link6.stl
181
+ robosuite/models/assets/bullet_data/panda_description/meshes/collision/link7.stl
182
+ robosuite/models/assets/bullet_data/panda_description/meshes/visual/finger.dae
183
+ robosuite/models/assets/bullet_data/panda_description/meshes/visual/hand.dae
184
+ robosuite/models/assets/bullet_data/panda_description/meshes/visual/link0.dae
185
+ robosuite/models/assets/bullet_data/panda_description/meshes/visual/link1.dae
186
+ robosuite/models/assets/bullet_data/panda_description/meshes/visual/link2.dae
187
+ robosuite/models/assets/bullet_data/panda_description/meshes/visual/link3.dae
188
+ robosuite/models/assets/bullet_data/panda_description/meshes/visual/link4.dae
189
+ robosuite/models/assets/bullet_data/panda_description/meshes/visual/link5.dae
190
+ robosuite/models/assets/bullet_data/panda_description/meshes/visual/link6.dae
191
+ robosuite/models/assets/bullet_data/panda_description/meshes/visual/link7.dae
192
+ robosuite/models/assets/bullet_data/panda_description/urdf/hand.urdf
193
+ robosuite/models/assets/bullet_data/panda_description/urdf/hand.urdf.xacro
194
+ robosuite/models/assets/bullet_data/panda_description/urdf/hand.xacro
195
+ robosuite/models/assets/bullet_data/panda_description/urdf/panda_arm.urdf
196
+ robosuite/models/assets/bullet_data/panda_description/urdf/panda_arm.urdf.xacro
197
+ robosuite/models/assets/bullet_data/panda_description/urdf/panda_arm.xacro
198
+ robosuite/models/assets/bullet_data/panda_description/urdf/panda_arm_hand.urdf
199
+ robosuite/models/assets/bullet_data/panda_description/urdf/panda_arm_hand.urdf.xacro
200
+ robosuite/models/assets/bullet_data/sawyer_description/CMakeLists.txt
201
+ robosuite/models/assets/bullet_data/sawyer_description/package.xml
202
+ robosuite/models/assets/bullet_data/sawyer_description/config/sawyer.rviz
203
+ robosuite/models/assets/bullet_data/sawyer_description/launch/test_sawyer_description.launch.test
204
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/base.dae
205
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/base.stl
206
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l0.dae
207
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l0.stl
208
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l1.dae
209
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l1.stl
210
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l2.dae
211
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l2.stl
212
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l3.dae
213
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l3.stl
214
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l4.dae
215
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l4.stl
216
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l5.dae
217
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l5.stl
218
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l6.dae
219
+ robosuite/models/assets/bullet_data/sawyer_description/meshes/l6.stl
220
+ robosuite/models/assets/bullet_data/sawyer_description/params/named_poses.yaml
221
+ robosuite/models/assets/bullet_data/sawyer_description/urdf/sawyer_arm.urdf
222
+ robosuite/models/assets/grippers/bd_gripper.xml
223
+ robosuite/models/assets/grippers/fourier_left_hand.xml
224
+ robosuite/models/assets/grippers/fourier_right_hand.xml
225
+ robosuite/models/assets/grippers/inspire_left_hand.xml
226
+ robosuite/models/assets/grippers/inspire_right_hand.xml
227
+ robosuite/models/assets/grippers/jaco_three_finger_gripper.xml
228
+ robosuite/models/assets/grippers/null_gripper.xml
229
+ robosuite/models/assets/grippers/panda_gripper.xml
230
+ robosuite/models/assets/grippers/rethink_gripper.xml
231
+ robosuite/models/assets/grippers/robotiq_gripper_140.xml
232
+ robosuite/models/assets/grippers/robotiq_gripper_85.xml
233
+ robosuite/models/assets/grippers/robotiq_gripper_s.xml
234
+ robosuite/models/assets/grippers/wiping_gripper.xml
235
+ robosuite/models/assets/grippers/xarm7_gripper.xml
236
+ robosuite/models/assets/grippers/meshes/bd_gripper/arm_link_fngr_0.obj
237
+ robosuite/models/assets/grippers/meshes/bd_gripper/arm_link_fngr_1.obj
238
+ robosuite/models/assets/grippers/meshes/bd_gripper/arm_link_wr1.obj
239
+ robosuite/models/assets/grippers/meshes/bd_gripper/arm_link_wr1_0.obj
240
+ robosuite/models/assets/grippers/meshes/bd_gripper/arm_link_wr1_1.obj
241
+ robosuite/models/assets/grippers/meshes/bd_gripper/front_jaw.obj
242
+ robosuite/models/assets/grippers/meshes/bd_gripper/jaw_tooth.obj
243
+ robosuite/models/assets/grippers/meshes/bd_gripper/left_finger.obj
244
+ robosuite/models/assets/grippers/meshes/bd_gripper/left_hinge.obj
245
+ robosuite/models/assets/grippers/meshes/bd_gripper/left_tooth.obj
246
+ robosuite/models/assets/grippers/meshes/bd_gripper/middle_jaw.obj
247
+ robosuite/models/assets/grippers/meshes/bd_gripper/right_finger.obj
248
+ robosuite/models/assets/grippers/meshes/bd_gripper/right_hinge.obj
249
+ robosuite/models/assets/grippers/meshes/bd_gripper/right_tooth.obj
250
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_hand_base_link.STL
251
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_index_intermediate_link.STL
252
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_index_proximal_link.STL
253
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_middle_intermediate_link.STL
254
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_middle_proximal_link.STL
255
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_pinky_intermediate_link.STL
256
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_pinky_proximal_link.STL
257
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_ring_intermediate_link.STL
258
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_ring_proximal_link.STL
259
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_distal_link.STL
260
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_proximal_base_link.STL
261
+ robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_proximal_link.STL
262
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_hand_base_link.STL
263
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_index_intermediate_link.STL
264
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_index_proximal_link.STL
265
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_middle_intermediate_link.STL
266
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_middle_proximal_link.STL
267
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_pinky_intermediate_link.STL
268
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_pinky_proximal_link.STL
269
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_ring_intermediate_link.STL
270
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_ring_proximal_link.STL
271
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_distal_link.STL
272
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_proximal_base_link.STL
273
+ robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_proximal_link.STL
274
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link11.STL
275
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link12.STL
276
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link13.STL
277
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link14.STL
278
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link15.STL
279
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link16.STL
280
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link17.STL
281
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link18.STL
282
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link19.STL
283
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link20.STL
284
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link21.STL
285
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_Link22.STL
286
+ robosuite/models/assets/grippers/meshes/inspire_hands/lh_base_link.STL
287
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link11.STL
288
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link12.STL
289
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link13.STL
290
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link14.STL
291
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link15.STL
292
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link16.STL
293
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link17.STL
294
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link18.STL
295
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link19.STL
296
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link20.STL
297
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link21.STL
298
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_Link22.STL
299
+ robosuite/models/assets/grippers/meshes/inspire_hands/rh_base_link.STL
300
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_distal.dae
301
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_distal.mtl
302
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_distal.obj
303
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_distal.stl
304
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_proximal.dae
305
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_proximal.mtl
306
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_proximal.obj
307
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/finger_proximal.stl
308
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/hand_3finger.dae
309
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/hand_3finger.mtl
310
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/hand_3finger.obj
311
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/hand_3finger.stl
312
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/ring_small.dae
313
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/ring_small.mtl
314
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/ring_small.obj
315
+ robosuite/models/assets/grippers/meshes/jaco_three_finger_gripper/ring_small.stl
316
+ robosuite/models/assets/grippers/meshes/panda_gripper/finger.stl
317
+ robosuite/models/assets/grippers/meshes/panda_gripper/finger_longer.stl
318
+ robosuite/models/assets/grippers/meshes/panda_gripper/finger_vis.dae
319
+ robosuite/models/assets/grippers/meshes/panda_gripper/finger_vis.mtl
320
+ robosuite/models/assets/grippers/meshes/panda_gripper/finger_vis.obj
321
+ robosuite/models/assets/grippers/meshes/panda_gripper/finger_vis.stl
322
+ robosuite/models/assets/grippers/meshes/panda_gripper/hand.stl
323
+ robosuite/models/assets/grippers/meshes/panda_gripper/hand_vis.dae
324
+ robosuite/models/assets/grippers/meshes/panda_gripper/hand_vis.mtl
325
+ robosuite/models/assets/grippers/meshes/panda_gripper/hand_vis.obj
326
+ robosuite/models/assets/grippers/meshes/panda_gripper/hand_vis.stl
327
+ robosuite/models/assets/grippers/meshes/rethink_gripper/connector_plate.mtl
328
+ robosuite/models/assets/grippers/meshes/rethink_gripper/connector_plate.obj
329
+ robosuite/models/assets/grippers/meshes/rethink_gripper/connector_plate.stl
330
+ robosuite/models/assets/grippers/meshes/rethink_gripper/electric_gripper_base.mtl
331
+ robosuite/models/assets/grippers/meshes/rethink_gripper/electric_gripper_base.obj
332
+ robosuite/models/assets/grippers/meshes/rethink_gripper/electric_gripper_base.stl
333
+ robosuite/models/assets/grippers/meshes/rethink_gripper/half_round_tip.mtl
334
+ robosuite/models/assets/grippers/meshes/rethink_gripper/half_round_tip.obj
335
+ robosuite/models/assets/grippers/meshes/rethink_gripper/half_round_tip.stl
336
+ robosuite/models/assets/grippers/meshes/rethink_gripper/standard_narrow.mtl
337
+ robosuite/models/assets/grippers/meshes/rethink_gripper/standard_narrow.obj
338
+ robosuite/models/assets/grippers/meshes/rethink_gripper/standard_narrow.stl
339
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_finger.stl
340
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_finger_vis.obj
341
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_finger_vis.stl
342
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_knuckle.stl
343
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_knuckle_vis.obj
344
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_inner_knuckle_vis.stl
345
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_finger.stl
346
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_finger_vis.obj
347
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_finger_vis.stl
348
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_knuckle.stl
349
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_knuckle_vis.obj
350
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_140_outer_knuckle_vis.stl
351
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_base_link.stl
352
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_base_link_vis.obj
353
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_base_link_vis.stl
354
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_coupling.stl
355
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_coupling_vis.obj
356
+ robosuite/models/assets/grippers/meshes/robotiq_140_gripper/robotiq_arg2f_coupling_vis.stl
357
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_adapter_plate.obj
358
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_base.obj
359
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_0_L.obj
360
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_0_R.obj
361
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_1_L.obj
362
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_1_R.obj
363
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_2_L.obj
364
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_2_R.obj
365
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_3_L.obj
366
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_85_gripper_joint_3_R.obj
367
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_base_link.stl
368
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_base_link_vis.dae
369
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger.dae
370
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger.mtl
371
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger.obj
372
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger.stl
373
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger_vis.dae
374
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger_vis.mtl
375
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger_vis.obj
376
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_finger_vis.stl
377
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle.dae
378
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle.mtl
379
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle.obj
380
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle.stl
381
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle_vis.dae
382
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle_vis.mtl
383
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle_vis.obj
384
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_inner_knuckle_vis.stl
385
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger.dae
386
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger.mtl
387
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger.obj
388
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger.stl
389
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger_vis.dae
390
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger_vis.mtl
391
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger_vis.obj
392
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_finger_vis.stl
393
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle.dae
394
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle.mtl
395
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle.obj
396
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle.stl
397
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle_vis.dae
398
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle_vis.mtl
399
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle_vis.obj
400
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_outer_knuckle_vis.stl
401
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_pad_vis.dae
402
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_85_pad_vis.stl
403
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_base_link.mtl
404
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_base_link.obj
405
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_arg2f_base_link.stl
406
+ robosuite/models/assets/grippers/meshes/robotiq_85_gripper/robotiq_gripper_coupling_vis.stl
407
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_0.stl
408
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_0_vis.mtl
409
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_0_vis.obj
410
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_0_vis.stl
411
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_1.stl
412
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_1_vis.mtl
413
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_1_vis.obj
414
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_1_vis.stl
415
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_2.stl
416
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_2_vis.mtl
417
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_2_vis.obj
418
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_2_vis.stl
419
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_3.stl
420
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_3_vis.mtl
421
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_3_vis.obj
422
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/link_3_vis.stl
423
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/palm.stl
424
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/palm_vis.mtl
425
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/palm_vis.obj
426
+ robosuite/models/assets/grippers/meshes/robotiq_s_gripper/palm_vis.stl
427
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/base_link.stl
428
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/end_tool.stl
429
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/left_finger.stl
430
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/left_inner_knuckle.stl
431
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/left_outer_knuckle.stl
432
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/link1.stl
433
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/link2.stl
434
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/link3.stl
435
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/link4.stl
436
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/link5.stl
437
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/link6.stl
438
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/link7.stl
439
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/link_base.stl
440
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/right_finger.stl
441
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/right_inner_knuckle.stl
442
+ robosuite/models/assets/grippers/meshes/xarm7_gripper/right_outer_knuckle.stl
443
+ robosuite/models/assets/grippers/obj_meshes/rethink_gripper/connector_plate.mtl
444
+ robosuite/models/assets/grippers/obj_meshes/rethink_gripper/connector_plate.obj
445
+ robosuite/models/assets/grippers/obj_meshes/rethink_gripper/electric_gripper_base.mtl
446
+ robosuite/models/assets/grippers/obj_meshes/rethink_gripper/electric_gripper_base.obj
447
+ robosuite/models/assets/grippers/obj_meshes/rethink_gripper/connector_plate/connector_plate.obj
448
+ robosuite/models/assets/grippers/obj_meshes/rethink_gripper/connector_plate/connector_plate.xml
449
+ robosuite/models/assets/grippers/obj_meshes/rethink_gripper/electric_gripper_base/electric_gripper_base_0.obj
450
+ robosuite/models/assets/grippers/obj_meshes/rethink_gripper/electric_gripper_base/electric_gripper_base_1.obj
451
+ robosuite/models/assets/objects/bottle.xml
452
+ robosuite/models/assets/objects/bread-visual.xml
453
+ robosuite/models/assets/objects/bread.xml
454
+ robosuite/models/assets/objects/can-visual.xml
455
+ robosuite/models/assets/objects/can.xml
456
+ robosuite/models/assets/objects/cereal-visual.xml
457
+ robosuite/models/assets/objects/cereal.xml
458
+ robosuite/models/assets/objects/door.xml
459
+ robosuite/models/assets/objects/door_lock.xml
460
+ robosuite/models/assets/objects/lemon.xml
461
+ robosuite/models/assets/objects/milk-visual.xml
462
+ robosuite/models/assets/objects/milk.xml
463
+ robosuite/models/assets/objects/plate-with-hole.xml
464
+ robosuite/models/assets/objects/round-nut.xml
465
+ robosuite/models/assets/objects/square-nut.xml
466
+ robosuite/models/assets/objects/meshes/bottle.msh
467
+ robosuite/models/assets/objects/meshes/bottle.mtl
468
+ robosuite/models/assets/objects/meshes/bottle.obj
469
+ robosuite/models/assets/objects/meshes/bottle.stl
470
+ robosuite/models/assets/objects/meshes/bread.msh
471
+ robosuite/models/assets/objects/meshes/bread.mtl
472
+ robosuite/models/assets/objects/meshes/bread.obj
473
+ robosuite/models/assets/objects/meshes/bread.stl
474
+ robosuite/models/assets/objects/meshes/can.msh
475
+ robosuite/models/assets/objects/meshes/can.mtl
476
+ robosuite/models/assets/objects/meshes/can.obj
477
+ robosuite/models/assets/objects/meshes/can.stl
478
+ robosuite/models/assets/objects/meshes/cereal.msh
479
+ robosuite/models/assets/objects/meshes/cereal.mtl
480
+ robosuite/models/assets/objects/meshes/cereal.obj
481
+ robosuite/models/assets/objects/meshes/cereal.stl
482
+ robosuite/models/assets/objects/meshes/cube.obj
483
+ robosuite/models/assets/objects/meshes/cylinder.msh
484
+ robosuite/models/assets/objects/meshes/cylinder.obj
485
+ robosuite/models/assets/objects/meshes/handles.msh
486
+ robosuite/models/assets/objects/meshes/handles.mtl
487
+ robosuite/models/assets/objects/meshes/handles.obj
488
+ robosuite/models/assets/objects/meshes/handles.stl
489
+ robosuite/models/assets/objects/meshes/lemon.msh
490
+ robosuite/models/assets/objects/meshes/lemon.mtl
491
+ robosuite/models/assets/objects/meshes/lemon.obj
492
+ robosuite/models/assets/objects/meshes/lemon.stl
493
+ robosuite/models/assets/objects/meshes/milk.msh
494
+ robosuite/models/assets/objects/meshes/milk.mtl
495
+ robosuite/models/assets/objects/meshes/milk.obj
496
+ robosuite/models/assets/objects/meshes/milk.stl
497
+ robosuite/models/assets/objects/meshes/sphere8.obj
498
+ robosuite/models/assets/robots/baxter/robot.xml
499
+ robosuite/models/assets/robots/baxter/obj_meshes/head/H0.mtl
500
+ robosuite/models/assets/robots/baxter/obj_meshes/head/H0.obj
501
+ robosuite/models/assets/robots/baxter/obj_meshes/head/H1.mtl
502
+ robosuite/models/assets/robots/baxter/obj_meshes/head/H1.obj
503
+ robosuite/models/assets/robots/baxter/obj_meshes/head/H0/H0.obj
504
+ robosuite/models/assets/robots/baxter/obj_meshes/head/H1/H1_0.obj
505
+ robosuite/models/assets/robots/baxter/obj_meshes/head/H1/H1_1.obj
506
+ robosuite/models/assets/robots/baxter/obj_meshes/lower_elbow/E1.mtl
507
+ robosuite/models/assets/robots/baxter/obj_meshes/lower_elbow/E1.obj
508
+ robosuite/models/assets/robots/baxter/obj_meshes/lower_elbow/E1/E1_0.obj
509
+ robosuite/models/assets/robots/baxter/obj_meshes/lower_elbow/E1/E1_1.obj
510
+ robosuite/models/assets/robots/baxter/obj_meshes/lower_forearm/W1.mtl
511
+ robosuite/models/assets/robots/baxter/obj_meshes/lower_forearm/W1.obj
512
+ robosuite/models/assets/robots/baxter/obj_meshes/lower_forearm/W1/W1_0.obj
513
+ robosuite/models/assets/robots/baxter/obj_meshes/lower_forearm/W1/W1_1.obj
514
+ robosuite/models/assets/robots/baxter/obj_meshes/lower_shoulder/S1.mtl
515
+ robosuite/models/assets/robots/baxter/obj_meshes/lower_shoulder/S1.obj
516
+ robosuite/models/assets/robots/baxter/obj_meshes/lower_shoulder/S1/S1.obj
517
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link.mtl
518
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link.obj
519
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link_collision.mtl
520
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link_collision.obj
521
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link_collision.stl
522
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_0.obj
523
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_1.obj
524
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_2.obj
525
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_3.obj
526
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_4.obj
527
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link/base_link_5.obj
528
+ robosuite/models/assets/robots/baxter/obj_meshes/torso/base_link_collision/base_link_collision.obj
529
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_elbow/E0.mtl
530
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_elbow/E0.obj
531
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_elbow/E0/E0_0.obj
532
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_elbow/E0/E0_1.obj
533
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0.mtl
534
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0.obj
535
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0/W0_0.obj
536
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0/W0_1.obj
537
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0/W0_2.obj
538
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_forearm/W0/W0_3.obj
539
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_shoulder/S0.mtl
540
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_shoulder/S0.obj
541
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_shoulder/S0/S0_0.obj
542
+ robosuite/models/assets/robots/baxter/obj_meshes/upper_shoulder/S0/S0_1.obj
543
+ robosuite/models/assets/robots/baxter/obj_meshes/wrist/W2.mtl
544
+ robosuite/models/assets/robots/baxter/obj_meshes/wrist/W2.obj
545
+ robosuite/models/assets/robots/baxter/obj_meshes/wrist/W2/W2_0.obj
546
+ robosuite/models/assets/robots/baxter/obj_meshes/wrist/W2/W2_1.obj
547
+ robosuite/models/assets/robots/baxter/obj_meshes/wrist/W2/W2_2.obj
548
+ robosuite/models/assets/robots/gr1/robot.xml
549
+ robosuite/models/assets/robots/gr1/meshes/base.STL
550
+ robosuite/models/assets/robots/gr1/meshes/head_pitch.STL
551
+ robosuite/models/assets/robots/gr1/meshes/head_roll.STL
552
+ robosuite/models/assets/robots/gr1/meshes/head_yaw.STL
553
+ robosuite/models/assets/robots/gr1/meshes/l_foot_pitch.STL
554
+ robosuite/models/assets/robots/gr1/meshes/l_foot_roll.STL
555
+ robosuite/models/assets/robots/gr1/meshes/l_hand_pitch.STL
556
+ robosuite/models/assets/robots/gr1/meshes/l_hand_roll.STL
557
+ robosuite/models/assets/robots/gr1/meshes/l_hand_yaw.STL
558
+ robosuite/models/assets/robots/gr1/meshes/l_lower_arm_pitch.STL
559
+ robosuite/models/assets/robots/gr1/meshes/l_shank_pitch.STL
560
+ robosuite/models/assets/robots/gr1/meshes/l_thigh_pitch.STL
561
+ robosuite/models/assets/robots/gr1/meshes/l_thigh_roll.STL
562
+ robosuite/models/assets/robots/gr1/meshes/l_thigh_yaw.STL
563
+ robosuite/models/assets/robots/gr1/meshes/l_upper_arm_pitch.STL
564
+ robosuite/models/assets/robots/gr1/meshes/l_upper_arm_roll.STL
565
+ robosuite/models/assets/robots/gr1/meshes/l_upper_arm_yaw.STL
566
+ robosuite/models/assets/robots/gr1/meshes/r_foot_pitch.STL
567
+ robosuite/models/assets/robots/gr1/meshes/r_foot_roll.STL
568
+ robosuite/models/assets/robots/gr1/meshes/r_hand_pitch.STL
569
+ robosuite/models/assets/robots/gr1/meshes/r_hand_roll.STL
570
+ robosuite/models/assets/robots/gr1/meshes/r_hand_yaw.STL
571
+ robosuite/models/assets/robots/gr1/meshes/r_lower_arm_pitch.STL
572
+ robosuite/models/assets/robots/gr1/meshes/r_shank_pitch.STL
573
+ robosuite/models/assets/robots/gr1/meshes/r_thigh_pitch.STL
574
+ robosuite/models/assets/robots/gr1/meshes/r_thigh_roll.STL
575
+ robosuite/models/assets/robots/gr1/meshes/r_thigh_yaw.STL
576
+ robosuite/models/assets/robots/gr1/meshes/r_upper_arm_pitch.STL
577
+ robosuite/models/assets/robots/gr1/meshes/r_upper_arm_roll.STL
578
+ robosuite/models/assets/robots/gr1/meshes/r_upper_arm_yaw.STL
579
+ robosuite/models/assets/robots/gr1/meshes/torso.STL
580
+ robosuite/models/assets/robots/gr1/meshes/waist_pitch.STL
581
+ robosuite/models/assets/robots/gr1/meshes/waist_roll.STL
582
+ robosuite/models/assets/robots/gr1/meshes/waist_yaw.STL
583
+ robosuite/models/assets/robots/iiwa/robot.xml
584
+ robosuite/models/assets/robots/iiwa/meshes/link_0.obj
585
+ robosuite/models/assets/robots/iiwa/meshes/link_0.stl
586
+ robosuite/models/assets/robots/iiwa/meshes/link_0_vis.mtl
587
+ robosuite/models/assets/robots/iiwa/meshes/link_0_vis.obj
588
+ robosuite/models/assets/robots/iiwa/meshes/link_0_vis.stl
589
+ robosuite/models/assets/robots/iiwa/meshes/link_1.obj
590
+ robosuite/models/assets/robots/iiwa/meshes/link_1.stl
591
+ robosuite/models/assets/robots/iiwa/meshes/link_1_vis.mtl
592
+ robosuite/models/assets/robots/iiwa/meshes/link_1_vis.obj
593
+ robosuite/models/assets/robots/iiwa/meshes/link_1_vis.stl
594
+ robosuite/models/assets/robots/iiwa/meshes/link_2.obj
595
+ robosuite/models/assets/robots/iiwa/meshes/link_2.stl
596
+ robosuite/models/assets/robots/iiwa/meshes/link_2_vis.mtl
597
+ robosuite/models/assets/robots/iiwa/meshes/link_2_vis.obj
598
+ robosuite/models/assets/robots/iiwa/meshes/link_2_vis.stl
599
+ robosuite/models/assets/robots/iiwa/meshes/link_3.obj
600
+ robosuite/models/assets/robots/iiwa/meshes/link_3.stl
601
+ robosuite/models/assets/robots/iiwa/meshes/link_3_vis.mtl
602
+ robosuite/models/assets/robots/iiwa/meshes/link_3_vis.obj
603
+ robosuite/models/assets/robots/iiwa/meshes/link_3_vis.stl
604
+ robosuite/models/assets/robots/iiwa/meshes/link_4.obj
605
+ robosuite/models/assets/robots/iiwa/meshes/link_4.stl
606
+ robosuite/models/assets/robots/iiwa/meshes/link_4_vis.mtl
607
+ robosuite/models/assets/robots/iiwa/meshes/link_4_vis.obj
608
+ robosuite/models/assets/robots/iiwa/meshes/link_4_vis.stl
609
+ robosuite/models/assets/robots/iiwa/meshes/link_5.obj
610
+ robosuite/models/assets/robots/iiwa/meshes/link_5.stl
611
+ robosuite/models/assets/robots/iiwa/meshes/link_5_vis.mtl
612
+ robosuite/models/assets/robots/iiwa/meshes/link_5_vis.obj
613
+ robosuite/models/assets/robots/iiwa/meshes/link_5_vis.stl
614
+ robosuite/models/assets/robots/iiwa/meshes/link_6.obj
615
+ robosuite/models/assets/robots/iiwa/meshes/link_6.stl
616
+ robosuite/models/assets/robots/iiwa/meshes/link_6_vis.mtl
617
+ robosuite/models/assets/robots/iiwa/meshes/link_6_vis.obj
618
+ robosuite/models/assets/robots/iiwa/meshes/link_6_vis.stl
619
+ robosuite/models/assets/robots/iiwa/meshes/link_7.obj
620
+ robosuite/models/assets/robots/iiwa/meshes/link_7.stl
621
+ robosuite/models/assets/robots/iiwa/meshes/link_7_vis.mtl
622
+ robosuite/models/assets/robots/iiwa/meshes/link_7_vis.obj
623
+ robosuite/models/assets/robots/iiwa/meshes/link_7_vis.stl
624
+ robosuite/models/assets/robots/iiwa/meshes/pedestal.mtl
625
+ robosuite/models/assets/robots/jaco/robot.xml
626
+ robosuite/models/assets/robots/jaco/meshes/arm_half_1.mtl
627
+ robosuite/models/assets/robots/jaco/meshes/arm_half_1.obj
628
+ robosuite/models/assets/robots/jaco/meshes/arm_half_1.stl
629
+ robosuite/models/assets/robots/jaco/meshes/arm_half_2.mtl
630
+ robosuite/models/assets/robots/jaco/meshes/arm_half_2.obj
631
+ robosuite/models/assets/robots/jaco/meshes/arm_half_2.stl
632
+ robosuite/models/assets/robots/jaco/meshes/base.mtl
633
+ robosuite/models/assets/robots/jaco/meshes/base.obj
634
+ robosuite/models/assets/robots/jaco/meshes/base.stl
635
+ robosuite/models/assets/robots/jaco/meshes/forearm.mtl
636
+ robosuite/models/assets/robots/jaco/meshes/forearm.obj
637
+ robosuite/models/assets/robots/jaco/meshes/forearm.stl
638
+ robosuite/models/assets/robots/jaco/meshes/pedestal.mtl
639
+ robosuite/models/assets/robots/jaco/meshes/ring_big.mtl
640
+ robosuite/models/assets/robots/jaco/meshes/ring_big.obj
641
+ robosuite/models/assets/robots/jaco/meshes/ring_big.stl
642
+ robosuite/models/assets/robots/jaco/meshes/ring_small.mtl
643
+ robosuite/models/assets/robots/jaco/meshes/ring_small.obj
644
+ robosuite/models/assets/robots/jaco/meshes/ring_small.stl
645
+ robosuite/models/assets/robots/jaco/meshes/shoulder.mtl
646
+ robosuite/models/assets/robots/jaco/meshes/shoulder.obj
647
+ robosuite/models/assets/robots/jaco/meshes/shoulder.stl
648
+ robosuite/models/assets/robots/jaco/meshes/wrist_spherical_1.mtl
649
+ robosuite/models/assets/robots/jaco/meshes/wrist_spherical_1.obj
650
+ robosuite/models/assets/robots/jaco/meshes/wrist_spherical_1.stl
651
+ robosuite/models/assets/robots/jaco/meshes/wrist_spherical_2.mtl
652
+ robosuite/models/assets/robots/jaco/meshes/wrist_spherical_2.obj
653
+ robosuite/models/assets/robots/jaco/meshes/wrist_spherical_2.stl
654
+ robosuite/models/assets/robots/kinova3/robot.xml
655
+ robosuite/models/assets/robots/kinova3/meshes/base_link.mtl
656
+ robosuite/models/assets/robots/kinova3/meshes/base_link.obj
657
+ robosuite/models/assets/robots/kinova3/meshes/base_link.stl
658
+ robosuite/models/assets/robots/kinova3/meshes/bracelet_no_vision_link.mtl
659
+ robosuite/models/assets/robots/kinova3/meshes/bracelet_no_vision_link.obj
660
+ robosuite/models/assets/robots/kinova3/meshes/bracelet_no_vision_link.stl
661
+ robosuite/models/assets/robots/kinova3/meshes/bracelet_with_vision_link.mtl
662
+ robosuite/models/assets/robots/kinova3/meshes/bracelet_with_vision_link.obj
663
+ robosuite/models/assets/robots/kinova3/meshes/bracelet_with_vision_link.stl
664
+ robosuite/models/assets/robots/kinova3/meshes/end_effector_link.mtl
665
+ robosuite/models/assets/robots/kinova3/meshes/end_effector_link.obj
666
+ robosuite/models/assets/robots/kinova3/meshes/end_effector_link.stl
667
+ robosuite/models/assets/robots/kinova3/meshes/forearm_link.mtl
668
+ robosuite/models/assets/robots/kinova3/meshes/forearm_link.obj
669
+ robosuite/models/assets/robots/kinova3/meshes/forearm_link.stl
670
+ robosuite/models/assets/robots/kinova3/meshes/half_arm_1_link.mtl
671
+ robosuite/models/assets/robots/kinova3/meshes/half_arm_1_link.obj
672
+ robosuite/models/assets/robots/kinova3/meshes/half_arm_1_link.stl
673
+ robosuite/models/assets/robots/kinova3/meshes/half_arm_2_link.mtl
674
+ robosuite/models/assets/robots/kinova3/meshes/half_arm_2_link.obj
675
+ robosuite/models/assets/robots/kinova3/meshes/half_arm_2_link.stl
676
+ robosuite/models/assets/robots/kinova3/meshes/pedestal.mtl
677
+ robosuite/models/assets/robots/kinova3/meshes/shoulder_link.mtl
678
+ robosuite/models/assets/robots/kinova3/meshes/shoulder_link.obj
679
+ robosuite/models/assets/robots/kinova3/meshes/shoulder_link.stl
680
+ robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_1_link.mtl
681
+ robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_1_link.obj
682
+ robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_1_link.stl
683
+ robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_2_link.mtl
684
+ robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_2_link.obj
685
+ robosuite/models/assets/robots/kinova3/meshes/spherical_wrist_2_link.stl
686
+ robosuite/models/assets/robots/panda/robot.xml
687
+ robosuite/models/assets/robots/panda/meshes/finger.stl
688
+ robosuite/models/assets/robots/panda/meshes/hand.stl
689
+ robosuite/models/assets/robots/panda/meshes/link0.stl
690
+ robosuite/models/assets/robots/panda/meshes/link1.stl
691
+ robosuite/models/assets/robots/panda/meshes/link2.stl
692
+ robosuite/models/assets/robots/panda/meshes/link3.stl
693
+ robosuite/models/assets/robots/panda/meshes/link4.stl
694
+ robosuite/models/assets/robots/panda/meshes/link5.stl
695
+ robosuite/models/assets/robots/panda/meshes/link6.stl
696
+ robosuite/models/assets/robots/panda/meshes/link7.stl
697
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_0.obj
698
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_1.obj
699
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_10.obj
700
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_11.obj
701
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_2.obj
702
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_3.obj
703
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_4.obj
704
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_5.obj
705
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_6.obj
706
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_7.obj
707
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_8.obj
708
+ robosuite/models/assets/robots/panda/obj_meshes/link0_vis/link0_vis_9.obj
709
+ robosuite/models/assets/robots/panda/obj_meshes/link1_vis/link1_vis.obj
710
+ robosuite/models/assets/robots/panda/obj_meshes/link2_vis/link2_vis.obj
711
+ robosuite/models/assets/robots/panda/obj_meshes/link3_vis/link3_vis_0.obj
712
+ robosuite/models/assets/robots/panda/obj_meshes/link3_vis/link3_vis_1.obj
713
+ robosuite/models/assets/robots/panda/obj_meshes/link3_vis/link3_vis_2.obj
714
+ robosuite/models/assets/robots/panda/obj_meshes/link3_vis/link3_vis_3.obj
715
+ robosuite/models/assets/robots/panda/obj_meshes/link4_vis/link4_vis_0.obj
716
+ robosuite/models/assets/robots/panda/obj_meshes/link4_vis/link4_vis_1.obj
717
+ robosuite/models/assets/robots/panda/obj_meshes/link4_vis/link4_vis_2.obj
718
+ robosuite/models/assets/robots/panda/obj_meshes/link4_vis/link4_vis_3.obj
719
+ robosuite/models/assets/robots/panda/obj_meshes/link5_vis/link5_vis_0.obj
720
+ robosuite/models/assets/robots/panda/obj_meshes/link5_vis/link5_vis_1.obj
721
+ robosuite/models/assets/robots/panda/obj_meshes/link5_vis/link5_vis_2.obj
722
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_0.obj
723
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_1.obj
724
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_10.obj
725
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_11.obj
726
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_12.obj
727
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_13.obj
728
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_14.obj
729
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_15.obj
730
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_16.obj
731
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_2.obj
732
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_3.obj
733
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_4.obj
734
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_5.obj
735
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_6.obj
736
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_7.obj
737
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_8.obj
738
+ robosuite/models/assets/robots/panda/obj_meshes/link6_vis/link6_vis_9.obj
739
+ robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_0.obj
740
+ robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_1.obj
741
+ robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_2.obj
742
+ robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_3.obj
743
+ robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_4.obj
744
+ robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_5.obj
745
+ robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_6.obj
746
+ robosuite/models/assets/robots/panda/obj_meshes/link7_vis/link7_vis_7.obj
747
+ robosuite/models/assets/robots/sawyer/robot.xml
748
+ robosuite/models/assets/robots/sawyer/obj_meshes/base.mtl
749
+ robosuite/models/assets/robots/sawyer/obj_meshes/base.obj
750
+ robosuite/models/assets/robots/sawyer/obj_meshes/head.mtl
751
+ robosuite/models/assets/robots/sawyer/obj_meshes/head.obj
752
+ robosuite/models/assets/robots/sawyer/obj_meshes/l0.mtl
753
+ robosuite/models/assets/robots/sawyer/obj_meshes/l0.obj
754
+ robosuite/models/assets/robots/sawyer/obj_meshes/l1.mtl
755
+ robosuite/models/assets/robots/sawyer/obj_meshes/l1.obj
756
+ robosuite/models/assets/robots/sawyer/obj_meshes/l2.mtl
757
+ robosuite/models/assets/robots/sawyer/obj_meshes/l2.obj
758
+ robosuite/models/assets/robots/sawyer/obj_meshes/l3.mtl
759
+ robosuite/models/assets/robots/sawyer/obj_meshes/l3.obj
760
+ robosuite/models/assets/robots/sawyer/obj_meshes/l4.mtl
761
+ robosuite/models/assets/robots/sawyer/obj_meshes/l4.obj
762
+ robosuite/models/assets/robots/sawyer/obj_meshes/l5.mtl
763
+ robosuite/models/assets/robots/sawyer/obj_meshes/l5.obj
764
+ robosuite/models/assets/robots/sawyer/obj_meshes/l6.mtl
765
+ robosuite/models/assets/robots/sawyer/obj_meshes/l6.obj
766
+ robosuite/models/assets/robots/sawyer/obj_meshes/base/base_0.obj
767
+ robosuite/models/assets/robots/sawyer/obj_meshes/base/base_1.obj
768
+ robosuite/models/assets/robots/sawyer/obj_meshes/head/head_0.obj
769
+ robosuite/models/assets/robots/sawyer/obj_meshes/head/head_1.obj
770
+ robosuite/models/assets/robots/sawyer/obj_meshes/head/head_2.obj
771
+ robosuite/models/assets/robots/sawyer/obj_meshes/head/head_3.obj
772
+ robosuite/models/assets/robots/sawyer/obj_meshes/head/head_4.obj
773
+ robosuite/models/assets/robots/sawyer/obj_meshes/head/head_5.obj
774
+ robosuite/models/assets/robots/sawyer/obj_meshes/head/head_6.obj
775
+ robosuite/models/assets/robots/sawyer/obj_meshes/head/head_7.obj
776
+ robosuite/models/assets/robots/sawyer/obj_meshes/head/head_8.obj
777
+ robosuite/models/assets/robots/sawyer/obj_meshes/head/head_9.obj
778
+ robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_0.obj
779
+ robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_1.obj
780
+ robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_2.obj
781
+ robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_3.obj
782
+ robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_4.obj
783
+ robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_5.obj
784
+ robosuite/models/assets/robots/sawyer/obj_meshes/l0/l0_6.obj
785
+ robosuite/models/assets/robots/sawyer/obj_meshes/l1/l1_0.obj
786
+ robosuite/models/assets/robots/sawyer/obj_meshes/l1/l1_1.obj
787
+ robosuite/models/assets/robots/sawyer/obj_meshes/l1/l1_2.obj
788
+ robosuite/models/assets/robots/sawyer/obj_meshes/l2/l2_0.obj
789
+ robosuite/models/assets/robots/sawyer/obj_meshes/l2/l2_1.obj
790
+ robosuite/models/assets/robots/sawyer/obj_meshes/l2/l2_2.obj
791
+ robosuite/models/assets/robots/sawyer/obj_meshes/l2/l2_3.obj
792
+ robosuite/models/assets/robots/sawyer/obj_meshes/l2/l2_4.obj
793
+ robosuite/models/assets/robots/sawyer/obj_meshes/l3/l3_0.obj
794
+ robosuite/models/assets/robots/sawyer/obj_meshes/l3/l3_1.obj
795
+ robosuite/models/assets/robots/sawyer/obj_meshes/l3/l3_2.obj
796
+ robosuite/models/assets/robots/sawyer/obj_meshes/l3/l3_3.obj
797
+ robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_0.obj
798
+ robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_1.obj
799
+ robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_2.obj
800
+ robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_3.obj
801
+ robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_4.obj
802
+ robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_5.obj
803
+ robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_6.obj
804
+ robosuite/models/assets/robots/sawyer/obj_meshes/l4/l4_7.obj
805
+ robosuite/models/assets/robots/sawyer/obj_meshes/l5/l5_0.obj
806
+ robosuite/models/assets/robots/sawyer/obj_meshes/l5/l5_1.obj
807
+ robosuite/models/assets/robots/sawyer/obj_meshes/l5/l5_2.obj
808
+ robosuite/models/assets/robots/sawyer/obj_meshes/l5/l5_3.obj
809
+ robosuite/models/assets/robots/sawyer/obj_meshes/l5/l5_4.obj
810
+ robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_0.obj
811
+ robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_1.obj
812
+ robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_2.obj
813
+ robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_3.obj
814
+ robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_4.obj
815
+ robosuite/models/assets/robots/sawyer/obj_meshes/l6/l6_5.obj
816
+ robosuite/models/assets/robots/spot/robot.xml
817
+ robosuite/models/assets/robots/spot/meshes/body_0.obj
818
+ robosuite/models/assets/robots/spot/meshes/body_1.obj
819
+ robosuite/models/assets/robots/spot/meshes/body_collision.obj
820
+ robosuite/models/assets/robots/spot/meshes/front_jaw.obj
821
+ robosuite/models/assets/robots/spot/meshes/front_left_hip.obj
822
+ robosuite/models/assets/robots/spot/meshes/front_left_lower_leg.obj
823
+ robosuite/models/assets/robots/spot/meshes/front_left_upper_leg_0.obj
824
+ robosuite/models/assets/robots/spot/meshes/front_left_upper_leg_1.obj
825
+ robosuite/models/assets/robots/spot/meshes/front_right_hip.obj
826
+ robosuite/models/assets/robots/spot/meshes/front_right_lower_leg.obj
827
+ robosuite/models/assets/robots/spot/meshes/front_right_upper_leg_0.obj
828
+ robosuite/models/assets/robots/spot/meshes/front_right_upper_leg_1.obj
829
+ robosuite/models/assets/robots/spot/meshes/jaw_tooth.obj
830
+ robosuite/models/assets/robots/spot/meshes/left_finger.obj
831
+ robosuite/models/assets/robots/spot/meshes/left_hinge.obj
832
+ robosuite/models/assets/robots/spot/meshes/left_lower_leg_collision.obj
833
+ robosuite/models/assets/robots/spot/meshes/left_tooth.obj
834
+ robosuite/models/assets/robots/spot/meshes/left_upper_leg_collision.obj
835
+ robosuite/models/assets/robots/spot/meshes/middle_jaw.obj
836
+ robosuite/models/assets/robots/spot/meshes/rear_left_hip.obj
837
+ robosuite/models/assets/robots/spot/meshes/rear_left_lower_leg.obj
838
+ robosuite/models/assets/robots/spot/meshes/rear_left_upper_leg_0.obj
839
+ robosuite/models/assets/robots/spot/meshes/rear_left_upper_leg_1.obj
840
+ robosuite/models/assets/robots/spot/meshes/rear_right_hip.obj
841
+ robosuite/models/assets/robots/spot/meshes/rear_right_lower_leg.obj
842
+ robosuite/models/assets/robots/spot/meshes/rear_right_upper_leg_0.obj
843
+ robosuite/models/assets/robots/spot/meshes/rear_right_upper_leg_1.obj
844
+ robosuite/models/assets/robots/spot/meshes/right_finger.obj
845
+ robosuite/models/assets/robots/spot/meshes/right_hinge.obj
846
+ robosuite/models/assets/robots/spot/meshes/right_lower_leg_collision.obj
847
+ robosuite/models/assets/robots/spot/meshes/right_tooth.obj
848
+ robosuite/models/assets/robots/spot/meshes/right_upper_leg_collision.obj
849
+ robosuite/models/assets/robots/spot_arm/robot.xml
850
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_el0.obj
851
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_el0_coll.obj
852
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_el1_0.obj
853
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_el1_1.obj
854
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_el1_lip.obj
855
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_el1_main.obj
856
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_hr0.obj
857
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_hr0_coll.obj
858
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh0.obj
859
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh0_base.obj
860
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh0_left_motor.obj
861
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh0_right_motor.obj
862
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh1_0.obj
863
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_sh1_1.obj
864
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_wr0.obj
865
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_wr0_0.obj
866
+ robosuite/models/assets/robots/spot_arm/meshes/arm_link_wr0_1.obj
867
+ robosuite/models/assets/robots/tiago/robot.xml
868
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_1.mtl
869
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_1.stl
870
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_2.mtl
871
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_2.stl
872
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_3.mtl
873
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_3.stl
874
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_4.mtl
875
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_4.stl
876
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_4_collision.stl
877
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_5-wrist-2010.stl
878
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_5-wrist-2017.stl
879
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_6-wrist-2010.stl
880
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_6-wrist-2017.mtl
881
+ robosuite/models/assets/robots/tiago/meshes/arm/arm_6-wrist-2017.stl
882
+ robosuite/models/assets/robots/tiago/meshes/base/base.stl
883
+ robosuite/models/assets/robots/tiago/meshes/base/base_antena_link.stl
884
+ robosuite/models/assets/robots/tiago/meshes/base/base_collision.stl
885
+ robosuite/models/assets/robots/tiago/meshes/base/base_dock_link.stl
886
+ robosuite/models/assets/robots/tiago/meshes/base/base_link.stl
887
+ robosuite/models/assets/robots/tiago/meshes/base/base_ring.stl
888
+ robosuite/models/assets/robots/tiago/meshes/base/high_resolution/base.stl
889
+ robosuite/models/assets/robots/tiago/meshes/base/high_resolution/base_collision.stl
890
+ robosuite/models/assets/robots/tiago/meshes/grippers/finger_abd_collision.stl
891
+ robosuite/models/assets/robots/tiago/meshes/grippers/finger_flex_collision.stl
892
+ robosuite/models/assets/robots/tiago/meshes/grippers/finger_flex_tip_collision.stl
893
+ robosuite/models/assets/robots/tiago/meshes/grippers/gripper_finger_link.mtl
894
+ robosuite/models/assets/robots/tiago/meshes/grippers/gripper_finger_link.stl
895
+ robosuite/models/assets/robots/tiago/meshes/grippers/gripper_finger_link_collision.stl
896
+ robosuite/models/assets/robots/tiago/meshes/grippers/gripper_finger_link_mirror_x_y.stl
897
+ robosuite/models/assets/robots/tiago/meshes/grippers/gripper_link.mtl
898
+ robosuite/models/assets/robots/tiago/meshes/grippers/gripper_link.stl
899
+ robosuite/models/assets/robots/tiago/meshes/grippers/gripper_link_collision.stl
900
+ robosuite/models/assets/robots/tiago/meshes/grippers/palm.stl
901
+ robosuite/models/assets/robots/tiago/meshes/grippers/palm_collision.stl
902
+ robosuite/models/assets/robots/tiago/meshes/grippers/thumb_abd_collision.stl
903
+ robosuite/models/assets/robots/tiago/meshes/head/head_1.stl
904
+ robosuite/models/assets/robots/tiago/meshes/head/head_2.stl
905
+ robosuite/models/assets/robots/tiago/meshes/torso/torso_fix.stl
906
+ robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_collision_core.stl
907
+ robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_collision_shoulder.stl
908
+ robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_dual_arm.stl
909
+ robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_dual_arm_with_screen.stl
910
+ robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_with_arm.stl
911
+ robosuite/models/assets/robots/tiago/meshes/torso/torso_lift_without_arm.stl
912
+ robosuite/models/assets/robots/tiago/meshes/wheels/caster_1.mtl
913
+ robosuite/models/assets/robots/tiago/meshes/wheels/caster_1.stl
914
+ robosuite/models/assets/robots/tiago/meshes/wheels/caster_2.mtl
915
+ robosuite/models/assets/robots/tiago/meshes/wheels/caster_2.stl
916
+ robosuite/models/assets/robots/tiago/meshes/wheels/suspension_front_link.mtl
917
+ robosuite/models/assets/robots/tiago/meshes/wheels/suspension_front_link.stl
918
+ robosuite/models/assets/robots/tiago/meshes/wheels/suspension_front_link_mirror_y.stl
919
+ robosuite/models/assets/robots/tiago/meshes/wheels/suspension_rear_link.mtl
920
+ robosuite/models/assets/robots/tiago/meshes/wheels/suspension_rear_link.stl
921
+ robosuite/models/assets/robots/tiago/meshes/wheels/wheel.mtl
922
+ robosuite/models/assets/robots/tiago/meshes/wheels/wheel.stl
923
+ robosuite/models/assets/robots/tiago/meshes/wheels/wheel_link.mtl
924
+ robosuite/models/assets/robots/tiago/meshes/wheels/wheel_link.stl
925
+ robosuite/models/assets/robots/tiago/meshes/wheels/wheel_link_mirror_z.stl
926
+ robosuite/models/assets/robots/tiago/meshes/wheels/high_resolution/caster_1.stl
927
+ robosuite/models/assets/robots/tiago/meshes/wheels/high_resolution/caster_2.stl
928
+ robosuite/models/assets/robots/tiago/meshes/wheels/high_resolution/wheel.stl
929
+ robosuite/models/assets/robots/ur5e/robot.xml
930
+ robosuite/models/assets/robots/ur5e/meshes/base.dae
931
+ robosuite/models/assets/robots/ur5e/meshes/base.stl
932
+ robosuite/models/assets/robots/ur5e/meshes/base_vis.mtl
933
+ robosuite/models/assets/robots/ur5e/meshes/base_vis.obj
934
+ robosuite/models/assets/robots/ur5e/meshes/base_vis.stl
935
+ robosuite/models/assets/robots/ur5e/meshes/forearm.dae
936
+ robosuite/models/assets/robots/ur5e/meshes/forearm.stl
937
+ robosuite/models/assets/robots/ur5e/meshes/forearm_vis.mtl
938
+ robosuite/models/assets/robots/ur5e/meshes/forearm_vis.obj
939
+ robosuite/models/assets/robots/ur5e/meshes/forearm_vis.stl
940
+ robosuite/models/assets/robots/ur5e/meshes/pedestal.dae
941
+ robosuite/models/assets/robots/ur5e/meshes/pedestal.mtl
942
+ robosuite/models/assets/robots/ur5e/meshes/pedestal.obj
943
+ robosuite/models/assets/robots/ur5e/meshes/shoulder.dae
944
+ robosuite/models/assets/robots/ur5e/meshes/shoulder.stl
945
+ robosuite/models/assets/robots/ur5e/meshes/shoulder_vis.mtl
946
+ robosuite/models/assets/robots/ur5e/meshes/shoulder_vis.obj
947
+ robosuite/models/assets/robots/ur5e/meshes/shoulder_vis.stl
948
+ robosuite/models/assets/robots/ur5e/meshes/upperarm.dae
949
+ robosuite/models/assets/robots/ur5e/meshes/upperarm.stl
950
+ robosuite/models/assets/robots/ur5e/meshes/upperarm_vis.mtl
951
+ robosuite/models/assets/robots/ur5e/meshes/upperarm_vis.obj
952
+ robosuite/models/assets/robots/ur5e/meshes/upperarm_vis.stl
953
+ robosuite/models/assets/robots/ur5e/meshes/wrist1.dae
954
+ robosuite/models/assets/robots/ur5e/meshes/wrist1.stl
955
+ robosuite/models/assets/robots/ur5e/meshes/wrist1_vis.mtl
956
+ robosuite/models/assets/robots/ur5e/meshes/wrist1_vis.obj
957
+ robosuite/models/assets/robots/ur5e/meshes/wrist1_vis.stl
958
+ robosuite/models/assets/robots/ur5e/meshes/wrist2.dae
959
+ robosuite/models/assets/robots/ur5e/meshes/wrist2.stl
960
+ robosuite/models/assets/robots/ur5e/meshes/wrist2_vis.mtl
961
+ robosuite/models/assets/robots/ur5e/meshes/wrist2_vis.obj
962
+ robosuite/models/assets/robots/ur5e/meshes/wrist2_vis.stl
963
+ robosuite/models/assets/robots/ur5e/meshes/wrist3.dae
964
+ robosuite/models/assets/robots/ur5e/meshes/wrist3.stl
965
+ robosuite/models/assets/robots/ur5e/meshes/wrist3_vis.mtl
966
+ robosuite/models/assets/robots/ur5e/meshes/wrist3_vis.obj
967
+ robosuite/models/assets/robots/ur5e/meshes/wrist3_vis.stl
968
+ robosuite/models/assets/robots/ur5e/obj_meshes/base_vis/base_vis_0.obj
969
+ robosuite/models/assets/robots/ur5e/obj_meshes/base_vis/base_vis_1.obj
970
+ robosuite/models/assets/robots/ur5e/obj_meshes/forearm_vis/forearm_vis_0.obj
971
+ robosuite/models/assets/robots/ur5e/obj_meshes/forearm_vis/forearm_vis_1.obj
972
+ robosuite/models/assets/robots/ur5e/obj_meshes/forearm_vis/forearm_vis_2.obj
973
+ robosuite/models/assets/robots/ur5e/obj_meshes/forearm_vis/forearm_vis_3.obj
974
+ robosuite/models/assets/robots/ur5e/obj_meshes/shoulder_vis/shoulder_vis_0.obj
975
+ robosuite/models/assets/robots/ur5e/obj_meshes/shoulder_vis/shoulder_vis_1.obj
976
+ robosuite/models/assets/robots/ur5e/obj_meshes/shoulder_vis/shoulder_vis_2.obj
977
+ robosuite/models/assets/robots/ur5e/obj_meshes/upperarm_vis/upperarm_vis_0.obj
978
+ robosuite/models/assets/robots/ur5e/obj_meshes/upperarm_vis/upperarm_vis_1.obj
979
+ robosuite/models/assets/robots/ur5e/obj_meshes/upperarm_vis/upperarm_vis_2.obj
980
+ robosuite/models/assets/robots/ur5e/obj_meshes/upperarm_vis/upperarm_vis_3.obj
981
+ robosuite/models/assets/robots/ur5e/obj_meshes/wrist1_vis/wrist1_vis_0.obj
982
+ robosuite/models/assets/robots/ur5e/obj_meshes/wrist1_vis/wrist1_vis_1.obj
983
+ robosuite/models/assets/robots/ur5e/obj_meshes/wrist1_vis/wrist1_vis_2.obj
984
+ robosuite/models/assets/robots/ur5e/obj_meshes/wrist2_vis/wrist2_vis_0.obj
985
+ robosuite/models/assets/robots/ur5e/obj_meshes/wrist2_vis/wrist2_vis_1.obj
986
+ robosuite/models/assets/robots/ur5e/obj_meshes/wrist2_vis/wrist2_vis_2.obj
987
+ robosuite/models/assets/robots/ur5e/obj_meshes/wrist3_vis/wrist3_vis.obj
988
+ robosuite/models/assets/robots/xarm7/robot.xml
989
+ robosuite/models/assets/robots/xarm7/assets/base_link.stl
990
+ robosuite/models/assets/robots/xarm7/assets/end_tool.stl
991
+ robosuite/models/assets/robots/xarm7/assets/left_finger.stl
992
+ robosuite/models/assets/robots/xarm7/assets/left_inner_knuckle.stl
993
+ robosuite/models/assets/robots/xarm7/assets/left_outer_knuckle.stl
994
+ robosuite/models/assets/robots/xarm7/assets/link1.stl
995
+ robosuite/models/assets/robots/xarm7/assets/link2.stl
996
+ robosuite/models/assets/robots/xarm7/assets/link3.stl
997
+ robosuite/models/assets/robots/xarm7/assets/link4.stl
998
+ robosuite/models/assets/robots/xarm7/assets/link5.stl
999
+ robosuite/models/assets/robots/xarm7/assets/link6.stl
1000
+ robosuite/models/assets/robots/xarm7/assets/link7.stl
1001
+ robosuite/models/assets/robots/xarm7/assets/link_base.stl
1002
+ robosuite/models/assets/robots/xarm7/assets/right_finger.stl
1003
+ robosuite/models/assets/robots/xarm7/assets/right_inner_knuckle.stl
1004
+ robosuite/models/assets/robots/xarm7/assets/right_outer_knuckle.stl
1005
+ robosuite/models/assets/robots/xarm7/meshes/base_link.stl
1006
+ robosuite/models/assets/robots/xarm7/meshes/end_tool.stl
1007
+ robosuite/models/assets/robots/xarm7/meshes/left_finger.stl
1008
+ robosuite/models/assets/robots/xarm7/meshes/left_inner_knuckle.stl
1009
+ robosuite/models/assets/robots/xarm7/meshes/left_outer_knuckle.stl
1010
+ robosuite/models/assets/robots/xarm7/meshes/link1.stl
1011
+ robosuite/models/assets/robots/xarm7/meshes/link2.stl
1012
+ robosuite/models/assets/robots/xarm7/meshes/link3.stl
1013
+ robosuite/models/assets/robots/xarm7/meshes/link4.stl
1014
+ robosuite/models/assets/robots/xarm7/meshes/link5.stl
1015
+ robosuite/models/assets/robots/xarm7/meshes/link6.stl
1016
+ robosuite/models/assets/robots/xarm7/meshes/link7.stl
1017
+ robosuite/models/assets/robots/xarm7/meshes/link_base.stl
1018
+ robosuite/models/assets/robots/xarm7/meshes/right_finger.stl
1019
+ robosuite/models/assets/robots/xarm7/meshes/right_inner_knuckle.stl
1020
+ robosuite/models/assets/robots/xarm7/meshes/right_outer_knuckle.stl
1021
+ robosuite/models/assets/textures/blue-wood.png
1022
+ robosuite/models/assets/textures/brass-ambra.png
1023
+ robosuite/models/assets/textures/bread.png
1024
+ robosuite/models/assets/textures/can.png
1025
+ robosuite/models/assets/textures/ceramic.png
1026
+ robosuite/models/assets/textures/cereal.png
1027
+ robosuite/models/assets/textures/clay.png
1028
+ robosuite/models/assets/textures/cream-plaster.png
1029
+ robosuite/models/assets/textures/dark-wood.png
1030
+ robosuite/models/assets/textures/dirt.png
1031
+ robosuite/models/assets/textures/glass.png
1032
+ robosuite/models/assets/textures/gray-felt.png
1033
+ robosuite/models/assets/textures/gray-plaster.png
1034
+ robosuite/models/assets/textures/gray-woodgrain.png
1035
+ robosuite/models/assets/textures/green-wood.png
1036
+ robosuite/models/assets/textures/lemon.png
1037
+ robosuite/models/assets/textures/light-gray-floor-tile.png
1038
+ robosuite/models/assets/textures/light-gray-plaster.png
1039
+ robosuite/models/assets/textures/light-wood.png
1040
+ robosuite/models/assets/textures/metal.png
1041
+ robosuite/models/assets/textures/pink-plaster.png
1042
+ robosuite/models/assets/textures/red-wood.png
1043
+ robosuite/models/assets/textures/soda.png
1044
+ robosuite/models/assets/textures/steel-brushed.png
1045
+ robosuite/models/assets/textures/steel-scratched.png
1046
+ robosuite/models/assets/textures/white-bricks.png
1047
+ robosuite/models/assets/textures/white-plaster.png
1048
+ robosuite/models/assets/textures/wood-tiles.png
1049
+ robosuite/models/assets/textures/wood-varnished-panels.png
1050
+ robosuite/models/assets/textures/yellow-plaster.png
1051
+ robosuite/models/bases/__init__.py
1052
+ robosuite/models/bases/floating_legged_base.py
1053
+ robosuite/models/bases/leg_base_model.py
1054
+ robosuite/models/bases/mobile_base_model.py
1055
+ robosuite/models/bases/mount_model.py
1056
+ robosuite/models/bases/no_actuation_base.py
1057
+ robosuite/models/bases/null_base.py
1058
+ robosuite/models/bases/null_base_model.py
1059
+ robosuite/models/bases/null_mobile_base.py
1060
+ robosuite/models/bases/null_mount.py
1061
+ robosuite/models/bases/omron_mobile_base.py
1062
+ robosuite/models/bases/rethink_minimal_mount.py
1063
+ robosuite/models/bases/rethink_mount.py
1064
+ robosuite/models/bases/robot_base_factory.py
1065
+ robosuite/models/bases/robot_base_model.py
1066
+ robosuite/models/bases/spot_base.py
1067
+ robosuite/models/grippers/__init__.py
1068
+ robosuite/models/grippers/bd_gripper.py
1069
+ robosuite/models/grippers/fourier_hands.py
1070
+ robosuite/models/grippers/gripper_factory.py
1071
+ robosuite/models/grippers/gripper_model.py
1072
+ robosuite/models/grippers/gripper_tester.py
1073
+ robosuite/models/grippers/inspire_hands.py
1074
+ robosuite/models/grippers/jaco_three_finger_gripper.py
1075
+ robosuite/models/grippers/null_gripper.py
1076
+ robosuite/models/grippers/panda_gripper.py
1077
+ robosuite/models/grippers/rethink_gripper.py
1078
+ robosuite/models/grippers/robotiq_140_gripper.py
1079
+ robosuite/models/grippers/robotiq_85_gripper.py
1080
+ robosuite/models/grippers/robotiq_three_finger_gripper.py
1081
+ robosuite/models/grippers/wiping_gripper.py
1082
+ robosuite/models/grippers/xarm7_gripper.py
1083
+ robosuite/models/objects/__init__.py
1084
+ robosuite/models/objects/generated_objects.py
1085
+ robosuite/models/objects/object_groups.py
1086
+ robosuite/models/objects/objects.py
1087
+ robosuite/models/objects/xml_objects.py
1088
+ robosuite/models/objects/composite/__init__.py
1089
+ robosuite/models/objects/composite/bin.py
1090
+ robosuite/models/objects/composite/cone.py
1091
+ robosuite/models/objects/composite/hammer.py
1092
+ robosuite/models/objects/composite/hollow_cylinder.py
1093
+ robosuite/models/objects/composite/hook_frame.py
1094
+ robosuite/models/objects/composite/lid.py
1095
+ robosuite/models/objects/composite/pot_with_handles.py
1096
+ robosuite/models/objects/composite/stand_with_mount.py
1097
+ robosuite/models/objects/composite_body/__init__.py
1098
+ robosuite/models/objects/composite_body/hinged_box.py
1099
+ robosuite/models/objects/composite_body/ratcheting_wrench.py
1100
+ robosuite/models/objects/group/__init__.py
1101
+ robosuite/models/objects/group/transport.py
1102
+ robosuite/models/objects/primitive/__init__.py
1103
+ robosuite/models/objects/primitive/ball.py
1104
+ robosuite/models/objects/primitive/box.py
1105
+ robosuite/models/objects/primitive/capsule.py
1106
+ robosuite/models/objects/primitive/cylinder.py
1107
+ robosuite/models/robots/__init__.py
1108
+ robosuite/models/robots/compositional.py
1109
+ robosuite/models/robots/robot_model.py
1110
+ robosuite/models/robots/manipulators/__init__.py
1111
+ robosuite/models/robots/manipulators/baxter_robot.py
1112
+ robosuite/models/robots/manipulators/gr1_robot.py
1113
+ robosuite/models/robots/manipulators/humanoid_model.py
1114
+ robosuite/models/robots/manipulators/humanoid_upperbody_model.py
1115
+ robosuite/models/robots/manipulators/iiwa_robot.py
1116
+ robosuite/models/robots/manipulators/jaco_robot.py
1117
+ robosuite/models/robots/manipulators/kinova3_robot.py
1118
+ robosuite/models/robots/manipulators/legged_manipulator_model.py
1119
+ robosuite/models/robots/manipulators/manipulator_model.py
1120
+ robosuite/models/robots/manipulators/panda_robot.py
1121
+ robosuite/models/robots/manipulators/sawyer_robot.py
1122
+ robosuite/models/robots/manipulators/spot_arm.py
1123
+ robosuite/models/robots/manipulators/tiago_robot.py
1124
+ robosuite/models/robots/manipulators/ur5e_robot.py
1125
+ robosuite/models/robots/manipulators/xarm7_robot.py
1126
+ robosuite/models/tasks/__init__.py
1127
+ robosuite/models/tasks/manipulation_task.py
1128
+ robosuite/models/tasks/task.py
1129
+ robosuite/renderers/__init__.py
1130
+ robosuite/renderers/base.py
1131
+ robosuite/renderers/base_parser.py
1132
+ robosuite/renderers/context/__init__.py
1133
+ robosuite/renderers/context/egl_context.py
1134
+ robosuite/renderers/context/glfw_context.py
1135
+ robosuite/renderers/context/osmesa_context.py
1136
+ robosuite/renderers/viewer/__init__.py
1137
+ robosuite/renderers/viewer/mjviewer_renderer.py
1138
+ robosuite/renderers/viewer/opencv_renderer.py
1139
+ robosuite/robots/__init__.py
1140
+ robosuite/robots/fixed_base_robot.py
1141
+ robosuite/robots/legged_robot.py
1142
+ robosuite/robots/mobile_robot.py
1143
+ robosuite/robots/robot.py
1144
+ robosuite/robots/wheeled_robot.py
1145
+ robosuite/scripts/browse_mjcf_model.py
1146
+ robosuite/scripts/check_custom_robot_model.py
1147
+ robosuite/scripts/collect_human_demonstrations.py
1148
+ robosuite/scripts/compile_mjcf_model.py
1149
+ robosuite/scripts/make_reset_video.py
1150
+ robosuite/scripts/playback_demonstrations_from_hdf5.py
1151
+ robosuite/scripts/print_robosuite_info.py
1152
+ robosuite/scripts/print_robot_action_info.py
1153
+ robosuite/scripts/render_dataset_with_omniverse.py
1154
+ robosuite/scripts/setup_macros.py
1155
+ robosuite/scripts/tune_camera.py
1156
+ robosuite/scripts/tune_joints.py
1157
+ robosuite/scripts/internal/view_robot_initialization.py
1158
+ robosuite/utils/__init__.py
1159
+ robosuite/utils/binding_utils.py
1160
+ robosuite/utils/buffers.py
1161
+ robosuite/utils/camera_utils.py
1162
+ robosuite/utils/control_utils.py
1163
+ robosuite/utils/errors.py
1164
+ robosuite/utils/ik_utils.py
1165
+ robosuite/utils/input_utils.py
1166
+ robosuite/utils/log_utils.py
1167
+ robosuite/utils/mjcf_utils.py
1168
+ robosuite/utils/mjmod.py
1169
+ robosuite/utils/numba.py
1170
+ robosuite/utils/observables.py
1171
+ robosuite/utils/placement_samplers.py
1172
+ robosuite/utils/robot_composition_utils.py
1173
+ robosuite/utils/robot_utils.py
1174
+ robosuite/utils/sim_utils.py
1175
+ robosuite/utils/traj_utils.py
1176
+ robosuite/utils/transform_utils.py
1177
+ robosuite/wrappers/__init__.py
1178
+ robosuite/wrappers/data_collection_wrapper.py
1179
+ robosuite/wrappers/demo_sampler_wrapper.py
1180
+ robosuite/wrappers/domain_randomization_wrapper.py
1181
+ robosuite/wrappers/gym_wrapper.py
1182
+ robosuite/wrappers/visualization_wrapper.py
1183
+ robosuite/wrappers/wrapper.py
robosuite.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
robosuite.egg-info/eager_resources.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ *
robosuite.egg-info/requires.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ numpy>=1.13.3
2
+ numba>=0.49.1
3
+ scipy>=1.2.3
4
+ mujoco>=3.3.0
5
+ qpsolvers[quadprog]>=4.3.1
6
+ Pillow
7
+ opencv-python
8
+ pynput
9
+ termcolor
10
+ pytest
11
+ tqdm
12
+
13
+ [mink]
14
+ mink==0.0.5
robosuite.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ robosuite
tactile_tasks/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from tactile_tasks.uskin_sensor import USkinSensor
2
+ from tactile_tasks.motion_planner import MotionPlanner
tactile_tasks/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (264 Bytes). View file
 
tactile_tasks/__pycache__/collect_data.cpython-310.pyc ADDED
Binary file (20.9 kB). View file
 
tactile_tasks/__pycache__/motion_planner.cpython-310.pyc ADDED
Binary file (28.6 kB). View file
 
tactile_tasks/__pycache__/sawyer_ik.cpython-310.pyc ADDED
Binary file (3.4 kB). View file
 
tactile_tasks/__pycache__/uskin_sensor.cpython-310.pyc ADDED
Binary file (5.95 kB). View file
 
tactile_tasks/__pycache__/visualize_data.cpython-310.pyc ADDED
Binary file (16.8 kB). View file
 
tactile_tasks/collect_data.py ADDED
@@ -0,0 +1,644 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Data collection for contact-rich manipulation with tactile sensing.
4
+ Fixed version: improved alignment loops, better z calculation, stable success check.
5
+ """
6
+
7
+ import os
8
+ import sys
9
+ import argparse
10
+ from datetime import datetime
11
+
12
+ import numpy as np
13
+ import h5py
14
+
15
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
16
+
17
+ import robosuite
18
+ from tactile_tasks.uskin_sensor import USkinSensor
19
+
20
+
21
+ # ---- OSC_POSE helpers (7D action: [Δx, Δy, Δz, Δrx, Δry, Δrz, gripper]) ----
22
+
23
def get_eef_pos(env):
    """Return a copy of the world-frame end-effector site position (3-vector)."""
    site_id = env.robots[0].eef_site_id["right"]
    return np.array(env.sim.data.site_xpos[site_id])
25
+
26
+
27
def get_eef_z_axis(env):
    """Return the end-effector frame's z-axis expressed in world coordinates."""
    sid = env.robots[0].eef_site_id["right"]
    rot = env.sim.data.site_xmat[sid].reshape(3, 3)
    # Third column of a rotation matrix is the local z-axis in world frame.
    return np.array(rot[:, 2])
31
+
32
+
33
def get_ori_correction(env, ori_gain=1.0):
    """
    Axis-angle delta that rotates the EEF z-axis toward world [0, 0, -1]
    (pointing straight down).

    Returns a 3-vector orientation delta, clipped to [-1, 1] per component,
    for the rotational part of an OSC_POSE action. With
    goal_update_mode='achieved' the delta is applied relative to the current
    orientation. Zero when the axes are already (anti)parallel.
    """
    current = get_eef_z_axis(env)
    desired = np.array([0.0, 0.0, -1.0])
    rot_axis = np.cross(current, desired)
    sin_a = np.linalg.norm(rot_axis)
    # Degenerate cross product: nothing to correct.
    if sin_a < 1e-6:
        return np.zeros(3)
    cos_a = np.dot(current, desired)
    angle = np.arctan2(sin_a, cos_a)
    return np.clip((rot_axis / sin_a) * angle * ori_gain, -1, 1)
49
+
50
+
51
def move_action(env, target_pos, gain=5.0, gripper=0.0, ori_gain=0.0):
    """Build a 7D OSC_POSE action: [dx, dy, dz, drx, dry, drz, gripper]."""
    pos_delta = np.clip((target_pos - get_eef_pos(env)) * gain, -1, 1)
    if ori_gain > 0:
        ori_delta = get_ori_correction(env, ori_gain)
    else:
        ori_delta = np.zeros(3)
    return np.concatenate([pos_delta, ori_delta, [gripper]])
57
+
58
+
59
def at_target(env, target_pos, threshold=0.01):
    """True when the EEF is within `threshold` meters of `target_pos`."""
    dist = np.linalg.norm(get_eef_pos(env) - target_pos)
    return dist < threshold
61
+
62
+
63
def grip_action(env, gripper=1.0, ori_gain=0.0):
    """Zero-translation 7D action that only actuates the gripper; with
    ori_gain > 0 it additionally levels the EEF toward vertical."""
    if ori_gain > 0:
        ori = get_ori_correction(env, ori_gain)
    else:
        ori = np.zeros(3)
    return np.concatenate([[0, 0, 0], ori, [gripper]])
66
+
67
+
68
def is_upright(env, body_id, tol=0.15):
    """Check whether a body's local z-axis is near vertical in world frame.

    Args:
        env: environment wrapping a MuJoCo sim.
        body_id: MuJoCo body id to test.
        tol: allowed deviation of |world z component| from 1.0
            (0.15 corresponds to roughly a 32-degree tilt).

    Returns:
        True when the body's z-axis points nearly straight up OR straight
        down — the abs() accepts both signs. NOTE(review): confirm that an
        inverted body should count as "upright".
    """
    import mujoco  # deferred import; only needed when this check is used
    quat = env.sim.data.body_xquat[body_id].copy()
    mat = np.zeros(9)
    # Convert the body quaternion to a rotation matrix (flat 9-vector).
    mujoco.mju_quat2Mat(mat, quat)
    z_axis = mat.reshape(3, 3)[:, 2]
    return abs(z_axis[2]) > (1.0 - tol)
75
+
76
+
77
+ # ---- Core alignment primitive ----
78
+ # Key fix: continuous feedback loop that drives OBJECT xy to TARGET xy
79
+ # instead of one-shot correction. Runs until converged or max_steps.
80
+
81
def align_object_to_xy(env, sensor, recorder, get_obj_xy_fn, target_xy,
                       gripper=1.0, xy_tol=0.003, max_steps=80, gain=8.0,
                       ori_gain=0.0):
    """
    Closed-loop xy alignment: repeatedly nudge the EEF so that the held
    object's xy position (from get_obj_xy_fn) converges onto target_xy,
    then hold the final pose for a few steps so the object settles.
    ori_gain > 0 additionally applies vertical orientation correction
    (OSC_POSE). All steps are logged through `recorder`.
    """
    for _ in range(max_steps):
        if recorder.done:
            break
        xy_err = target_xy - get_obj_xy_fn()
        eef_now = get_eef_pos(env)
        if np.linalg.norm(xy_err) < xy_tol:
            break
        # Per-step EEF correction is capped at 5 cm to keep motion smooth.
        setpoint = eef_now.copy()
        setpoint[:2] += np.clip(xy_err * gain, -0.05, 0.05)
        recorder.step(env, sensor,
                      move_action(env, setpoint, gain=5.0, gripper=gripper,
                                  ori_gain=ori_gain))

    # Brief station-keeping at the converged pose.
    hold_at = get_eef_pos(env).copy()
    recorder.run_for(env, sensor,
                     lambda: move_action(env, hold_at, gain=5.0, gripper=gripper,
                                         ori_gain=ori_gain),
                     steps=8)
106
+
107
+
108
def descend_with_alignment(env, sensor, recorder, get_obj_xy_fn, target_xy,
                           target_z, gripper=1.0, z_tol=0.005,
                           xy_gain=8.0, z_gain=5.0, max_steps=80,
                           ori_gain=0.0):
    """
    Lower the EEF toward target_z while continuously servoing the held
    object's xy (from get_obj_xy_fn) over target_xy. ori_gain > 0 also
    applies vertical orientation correction. Steps are logged via `recorder`.
    """
    for _ in range(max_steps):
        if recorder.done:
            break
        xy_err = target_xy - get_obj_xy_fn()
        eef_now = get_eef_pos(env)
        z_err = target_z - eef_now[2]

        # Cap xy correction at 5 cm and z correction at 15 cm per step.
        setpoint = eef_now.copy()
        setpoint[:2] += np.clip(xy_err * xy_gain, -0.05, 0.05)
        setpoint[2] += np.clip(z_err * z_gain, -0.15, 0.15)

        recorder.step(env, sensor,
                      move_action(env, setpoint, gain=5.0, gripper=gripper,
                                  ori_gain=ori_gain))

        # Converged when the pre-step errors were already small
        # (xy uses a fixed 5 mm band, independent of xy_tol-style args).
        if abs(z_err) < z_tol and np.linalg.norm(xy_err) < 0.005:
            break
133
+
134
+
135
+ # ---- Task policies ----
136
+
137
def run_precision_grasp(env, sensor, recorder):
    """Scripted pick policy: hover above the object, descend, close the
    gripper, and lift 12 cm. Every control step is logged via `recorder`.

    Assumes env exposes `obj_body_id` — TODO confirm against the
    PrecisionGrasp environment (defined outside this file).
    """
    obj_pos = env.sim.data.body_xpos[env.obj_body_id].copy()

    # Hover 8 cm above the object with the gripper open (gripper=-1).
    above = obj_pos.copy(); above[2] += 0.08
    recorder.run_until(env, sensor, lambda: move_action(env, above, gain=5.0, gripper=-1),
                       done_fn=lambda: at_target(env, above, 0.01), max_steps=80)

    # Descend to the object center, still open.
    recorder.run_until(env, sensor, lambda: move_action(env, obj_pos, gain=5.0, gripper=-1),
                       done_fn=lambda: at_target(env, obj_pos, 0.01), max_steps=60)

    # Close the gripper for 15 control steps.
    recorder.run_for(env, sensor, lambda: grip_action(env, 1.0), steps=15)

    # Lift straight up 12 cm while holding the object.
    lift_pos = get_eef_pos(env).copy(); lift_pos[2] += 0.12
    recorder.run_until(env, sensor, lambda: move_action(env, lift_pos, gain=5.0, gripper=1.0),
                       done_fn=lambda: at_target(env, lift_pos, 0.01), max_steps=60)
152
+
153
+
154
def grasp_object(env, sensor, recorder, obj_body_id, obj_geoms,
                 descend_gain=3.0, grasp_steps=30, retry_dz=-0.012):
    """
    General-purpose grasp primitive.
    Returns measured (eef_pos - obj_pos) offset after successful grasp,
    so callers can use it for accurate downstream z calculations.
    Tries multiple z heights if first attempt fails.

    Args:
        obj_body_id: MuJoCo body id of the object to grasp.
        obj_geoms: object (geoms) passed to env._check_grasp for the
            contact-based grasp check.
        descend_gain: position gain used while descending to the grasp pose.
        grasp_steps: control steps to hold the gripper closed per attempt.
        retry_dz: z offset in meters (negative = lower) added per retry.

    NOTE(review): the offset is measured and returned even when every
    attempt failed the grasp check — callers cannot distinguish success
    from failure from the return value alone.
    """
    obj_pos = env.sim.data.body_xpos[obj_body_id].copy()

    # Move above object
    above = obj_pos.copy(); above[2] += 0.12
    recorder.run_until(env, sensor, lambda: move_action(env, above, gain=5.0, gripper=-1),
                       done_fn=lambda: at_target(env, above, 0.01), max_steps=80)

    # Try grasping at progressively lower z positions
    # Start at obj center, try up to 3 heights spaced 12mm apart
    for dz in [0.0, retry_dz, retry_dz * 2]:
        grasp_target = obj_pos.copy(); grasp_target[2] += dz
        # Bind grasp_target as a default arg so each lambda sees its own value.
        recorder.run_until(env, sensor,
                           lambda t=grasp_target: move_action(env, t, gain=descend_gain, gripper=-1),
                           done_fn=lambda t=grasp_target: at_target(env, t, 0.006),
                           max_steps=80)
        recorder.run_for(env, sensor, lambda: grip_action(env, 1.0), steps=grasp_steps)
        if env._check_grasp(gripper=env.robots[0].gripper, object_geoms=obj_geoms):
            break
        # Re-open for next retry
        recorder.run_for(env, sensor, lambda: grip_action(env, -1.0), steps=10)

    # Measure actual offset EEF→obj immediately after grasp
    eef_after = get_eef_pos(env).copy()
    obj_after = env.sim.data.body_xpos[obj_body_id].copy()
    eef_to_obj_offset = eef_after - obj_after  # typically [~0, ~0, +z] (EEF above obj center)

    return eef_to_obj_offset
189
+
190
+
191
def run_peg_insertion(env, sensor, recorder):
    """Scripted peg-in-hole policy: grasp the peg, carry it over the hole,
    align and level it, descend, push in until env._check_success(), then
    release and retreat. Every control step is logged through `recorder`.

    Assumes env exposes peg_body_id, peg, hole_body_id, hole_height and
    peg_height; peg_height is treated as the full peg height in the Phase 5
    formula — TODO confirm against the PegInsertion environment.
    """
    # ── Phase 1: Grasp peg ──────────────────────────────────────────────────
    # NOTE(review): this initial offset is superseded in Phase 4 after the
    # orientation ramp, once the peg has settled in the grip.
    eef_to_peg = grasp_object(
        env, sensor, recorder,
        obj_body_id=env.peg_body_id,
        obj_geoms=env.peg,
        descend_gain=3.0,
        grasp_steps=30,
        retry_dz=-0.012,
    )

    # ── Phase 2: Lift high ──────────────────────────────────────────────────
    lift_pos = get_eef_pos(env).copy(); lift_pos[2] += 0.22
    recorder.run_until(env, sensor, lambda: move_action(env, lift_pos, gain=5.0, gripper=1.0),
                       done_fn=lambda: at_target(env, lift_pos, 0.01), max_steps=100)

    # ── Phase 3: Move to above hole ────────────────────────────────────────
    hole_pos = env.sim.data.body_xpos[env.hole_body_id].copy()
    hole_xy = hole_pos[:2].copy()

    above_hole = np.array([hole_xy[0], hole_xy[1], get_eef_pos(env)[2]])
    recorder.run_until(env, sensor,
                       lambda: move_action(env, above_hole, gain=4.0, gripper=1.0),
                       done_fn=lambda: at_target(env, above_hole, 0.01), max_steps=120)

    # ── Phase 4: Position align + gradual orientation correction ────────────
    # First: precise xy alignment (no ori)
    align_object_to_xy(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.peg_body_id][:2].copy(),
        target_xy=hole_xy,
        gripper=1.0, xy_tol=0.002, max_steps=80, gain=10.0, ori_gain=0.0,
    )

    # Gradual ori ramp with continuous position correction:
    # ori_gain ramps linearly 0 → 1.5 over the first 150 steps.
    for step in range(200):
        if recorder.done:
            break
        peg_xy = env.sim.data.body_xpos[env.peg_body_id][:2].copy()
        eef = get_eef_pos(env)
        target = eef.copy()
        target[:2] += (hole_xy - peg_xy) * 3.0
        og = min(1.5, step / 150.0 * 1.5)
        action = move_action(env, target, gain=10.0, gripper=1.0, ori_gain=og)
        recorder.step(env, sensor, action)

    # Re-measure offset with final orientation
    eef_settled = get_eef_pos(env).copy()
    peg_settled = env.sim.data.body_xpos[env.peg_body_id].copy()
    eef_to_peg = eef_settled - peg_settled

    # ── Phase 5: Descend to just above hole ─────────────────────────────────
    # Target EEF z such that the peg tip sits 5 mm above the hole top.
    hole_top_z = hole_pos[2] + env.hole_height * 0.5
    target_eef_z = hole_top_z + 0.005 + eef_to_peg[2] + env.peg_height

    descend_with_alignment(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.peg_body_id][:2].copy(),
        target_xy=hole_xy,
        target_z=target_eef_z,
        gripper=1.0, z_tol=0.005, xy_gain=10.0, z_gain=4.0,
        max_steps=120, ori_gain=0.5,
    )

    # ── Phase 6: Insertion — push down with xy + ori corrections ────────────
    for _ in range(150):
        if recorder.done:
            break
        peg_xy = env.sim.data.body_xpos[env.peg_body_id][:2].copy()
        xy_err = hole_xy - peg_xy
        ori = get_ori_correction(env, ori_gain=0.5)
        # Hand-built 7D action: strong xy servo, constant downward push.
        action = np.zeros(7)
        action[:2] = np.clip(xy_err * 15.0, -1, 1)
        action[2] = -0.15
        action[3:6] = ori
        action[6] = 1.0
        recorder.step(env, sensor, action)
        if env._check_success():
            break

    # ── Phase 7: Release and retreat ─────────────────────────────────────────
    recorder.run_for(env, sensor, lambda: grip_action(env, -1.0), steps=12)
    retreat = get_eef_pos(env).copy(); retreat[2] += 0.10
    recorder.run_until(env, sensor, lambda: move_action(env, retreat, gain=4.0, gripper=-1),
                       done_fn=lambda: at_target(env, retreat, 0.01), max_steps=50)
276
+
277
+
278
def run_gentle_stack(env, sensor, recorder):
    """Scripted stacking policy: grasp a box, carry it over a can, descend
    gently, and stop the final push on tactile contact before releasing.
    Every control step is logged through `recorder`.

    Assumes env exposes box_body_id, stack_box, can_body_id, and that the
    can is a CylinderObject with half-height 0.04 — TODO confirm against
    the GentleStack environment.
    """
    # ── Phase 1: Grasp box, measure real EEF→box offset ──────────────────────
    # box is flat (half-h ~0.032), grasp at center height
    eef_to_box = grasp_object(
        env, sensor, recorder,
        obj_body_id=env.box_body_id,
        obj_geoms=env.stack_box,
        descend_gain=3.0,
        grasp_steps=30,
        retry_dz=-0.010,
    )

    # ── Phase 2: Lift ─────────────────────────────────────────────────────────
    lift_pos = get_eef_pos(env).copy(); lift_pos[2] += 0.14
    recorder.run_until(env, sensor, lambda: move_action(env, lift_pos, gain=5.0, gripper=1.0),
                       done_fn=lambda: at_target(env, lift_pos, 0.01), max_steps=80)

    eef_hold = get_eef_pos(env).copy()
    recorder.run_for(env, sensor,
                     lambda: move_action(env, eef_hold, gain=8.0, gripper=1.0),
                     steps=12)

    # Re-measure offset after settling (box may shift in grip)
    eef_settled = get_eef_pos(env).copy()
    box_settled = env.sim.data.body_xpos[env.box_body_id].copy()
    eef_to_box = eef_settled - box_settled  # [~0, ~0, +z]: EEF is above box center

    # ── Phase 3: Align box xy over can ───────────────────────────────────────
    can_pos = env.sim.data.body_xpos[env.can_body_id].copy()
    can_xy = can_pos[:2].copy()

    align_object_to_xy(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.box_body_id][:2].copy(),
        target_xy=can_xy,
        gripper=1.0,
        xy_tol=0.005,
        max_steps=100,
        gain=8.0,
    )

    # ── Phase 4: Descend to just above can top ────────────────────────────────
    # Key formula:
    #   box_bottom_z = eef_z - eef_to_box[2] - box_half_h
    #   want box_bottom_z = can_top_z + small_clearance
    #   → target_eef_z = can_top_z + clearance + eef_to_box[2] + box_half_h
    can_top_z = can_pos[2] + 0.04  # CylinderObject size=[0.022, 0.04] → half_h=0.04
    # Read actual box half-height from sim geom (BoxObject is randomized, .size unreliable)
    box_geom_names = [n for n in env.sim.model.geom_names
                      if env.stack_box.root_body.lower() in n.lower()]
    if box_geom_names:
        gid = env.sim.model.geom_name2id(box_geom_names[0])
        box_half_h = env.sim.model.geom_size[gid][2]  # z half-extent
    else:
        box_half_h = 0.032  # fallback: midpoint of size_min/size_max z
    target_eef_z = can_top_z + 0.010 + eef_to_box[2] + box_half_h

    descend_with_alignment(
        env, sensor, recorder,
        get_obj_xy_fn=lambda: env.sim.data.body_xpos[env.box_body_id][:2].copy(),
        target_xy=can_xy,
        target_z=target_eef_z,
        gripper=1.0,
        z_tol=0.006,
        xy_gain=6.0,
        z_gain=3.0,
        max_steps=120,
    )

    # ── Phase 5: Gentle final push until contact ──────────────────────────────
    for _ in range(80):
        if recorder.done:
            break
        box_xy = env.sim.data.body_xpos[env.box_body_id][:2].copy()
        xy_err = can_xy - box_xy
        action = np.zeros(7)
        action[:2] = np.clip(xy_err * 8.0, -0.3, 0.3)
        action[2] = -0.004  # very slow descent
        action[6] = 1.0  # gripper closed
        recorder.step(env, sensor, action)

        # Stop on tactile contact
        mags = sensor.get_force_magnitudes()
        avg_force = (mags["left_finger"].mean() + mags["right_finger"].mean()) / 2.0
        if avg_force > 1.0:
            break

    # ── Phase 6: Hold, release, retreat ──────────────────────────────────────
    eef_hold = get_eef_pos(env).copy()
    recorder.run_for(env, sensor,
                     lambda: move_action(env, eef_hold, gain=8.0, gripper=1.0),
                     steps=20)

    recorder.run_for(env, sensor, lambda: grip_action(env, -1.0), steps=15)

    # Straight-up retreat, slow to avoid disturbing box
    retreat = eef_hold.copy(); retreat[2] += 0.10
    recorder.run_until(env, sensor,
                       lambda: move_action(env, retreat, gain=2.0, gripper=-1),
                       done_fn=lambda: at_target(env, retreat, 0.015), max_steps=60)

    # Wait for box to fully settle (success check happens here)
    recorder.run_for(env, sensor,
                     lambda: move_action(env, retreat, gain=2.0, gripper=-1),
                     steps=35)
383
+
384
+
385
+ # ---- Data recording ----
386
+
387
class EpisodeRecorder:
    """Accumulates per-step multimodal data (images, depth, tactile,
    proprioception, actions, rewards, success flags) for one episode.

    Tactile readings are sampled USkinSensor.FREQ_MULTIPLIER times per
    control step, so the tactile streams are longer than the others.
    """

    def __init__(self, env, sensor):
        self.env = env
        self.sensor = sensor
        # One list per stream; converted to ndarrays by finalize().
        self.data = {
            "agentview_image": [], "eye_in_hand_image": [],
            "agentview_depth": [], "eye_in_hand_depth": [],
            "tactile_left": [], "tactile_right": [],
            "joint_pos": [], "joint_vel": [],
            "eef_pos": [], "eef_quat": [],
            "gripper_qpos": [],
            "actions": [], "rewards": [], "success": [],
        }
        self.step_count = 0  # number of env.step() calls recorded
        self.done = False    # latched once env reports done

    def step(self, env, sensor, action):
        """Step the env with `action`, record all streams, and return the
        (obs, reward, done, info) tuple. No-op once the episode is done."""
        if self.done:
            return None, 0, True, {}
        obs, reward, done, info = env.step(action)
        self.done = done

        # Tactile runs at a multiple of the control frequency.
        for _ in range(USkinSensor.FREQ_MULTIPLIER):
            td = sensor.update()
            self.data["tactile_left"].append(td["left_finger"].copy())
            self.data["tactile_right"].append(td["right_finger"].copy())

        # Camera streams are only appended when present in the obs dict.
        if "agentview_image" in obs:
            self.data["agentview_image"].append(obs["agentview_image"])
        if "robot0_eye_in_hand_image" in obs:
            self.data["eye_in_hand_image"].append(obs["robot0_eye_in_hand_image"])
        if "agentview_depth" in obs:
            self.data["agentview_depth"].append(obs["agentview_depth"])
        if "robot0_eye_in_hand_depth" in obs:
            self.data["eye_in_hand_depth"].append(obs["robot0_eye_in_hand_depth"])

        # Proprioception read straight from the sim state.
        robot = env.robots[0]
        self.data["joint_pos"].append(np.array(env.sim.data.qpos[robot._ref_joint_pos_indexes]))
        self.data["joint_vel"].append(np.array(env.sim.data.qvel[robot._ref_joint_vel_indexes]))
        self.data["eef_pos"].append(get_eef_pos(env))

        # EEF orientation as a quaternion derived from the site rotation matrix.
        eef_mat = env.sim.data.site_xmat[robot.eef_site_id["right"]].reshape(3, 3)
        import mujoco
        quat = np.zeros(4)
        mujoco.mju_mat2Quat(quat, eef_mat.flatten())
        self.data["eef_quat"].append(quat)

        # Fall back to the commanded gripper value when no joint indices exist.
        gripper_idx = robot._ref_gripper_joint_pos_indexes.get("right", [])
        if gripper_idx:
            self.data["gripper_qpos"].append(np.array(env.sim.data.qpos[gripper_idx]))
        else:
            self.data["gripper_qpos"].append(np.array([action[-1]]))

        self.data["actions"].append(action)
        self.data["rewards"].append(reward)
        self.data["success"].append(env._check_success())
        self.step_count += 1
        return obs, reward, done, info

    def run_for(self, env, sensor, action_fn, steps):
        """Step with action_fn() for a fixed number of steps (or until done)."""
        for _ in range(steps):
            if self.done:
                break
            self.step(env, sensor, action_fn())

    def run_until(self, env, sensor, action_fn, done_fn, max_steps):
        """Step with action_fn() until done_fn() is true, the episode ends,
        or max_steps is reached. done_fn is evaluated after each step."""
        for _ in range(max_steps):
            if self.done:
                break
            self.step(env, sensor, action_fn())
            if done_fn():
                break

    def finalize(self):
        """Convert all streams to ndarrays, add "task_success" (True if any
        step succeeded) and "n_steps" summary fields, and return the dict."""
        for key in self.data:
            if len(self.data[key]) > 0:
                self.data[key] = np.array(self.data[key])
            else:
                self.data[key] = np.array([])
        self.data["task_success"] = bool(any(self.data["success"])) if len(self.data["success"]) > 0 else False
        self.data["n_steps"] = self.step_count
        return self.data
469
+
470
+
471
+ # ---- Task configs ----
472
+
473
# Registry of scripted-demo tasks: maps task name → env class name (resolved
# inside create_env), scripted policy function, episode horizon, and the
# controller type recorded in episode metadata.
TASK_CONFIGS = {
    "precision_grasp": {
        "env_class": "PrecisionGrasp",
        "run_fn": run_precision_grasp,
        "horizon": 300,
        "controller": "OSC_POSE",
    },
    "peg_insertion": {
        "env_class": "PegInsertion",
        "run_fn": run_peg_insertion,
        "horizon": 600,
        "controller": "OSC_POSE",
    },
    "gentle_stack": {
        "env_class": "GentleStack",
        "run_fn": run_gentle_stack,
        "horizon": 400,
        "controller": "OSC_POSE",
    },
}
493
+
494
+
495
def create_env(task_name, has_renderer=False, camera_names=None):
    """Construct the robosuite environment for a task in TASK_CONFIGS.

    Args:
        task_name: key into TASK_CONFIGS.
        has_renderer: enable the on-screen viewer.
        camera_names: cameras for offscreen observation; defaults to
            ["agentview", "robot0_eye_in_hand"].

    Returns:
        A Sawyer + Robotiq85 environment with OSC_POSE control at 20 Hz,
        256x256 RGB-D camera observations, and shaped rewards.
    """
    # Local imports so this module can be imported without the task envs.
    from tactile_tasks.envs.precision_grasp import PrecisionGrasp
    from tactile_tasks.envs.peg_insertion import PegInsertion
    from tactile_tasks.envs.gentle_stack import GentleStack

    env_classes = {
        "PrecisionGrasp": PrecisionGrasp,
        "PegInsertion": PegInsertion,
        "GentleStack": GentleStack,
    }

    config = TASK_CONFIGS[task_name]
    EnvClass = env_classes[config["env_class"]]

    if camera_names is None:
        camera_names = ["agentview", "robot0_eye_in_hand"]

    # Composite controller: delta-pose OSC for the right arm plus a simple
    # GRIP gripper controller (robosuite "BASIC" composite format).
    controller_configs = {
        "type": "BASIC",
        "body_parts": {
            "right": {
                "type": "OSC_POSE",
                "input_max": 1,
                "input_min": -1,
                "output_max": [0.15, 0.15, 0.15, 0.5, 0.5, 0.5],
                "output_min": [-0.15, -0.15, -0.15, -0.5, -0.5, -0.5],
                "kp": 200,
                "damping_ratio": 1,
                "impedance_mode": "fixed",
                "kp_limits": [0, 400],
                "damping_ratio_limits": [0, 10],
                "position_limits": None,
                "uncouple_pos_ori": True,
                "input_type": "delta",
                "input_ref_frame": "base",
                "interpolation": None,
                "ramp_ratio": 0.2,
                "gripper": {"type": "GRIP"},
            }
        },
    }

    env = EnvClass(
        robots="Sawyer",
        gripper_types="Robotiq85Gripper",
        controller_configs=controller_configs,
        has_renderer=has_renderer,
        has_offscreen_renderer=True,
        use_camera_obs=True,
        use_object_obs=True,
        control_freq=20,
        horizon=config["horizon"],
        camera_names=camera_names,
        camera_heights=256,
        camera_widths=256,
        camera_depths=True,
        reward_shaping=True,
        renderer="mjviewer",
    )
    return env
555
+
556
+
557
def save_episode_hdf5(filepath, episode_data, task_name):
    """Save a single episode to its own HDF5 file.

    Args:
        filepath: destination ".hdf5" path; parent directories are created
            as needed.
        episode_data: dict from EpisodeRecorder.finalize() — ndarray streams
            plus scalar summary fields ("task_success", "n_steps").
        task_name: key into TASK_CONFIGS; stored in the metadata group.

    Image/depth streams are gzip-compressed; other arrays are stored raw;
    scalar entries become file attributes.
    """
    parent = os.path.dirname(filepath)
    # Fix: os.path.dirname returns "" for a bare filename, and
    # os.makedirs("") raises FileNotFoundError — only create when non-empty.
    if parent:
        os.makedirs(parent, exist_ok=True)
    with h5py.File(filepath, "w") as f:
        meta = f.create_group("metadata")
        meta.attrs["task"] = task_name
        meta.attrs["robot"] = "Sawyer"
        meta.attrs["gripper"] = "Robotiq85"
        meta.attrs["tactile_sensor"] = "uSkin_4x4"
        meta.attrs["controller"] = TASK_CONFIGS.get(task_name, {}).get("controller", "OSC_POSE")
        meta.attrs["control_freq"] = 20
        meta.attrs["tactile_freq"] = 100
        meta.attrs["camera_freq"] = 20
        meta.attrs["created"] = datetime.now().isoformat()

        f.attrs["success"] = bool(episode_data["task_success"])
        f.attrs["n_steps"] = int(episode_data["n_steps"])

        for key, value in episode_data.items():
            if isinstance(value, np.ndarray) and value.size > 0:
                # Large visual streams compress well; other arrays stay raw.
                if "image" in key or "depth" in key:
                    f.create_dataset(key, data=value, compression="gzip", compression_opts=4)
                else:
                    f.create_dataset(key, data=value)
            elif isinstance(value, (bool, int, float)):
                f.attrs[key] = value
583
+
584
+
585
def collect_task_data(task_name, n_episodes=1, save_dir="./tactile_data", visualize=False,
                      max_attempts=500):
    """Collect n_episodes SUCCESSFUL episodes. Failed episodes are discarded.

    Args:
        task_name: key into TASK_CONFIGS.
        n_episodes: number of successful episodes to save.
        save_dir: root output directory; episodes land in save_dir/task_name.
        visualize: open the on-screen renderer.
        max_attempts: hard cap on reset-and-run attempts.

    Returns:
        The per-task output directory path.
    """
    print(f"\n{'='*60}")
    print(f"Task: {task_name} | Target: {n_episodes} successful episodes")
    print(f"{'='*60}\n")

    task_dir = os.path.join(save_dir, task_name)
    os.makedirs(task_dir, exist_ok=True)
    config = TASK_CONFIGS[task_name]
    env = create_env(task_name, has_renderer=visualize)
    successes = 0
    attempts = 0

    while successes < n_episodes and attempts < max_attempts:
        attempts += 1
        print(f"Attempt {attempts} | Saved: {successes}/{n_episodes}")
        obs = env.reset()
        # Let physics settle with zero actions before recording starts.
        for _ in range(80):
            obs, _, _, _ = env.step(np.zeros(env.action_dim))

        # Fresh sensor + recorder per attempt so streams don't leak across episodes.
        sensor = USkinSensor(env.sim, gripper_prefix="gripper0_right_", noise_std=0.005)
        recorder = EpisodeRecorder(env, sensor)
        config["run_fn"](env, sensor, recorder)
        episode_data = recorder.finalize()

        success = episode_data["task_success"]
        print(f" Steps: {episode_data['n_steps']}, "
              f"Success: {success}, "
              f"Reward: {episode_data['rewards'].sum():.1f}")

        if success:
            # Episodes are numbered by success count, so files are contiguous.
            filepath = os.path.join(task_dir, f"episode_{successes:02d}.hdf5")
            save_episode_hdf5(filepath, episode_data, task_name)
            successes += 1
            print(f" -> Saved as episode_{successes-1:02d}.hdf5")
        else:
            print(f" -> Discarded (failed)")

    print(f"\nDone: {successes}/{n_episodes} successful episodes in {attempts} attempts")
    env.close()
    return task_dir
627
+
628
+
629
def main():
    """CLI entry point: collect episodes for one task, or for all tasks."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, default="precision_grasp",
                        choices=list(TASK_CONFIGS.keys()) + ["all"])
    parser.add_argument("--n_episodes", type=int, default=1)
    parser.add_argument("--save_dir", type=str, default="./tactile_data")
    parser.add_argument("--visualize", action="store_true")
    args = parser.parse_args()

    if args.task == "all":
        tasks = list(TASK_CONFIGS.keys())
    else:
        tasks = [args.task]
    for task in tasks:
        collect_task_data(task, args.n_episodes, args.save_dir, args.visualize)
641
+
642
+
643
# Script entry point: run data collection from the command line.
if __name__ == "__main__":
    main()
tactile_tasks/convert_for_act.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Convert tactile_data HDF5 episodes to ACT training format.
4
+
5
+ Input: tactile_data/{task}/episode_XX.hdf5 (our format)
6
+ Output: policy/ACT/processed_data/sim-{task}/{config}-{num}/episode_{i}.hdf5
7
+
8
+ ACT expects per-episode HDF5 with:
9
+ /action [T, action_dim]
10
+ /observations/qpos [T, state_dim]
11
+ /observations/images/{cam_name} [T, H, W, 3]
12
+ """
13
+
14
+ import os
15
+ import sys
16
+ import json
17
+ import argparse
18
+ import numpy as np
19
+ import h5py
20
+
21
+
22
def convert_episode(src_path, dst_path, camera_names):
    """Translate one recorded episode into the HDF5 layout ACT expects.

    Args:
        src_path: path to an episode HDF5 in our recording format.
        dst_path: output path for the ACT-format episode (parent dirs created).
        camera_names: ACT camera keys to copy into /observations/images.
    """
    # Recorder image dataset name for each ACT camera key.
    image_key_by_camera = {
        "agentview": "agentview_image",
        "eye_in_hand": "eye_in_hand_image",
    }

    with h5py.File(src_path, "r") as src:
        n_steps = src["actions"].shape[0]

        # 8-D proprioceptive state: 7 arm joints + one gripper scalar.
        arm_qpos = src["joint_pos"][:]        # [T, 7]
        grip_qpos = src["gripper_qpos"][:]    # [T, 6]
        # Only the first gripper joint (finger_joint) is kept, rescaled
        # from its [0, 0.8] range into [0, 1].
        grip_scalar = grip_qpos[:, 0:1] / 0.8
        qpos = np.concatenate([arm_qpos, grip_scalar], axis=1).astype(np.float32)

        # ACT uses one dimension for both state and action, so the 7-D
        # OSC_POSE action gets a zero column appended to reach 8-D.
        raw_actions = src["actions"][:].astype(np.float32)
        pad = np.zeros((n_steps, 1), dtype=np.float32)
        actions = np.concatenate([raw_actions, pad], axis=1)

        os.makedirs(os.path.dirname(dst_path), exist_ok=True)
        with h5py.File(dst_path, "w") as dst:
            dst.create_dataset("/action", data=actions)
            obs_group = dst.create_group("/observations")
            obs_group.create_dataset("qpos", data=qpos)
            images = obs_group.create_group("images")
            for cam in camera_names:
                key = image_key_by_camera.get(cam, cam + "_image")
                if key not in src:
                    print(f"  Warning: camera '{key}' not found in {src_path}")
                else:
                    images.create_dataset(cam, data=src[key][:])
56
+
57
+
58
def convert_task(task_name, data_dir, output_dir, config_name="default",
                 camera_names=None):
    """Convert every recorded episode of a task into ACT format.

    Returns:
        dict describing the produced dataset (dataset_dir, num_episodes,
        episode_len = max length over episodes, camera_names), suitable
        for registration in SIM_TASK_CONFIGS.json.
    """
    if camera_names is None:
        camera_names = ["agentview", "eye_in_hand"]

    source_dir = os.path.join(data_dir, task_name)
    episode_files = sorted(f for f in os.listdir(source_dir) if f.endswith(".hdf5"))
    n_eps = len(episode_files)

    # Output layout mirrors ACT's expectation: sim-{task}/{config}-{count}/
    dest_dir = os.path.join(output_dir, f"sim-{task_name}", f"{config_name}-{n_eps}")
    os.makedirs(dest_dir, exist_ok=True)

    longest = 0
    for idx, fname in enumerate(episode_files):
        src = os.path.join(source_dir, fname)
        dst = os.path.join(dest_dir, f"episode_{idx}.hdf5")
        convert_episode(src, dst, camera_names)

        # Re-open the converted file to measure this episode's length.
        with h5py.File(dst, "r") as f:
            steps = f["/action"].shape[0]
        longest = max(longest, steps)

        print(f"  [{idx+1}/{n_eps}] {fname} → episode_{idx}.hdf5 ({steps} steps)")

    print(f"\nConverted {n_eps} episodes to {dest_dir}")
    print(f"Max episode length: {longest}")

    return {
        "dataset_dir": dest_dir,
        "num_episodes": n_eps,
        "episode_len": longest,
        "camera_names": camera_names,
    }
92
+
93
+
94
def update_task_configs(config_path, task_name, task_info, config_name="default"):
    """Add or overwrite a task entry in SIM_TASK_CONFIGS.json.

    Args:
        config_path: path to the JSON config file (created if absent).
        task_name: task identifier used to build the entry key.
        task_info: dict with dataset_dir / num_episodes / episode_len /
            camera_names, as returned by ``convert_task``.
        config_name: configuration label embedded in the entry key.
    """
    # Load the existing config, tolerating a missing or empty file.
    configs = {}
    if os.path.exists(config_path):
        with open(config_path, "r") as fh:
            raw = fh.read().strip()
        if raw:
            configs = json.loads(raw)

    entry_key = f"sim-{task_name}-{config_name}-{task_info['num_episodes']}"
    # Training runs from the ACT directory, so store the dataset path
    # relative to the directory containing the config file.
    base_dir = os.path.dirname(config_path)
    relative = os.path.relpath(task_info["dataset_dir"], base_dir)
    configs[entry_key] = {
        "dataset_dir": "./" + relative,
        "num_episodes": task_info["num_episodes"],
        "episode_len": task_info["episode_len"],
        "camera_names": task_info["camera_names"],
    }

    with open(config_path, "w") as fh:
        json.dump(configs, fh, indent=4)
    print(f"Updated {config_path} with task '{entry_key}'")
117
+
118
+
119
def main():
    """CLI entry point: convert recorded tasks and register them for ACT training."""
    parser = argparse.ArgumentParser(description="Convert tactile data to ACT format")
    parser.add_argument("--data_dir", default="./tactile_data",
                        help="Source data directory")
    parser.add_argument("--output_dir", default="./policy/ACT/processed_data",
                        help="Output directory for ACT data")
    parser.add_argument("--task", default="all",
                        help="Task name or 'all'")
    parser.add_argument("--config_name", default="default")
    args = parser.parse_args()

    tasks = ["precision_grasp", "peg_insertion", "gentle_stack"] if args.task == "all" else [args.task]

    # SIM_TASK_CONFIGS.json lives next to processed_data/ (i.e. in policy/ACT).
    config_path = os.path.join(os.path.dirname(args.output_dir), "SIM_TASK_CONFIGS.json")

    for task in tasks:
        task_dir = os.path.join(args.data_dir, task)
        if not os.path.exists(task_dir):
            print(f"Skipping {task}: no data at {task_dir}")
            continue

        print(f"\n{'='*50}")
        print(f"Converting {task}")
        print(f"{'='*50}")

        info = convert_task(task, args.data_dir, args.output_dir, args.config_name)
        update_task_configs(config_path, task, info, args.config_name)

    # Usage hint: {task}/{num} below are literal placeholders for the user to
    # fill in, so plain strings are used instead of f-strings (the originals
    # were f-strings with no interpolation, only {{...}} escapes — ruff F541).
    print("\nDone! To train ACT:")
    print("  cd policy/ACT")
    print("  python imitate_episodes.py \\")
    print("    --ckpt_dir ./checkpoints/{task}_act \\")
    print("    --policy_class ACT \\")
    print("    --task_name sim-{task}-default-{num} \\")
    print("    --batch_size 8 --num_epochs 2000 --lr 1e-4 \\")
    print("    --kl_weight 10 --chunk_size 16 \\")
    print("    --hidden_dim 256 --dim_feedforward 2048 \\")
    print("    --state_dim 8 --save_freq 100")
157
+
158
+
159
+ if __name__ == "__main__":
160
+ main()
tactile_tasks/convert_for_dp.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Convert tactile_data HDF5 episodes to Diffusion Policy (DP) Zarr format.
4
+
5
+ Input: tactile_data/{task}/episode_XX.hdf5 (our format)
6
+ Output: policy/DP/data/{task}-{config}-{num}.zarr
7
+
8
+ DP expects Zarr archive with:
9
+ data/head_camera [N, 3, H, W] uint8 NCHW
10
+ data/state [N, state_dim] float32
11
+ data/action [N, action_dim] float32
12
+ meta/episode_ends [num_episodes] int64
13
+ """
14
+
15
+ import os
16
+ import sys
17
+ import argparse
18
+ import numpy as np
19
+ import h5py
20
+
21
# zarr is a third-party dependency of this script only; fail fast at import
# time with an actionable message instead of a bare ImportError mid-conversion.
try:
    import zarr
except ImportError:
    print("Error: zarr not installed. Run: pip install zarr")
    sys.exit(1)
26
+
27
+
28
def convert_task(task_name, data_dir, output_dir, config_name="default"):
    """Merge every episode of a task into one Zarr archive for DP training.

    Layout written:
        data/head_camera   [N, 3, H, W] uint8 (NCHW)
        data/state         [N, 8]  float32
        data/action        [N, 7]  float32
        meta/episode_ends  [num_episodes] int64 (cumulative step counts)

    Returns:
        Path of the created ``.zarr`` archive.
    """
    source_dir = os.path.join(data_dir, task_name)
    episode_files = sorted(f for f in os.listdir(source_dir) if f.endswith(".hdf5"))
    n_eps = len(episode_files)

    images_parts = []
    state_parts = []
    action_parts = []
    episode_ends = []
    cumulative = 0

    for idx, fname in enumerate(episode_files):
        with h5py.File(os.path.join(source_dir, fname), "r") as f:
            n_steps = f["actions"].shape[0]

            # The agentview camera serves as DP's "head_camera"; frames are
            # HWC uint8 here and converted to NCHW after concatenation.
            frames = f["agentview_image"][:]  # [T, H, W, 3]

            # 8-D state: 7 arm joints + finger_joint rescaled [0, 0.8] → [0, 1].
            arm = f["joint_pos"][:]                   # [T, 7]
            grip = f["gripper_qpos"][:][:, 0:1] / 0.8  # [T, 1]
            state = np.concatenate([arm, grip], axis=1).astype(np.float32)

            # 7-D OSC_POSE action, kept step-aligned with the state
            # (no state[:-1] → action[1:] shift applied).
            acts = f["actions"][:].astype(np.float32)

            images_parts.append(frames)
            state_parts.append(state)
            action_parts.append(acts)

            cumulative += n_steps
            episode_ends.append(cumulative)

            print(f"  [{idx+1}/{n_eps}] {fname}: {n_steps} steps")

    # Flatten episodes into single contiguous arrays.
    stacked_images = np.concatenate(images_parts, axis=0)    # [N, H, W, 3]
    stacked_states = np.concatenate(state_parts, axis=0)     # [N, 8]
    stacked_actions = np.concatenate(action_parts, axis=0)   # [N, 7]
    ends = np.array(episode_ends, dtype=np.int64)

    # NHWC → NCHW for DP's channel-first image pipeline.
    stacked_images = np.moveaxis(stacked_images, -1, 1)      # [N, 3, H, W]

    print(f"\nTotal: {cumulative} steps from {n_eps} episodes")
    print(f"Images: {stacked_images.shape}, States: {stacked_states.shape}, Actions: {stacked_actions.shape}")

    # Write the Zarr archive, clearing any stale output from a previous run.
    save_path = os.path.join(output_dir, f"{task_name}-{config_name}-{n_eps}.zarr")
    if os.path.exists(save_path):
        import shutil
        shutil.rmtree(save_path)

    os.makedirs(output_dir, exist_ok=True)
    root = zarr.group(save_path)
    data_grp = root.create_group("data")
    meta_grp = root.create_group("meta")

    codec = zarr.Blosc(cname="zstd", clevel=3, shuffle=1)

    data_grp.create_dataset("head_camera", data=stacked_images,
                            chunks=(100, *stacked_images.shape[1:]),
                            overwrite=True, compressor=codec)
    data_grp.create_dataset("state", data=stacked_states,
                            chunks=(100, stacked_states.shape[1]),
                            dtype="float32", overwrite=True, compressor=codec)
    data_grp.create_dataset("action", data=stacked_actions,
                            chunks=(100, stacked_actions.shape[1]),
                            dtype="float32", overwrite=True, compressor=codec)
    meta_grp.create_dataset("episode_ends", data=ends,
                            dtype="int64", overwrite=True, compressor=codec)

    print(f"Saved to {save_path}")
    return save_path
107
+
108
+
109
def main():
    """CLI entry point: convert recorded tasks into DP Zarr archives."""
    parser = argparse.ArgumentParser(description="Convert tactile data to DP Zarr format")
    parser.add_argument("--data_dir", default="./tactile_data",
                        help="Source data directory")
    parser.add_argument("--output_dir", default="./policy/DP/data",
                        help="Output directory for Zarr files")
    parser.add_argument("--task", default="all",
                        help="Task name or 'all'")
    parser.add_argument("--config_name", default="default")
    args = parser.parse_args()

    tasks = ["precision_grasp", "peg_insertion", "gentle_stack"] if args.task == "all" else [args.task]

    for task in tasks:
        task_dir = os.path.join(args.data_dir, task)
        if not os.path.exists(task_dir):
            print(f"Skipping {task}: no data at {task_dir}")
            continue

        print(f"\n{'='*50}")
        print(f"Converting {task}")
        print(f"{'='*50}")

        # The returned archive path was bound to an unused local before;
        # convert_task already prints it, so the value is dropped here.
        convert_task(task, args.data_dir, args.output_dir, args.config_name)

    # Usage hint: {task}/{num} are literal placeholders for the user, so
    # plain strings replace the original placeholder-free f-strings (F541).
    print("\nDone! To train DP:")
    print("  cd policy/DP")
    print("  python train.py --config-name=robot_dp_tactile.yaml \\")
    print("    task.name={task} \\")
    print("    task.dataset.zarr_path=data/{task}-default-{num}.zarr \\")
    print("    training.seed=0 training.device=cuda:0")
140
+
141
+
142
+ if __name__ == "__main__":
143
+ main()
tactile_tasks/envs/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from tactile_tasks.envs.precision_grasp import PrecisionGrasp
2
+ from tactile_tasks.envs.peg_insertion import PegInsertion
3
+ from tactile_tasks.envs.gentle_stack import GentleStack