Fxxkrobotics committed
Commit 3be04b1 · verified · 1 Parent(s): 1ac176e

Add files using upload-large-folder tool

Files changed (45)
  1. .gitattributes +20 -0
  2. policy/EVA/SIM_TASK_CONFIGS.json +11 -0
  3. policy/EVA/__init__.py +2 -0
  4. policy/EVA/constants.py +15 -0
  5. policy/EVA/detr/__init__.py +0 -0
  6. policy/EVA/detr/main.py +103 -0
  7. policy/EVA/detr/models/__init__.py +11 -0
  8. policy/EVA/detr/models/backbone.py +128 -0
  9. policy/EVA/detr/models/detr_vae.py +253 -0
  10. policy/EVA/detr/models/position_encoding.py +98 -0
  11. policy/EVA/detr/models/transformer.py +347 -0
  12. policy/EVA/detr/setup.py +10 -0
  13. policy/EVA/detr/util/__init__.py +1 -0
  14. policy/EVA/detr/util/box_ops.py +86 -0
  15. policy/EVA/detr/util/misc.py +481 -0
  16. policy/EVA/detr/util/plot_utils.py +110 -0
  17. policy/EVA/eva_policy.py +127 -0
  18. policy/EVA/imitate_episodes.py +240 -0
  19. policy/EVA/process_data.py +148 -0
  20. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_10.hdf5 +3 -0
  21. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_13.hdf5 +3 -0
  22. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_18.hdf5 +3 -0
  23. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_20.hdf5 +3 -0
  24. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_33.hdf5 +3 -0
  25. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_35.hdf5 +3 -0
  26. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_36.hdf5 +3 -0
  27. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_37.hdf5 +3 -0
  28. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_39.hdf5 +3 -0
  29. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_41.hdf5 +3 -0
  30. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_48.hdf5 +3 -0
  31. policy/EVA/processed_data/sim-peg_insertion/default-50/episode_5.hdf5 +3 -0
  32. policy/EVA/tactile_event.py +178 -0
  33. policy/EVA/train_all.sh +22 -0
  34. policy/EVA/train_config.yaml +25 -0
  35. policy/EVA/utils.py +185 -0
  36. robosuite/models/assets/grippers/robotiq_gripper_85.xml +2 -2
  37. tactile_data/videos/peg_insertion/episode_00.hdf5 +3 -0
  38. tactile_data/videos/peg_insertion/episode_01.hdf5 +3 -0
  39. tactile_data/videos/peg_insertion/episode_02.hdf5 +3 -0
  40. tactile_data/videos/peg_insertion/episode_03.hdf5 +3 -0
  41. tactile_data/videos/peg_insertion/episode_04.hdf5 +3 -0
  42. tactile_data/videos/peg_insertion/episode_05.hdf5 +3 -0
  43. tactile_data/videos/peg_insertion/episode_06.hdf5 +3 -0
  44. tactile_data/videos/peg_insertion/episode_07.hdf5 +3 -0
  45. tactile_data/videos/peg_insertion/peg_insertion_ep00.mp4 +2 -2
.gitattributes CHANGED
@@ -2022,3 +2022,23 @@ objects/012_plant-pot/collision/base4.glb filter=lfs diff=lfs merge=lfs -text
  objects/012_plant-pot/collision/base3.glb filter=lfs diff=lfs merge=lfs -text
  objects/074_displaystand/collision/base0.glb filter=lfs diff=lfs merge=lfs -text
  objects/074_displaystand/collision/base2.glb filter=lfs diff=lfs merge=lfs -text
+ tactile_data/videos/peg_insertion/episode_00.hdf5 filter=lfs diff=lfs merge=lfs -text
+ tactile_data/videos/peg_insertion/episode_02.hdf5 filter=lfs diff=lfs merge=lfs -text
+ tactile_data/videos/peg_insertion/episode_07.hdf5 filter=lfs diff=lfs merge=lfs -text
+ tactile_data/videos/peg_insertion/episode_06.hdf5 filter=lfs diff=lfs merge=lfs -text
+ tactile_data/videos/peg_insertion/episode_01.hdf5 filter=lfs diff=lfs merge=lfs -text
+ tactile_data/videos/peg_insertion/episode_05.hdf5 filter=lfs diff=lfs merge=lfs -text
+ tactile_data/videos/peg_insertion/episode_03.hdf5 filter=lfs diff=lfs merge=lfs -text
+ tactile_data/videos/peg_insertion/episode_04.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_36.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_39.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_33.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_10.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_48.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_13.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_5.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_20.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_18.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_35.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_37.hdf5 filter=lfs diff=lfs merge=lfs -text
+ policy/EVA/processed_data/sim-peg_insertion/default-50/episode_41.hdf5 filter=lfs diff=lfs merge=lfs -text
policy/EVA/SIM_TASK_CONFIGS.json ADDED
@@ -0,0 +1,11 @@
+ {
+     "sim-peg_insertion-default-50": {
+         "dataset_dir": "./processed_data/sim-peg_insertion/default-50",
+         "num_episodes": 50,
+         "episode_len": 495,
+         "camera_names": [
+             "agentview",
+             "eye_in_hand"
+         ]
+     }
+ }
policy/EVA/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .eva_policy import EVAPolicy
+ from .tactile_event import TactileEventDetector, label_tactile_events, tactile_event_loss
policy/EVA/constants.py ADDED
@@ -0,0 +1,15 @@
+ import os
+ import json
+
+ current_dir = os.path.dirname(__file__)
+
+ ### Task parameters
+ SIM_TASK_CONFIGS_PATH = os.path.join(current_dir, "SIM_TASK_CONFIGS.json")
+ try:
+     with open(SIM_TASK_CONFIGS_PATH, "r") as f:
+         SIM_TASK_CONFIGS = json.load(f)
+ except (FileNotFoundError, json.JSONDecodeError):
+     SIM_TASK_CONFIGS = {}
+
+ ### Simulation fixed constants
+ DT = 0.05  # 20Hz control
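
A minimal usage sketch (not part of this commit, assuming policy/EVA is on sys.path so constants imports directly) showing how the config loaded above is typically consumed; the task key comes from SIM_TASK_CONFIGS.json:

    from constants import SIM_TASK_CONFIGS, DT

    # Look up the peg-insertion task added in this commit.
    cfg = SIM_TASK_CONFIGS["sim-peg_insertion-default-50"]
    print(cfg["dataset_dir"])   # ./processed_data/sim-peg_insertion/default-50
    print(cfg["episode_len"])   # 495 steps per episode
    print(1.0 / DT)             # 20.0, the control frequency in Hz implied by DT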
policy/EVA/detr/__init__.py ADDED
File without changes
policy/EVA/detr/main.py ADDED
@@ -0,0 +1,103 @@
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+ import argparse
+ from pathlib import Path
+
+ import numpy as np
+ import torch
+ from .models import build_EVA_model, build_CNNMLP_model
+
+ import IPython
+
+ e = IPython.embed
+
+
+ def get_args_parser():
+     parser = argparse.ArgumentParser("Set transformer detector", add_help=False)
+     parser.add_argument("--lr", default=1e-4, type=float)
+     parser.add_argument("--lr_backbone", default=1e-5, type=float)
+     parser.add_argument("--batch_size", default=2, type=int)
+     parser.add_argument("--weight_decay", default=1e-4, type=float)
+     parser.add_argument("--epochs", default=300, type=int)
+     parser.add_argument("--lr_drop", default=200, type=int)
+     parser.add_argument("--clip_max_norm", default=0.1, type=float, help="gradient clipping max norm")
+
+     # Model parameters
+     parser.add_argument("--backbone", default="resnet18", type=str, help="Name of the convolutional backbone to use")
+     parser.add_argument("--dilation", action="store_true",
+                         help="If true, we replace stride with dilation in the last convolutional block (DC5)")
+     parser.add_argument("--position_embedding", default="sine", type=str, choices=("sine", "learned"),
+                         help="Type of positional embedding to use on top of the image features")
+     parser.add_argument("--camera_names", default=[], type=list, help="A list of camera names")
+
+     # Transformer
+     parser.add_argument("--enc_layers", default=4, type=int, help="Number of encoding layers in the transformer")
+     parser.add_argument("--dec_layers", default=6, type=int, help="Number of decoding layers in the transformer")
+     parser.add_argument("--dim_feedforward", default=2048, type=int,
+                         help="Intermediate size of the feedforward layers in the transformer blocks")
+     parser.add_argument("--hidden_dim", default=256, type=int, help="Size of the embeddings (dimension of the transformer)")
+     parser.add_argument("--dropout", default=0.1, type=float, help="Dropout applied in the transformer")
+     parser.add_argument("--nheads", default=8, type=int, help="Number of attention heads inside the transformer's attentions")
+     parser.add_argument("--pre_norm", action="store_true")
+
+     # Segmentation
+     parser.add_argument("--masks", action="store_true", help="Train segmentation head if the flag is provided")
+
+     # repeat args in imitate_episodes just to avoid error
+     parser.add_argument("--eval", action="store_true")
+     parser.add_argument("--onscreen_render", action="store_true")
+     parser.add_argument("--ckpt_dir", type=str, default=None)
+     parser.add_argument("--policy_class", type=str, default=None)
+     parser.add_argument("--task_name", type=str, default=None)
+     parser.add_argument("--seed", type=int, default=None)
+     parser.add_argument("--num_epochs", type=int, default=None)
+     parser.add_argument("--kl_weight", type=int, default=None)
+     parser.add_argument("--chunk_size", type=int, default=None)
+     parser.add_argument("--temporal_agg", action="store_true")
+     parser.add_argument("--state_dim", type=int, default=None)
+     parser.add_argument("--save_freq", type=int, default=6000)
+
+     return parser
+
+
+ def build_EVA_model_and_optimizer(args_override, RoboTwin_Config=None):
+     if RoboTwin_Config is None:
+         parser = argparse.ArgumentParser("DETR training and evaluation script", parents=[get_args_parser()])
+         args, _ = parser.parse_known_args()
+         for k, v in args_override.items():
+             setattr(args, k, v)
+     else:
+         args = RoboTwin_Config
+
+     print("build_EVA_model_and_optimizer", args)
+
+     model = build_EVA_model(args)
+     model.cuda()
+
+     param_dicts = [
+         {"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]},
+         {"params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad],
+          "lr": args.lr_backbone},
+     ]
+     optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)
+
+     return model, optimizer
+
+
+ def build_CNNMLP_model_and_optimizer(args_override):
+     parser = argparse.ArgumentParser("DETR training and evaluation script", parents=[get_args_parser()])
+     args = parser.parse_args()
+
+     for k, v in args_override.items():
+         setattr(args, k, v)
+
+     model = build_CNNMLP_model(args)
+     model.cuda()
+
+     param_dicts = [
+         {"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]},
+         {"params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad],
+          "lr": args.lr_backbone},
+     ]
+     optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)
+
+     return model, optimizer
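
A hedged usage sketch for build_EVA_model_and_optimizer (not part of the commit; assumes policy/EVA is on sys.path so detr imports as a package, and a CUDA device, since the builder calls model.cuda()). With RoboTwin_Config left as None the parser defaults cover most hyperparameters, so args_override only needs the keys without usable defaults:

    from detr.main import build_EVA_model_and_optimizer

    args_override = {
        "chunk_size": 30,                             # becomes num_queries in DETRVAE
        "camera_names": ["agentview", "eye_in_hand"],
        "state_dim": 14,
    }
    model, optimizer = build_EVA_model_and_optimizer(args_override)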
policy/EVA/detr/models/__init__.py ADDED
@@ -0,0 +1,11 @@
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+ from .detr_vae import build as build_vae
+ from .detr_vae import build_cnnmlp as build_cnnmlp
+
+
+ def build_EVA_model(args):
+     return build_vae(args)
+
+
+ def build_CNNMLP_model(args):
+     return build_cnnmlp(args)
policy/EVA/detr/models/backbone.py ADDED
@@ -0,0 +1,128 @@
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+ """
+ Backbone modules.
+ """
+ from collections import OrderedDict
+ import os
+ import torch
+ import torch.nn.functional as F
+ import torchvision
+ from torch import nn
+ from torchvision.models._utils import IntermediateLayerGetter
+ from typing import Dict, List
+ import sys
+
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+ project_root = os.path.abspath(os.path.join(current_dir, '..'))
+ sys.path.append(project_root)
+
+ from util.misc import NestedTensor, is_main_process
+
+ from .position_encoding import build_position_encoding
+
+ import IPython
+
+ e = IPython.embed
+
+
+ class FrozenBatchNorm2d(torch.nn.Module):
+     """
+     BatchNorm2d where the batch statistics and the affine parameters are fixed.
+
+     Copy-paste from torchvision.misc.ops with added eps before rsqrt,
+     without which any other policy_models than torchvision.policy_models.resnet[18,34,50,101]
+     produce nans.
+     """
+
+     def __init__(self, n):
+         super(FrozenBatchNorm2d, self).__init__()
+         self.register_buffer("weight", torch.ones(n))
+         self.register_buffer("bias", torch.zeros(n))
+         self.register_buffer("running_mean", torch.zeros(n))
+         self.register_buffer("running_var", torch.ones(n))
+
+     def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
+                               error_msgs):
+         num_batches_tracked_key = prefix + 'num_batches_tracked'
+         if num_batches_tracked_key in state_dict:
+             del state_dict[num_batches_tracked_key]
+
+         super(FrozenBatchNorm2d, self)._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys,
+                                                              unexpected_keys, error_msgs)
+
+     def forward(self, x):
+         # move reshapes to the beginning
+         # to make it fuser-friendly
+         w = self.weight.reshape(1, -1, 1, 1)
+         b = self.bias.reshape(1, -1, 1, 1)
+         rv = self.running_var.reshape(1, -1, 1, 1)
+         rm = self.running_mean.reshape(1, -1, 1, 1)
+         eps = 1e-5
+         scale = w * (rv + eps).rsqrt()
+         bias = b - rm * scale
+         return x * scale + bias
+
+
+ class BackboneBase(nn.Module):
+
+     def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
+         super().__init__()
+         # for name, parameter in backbone.named_parameters(): # only train later layers # TODO do we want this?
+         #     if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
+         #         parameter.requires_grad_(False)
+         if return_interm_layers:
+             return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
+         else:
+             return_layers = {'layer4': "0"}
+         self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
+         self.num_channels = num_channels
+
+     def forward(self, tensor):
+         xs = self.body(tensor)
+         return xs
+         # out: Dict[str, NestedTensor] = {}
+         # for name, x in xs.items():
+         #     m = tensor_list.mask
+         #     assert m is not None
+         #     mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
+         #     out[name] = NestedTensor(x, mask)
+         # return out
+
+
+ class Backbone(BackboneBase):
+     """ResNet backbone with frozen BatchNorm."""
+
+     def __init__(self, name: str, train_backbone: bool, return_interm_layers: bool, dilation: bool):
+         backbone = getattr(torchvision.models,
+                            name)(replace_stride_with_dilation=[False, False, dilation],
+                                  pretrained=is_main_process(),
+                                  norm_layer=FrozenBatchNorm2d)  # pretrained # TODO do we want frozen batch_norm??
+         num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
+         super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
+
+
+ class Joiner(nn.Sequential):
+
+     def __init__(self, backbone, position_embedding):
+         super().__init__(backbone, position_embedding)
+
+     def forward(self, tensor_list: NestedTensor):
+         xs = self[0](tensor_list)
+         out: List[NestedTensor] = []
+         pos = []
+         for name, x in xs.items():
+             out.append(x)
+             # position encoding
+             pos.append(self[1](x).to(x.dtype))
+
+         return out, pos
+
+
+ def build_backbone(args):
+     position_embedding = build_position_encoding(args)
+     train_backbone = args.lr_backbone > 0
+     return_interm_layers = args.masks
+     backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
+     model = Joiner(backbone, position_embedding)
+     model.num_channels = backbone.num_channels
+     return model
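
A quick shape check for build_backbone (a sketch, not part of the commit; assumes detr imports as a package and that torchvision can fetch ImageNet weights, since Backbone passes pretrained=is_main_process()):

    from types import SimpleNamespace
    import torch
    from detr.models.backbone import build_backbone

    args = SimpleNamespace(hidden_dim=256, position_embedding="sine",
                           lr_backbone=1e-5, masks=False,
                           backbone="resnet18", dilation=False)
    bb = build_backbone(args)
    feats, pos = bb(torch.randn(2, 3, 480, 640))   # hypothetical camera resolution
    print(feats[0].shape)  # torch.Size([2, 512, 15, 20]), layer4 of ResNet-18
    print(pos[0].shape)    # torch.Size([1, 256, 15, 20]), batch dim 1, repeated downstream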
policy/EVA/detr/models/detr_vae.py ADDED
@@ -0,0 +1,253 @@
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+ # Modified for EVA: adds event_token injection into transformer encoder input.
+ """
+ DETR-VAE model for EVA (Event-driven Visual-tactile ACT).
+
+ Only change vs original ACT:
+ - DETRVAE.forward() accepts optional event_token [B, hidden_dim]
+ - event_token is prepended to the decoder transformer input alongside latent & proprio
+ - additional_pos_embed grows from 2 → 3 when event_token is used
+ """
+ import torch
+ from torch import nn
+ from torch.autograd import Variable
+ from .backbone import build_backbone
+ from .transformer import build_transformer, TransformerEncoder, TransformerEncoderLayer
+
+ import numpy as np
+
+ import IPython
+
+ e = IPython.embed
+
+
+ def reparametrize(mu, logvar):
+     std = logvar.div(2).exp()
+     eps = Variable(std.data.new(std.size()).normal_())
+     return mu + std * eps
+
+
+ def get_sinusoid_encoding_table(n_position, d_hid):
+
+     def get_position_angle_vec(position):
+         return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
+
+     sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
+     sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
+     sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
+
+     return torch.FloatTensor(sinusoid_table).unsqueeze(0)
+
+
+ class DETRVAE(nn.Module):
+     """DETR-VAE with optional tactile event token injection."""
+
+     def __init__(self, backbones, transformer, encoder, state_dim, num_queries, camera_names):
+         super().__init__()
+         self.num_queries = num_queries
+         self.camera_names = camera_names
+         self.transformer = transformer
+         self.encoder = encoder
+         hidden_dim = transformer.d_model
+         self.action_head = nn.Linear(hidden_dim, state_dim)
+         self.is_pad_head = nn.Linear(hidden_dim, 1)
+         self.query_embed = nn.Embedding(num_queries, hidden_dim)
+         if backbones is not None:
+             self.input_proj = nn.Conv2d(backbones[0].num_channels, hidden_dim, kernel_size=1)
+             self.backbones = nn.ModuleList(backbones)
+             self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim)
+         else:
+             self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim)
+             self.input_proj_env_state = nn.Linear(7, hidden_dim)
+             self.pos = torch.nn.Embedding(2, hidden_dim)
+             self.backbones = None
+
+         # encoder extra parameters (CVAE)
+         self.latent_dim = 32
+         self.cls_embed = nn.Embedding(1, hidden_dim)
+         self.encoder_action_proj = nn.Linear(state_dim, hidden_dim)
+         self.encoder_joint_proj = nn.Linear(state_dim, hidden_dim)
+         self.latent_proj = nn.Linear(hidden_dim, self.latent_dim * 2)
+         self.register_buffer('pos_table', get_sinusoid_encoding_table(1 + 1 + num_queries, hidden_dim))
+
+         # decoder extra parameters
+         self.latent_out_proj = nn.Linear(self.latent_dim, hidden_dim)
+         # 3 slots: [latent, proprio, event_token]
+         self.additional_pos_embed = nn.Embedding(3, hidden_dim)
+
+     def forward(self, qpos, image, env_state, actions=None, is_pad=None, event_token=None):
+         """
+         Args:
+             qpos: [B, state_dim]
+             image: [B, num_cam, C, H, W]
+             env_state: None
+             actions: [B, seq, action_dim] or None
+             is_pad: [B, seq] or None
+             event_token: [B, hidden_dim] or None — from TactileEventDetector
+         """
+         is_training = actions is not None
+         bs, _ = qpos.shape
+
+         ### Obtain latent z from action sequence (CVAE encoder — unchanged)
+         if is_training:
+             action_embed = self.encoder_action_proj(actions)
+             qpos_embed = self.encoder_joint_proj(qpos)
+             qpos_embed = torch.unsqueeze(qpos_embed, axis=1)
+             cls_embed = self.cls_embed.weight
+             cls_embed = torch.unsqueeze(cls_embed, axis=0).repeat(bs, 1, 1)
+             encoder_input = torch.cat([cls_embed, qpos_embed, action_embed], axis=1)
+             encoder_input = encoder_input.permute(1, 0, 2)
+             cls_joint_is_pad = torch.full((bs, 2), False).to(qpos.device)
+             is_pad = torch.cat([cls_joint_is_pad, is_pad], axis=1)
+             pos_embed = self.pos_table.clone().detach()
+             pos_embed = pos_embed.permute(1, 0, 2)
+             encoder_output = self.encoder(encoder_input, pos=pos_embed, src_key_padding_mask=is_pad)
+             encoder_output = encoder_output[0]
+             latent_info = self.latent_proj(encoder_output)
+             mu = latent_info[:, :self.latent_dim]
+             logvar = latent_info[:, self.latent_dim:]
+             latent_sample = reparametrize(mu, logvar)
+             latent_input = self.latent_out_proj(latent_sample)
+         else:
+             mu = logvar = None
+             latent_sample = torch.zeros([bs, self.latent_dim], dtype=torch.float32).to(qpos.device)
+             latent_input = self.latent_out_proj(latent_sample)
+
+         ### Decoder transformer — inject event_token here
+         if self.backbones is not None:
+             all_cam_features = []
+             all_cam_pos = []
+             for cam_id, cam_name in enumerate(self.camera_names):
+                 features, pos = self.backbones[0](image[:, cam_id])
+                 features = features[0]
+                 pos = pos[0]
+                 all_cam_features.append(self.input_proj(features))
+                 all_cam_pos.append(pos)
+             proprio_input = self.input_proj_robot_state(qpos)
+             src = torch.cat(all_cam_features, axis=3)
+             pos = torch.cat(all_cam_pos, axis=3)
+             hs = self.transformer(
+                 src, None, self.query_embed.weight, pos,
+                 latent_input, proprio_input,
+                 self.additional_pos_embed.weight,
+                 event_input=event_token,
+             )[0]
+         else:
+             qpos = self.input_proj_robot_state(qpos)
+             env_state = self.input_proj_env_state(env_state)
+             transformer_input = torch.cat([qpos, env_state], axis=1)
+             hs = self.transformer(transformer_input, None, self.query_embed.weight, self.pos.weight)[0]
+
+         a_hat = self.action_head(hs)
+         is_pad_hat = self.is_pad_head(hs)
+         return a_hat, is_pad_hat, [mu, logvar]
+
+
+ class CNNMLP(nn.Module):
+
+     def __init__(self, backbones, state_dim, camera_names):
+         super().__init__()
+         self.camera_names = camera_names
+         self.action_head = nn.Linear(1000, state_dim)
+         if backbones is not None:
+             self.backbones = nn.ModuleList(backbones)
+             backbone_down_projs = []
+             for backbone in backbones:
+                 down_proj = nn.Sequential(nn.Conv2d(backbone.num_channels, 128, kernel_size=5),
+                                           nn.Conv2d(128, 64, kernel_size=5), nn.Conv2d(64, 32, kernel_size=5))
+                 backbone_down_projs.append(down_proj)
+             self.backbone_down_projs = nn.ModuleList(backbone_down_projs)
+             mlp_in_dim = 768 * len(backbones) + 14
+             self.mlp = mlp(input_dim=mlp_in_dim, hidden_dim=1024, output_dim=state_dim, hidden_depth=2)
+         else:
+             raise NotImplementedError
+
+     def forward(self, qpos, image, env_state, actions=None):
+         is_training = actions is not None
+         bs, _ = qpos.shape
+         all_cam_features = []
+         for cam_id, cam_name in enumerate(self.camera_names):
+             features, pos = self.backbones[cam_id](image[:, cam_id])
+             features = features[0]
+             pos = pos[0]
+             all_cam_features.append(self.backbone_down_projs[cam_id](features))
+         flattened_features = []
+         for cam_feature in all_cam_features:
+             flattened_features.append(cam_feature.reshape([bs, -1]))
+         flattened_features = torch.cat(flattened_features, axis=1)
+         features = torch.cat([flattened_features, qpos], axis=1)
+         a_hat = self.mlp(features)
+         return a_hat
+
+
+ def mlp(input_dim, hidden_dim, output_dim, hidden_depth):
+     if hidden_depth == 0:
+         mods = [nn.Linear(input_dim, output_dim)]
+     else:
+         mods = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)]
+         for i in range(hidden_depth - 1):
+             mods += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)]
+         mods.append(nn.Linear(hidden_dim, output_dim))
+     trunk = nn.Sequential(*mods)
+     return trunk
+
+
+ def build_encoder(args):
+     d_model = args.hidden_dim
+     dropout = args.dropout
+     nhead = args.nheads
+     dim_feedforward = args.dim_feedforward
+     num_encoder_layers = args.enc_layers
+     normalize_before = args.pre_norm
+     activation = "relu"
+
+     encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
+     encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
+     encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
+
+     return encoder
+
+
+ def build(args):
+     state_dim = getattr(args, 'state_dim', 14)
+
+     backbones = []
+     backbone = build_backbone(args)
+     backbones.append(backbone)
+
+     transformer = build_transformer(args)
+     encoder = build_encoder(args)
+
+     model = DETRVAE(
+         backbones,
+         transformer,
+         encoder,
+         state_dim=state_dim,
+         num_queries=args.chunk_size,
+         camera_names=args.camera_names,
+     )
+
+     n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
+     print("number of parameters: %.2fM" % (n_parameters / 1e6,))
+
+     return model
+
+
+ def build_cnnmlp(args):
+     state_dim = getattr(args, 'state_dim', 16)
+
+     backbones = []
+     for _ in args.camera_names:
+         backbone = build_backbone(args)
+         backbones.append(backbone)
+
+     model = CNNMLP(
+         backbones,
+         state_dim=state_dim,
+         camera_names=args.camera_names,
+     )
+
+     n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
+     print("number of parameters: %.2fM" % (n_parameters / 1e6,))
+
+     return model
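
A sketch of the event-token forward path (not part of the commit; all sizes hypothetical, and it assumes detr imports as a package with downloadable backbone weights). It builds the model via build() with config values mirroring the defaults in detr/main.py, then runs a training-mode forward with a tactile event token:

    from types import SimpleNamespace
    import torch
    from detr.models.detr_vae import build

    args = SimpleNamespace(hidden_dim=256, dropout=0.1, nheads=8, dim_feedforward=2048,
                           enc_layers=4, dec_layers=6, pre_norm=False, masks=False,
                           position_embedding="sine", lr_backbone=1e-5,
                           backbone="resnet18", dilation=False,
                           chunk_size=30, camera_names=["agentview", "eye_in_hand"],
                           state_dim=14)
    model = build(args)

    B = 2
    qpos = torch.randn(B, 14)
    image = torch.randn(B, 2, 3, 480, 640)               # two cameras
    actions = torch.randn(B, 30, 14)
    is_pad = torch.zeros(B, 30, dtype=torch.bool)
    event_token = torch.randn(B, 256)                    # from TactileEventDetector

    a_hat, is_pad_hat, (mu, logvar) = model(qpos, image, None, actions, is_pad,
                                            event_token=event_token)
    print(a_hat.shape)   # torch.Size([2, 30, 14]); mu/logvar: [2, 32] in training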
policy/EVA/detr/models/position_encoding.py ADDED
@@ -0,0 +1,98 @@
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+ """
+ Various positional encodings for the transformer.
+ """
+ import math
+ import torch
+ from torch import nn
+
+ from util.misc import NestedTensor
+
+ import IPython
+
+ e = IPython.embed
+
+
+ class PositionEmbeddingSine(nn.Module):
+     """
+     This is a more standard version of the position embedding, very similar to the one
+     used by the Attention is all you need paper, generalized to work on images.
+     """
+
+     def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
+         super().__init__()
+         self.num_pos_feats = num_pos_feats
+         self.temperature = temperature
+         self.normalize = normalize
+         if scale is not None and normalize is False:
+             raise ValueError("normalize should be True if scale is passed")
+         if scale is None:
+             scale = 2 * math.pi
+         self.scale = scale
+
+     def forward(self, tensor):
+         x = tensor
+         # mask = tensor_list.mask
+         # assert mask is not None
+         # not_mask = ~mask
+
+         not_mask = torch.ones_like(x[0, [0]])
+         y_embed = not_mask.cumsum(1, dtype=torch.float32)
+         x_embed = not_mask.cumsum(2, dtype=torch.float32)
+         if self.normalize:
+             eps = 1e-6
+             y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
+             x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
+
+         dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
+         dim_t = self.temperature**(2 * (dim_t // 2) / self.num_pos_feats)
+
+         pos_x = x_embed[:, :, :, None] / dim_t
+         pos_y = y_embed[:, :, :, None] / dim_t
+         pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
+         pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
+         pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+         return pos
+
+
+ class PositionEmbeddingLearned(nn.Module):
+     """
+     Absolute pos embedding, learned.
+     """
+
+     def __init__(self, num_pos_feats=256):
+         super().__init__()
+         self.row_embed = nn.Embedding(50, num_pos_feats)
+         self.col_embed = nn.Embedding(50, num_pos_feats)
+         self.reset_parameters()
+
+     def reset_parameters(self):
+         nn.init.uniform_(self.row_embed.weight)
+         nn.init.uniform_(self.col_embed.weight)
+
+     def forward(self, tensor_list: NestedTensor):
+         x = tensor_list.tensors
+         h, w = x.shape[-2:]
+         i = torch.arange(w, device=x.device)
+         j = torch.arange(h, device=x.device)
+         x_emb = self.col_embed(i)
+         y_emb = self.row_embed(j)
+         pos = torch.cat([
+             x_emb.unsqueeze(0).repeat(h, 1, 1),
+             y_emb.unsqueeze(1).repeat(1, w, 1),
+         ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
+         return pos
+
+
+ def build_position_encoding(args):
+     # print(args.keys())
+     N_steps = args.hidden_dim // 2
+     if args.position_embedding in ('v2', 'sine'):
+         # TODO find a better way of exposing other arguments
+         position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
+     elif args.position_embedding in ('v3', 'learned'):
+         position_embedding = PositionEmbeddingLearned(N_steps)
+     else:
+         raise ValueError(f"not supported {args.position_embedding}")
+
+     return position_embedding
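
A small sketch (not part of the commit) of what PositionEmbeddingSine produces. Note the batch dimension collapses to 1 because not_mask is built from x[0, [0]]; the Transformer repeats it per batch element later:

    import torch
    from detr.models.position_encoding import PositionEmbeddingSine

    pe = PositionEmbeddingSine(num_pos_feats=128, normalize=True)  # hidden_dim // 2
    feat = torch.randn(2, 512, 15, 20)   # e.g. a ResNet-18 layer4 feature map
    pos = pe(feat)
    print(pos.shape)                     # torch.Size([1, 256, 15, 20])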
policy/EVA/detr/models/transformer.py ADDED
@@ -0,0 +1,347 @@
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+ """
+ DETR Transformer class.
+
+ Copy-paste from torch.nn.Transformer with modifications:
+     * positional encodings are passed in MHattention
+     * extra LN at the end of encoder is removed
+     * decoder returns a stack of activations from all decoding layers
+ """
+ import copy
+ from typing import Optional, List
+
+ import torch
+ import torch.nn.functional as F
+ from torch import nn, Tensor
+
+ import IPython
+
+ e = IPython.embed
+
+
+ class Transformer(nn.Module):
+
+     def __init__(self,
+                  d_model=512,
+                  nhead=8,
+                  num_encoder_layers=6,
+                  num_decoder_layers=6,
+                  dim_feedforward=2048,
+                  dropout=0.1,
+                  activation="relu",
+                  normalize_before=False,
+                  return_intermediate_dec=False):
+         super().__init__()
+
+         encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
+         encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
+         self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
+
+         decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
+         decoder_norm = nn.LayerNorm(d_model)
+         self.decoder = TransformerDecoder(decoder_layer,
+                                           num_decoder_layers,
+                                           decoder_norm,
+                                           return_intermediate=return_intermediate_dec)
+
+         self._reset_parameters()
+
+         self.d_model = d_model
+         self.nhead = nhead
+
+     def _reset_parameters(self):
+         for p in self.parameters():
+             if p.dim() > 1:
+                 nn.init.xavier_uniform_(p)
+
+     def forward(self,
+                 src,
+                 mask,
+                 query_embed,
+                 pos_embed,
+                 latent_input=None,
+                 proprio_input=None,
+                 additional_pos_embed=None,
+                 event_input=None):
+         # TODO flatten only when input has H and W
+         if len(src.shape) == 4:  # has H and W
+             # flatten NxCxHxW to HWxNxC
+             bs, c, h, w = src.shape
+             src = src.flatten(2).permute(2, 0, 1)
+             pos_embed = pos_embed.flatten(2).permute(2, 0, 1).repeat(1, bs, 1)
+             query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
+             # mask = mask.flatten(1)
+
+             # Build prefix tokens: [latent, proprio] or [latent, proprio, event]
+             prefix_tokens = [latent_input, proprio_input]
+             if event_input is not None:
+                 prefix_tokens.append(event_input)
+             n_prefix = len(prefix_tokens)
+
+             # Slice pos embed to match prefix count
+             add_pos = additional_pos_embed[:n_prefix]
+             add_pos = add_pos.unsqueeze(1).repeat(1, bs, 1)  # [n_prefix, bs, dim]
+             pos_embed = torch.cat([add_pos, pos_embed], axis=0)
+
+             addition_input = torch.stack(prefix_tokens, axis=0)
+             src = torch.cat([addition_input, src], axis=0)
+         else:
+             assert len(src.shape) == 3
+             # flatten NxHWxC to HWxNxC
+             bs, hw, c = src.shape
+             src = src.permute(1, 0, 2)
+             pos_embed = pos_embed.unsqueeze(1).repeat(1, bs, 1)
+             query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
+
+         tgt = torch.zeros_like(query_embed)
+         memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
+         hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed)
+         hs = hs.transpose(1, 2)
+         return hs
+
+
+ class TransformerEncoder(nn.Module):
+
+     def __init__(self, encoder_layer, num_layers, norm=None):
+         super().__init__()
+         self.layers = _get_clones(encoder_layer, num_layers)
+         self.num_layers = num_layers
+         self.norm = norm
+
+     def forward(self,
+                 src,
+                 mask: Optional[Tensor] = None,
+                 src_key_padding_mask: Optional[Tensor] = None,
+                 pos: Optional[Tensor] = None):
+         output = src
+
+         for layer in self.layers:
+             output = layer(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
+
+         if self.norm is not None:
+             output = self.norm(output)
+
+         return output
+
+
+ class TransformerDecoder(nn.Module):
+
+     def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
+         super().__init__()
+         self.layers = _get_clones(decoder_layer, num_layers)
+         self.num_layers = num_layers
+         self.norm = norm
+         self.return_intermediate = return_intermediate
+
+     def forward(self,
+                 tgt,
+                 memory,
+                 tgt_mask: Optional[Tensor] = None,
+                 memory_mask: Optional[Tensor] = None,
+                 tgt_key_padding_mask: Optional[Tensor] = None,
+                 memory_key_padding_mask: Optional[Tensor] = None,
+                 pos: Optional[Tensor] = None,
+                 query_pos: Optional[Tensor] = None):
+         output = tgt
+
+         intermediate = []
+
+         for layer in self.layers:
+             output = layer(output,
+                            memory,
+                            tgt_mask=tgt_mask,
+                            memory_mask=memory_mask,
+                            tgt_key_padding_mask=tgt_key_padding_mask,
+                            memory_key_padding_mask=memory_key_padding_mask,
+                            pos=pos,
+                            query_pos=query_pos)
+             if self.return_intermediate:
+                 intermediate.append(self.norm(output))
+
+         if self.norm is not None:
+             output = self.norm(output)
+             if self.return_intermediate:
+                 intermediate.pop()
+                 intermediate.append(output)
+
+         if self.return_intermediate:
+             return torch.stack(intermediate)
+
+         return output.unsqueeze(0)
+
+
+ class TransformerEncoderLayer(nn.Module):
+
+     def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False):
+         super().__init__()
+         self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+         # Implementation of Feedforward model
+         self.linear1 = nn.Linear(d_model, dim_feedforward)
+         self.dropout = nn.Dropout(dropout)
+         self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+         self.norm1 = nn.LayerNorm(d_model)
+         self.norm2 = nn.LayerNorm(d_model)
+         self.dropout1 = nn.Dropout(dropout)
+         self.dropout2 = nn.Dropout(dropout)
+
+         self.activation = _get_activation_fn(activation)
+         self.normalize_before = normalize_before
+
+     def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+         return tensor if pos is None else tensor + pos
+
+     def forward_post(self,
+                      src,
+                      src_mask: Optional[Tensor] = None,
+                      src_key_padding_mask: Optional[Tensor] = None,
+                      pos: Optional[Tensor] = None):
+         q = k = self.with_pos_embed(src, pos)
+         src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
+         src = src + self.dropout1(src2)
+         src = self.norm1(src)
+         src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
+         src = src + self.dropout2(src2)
+         src = self.norm2(src)
+         return src
+
+     def forward_pre(self,
+                     src,
+                     src_mask: Optional[Tensor] = None,
+                     src_key_padding_mask: Optional[Tensor] = None,
+                     pos: Optional[Tensor] = None):
+         src2 = self.norm1(src)
+         q = k = self.with_pos_embed(src2, pos)
+         src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
+         src = src + self.dropout1(src2)
+         src2 = self.norm2(src)
+         src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
+         src = src + self.dropout2(src2)
+         return src
+
+     def forward(self,
+                 src,
+                 src_mask: Optional[Tensor] = None,
+                 src_key_padding_mask: Optional[Tensor] = None,
+                 pos: Optional[Tensor] = None):
+         if self.normalize_before:
+             return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
+         return self.forward_post(src, src_mask, src_key_padding_mask, pos)
+
+
+ class TransformerDecoderLayer(nn.Module):
+
+     def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False):
+         super().__init__()
+         self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+         self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+         # Implementation of Feedforward model
+         self.linear1 = nn.Linear(d_model, dim_feedforward)
+         self.dropout = nn.Dropout(dropout)
+         self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+         self.norm1 = nn.LayerNorm(d_model)
+         self.norm2 = nn.LayerNorm(d_model)
+         self.norm3 = nn.LayerNorm(d_model)
+         self.dropout1 = nn.Dropout(dropout)
+         self.dropout2 = nn.Dropout(dropout)
+         self.dropout3 = nn.Dropout(dropout)
+
+         self.activation = _get_activation_fn(activation)
+         self.normalize_before = normalize_before
+
+     def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+         return tensor if pos is None else tensor + pos
+
+     def forward_post(self,
+                      tgt,
+                      memory,
+                      tgt_mask: Optional[Tensor] = None,
+                      memory_mask: Optional[Tensor] = None,
+                      tgt_key_padding_mask: Optional[Tensor] = None,
+                      memory_key_padding_mask: Optional[Tensor] = None,
+                      pos: Optional[Tensor] = None,
+                      query_pos: Optional[Tensor] = None):
+         q = k = self.with_pos_embed(tgt, query_pos)
+         tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
+         tgt = tgt + self.dropout1(tgt2)
+         tgt = self.norm1(tgt)
+         tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
+                                    key=self.with_pos_embed(memory, pos),
+                                    value=memory,
+                                    attn_mask=memory_mask,
+                                    key_padding_mask=memory_key_padding_mask)[0]
+         tgt = tgt + self.dropout2(tgt2)
+         tgt = self.norm2(tgt)
+         tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
+         tgt = tgt + self.dropout3(tgt2)
+         tgt = self.norm3(tgt)
+         return tgt
+
+     def forward_pre(self,
+                     tgt,
+                     memory,
+                     tgt_mask: Optional[Tensor] = None,
+                     memory_mask: Optional[Tensor] = None,
+                     tgt_key_padding_mask: Optional[Tensor] = None,
+                     memory_key_padding_mask: Optional[Tensor] = None,
+                     pos: Optional[Tensor] = None,
+                     query_pos: Optional[Tensor] = None):
+         tgt2 = self.norm1(tgt)
+         q = k = self.with_pos_embed(tgt2, query_pos)
+         tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
+         tgt = tgt + self.dropout1(tgt2)
+         tgt2 = self.norm2(tgt)
+         tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
+                                    key=self.with_pos_embed(memory, pos),
+                                    value=memory,
+                                    attn_mask=memory_mask,
+                                    key_padding_mask=memory_key_padding_mask)[0]
+         tgt = tgt + self.dropout2(tgt2)
+         tgt2 = self.norm3(tgt)
+         tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
+         tgt = tgt + self.dropout3(tgt2)
+         return tgt
+
+     def forward(self,
+                 tgt,
+                 memory,
+                 tgt_mask: Optional[Tensor] = None,
+                 memory_mask: Optional[Tensor] = None,
+                 tgt_key_padding_mask: Optional[Tensor] = None,
+                 memory_key_padding_mask: Optional[Tensor] = None,
+                 pos: Optional[Tensor] = None,
+                 query_pos: Optional[Tensor] = None):
+         if self.normalize_before:
+             return self.forward_pre(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask,
+                                     pos, query_pos)
+         return self.forward_post(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos,
+                                  query_pos)
+
+
+ def _get_clones(module, N):
+     return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+
+
+ def build_transformer(args):
+     return Transformer(
+         d_model=args.hidden_dim,
+         dropout=args.dropout,
+         nhead=args.nheads,
+         dim_feedforward=args.dim_feedforward,
+         num_encoder_layers=args.enc_layers,
+         num_decoder_layers=args.dec_layers,
+         normalize_before=args.pre_norm,
+         return_intermediate_dec=True,
+     )
+
+
+ def _get_activation_fn(activation):
+     """Return an activation function given a string"""
+     if activation == "relu":
+         return F.relu
+     if activation == "gelu":
+         return F.gelu
+     if activation == "glu":
+         return F.glu
+     raise RuntimeError(F"activation should be relu/gelu/glu, not {activation}.")
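
A shape sketch of the prefix-token mechanism (not part of the commit; hypothetical sizes, CPU-only). With two 15x20 camera feature maps fused along width, the encoder sequence is 600 image tokens plus 2 prefix tokens, or 3 when event_input is passed, and additional_pos_embed is sliced to match:

    import torch
    from detr.models.transformer import Transformer

    t = Transformer(d_model=256, nhead=8, num_encoder_layers=4,
                    num_decoder_layers=6, return_intermediate_dec=True)
    bs = 2
    src = torch.randn(bs, 256, 15, 40)   # fused camera features
    pos = torch.randn(1, 256, 15, 40)    # sine positional map (batch dim 1)
    query = torch.randn(30, 256)         # chunk_size action queries
    latent = torch.randn(bs, 256)
    proprio = torch.randn(bs, 256)
    event = torch.randn(bs, 256)
    add_pos = torch.randn(3, 256)        # plays the role of additional_pos_embed.weight

    hs = t(src, None, query, pos, latent, proprio, add_pos, event_input=event)
    print(hs.shape)   # torch.Size([6, 2, 30, 256]): per-decoder-layer outputs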
policy/EVA/detr/setup.py ADDED
@@ -0,0 +1,10 @@
+ from distutils.core import setup
+ from setuptools import find_packages
+
+ setup(
+     name="detr",
+     version="0.0.0",
+     packages=find_packages(),
+     license="MIT License",
+     long_description=open("README.md").read(),
+ )
policy/EVA/detr/util/__init__.py ADDED
@@ -0,0 +1 @@
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
policy/EVA/detr/util/box_ops.py ADDED
@@ -0,0 +1,86 @@
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+ """
+ Utilities for bounding box manipulation and GIoU.
+ """
+ import torch
+ from torchvision.ops.boxes import box_area
+
+
+ def box_cxcywh_to_xyxy(x):
+     x_c, y_c, w, h = x.unbind(-1)
+     b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
+     return torch.stack(b, dim=-1)
+
+
+ def box_xyxy_to_cxcywh(x):
+     x0, y0, x1, y1 = x.unbind(-1)
+     b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
+     return torch.stack(b, dim=-1)
+
+
+ # modified from torchvision to also return the union
+ def box_iou(boxes1, boxes2):
+     area1 = box_area(boxes1)
+     area2 = box_area(boxes2)
+
+     lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
+     rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]
+
+     wh = (rb - lt).clamp(min=0)  # [N,M,2]
+     inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]
+
+     union = area1[:, None] + area2 - inter
+
+     iou = inter / union
+     return iou, union
+
+
+ def generalized_box_iou(boxes1, boxes2):
+     """
+     Generalized IoU from https://giou.stanford.edu/
+
+     The boxes should be in [x0, y0, x1, y1] format
+
+     Returns a [N, M] pairwise matrix, where N = len(boxes1)
+     and M = len(boxes2)
+     """
+     # degenerate boxes gives inf / nan results
+     # so do an early check
+     assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
+     assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
+     iou, union = box_iou(boxes1, boxes2)
+
+     lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
+     rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
+
+     wh = (rb - lt).clamp(min=0)  # [N,M,2]
+     area = wh[:, :, 0] * wh[:, :, 1]
+
+     return iou - (area - union) / area
+
+
+ def masks_to_boxes(masks):
+     """Compute the bounding boxes around the provided masks
+
+     The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
+
+     Returns a [N, 4] tensors, with the boxes in xyxy format
+     """
+     if masks.numel() == 0:
+         return torch.zeros((0, 4), device=masks.device)
+
+     h, w = masks.shape[-2:]
+
+     y = torch.arange(0, h, dtype=torch.float)
+     x = torch.arange(0, w, dtype=torch.float)
+     y, x = torch.meshgrid(y, x)
+
+     x_mask = masks * x.unsqueeze(0)
+     x_max = x_mask.flatten(1).max(-1)[0]
+     x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
+
+     y_mask = masks * y.unsqueeze(0)
+     y_max = y_mask.flatten(1).max(-1)[0]
+     y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
+
+     return torch.stack([x_min, y_min, x_max, y_max], 1)
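
A worked example (not part of the commit) of box_iou and generalized_box_iou on hand-checkable boxes:

    import torch
    from detr.util.box_ops import box_iou, generalized_box_iou

    boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])      # one 2x2 box, xyxy
    boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0],       # overlaps by a 1x1 corner
                           [0.0, 0.0, 2.0, 2.0]])      # identical box
    iou, union = box_iou(boxes1, boxes2)
    print(iou)    # tensor([[0.1429, 1.0000]]): inter 1 / union 7, then 4/4
    giou = generalized_box_iou(boxes1, boxes2)
    print(giou)   # tensor([[-0.0794, 1.0000]]): 1/7 - 2/9 for the corner overlap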
policy/EVA/detr/util/misc.py ADDED
@@ -0,0 +1,481 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
+ """
3
+ Misc functions, including distributed helpers.
4
+
5
+ Mostly copy-paste from torchvision references.
6
+ """
7
+ import os
8
+ import subprocess
9
+ import time
10
+ from collections import defaultdict, deque
11
+ import datetime
12
+ import pickle
13
+ from packaging import version
14
+ from typing import Optional, List
15
+
16
+ import torch
17
+ import torch.distributed as dist
18
+ from torch import Tensor
19
+
20
+ # needed due to empty tensor bug in pytorch and torchvision 0.5
21
+ import torchvision
22
+
23
+ if version.parse(torchvision.__version__) < version.parse("0.7"):
24
+ from torchvision.ops import _new_empty_tensor
25
+ from torchvision.ops.misc import _output_size
26
+
27
+
28
+ class SmoothedValue(object):
29
+ """Track a series of values and provide access to smoothed values over a
30
+ window or the global series average.
31
+ """
32
+
33
+ def __init__(self, window_size=20, fmt=None):
34
+ if fmt is None:
35
+ fmt = "{median:.4f} ({global_avg:.4f})"
36
+ self.deque = deque(maxlen=window_size)
37
+ self.total = 0.0
38
+ self.count = 0
39
+ self.fmt = fmt
40
+
41
+ def update(self, value, n=1):
42
+ self.deque.append(value)
43
+ self.count += n
44
+ self.total += value * n
45
+
46
+ def synchronize_between_processes(self):
47
+ """
48
+ Warning: does not synchronize the deque!
49
+ """
50
+ if not is_dist_avail_and_initialized():
51
+ return
52
+ t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
53
+ dist.barrier()
54
+ dist.all_reduce(t)
55
+ t = t.tolist()
56
+ self.count = int(t[0])
57
+ self.total = t[1]
58
+
59
+ @property
60
+ def median(self):
61
+ d = torch.tensor(list(self.deque))
62
+ return d.median().item()
63
+
64
+ @property
65
+ def avg(self):
66
+ d = torch.tensor(list(self.deque), dtype=torch.float32)
67
+ return d.mean().item()
68
+
69
+ @property
70
+ def global_avg(self):
71
+ return self.total / self.count
72
+
73
+ @property
74
+ def max(self):
75
+ return max(self.deque)
76
+
77
+ @property
78
+ def value(self):
79
+ return self.deque[-1]
80
+
81
+ def __str__(self):
82
+ return self.fmt.format(
83
+ median=self.median,
84
+ avg=self.avg,
85
+ global_avg=self.global_avg,
86
+ max=self.max,
87
+ value=self.value,
88
+ )
89
+
90
+
91
+ def all_gather(data):
92
+ """
93
+ Run all_gather on arbitrary picklable data (not necessarily tensors)
94
+ Args:
95
+ data: any picklable object
96
+ Returns:
97
+ list[data]: list of data gathered from each rank
98
+ """
99
+ world_size = get_world_size()
100
+ if world_size == 1:
101
+ return [data]
102
+
103
+ # serialized to a Tensor
104
+ buffer = pickle.dumps(data)
105
+ storage = torch.ByteStorage.from_buffer(buffer)
106
+ tensor = torch.ByteTensor(storage).to("cuda")
107
+
108
+ # obtain Tensor size of each rank
109
+ local_size = torch.tensor([tensor.numel()], device="cuda")
110
+ size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
111
+ dist.all_gather(size_list, local_size)
112
+ size_list = [int(size.item()) for size in size_list]
113
+ max_size = max(size_list)
114
+
115
+ # receiving Tensor from all ranks
116
+ # we pad the tensor because torch all_gather does not support
117
+ # gathering tensors of different shapes
118
+ tensor_list = []
119
+ for _ in size_list:
120
+ tensor_list.append(torch.empty((max_size, ), dtype=torch.uint8, device="cuda"))
121
+ if local_size != max_size:
122
+ padding = torch.empty(size=(max_size - local_size, ), dtype=torch.uint8, device="cuda")
123
+ tensor = torch.cat((tensor, padding), dim=0)
124
+ dist.all_gather(tensor_list, tensor)
125
+
126
+ data_list = []
127
+ for size, tensor in zip(size_list, tensor_list):
128
+ buffer = tensor.cpu().numpy().tobytes()[:size]
129
+ data_list.append(pickle.loads(buffer))
130
+
131
+ return data_list
132
+
133
+
134
+ def reduce_dict(input_dict, average=True):
135
+ """
136
+ Args:
137
+ input_dict (dict): all the values will be reduced
138
+ average (bool): whether to do average or sum
139
+ Reduce the values in the dictionary from all processes so that all processes
140
+ have the averaged results. Returns a dict with the same fields as
141
+ input_dict, after reduction.
142
+ """
143
+ world_size = get_world_size()
144
+ if world_size < 2:
145
+ return input_dict
146
+ with torch.no_grad():
147
+ names = []
148
+ values = []
149
+ # sort the keys so that they are consistent across processes
150
+ for k in sorted(input_dict.keys()):
151
+ names.append(k)
152
+ values.append(input_dict[k])
153
+ values = torch.stack(values, dim=0)
154
+ dist.all_reduce(values)
155
+ if average:
156
+ values /= world_size
157
+ reduced_dict = {k: v for k, v in zip(names, values)}
158
+ return reduced_dict
159
+
160
+
161
+ class MetricLogger(object):
162
+
163
+ def __init__(self, delimiter="\t"):
164
+ self.meters = defaultdict(SmoothedValue)
165
+ self.delimiter = delimiter
166
+
167
+ def update(self, **kwargs):
168
+ for k, v in kwargs.items():
169
+ if isinstance(v, torch.Tensor):
170
+ v = v.item()
171
+ assert isinstance(v, (float, int))
172
+ self.meters[k].update(v)
173
+
174
+ def __getattr__(self, attr):
175
+ if attr in self.meters:
176
+ return self.meters[attr]
177
+ if attr in self.__dict__:
178
+ return self.__dict__[attr]
179
+ raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
180
+
181
+ def __str__(self):
182
+ loss_str = []
183
+ for name, meter in self.meters.items():
184
+ loss_str.append("{}: {}".format(name, str(meter)))
185
+ return self.delimiter.join(loss_str)
186
+
187
+ def synchronize_between_processes(self):
188
+ for meter in self.meters.values():
189
+ meter.synchronize_between_processes()
190
+
191
+ def add_meter(self, name, meter):
192
+ self.meters[name] = meter
193
+
194
+ def log_every(self, iterable, print_freq, header=None):
195
+ i = 0
196
+ if not header:
197
+ header = ""
198
+ start_time = time.time()
199
+ end = time.time()
200
+ iter_time = SmoothedValue(fmt="{avg:.4f}")
201
+ data_time = SmoothedValue(fmt="{avg:.4f}")
202
+ space_fmt = ":" + str(len(str(len(iterable)))) + "d"
203
+ if torch.cuda.is_available():
204
+ log_msg = self.delimiter.join([
205
+ header,
206
+ "[{0" + space_fmt + "}/{1}]",
207
+ "eta: {eta}",
208
+ "{meters}",
209
+ "time: {time}",
210
+ "data: {data}",
211
+ "max mem: {memory:.0f}",
212
+ ])
213
+ else:
214
+ log_msg = self.delimiter.join([
215
+ header,
216
+ "[{0" + space_fmt + "}/{1}]",
217
+ "eta: {eta}",
218
+ "{meters}",
219
+ "time: {time}",
220
+ "data: {data}",
221
+ ])
222
+ MB = 1024.0 * 1024.0
223
+ for obj in iterable:
224
+ data_time.update(time.time() - end)
225
+ yield obj
226
+ iter_time.update(time.time() - end)
227
+ if i % print_freq == 0 or i == len(iterable) - 1:
228
+ eta_seconds = iter_time.global_avg * (len(iterable) - i)
229
+ eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
230
+ if torch.cuda.is_available():
231
+ print(
232
+ log_msg.format(
233
+ i,
234
+ len(iterable),
235
+ eta=eta_string,
236
+ meters=str(self),
237
+ time=str(iter_time),
238
+ data=str(data_time),
239
+ memory=torch.cuda.max_memory_allocated() / MB,
240
+ ))
241
+ else:
242
+ print(
243
+ log_msg.format(
244
+ i,
245
+ len(iterable),
246
+ eta=eta_string,
247
+ meters=str(self),
248
+ time=str(iter_time),
249
+ data=str(data_time),
250
+ ))
251
+ i += 1
252
+ end = time.time()
253
+ total_time = time.time() - start_time
254
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
255
+ print("{} Total time: {} ({:.4f} s / it)".format(header, total_time_str, total_time / len(iterable)))
256
+
257
+
258
+ def get_sha():
259
+ cwd = os.path.dirname(os.path.abspath(__file__))
260
+
261
+ def _run(command):
262
+ return subprocess.check_output(command, cwd=cwd).decode("ascii").strip()
263
+
264
+ sha = "N/A"
265
+ diff = "clean"
266
+ branch = "N/A"
267
+ try:
268
+ sha = _run(["git", "rev-parse", "HEAD"])
269
+ subprocess.check_output(["git", "diff"], cwd=cwd)
270
+ diff = _run(["git", "diff-index", "HEAD"])
271
+ diff = "has uncommited changes" if diff else "clean"
272
+ branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"])
273
+ except Exception:
274
+ pass
275
+ message = f"sha: {sha}, status: {diff}, branch: {branch}"
276
+ return message
277
+
278
+
279
+ def collate_fn(batch):
280
+ batch = list(zip(*batch))
281
+ batch[0] = nested_tensor_from_tensor_list(batch[0])
282
+ return tuple(batch)
283
+
284
+
285
+ def _max_by_axis(the_list):
286
+ # type: (List[List[int]]) -> List[int]
287
+ maxes = the_list[0]
288
+ for sublist in the_list[1:]:
289
+ for index, item in enumerate(sublist):
290
+ maxes[index] = max(maxes[index], item)
291
+ return maxes
292
+
293
+
294
+ class NestedTensor(object):
295
+
296
+ def __init__(self, tensors, mask: Optional[Tensor]):
297
+ self.tensors = tensors
298
+ self.mask = mask
299
+
300
+ def to(self, device):
301
+ # type: (Device) -> NestedTensor # noqa
302
+ cast_tensor = self.tensors.to(device)
303
+ mask = self.mask
304
+ if mask is not None:
305
+ assert mask is not None
306
+ cast_mask = mask.to(device)
307
+ else:
308
+ cast_mask = None
309
+ return NestedTensor(cast_tensor, cast_mask)
310
+
311
+ def decompose(self):
312
+ return self.tensors, self.mask
313
+
314
+ def __repr__(self):
315
+ return str(self.tensors)
316
+
317
+
318
+ def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
319
+ # TODO make this more general
320
+ if tensor_list[0].ndim == 3:
321
+ if torchvision._is_tracing():
322
+ # nested_tensor_from_tensor_list() does not export well to ONNX
323
+ # call _onnx_nested_tensor_from_tensor_list() instead
324
+ return _onnx_nested_tensor_from_tensor_list(tensor_list)
325
+
326
+ # TODO make it support different-sized images
327
+ max_size = _max_by_axis([list(img.shape) for img in tensor_list])
328
+ # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
329
+ batch_shape = [len(tensor_list)] + max_size
330
+ b, c, h, w = batch_shape
331
+ dtype = tensor_list[0].dtype
332
+ device = tensor_list[0].device
333
+ tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
334
+ mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
335
+ for img, pad_img, m in zip(tensor_list, tensor, mask):
336
+ pad_img[:img.shape[0], :img.shape[1], :img.shape[2]].copy_(img)
337
+ m[:img.shape[1], :img.shape[2]] = False
338
+ else:
339
+ raise ValueError("not supported")
340
+ return NestedTensor(tensor, mask)
341
+
342
+
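To make the padding semantics above concrete, a small sketch (assumes torch as imported in this module):

    imgs = [torch.rand(3, 200, 300), torch.rand(3, 180, 320)]  # two CHW images
    nt = nested_tensor_from_tensor_list(imgs)
    print(nt.tensors.shape)  # torch.Size([2, 3, 200, 320]): per-axis maxima
    print(nt.mask.shape)     # torch.Size([2, 200, 320]); True exactly where padded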
343
+ # _onnx_nested_tensor_from_tensor_list() is an implementation of
344
+ # nested_tensor_from_tensor_list() that is supported by ONNX tracing.
345
+ @torch.jit.unused
346
+ def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
347
+ max_size = []
348
+ for i in range(tensor_list[0].dim()):
349
+ max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)
350
+ max_size.append(max_size_i)
351
+ max_size = tuple(max_size)
352
+
353
+ # work around for
354
+ # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
355
+ # m[: img.shape[1], :img.shape[2]] = False
356
+ # which is not yet supported in onnx
357
+ padded_imgs = []
358
+ padded_masks = []
359
+ for img in tensor_list:
360
+ padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
361
+ padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
362
+ padded_imgs.append(padded_img)
363
+
364
+ m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
365
+ padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
366
+ padded_masks.append(padded_mask.to(torch.bool))
367
+
368
+ tensor = torch.stack(padded_imgs)
369
+ mask = torch.stack(padded_masks)
370
+
371
+ return NestedTensor(tensor, mask=mask)
372
+
373
+
374
+ def setup_for_distributed(is_master):
375
+ """
376
+ This function disables printing when not in the master process
377
+ """
378
+ import builtins as __builtin__
379
+
380
+ builtin_print = __builtin__.print
381
+
382
+ def print(*args, **kwargs):
383
+ force = kwargs.pop("force", False)
384
+ if is_master or force:
385
+ builtin_print(*args, **kwargs)
386
+
387
+ __builtin__.print = print
388
+
389
+
390
+ def is_dist_avail_and_initialized():
391
+ if not dist.is_available():
392
+ return False
393
+ if not dist.is_initialized():
394
+ return False
395
+ return True
396
+
397
+
398
+ def get_world_size():
399
+ if not is_dist_avail_and_initialized():
400
+ return 1
401
+ return dist.get_world_size()
402
+
403
+
404
+ def get_rank():
405
+ if not is_dist_avail_and_initialized():
406
+ return 0
407
+ return dist.get_rank()
408
+
409
+
410
+ def is_main_process():
411
+ return get_rank() == 0
412
+
413
+
414
+ def save_on_master(*args, **kwargs):
415
+ if is_main_process():
416
+ torch.save(*args, **kwargs)
417
+
418
+
419
+ def init_distributed_mode(args):
420
+ if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
421
+ args.rank = int(os.environ["RANK"])
422
+ args.world_size = int(os.environ["WORLD_SIZE"])
423
+ args.gpu = int(os.environ["LOCAL_RANK"])
424
+ elif "SLURM_PROCID" in os.environ:
425
+ args.rank = int(os.environ["SLURM_PROCID"])
426
+ args.gpu = args.rank % torch.cuda.device_count()
427
+ else:
428
+ print("Not using distributed mode")
429
+ args.distributed = False
430
+ return
431
+
432
+ args.distributed = True
433
+
434
+ torch.cuda.set_device(args.gpu)
435
+ args.dist_backend = "nccl"
436
+ print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)
437
+ torch.distributed.init_process_group(
438
+ backend=args.dist_backend,
439
+ init_method=args.dist_url,
440
+ world_size=args.world_size,
441
+ rank=args.rank,
442
+ )
443
+ torch.distributed.barrier()
444
+ setup_for_distributed(args.rank == 0)
445
+
446
+
447
+ @torch.no_grad()
448
+ def accuracy(output, target, topk=(1, )):
449
+ """Computes the precision@k for the specified values of k"""
450
+ if target.numel() == 0:
451
+ return [torch.zeros([], device=output.device)]
452
+ maxk = max(topk)
453
+ batch_size = target.size(0)
454
+
455
+ _, pred = output.topk(maxk, 1, True, True)
456
+ pred = pred.t()
457
+ correct = pred.eq(target.view(1, -1).expand_as(pred))
458
+
459
+ res = []
460
+ for k in topk:
461
+ correct_k = correct[:k].view(-1).float().sum(0)
462
+ res.append(correct_k.mul_(100.0 / batch_size))
463
+ return res
464
+
465
+
466
+ def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
467
+ # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
468
+ """
469
+ Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
470
+ This will eventually be supported natively by PyTorch, and this
471
+ class can go away.
472
+ """
473
+ if version.parse(torchvision.__version__) < version.parse("0.7"):
474
+ if input.numel() > 0:
475
+ return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)
476
+
477
+ output_shape = _output_size(2, input, size, scale_factor)
478
+ output_shape = list(input.shape[:-2]) + list(output_shape)
479
+ return _new_empty_tensor(input, output_shape)
480
+ else:
481
+ return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
policy/EVA/detr/util/plot_utils.py ADDED
@@ -0,0 +1,110 @@
1
+ """
2
+ Plotting utilities to visualize training logs.
3
+ """
4
+
5
+ import torch
6
+ import pandas as pd
7
+ import numpy as np
8
+ import seaborn as sns
9
+ import matplotlib.pyplot as plt
10
+
11
+ from pathlib import Path, PurePath
12
+
13
+
14
+ def plot_logs(
15
+ logs,
16
+ fields=("class_error", "loss_bbox_unscaled", "mAP"),
17
+ ewm_col=0,
18
+ log_name="log.txt",
19
+ ):
20
+ """
21
+ Function to plot specific fields from training log(s). Plots both training and test results.
22
+
23
+ :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
24
+ - fields = which results to plot from each log file - plots both training and test for each field.
25
+ - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
26
+ - log_name = optional, name of log file if different than default 'log.txt'.
27
+
28
+ :: Outputs - matplotlib plots of results in fields, color coded for each log file.
29
+ - solid lines are training results, dashed lines are test results.
30
+
31
+ """
32
+ func_name = "plot_utils.py::plot_logs"
33
+
34
+ # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
35
+ # convert single Path to list to avoid 'not iterable' error
36
+
37
+ if not isinstance(logs, list):
38
+ if isinstance(logs, PurePath):
39
+ logs = [logs]
40
+ print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
41
+ else:
42
+ raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \
43
+ Expect list[Path] or single Path obj, received {type(logs)}")
44
+
45
+ # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir
46
+ for i, dir in enumerate(logs):
47
+ if not isinstance(dir, PurePath):
48
+ raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}")
49
+ if not dir.exists():
50
+ raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}")
51
+ # verify log_name exists
52
+ fn = Path(dir / log_name)
53
+ if not fn.exists():
54
+ print(f"-> missing {log_name}. Have you gotten to Epoch 1 in training?")
55
+ print(f"--> full path of missing log file: {fn}")
56
+ return
57
+
58
+ # load log file(s) and plot
59
+ dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]
60
+
61
+ fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))
62
+
63
+ for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
64
+ for j, field in enumerate(fields):
65
+ if field == "mAP":
66
+ coco_eval = (pd.DataFrame(np.stack(df.test_coco_eval_bbox.dropna().values)[:,
67
+ 1]).ewm(com=ewm_col).mean())
68
+ axs[j].plot(coco_eval, c=color)
69
+ else:
70
+ df.interpolate().ewm(com=ewm_col).mean().plot(
71
+ y=[f"train_{field}", f"test_{field}"],
72
+ ax=axs[j],
73
+ color=[color] * 2,
74
+ style=["-", "--"],
75
+ )
76
+ for ax, field in zip(axs, fields):
77
+ ax.legend([Path(p).name for p in logs])
78
+ ax.set_title(field)
79
+
80
+
81
+ def plot_precision_recall(files, naming_scheme="iter"):
82
+ if naming_scheme == "exp_id":
83
+ # name becomes exp_id
84
+ names = [f.parts[-3] for f in files]
85
+ elif naming_scheme == "iter":
86
+ names = [f.stem for f in files]
87
+ else:
88
+ raise ValueError(f"not supported {naming_scheme}")
89
+ fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
90
+ for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
91
+ data = torch.load(f)
92
+ # precision is n_iou, n_points, n_cat, n_area, max_det
93
+ precision = data["precision"]
94
+ recall = data["params"].recThrs
95
+ scores = data["scores"]
96
+ # take precision for all classes, all areas and 100 detections
97
+ precision = precision[0, :, :, 0, -1].mean(1)
98
+ scores = scores[0, :, :, 0, -1].mean(1)
99
+ prec = precision.mean()
100
+ rec = data["recall"][0, :, 0, -1].mean()
101
+ print(f"{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, " + f"score={scores.mean():0.3f}, " +
102
+ f"f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}")
103
+ axs[0].plot(recall, precision, c=color)
104
+ axs[1].plot(recall, scores, c=color)
105
+
106
+ axs[0].set_title("Precision / Recall")
107
+ axs[0].legend(names)
108
+ axs[1].set_title("Scores / Recall")
109
+ axs[1].legend(names)
110
+ return fig, axs
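Typical invocation of plot_logs (a sketch; the run directories are hypothetical, and each must contain the JSON-lines log.txt written during training):

    from pathlib import Path
    plot_logs(
        [Path("output/exp_a"), Path("output/exp_b")],  # hypothetical run dirs
        fields=("loss", "loss_bbox_unscaled"),  # plotted as train_/test_ pairs
        ewm_col=5,  # heavier exponential smoothing
    )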
policy/EVA/eva_policy.py ADDED
@@ -0,0 +1,127 @@
1
+ """
2
+ EVA Policy: Event-driven Visual-tactile ACT.
3
+
4
+ Differences from ACTPolicy:
5
+ - Owns a TactileEventDetector that encodes [B, 2, 5, 4, 4, 3] → event_token
6
+ - Passes event_token into DETRVAE as an extra encoder input token
7
+ - Adds auxiliary cross-entropy loss (event classification vs rule-based labels)
8
+ """
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ from torch.nn import functional as F
13
+ import torchvision.transforms as transforms
14
+
15
+ try:
16
+ from detr.main import build_EVA_model_and_optimizer
17
+ except ImportError:
18
+ from .detr.main import build_EVA_model_and_optimizer
19
+
20
+ try:
+ from tactile_event import TactileEventDetector, tactile_event_loss
+ except ImportError:
+ from .tactile_event import TactileEventDetector, tactile_event_loss
21
+
22
+
23
+ def kl_divergence(mu, logvar):
24
+ batch_size = mu.size(0)
25
+ assert batch_size != 0
26
+ if mu.data.ndimension() == 4:
27
+ mu = mu.view(mu.size(0), mu.size(1))
28
+ if logvar.data.ndimension() == 4:
29
+ logvar = logvar.view(logvar.size(0), logvar.size(1))
30
+
31
+ klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp())
32
+ total_kld = klds.sum(1).mean(0, True)
33
+ dimension_wise_kld = klds.mean(0)
34
+ mean_kld = klds.mean(1).mean(0, True)
35
+
36
+ return total_kld, dimension_wise_kld, mean_kld
37
+
38
+
39
+ class EVAPolicy(nn.Module):
40
+ """
41
+ EVA = ACT + TactileEventDetector.
42
+
43
+ Forward signature:
44
+ Training: policy(qpos, image, tactile, actions, is_pad, event_labels)
45
+ Inference: policy(qpos, image, tactile)
46
+ """
47
+
48
+ def __init__(self, args_override, RoboTwin_Config=None):
49
+ super().__init__()
50
+ model, optimizer = build_EVA_model_and_optimizer(args_override, RoboTwin_Config)
51
+ self.model = model # CVAE decoder (DETRVAE)
52
+ self.optimizer = optimizer
53
+ self.kl_weight = args_override["kl_weight"]
54
+ self.event_weight = args_override.get("event_weight", 0.1)
55
+
56
+ hidden_dim = args_override.get("hidden_dim", 256)
57
+ self.tactile_detector = TactileEventDetector(hidden_dim=hidden_dim)
58
+ self.tactile_detector.cuda()
59
+
60
+ # Add tactile detector params to optimizer
61
+ self.optimizer.add_param_group({
62
+ "params": self.tactile_detector.parameters(),
63
+ "lr": args_override.get("lr", 1e-4),
64
+ })
65
+
66
+ print(f"EVA Policy | KL weight: {self.kl_weight}, Event weight: {self.event_weight}")
67
+ n_tactile = sum(p.numel() for p in self.tactile_detector.parameters())
68
+ print(f"TactileEventDetector parameters: {n_tactile / 1e3:.1f}K")
69
+
70
+ def __call__(self, qpos, image, tactile, actions=None, is_pad=None, event_labels=None):
71
+ """
72
+ Args:
73
+ qpos: [B, state_dim]
74
+ image: [B, num_cam, C, H, W]
75
+ tactile: [B, 2, 5, 4, 4, 3]
76
+ actions: [B, seq, action_dim] or None (inference)
77
+ is_pad: [B, seq] or None
78
+ event_labels: [B] int64 or None (rule-based labels for aux loss)
79
+
80
+ Returns:
81
+ Training: loss_dict with keys {l1, kl, event, loss}
82
+ Inference: a_hat [B, num_queries, action_dim]
83
+ """
84
+ env_state = None
85
+ normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
86
+ image = normalize(image)
87
+
88
+ # Encode tactile → event token
89
+ event_token, event_logits = self.tactile_detector(tactile)
90
+
91
+ if actions is not None: # training
92
+ actions = actions[:, :self.model.num_queries]
93
+ is_pad = is_pad[:, :self.model.num_queries]
94
+
95
+ a_hat, is_pad_hat, (mu, logvar) = self.model(
96
+ qpos, image, env_state, actions, is_pad, event_token=event_token
97
+ )
98
+
99
+ total_kld, dim_wise_kld, mean_kld = kl_divergence(mu, logvar)
100
+
101
+ loss_dict = dict()
102
+ all_l1 = F.l1_loss(actions, a_hat, reduction="none")
103
+ l1 = (all_l1 * ~is_pad.unsqueeze(-1)).mean()
104
+ loss_dict["l1"] = l1
105
+ loss_dict["kl"] = total_kld[0]
106
+
107
+ # Auxiliary event classification loss
108
+ if event_labels is not None:
109
+ event_loss = tactile_event_loss(event_logits, event_labels)
110
+ else:
111
+ event_loss = torch.tensor(0.0, device=qpos.device)
112
+ loss_dict["event"] = event_loss
113
+
114
+ loss_dict["loss"] = (
115
+ loss_dict["l1"]
116
+ + loss_dict["kl"] * self.kl_weight
117
+ + loss_dict["event"] * self.event_weight
118
+ )
119
+ return loss_dict
120
+ else: # inference
121
+ a_hat, _, (_, _) = self.model(
122
+ qpos, image, env_state, event_token=event_token
123
+ )
124
+ return a_hat
125
+
126
+ def configure_optimizers(self):
127
+ return self.optimizer
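A minimal forward sketch matching the shapes in the __call__ docstring (assumes policy = EVAPolicy(args) already built on GPU; all tensor contents are random placeholders):

    import torch
    B, num_cam, state_dim, chunk = 4, 2, 8, 20
    qpos = torch.randn(B, state_dim).cuda()
    image = torch.rand(B, num_cam, 3, 256, 256).cuda()
    tactile = torch.randn(B, 2, 5, 4, 4, 3).cuda()
    actions = torch.randn(B, chunk, state_dim).cuda()
    is_pad = torch.zeros(B, chunk, dtype=torch.bool).cuda()
    labels = torch.randint(0, 4, (B,)).cuda()
    loss_dict = policy(qpos, image, tactile, actions, is_pad, labels)  # training: {l1, kl, event, loss}
    a_hat = policy(qpos, image, tactile)  # inference: [B, num_queries, action_dim]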
policy/EVA/imitate_episodes.py ADDED
@@ -0,0 +1,240 @@
1
+ """
2
+ EVA training script. Based on ACT's imitate_episodes.py.
3
+
4
+ Usage:
5
+ python imitate_episodes.py # use train_config.yaml defaults
6
+ python imitate_episodes.py --task peg_insertion # shorthand
7
+ python imitate_episodes.py --eval # evaluate best checkpoint
8
+ """
9
+
10
+ import os
11
+
12
+ os.environ["MUJOCO_GL"] = "egl"
13
+
14
+ import torch
15
+ import numpy as np
16
+ import pickle
17
+ import argparse
18
+ import matplotlib
19
+
20
+ matplotlib.use("Agg")
21
+ import matplotlib.pyplot as plt
22
+ from copy import deepcopy
23
+ from tqdm import tqdm
24
+
25
+ from constants import DT, SIM_TASK_CONFIGS
26
+ from utils import load_data, compute_dict_mean, set_seed, detach_dict
27
+ from eva_policy import EVAPolicy
28
+
29
+
30
+ def main(args):
31
+ set_seed(1)
32
+
33
+ is_eval = args["eval"]
34
+ ckpt_dir = args["ckpt_dir"]
35
+ task_name = args["task_name"]
36
+ batch_size_train = args["batch_size"]
37
+ batch_size_val = args["batch_size"]
38
+ num_epochs = args["num_epochs"]
39
+
40
+ # Task config
41
+ task_config = SIM_TASK_CONFIGS[task_name]
42
+ dataset_dir = task_config["dataset_dir"]
43
+ num_episodes = task_config["num_episodes"]
44
+ episode_len = task_config["episode_len"]
45
+ camera_names = task_config["camera_names"]
46
+
47
+ # Model config
48
+ state_dim = args["state_dim"]
49
+ policy_config = {
50
+ "lr": args["lr"],
51
+ "num_queries": args["chunk_size"],
52
+ "kl_weight": args["kl_weight"],
53
+ "event_weight": args.get("event_weight", 0.1),
54
+ "hidden_dim": args["hidden_dim"],
55
+ "dim_feedforward": args["dim_feedforward"],
56
+ "lr_backbone": 1e-5,
57
+ "backbone": "resnet18",
58
+ "enc_layers": 4,
59
+ "dec_layers": 7,
60
+ "nheads": 8,
61
+ "camera_names": camera_names,
62
+ "state_dim": state_dim,
63
+ "chunk_size": args["chunk_size"],
64
+ }
65
+
66
+ config = {
67
+ "num_epochs": num_epochs,
68
+ "ckpt_dir": ckpt_dir,
69
+ "episode_len": episode_len,
70
+ "state_dim": state_dim,
71
+ "lr": args["lr"],
72
+ "policy_config": policy_config,
73
+ "task_name": task_name,
74
+ "seed": args["seed"],
75
+ "temporal_agg": args.get("temporal_agg", False),
76
+ "camera_names": camera_names,
77
+ "save_freq": args["save_freq"],
78
+ }
79
+
80
+ if is_eval:
81
+ ckpt_name = "policy_best.ckpt"
82
+ print(f"Evaluating {ckpt_name}...")
83
+ # TODO: implement eval loop for EVA
84
+ print("EVA eval not yet implemented — use ACT eval with tactile wrapper")
85
+ return
86
+
87
+ train_dataloader, val_dataloader, stats, _ = load_data(
88
+ dataset_dir, num_episodes, camera_names, batch_size_train, batch_size_val
89
+ )
90
+
91
+ # Save dataset stats
92
+ os.makedirs(ckpt_dir, exist_ok=True)
93
+ stats_path = os.path.join(ckpt_dir, "dataset_stats.pkl")
94
+ with open(stats_path, "wb") as f:
95
+ pickle.dump(stats, f)
96
+
97
+ best_ckpt_info = train_bc(train_dataloader, val_dataloader, config)
98
+ best_epoch, min_val_loss, best_state_dict = best_ckpt_info
99
+
100
+ ckpt_path = os.path.join(ckpt_dir, "policy_best.ckpt")
101
+ torch.save(best_state_dict, ckpt_path)
102
+ print(f"Best ckpt, val loss {min_val_loss:.6f} @ epoch {best_epoch}")
103
+
104
+
105
+ def forward_pass(data, policy):
106
+ """EVA forward pass: unpack 6-tuple (includes tactile + event_labels)."""
107
+ image_data, qpos_data, action_data, is_pad, tactile_data, event_labels = data
108
+ image_data = image_data.cuda()
109
+ qpos_data = qpos_data.cuda()
110
+ action_data = action_data.cuda()
111
+ is_pad = is_pad.cuda()
112
+ tactile_data = tactile_data.cuda()
113
+ event_labels = event_labels.cuda()
114
+ return policy(qpos_data, image_data, tactile_data, action_data, is_pad, event_labels)
115
+
116
+
117
+ def train_bc(train_dataloader, val_dataloader, config):
118
+ num_epochs = config["num_epochs"]
119
+ ckpt_dir = config["ckpt_dir"]
120
+ seed = config["seed"]
121
+ policy_config = config["policy_config"]
122
+
123
+ set_seed(seed)
124
+
125
+ policy = EVAPolicy(policy_config)
126
+ policy.cuda()
127
+ optimizer = policy.configure_optimizers()
128
+
129
+ train_history = []
130
+ validation_history = []
131
+ min_val_loss = np.inf
132
+ best_ckpt_info = None
133
+
134
+ for epoch in tqdm(range(num_epochs)):
135
+ print(f"\nEpoch {epoch}")
136
+
137
+ # Validation
138
+ with torch.inference_mode():
139
+ policy.eval()
140
+ epoch_dicts = []
141
+ for batch_idx, data in enumerate(val_dataloader):
142
+ forward_dict = forward_pass(data, policy)
143
+ epoch_dicts.append(forward_dict)
144
+ epoch_summary = compute_dict_mean(epoch_dicts)
145
+ validation_history.append(epoch_summary)
146
+
147
+ epoch_val_loss = epoch_summary["loss"]
148
+ if epoch_val_loss < min_val_loss:
149
+ min_val_loss = epoch_val_loss
150
+ best_ckpt_info = (epoch, min_val_loss, deepcopy(policy.state_dict()))
151
+
152
+ summary_str = " | ".join(f"{k}: {v.item():.4f}" for k, v in epoch_summary.items())
153
+ print(f"Val: {summary_str}")
154
+
155
+ # Training
156
+ policy.train()
157
+ optimizer.zero_grad()
158
+ for batch_idx, data in enumerate(train_dataloader):
159
+ forward_dict = forward_pass(data, policy)
160
+ loss = forward_dict["loss"]
161
+ loss.backward()
162
+ optimizer.step()
163
+ optimizer.zero_grad()
164
+ train_history.append(detach_dict(forward_dict))
165
+
166
+ epoch_summary = compute_dict_mean(
167
+ train_history[(batch_idx + 1) * epoch:(batch_idx + 1) * (epoch + 1)]
168
+ )
169
+ epoch_train_loss = epoch_summary["loss"]
170
+ summary_str = " | ".join(f"{k}: {v.item():.4f}" for k, v in epoch_summary.items())
171
+ print(f"Train: {summary_str}")
172
+
173
+ if (epoch + 1) % config["save_freq"] == 0:
174
+ ckpt_path = os.path.join(ckpt_dir, f"policy_epoch_{epoch + 1}_seed_{seed}.ckpt")
175
+ torch.save(policy.state_dict(), ckpt_path)
176
+ plot_history(train_history, validation_history, epoch, ckpt_dir, seed)
177
+
178
+ ckpt_path = os.path.join(ckpt_dir, "policy_last.ckpt")
179
+ torch.save(policy.state_dict(), ckpt_path)
180
+
181
+ best_epoch, min_val_loss, best_state_dict = best_ckpt_info
182
+ ckpt_path = os.path.join(ckpt_dir, f"policy_epoch_{best_epoch}_seed_{seed}.ckpt")
183
+ torch.save(best_state_dict, ckpt_path)
184
+ print(f"Training finished: seed {seed}, val loss {min_val_loss:.6f} at epoch {best_epoch}")
185
+
186
+ plot_history(train_history, validation_history, num_epochs, ckpt_dir, seed)
187
+ return best_ckpt_info
188
+
189
+
190
+ def plot_history(train_history, validation_history, num_epochs, ckpt_dir, seed):
191
+ for key in train_history[0]:
192
+ plot_path = os.path.join(ckpt_dir, f"train_val_{key}_seed_{seed}.png")
193
+ plt.figure()
194
+ train_values = [s[key].item() for s in train_history]
195
+ val_values = [s[key].item() for s in validation_history]
196
+ plt.plot(np.linspace(0, num_epochs - 1, len(train_history)), train_values, label="train")
197
+ plt.plot(np.linspace(0, num_epochs - 1, len(validation_history)), val_values, label="validation")
198
+ plt.tight_layout()
199
+ plt.legend()
200
+ plt.title(key)
201
+ plt.savefig(plot_path)
202
+ plt.close()
203
+ print(f"Saved plots to {ckpt_dir}")
204
+
205
+
206
+ if __name__ == "__main__":
207
+ import yaml
208
+
209
+ config_path = os.path.join(os.path.dirname(__file__), "train_config.yaml")
210
+ with open(config_path, "r") as f:
211
+ args = yaml.safe_load(f)
212
+
213
+ parser = argparse.ArgumentParser()
214
+ parser.add_argument("--task", type=str, default=None)
215
+ parser.add_argument("--eval", action="store_true")
216
+ parser.add_argument("--ckpt_dir", type=str, default=None)
217
+ parser.add_argument("--task_name", type=str, default=None)
218
+ parser.add_argument("--batch_size", type=int, default=None)
219
+ parser.add_argument("--seed", type=int, default=None)
220
+ parser.add_argument("--num_epochs", type=int, default=None)
221
+ parser.add_argument("--lr", type=float, default=None)
222
+ parser.add_argument("--kl_weight", type=float, default=None)
223
+ parser.add_argument("--event_weight", type=float, default=None)
224
+ parser.add_argument("--chunk_size", type=int, default=None)
225
+ parser.add_argument("--hidden_dim", type=int, default=None)
226
+ parser.add_argument("--state_dim", type=int, default=None)
227
+ parser.add_argument("--save_freq", type=int, default=None)
228
+ parser.add_argument("--dim_feedforward", type=int, default=None)
229
+ parser.add_argument("--temporal_agg", action="store_true")
230
+
231
+ for k, v in vars(parser.parse_args()).items():
232
+ if v is not None and v is not False:
233
+ args[k] = v
234
+
235
+ if args.get("task"):
236
+ task = args.pop("task")
237
+ args["task_name"] = f"sim-{task}-default-50"
238
+ args["ckpt_dir"] = f"./checkpoints/{task}_eva"
239
+
240
+ main(args)
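One subtlety in train_bc above: the epoch-summary slice reuses batch_idx after the loop, where (batch_idx + 1) equals the number of train batches. An equivalent, more explicit form of the same slice:

    num_batches = len(train_dataloader)
    epoch_summary = compute_dict_mean(
        train_history[num_batches * epoch : num_batches * (epoch + 1)]
    )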
policy/EVA/process_data.py ADDED
@@ -0,0 +1,148 @@
1
+ """
2
+ Process raw tactile_data/ HDF5 into EVA-compatible format.
3
+
4
+ Raw format (from collect_data.py):
5
+ actions: [T, 7] float64 (OSC_POSE delta commands)
6
+ agentview_image: [T, 256, 256, 3] uint8
7
+ eye_in_hand_image: [T, 256, 256, 3] uint8
8
+ joint_pos: [T, 7] float64
9
+ gripper_qpos: [T, 6] float64
10
+ eef_pos: [T, 3] float64
11
+ eef_quat: [T, 4] float64
12
+ tactile_left: [T*5, 4, 4, 3] float64 (100Hz)
13
+ tactile_right: [T*5, 4, 4, 3] float64
14
+ rewards: [T]
15
+ success: [T]
16
+
17
+ EVA processed format (ACT-compatible + tactile):
18
+ action: [T, 8] float32 (7 joint + 1 gripper)
19
+ observations/qpos: [T, 8] float32
20
+ observations/images/agentview: [T, 256, 256, 3] uint8
21
+ observations/images/eye_in_hand: [T, 256, 256, 3] uint8
22
+ observations/tactile: [T, 2, 5, 4, 4, 3] float32 (per-step tactile window)
23
+ observations/event_labels: [T] int64 (rule-based event class)
24
+ attrs: sim=True
25
+ """
26
+
27
+ import os
28
+ import sys
29
+ import h5py
30
+ import json
31
+ import numpy as np
32
+ import argparse
33
+ from glob import glob
34
+
35
+ sys.path.insert(0, os.path.dirname(__file__))
36
+ from tactile_event import label_tactile_events
37
+
38
+
39
+ def process_episode(raw_path, save_path):
40
+ """Convert one raw episode to EVA format."""
41
+ with h5py.File(raw_path, 'r') as raw:
42
+ T = raw['actions'].shape[0]
43
+ actions_raw = raw['actions'][:] # [T, 7]
44
+ joint_pos = raw['joint_pos'][:] # [T, 7]
45
+ gripper_qpos = raw['gripper_qpos'][:] # [T, 6]
46
+ agentview = raw['agentview_image'][:] # [T, 256, 256, 3]
47
+ eye_in_hand = raw['eye_in_hand_image'][:] # [T, 256, 256, 3]
48
+ tl = raw['tactile_left'][:] # [T*5, 4, 4, 3]
49
+ tr = raw['tactile_right'][:]
50
+
51
+ # qpos: 7 joint angles + 1 gripper (first joint of gripper_qpos)
52
+ gripper_state = gripper_qpos[:, 0:1] # [T, 1]
53
+ qpos = np.concatenate([joint_pos, gripper_state], axis=1).astype(np.float32) # [T, 8]
54
+
55
+ # action: same as qpos (next-step joint target), following ACT convention
56
+ # Use qpos[1:] as action for qpos[:-1], pad last with repeat
57
+ action = np.zeros_like(qpos)
58
+ action[:-1] = qpos[1:]
59
+ action[-1] = qpos[-1]
60
+
61
+ # Tactile window: for each control step t, take 5 tactile frames [t*5 : t*5+5]
62
+ T_tac = len(tl)
63
+ window = 5
64
+ assert T_tac == T * window, f"Tactile length {T_tac} != {T}*{window}"
65
+
66
+ tactile = np.zeros((T, 2, window, 4, 4, 3), dtype=np.float32)
67
+ for t in range(T):
68
+ s = t * window
69
+ tactile[t, 0] = tl[s:s+window]
70
+ tactile[t, 1] = tr[s:s+window]
71
+
72
+ # Event labels (rule-based)
73
+ event_labels = label_tactile_events(tl, tr, window=window)
74
+ assert len(event_labels) == T
75
+
76
+ # Save in ACT-compatible format
77
+ with h5py.File(save_path, 'w') as f:
78
+ f.attrs['sim'] = True
79
+ f.create_dataset('action', data=action)
80
+ obs = f.create_group('observations')
81
+ obs.create_dataset('qpos', data=qpos)
82
+ imgs = obs.create_group('images')
83
+ imgs.create_dataset('agentview', data=agentview)
84
+ imgs.create_dataset('eye_in_hand', data=eye_in_hand)
85
+ obs.create_dataset('tactile', data=tactile)
86
+ obs.create_dataset('event_labels', data=event_labels)
87
+
88
+ return T
89
+
90
+
91
+ def process_task(task_name, max_episodes=None):
92
+ """Process all episodes for a task."""
93
+ raw_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'tactile_data', task_name)
94
+ raw_dir = os.path.abspath(raw_dir)
95
+ raw_files = sorted(glob(os.path.join(raw_dir, 'episode_*.hdf5')))
96
+
97
+ if not raw_files:
98
+ print(f"No episodes found in {raw_dir}")
99
+ return
100
+
101
+ if max_episodes:
102
+ raw_files = raw_files[:max_episodes]
103
+
104
+ n_episodes = len(raw_files)
105
+ save_dir = os.path.join(os.path.dirname(__file__), 'processed_data', f'sim-{task_name}', f'default-{n_episodes}')
106
+ os.makedirs(save_dir, exist_ok=True)
107
+
108
+ episode_lens = []
109
+ for i, raw_path in enumerate(raw_files):
110
+ save_path = os.path.join(save_dir, f'episode_{i}.hdf5')
111
+ T = process_episode(raw_path, save_path)
112
+ episode_lens.append(T)
113
+ print(f" [{i+1}/{n_episodes}] {os.path.basename(raw_path)} → {T} steps")
114
+
115
+ max_len = max(episode_lens)
116
+ median_len = sorted(episode_lens)[len(episode_lens) // 2]
117
+
118
+ # Update SIM_TASK_CONFIGS.json
119
+ config_path = os.path.join(os.path.dirname(__file__), 'SIM_TASK_CONFIGS.json')
120
+ try:
121
+ with open(config_path, 'r') as f:
122
+ configs = json.load(f)
123
+ except (FileNotFoundError, json.JSONDecodeError):
124
+ configs = {}
125
+
126
+ config_key = f"sim-{task_name}-default-{n_episodes}"
127
+ configs[config_key] = {
128
+ "dataset_dir": f"./processed_data/sim-{task_name}/default-{n_episodes}",
129
+ "num_episodes": n_episodes,
130
+ "episode_len": max_len,
131
+ "camera_names": ["agentview", "eye_in_hand"],
132
+ }
133
+
134
+ with open(config_path, 'w') as f:
135
+ json.dump(configs, f, indent=4)
136
+
137
+ print(f"\nProcessed {n_episodes} episodes → {save_dir}")
138
+ print(f"Episode lengths: {min(episode_lens)}-{max_len} (median {median_len})")
139
+ print(f"Config key: {config_key}")
140
+
141
+
142
+ if __name__ == '__main__':
143
+ parser = argparse.ArgumentParser(description='Process tactile data for EVA training')
144
+ parser.add_argument('task_name', type=str, help='Task name (e.g., peg_insertion)')
145
+ parser.add_argument('--max_episodes', type=int, default=None, help='Max episodes to process')
146
+ args = parser.parse_args()
147
+
148
+ process_task(args.task_name, args.max_episodes)
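The per-step windowing loop in process_episode can also be written as one vectorized reshape; a sketch that relies on the T_tac == T * window assertion above and produces an identical [T, 2, window, 4, 4, 3] array:

    import numpy as np
    tactile = np.stack(
        [tl.reshape(T, window, 4, 4, 3), tr.reshape(T, window, 4, 4, 3)],
        axis=1,
    ).astype(np.float32)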
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_10.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:754f35d7f400e34c7cff6f07ad68e2a7283f479c7b145c4e4940abf40e3e4111
3
+ size 183777864
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_13.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d75e048c50c79229c63d49f161d7abe91431e88509b30d56f5e3677a0fb3a07
3
+ size 192472440
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_18.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08f3b651840b910dd1d7f0d0ff9cf2e4ecf99bc64f163e49a42ee01d426024c9
3
+ size 192867648
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_20.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a80b7498f4cb7bd496bc851095f0bf05b276d39406074866aaa5918b7183a33e
3
+ size 187334736
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_33.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c52e8a0f7c79ea527049f871a3d43669b1c4446c062b30c36064e8b76008a982
3
+ size 192867648
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_35.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a0a78369d6db3696b0b78ad7895896d69af3c4b479523a8fa7e4a47d97be86c
3
+ size 185358696
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_36.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c5b3f1d73f894c5466594d711925fc16d5284c9a06483a895ad2372f1f0a421
3
+ size 186939528
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_37.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b64437d7df940a370ea6abed257c8293318b62c9118b3a3e108ff23b5214b258
3
+ size 191286816
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_39.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:349634a5f2aea7f8718d037259e708115a315d5d28fcb4f6ca8610a5fe0e6d3a
3
+ size 188520360
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_41.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5b2b03a8960d1da28f136bcd591b09e4eb7b62dc08603f12722ad241f7fc6a5
3
+ size 186149112
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_48.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fcdd231af21c9a0510d3e4bcad923eb083a4d7366fe1177dcbd472a9e2d8978a
3
+ size 189310776
policy/EVA/processed_data/sim-peg_insertion/default-50/episode_5.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc39566a0181b9d309f15b52c51f1d95c181371ba25e1715f96fba6f9a33b448
3
+ size 195634104
policy/EVA/tactile_event.py ADDED
@@ -0,0 +1,178 @@
1
+ """
2
+ Tactile Event Detection for EVA (Event-driven Visual-tactile ACT).
3
+
4
+ Two components:
5
+ 1. Auto event labeling (rule-based, no grad) — produces training labels
6
+ 2. TactileEventDetector (learned) — produces soft event embeddings for the transformer
7
+
8
+ Event classes:
9
+ 0 = no_contact : both fingers below force threshold
10
+ 1 = initial_contact: force crosses threshold (rising edge within window)
11
+ 2 = stuck : sustained high force + high asymmetry, low jerk
12
+ 3 = slip : tangential jerk spike (sudden lateral force change)
13
+ """
14
+
15
+ import torch
16
+ import torch.nn as nn
17
+ import torch.nn.functional as F
18
+ import numpy as np
19
+
20
+
21
+ # ---------------------------------------------------------------------------
22
+ # 1. Rule-based auto labeling (numpy, offline)
23
+ # ---------------------------------------------------------------------------
24
+
25
+ def label_tactile_events(
26
+ tactile_left: np.ndarray,
27
+ tactile_right: np.ndarray,
28
+ contact_thresh: float = 0.3,
29
+ stuck_force_thresh: float = 1.0,
30
+ stuck_asym_thresh: float = 0.3,
31
+ slip_jerk_thresh: float = 0.8,
32
+ window: int = 5,
33
+ ) -> np.ndarray:
34
+ """
35
+ Label each control-rate timestep with a contact event class.
36
+
37
+ Args:
38
+ tactile_left: [T_tactile, 4, 4, 3] (100Hz)
39
+ tactile_right: [T_tactile, 4, 4, 3]
40
+ window: number of tactile frames per control step (5 for 100Hz/20Hz)
41
+
42
+ Returns:
43
+ labels: [T_control] int array, values in {0,1,2,3}
44
+ """
45
+ T = len(tactile_left)
46
+ n_steps = T // window
47
+
48
+ # Per-frame scalar features
49
+ normal_l = np.abs(tactile_left[..., 2]).mean(axis=(-1, -2)) # [T]
50
+ normal_r = np.abs(tactile_right[..., 2]).mean(axis=(-1, -2))
51
+ normal_mean = (normal_l + normal_r) / 2
52
+ asymmetry = np.abs(normal_l - normal_r)
53
+
54
+ tangent_l = tactile_left[..., :2].reshape(T, -1)
55
+ tangent_r = tactile_right[..., :2].reshape(T, -1)
56
+ tangent = np.concatenate([tangent_l, tangent_r], axis=-1)
57
+ jerk = np.linalg.norm(np.diff(tangent, axis=0, prepend=tangent[:1]), axis=-1)
58
+
59
+ labels = np.zeros(n_steps, dtype=np.int64)
60
+
61
+ for i in range(n_steps):
62
+ s, e = i * window, (i + 1) * window
63
+ win_force = normal_mean[s:e]
64
+ win_asym = asymmetry[s:e]
65
+ win_jerk = jerk[s:e]
66
+
67
+ max_force = win_force.max()
68
+ max_jerk = win_jerk.max()
69
+ mean_asym = win_asym.mean()
70
+
71
+ # Check previous window for rising-edge detection
72
+ if i > 0:
73
+ prev_s = (i - 1) * window
74
+ prev_force = normal_mean[prev_s:s].max()
75
+ else:
76
+ prev_force = 0.0
77
+
78
+ if max_force < contact_thresh:
79
+ labels[i] = 0 # no_contact
80
+ elif prev_force < contact_thresh and max_force >= contact_thresh:
81
+ labels[i] = 1 # initial_contact (rising edge)
82
+ elif max_jerk > slip_jerk_thresh:
83
+ labels[i] = 3 # slip
84
+ elif max_force > stuck_force_thresh and mean_asym > stuck_asym_thresh:
85
+ labels[i] = 2 # stuck
86
+ else:
87
+ # In contact but no special event — default to no_contact
88
+ # (could also be a "stable grasp" class, but we keep 4 classes)
89
+ labels[i] = 0
90
+
91
+ return labels
92
+
93
+
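A small sanity check of the rules above on synthetic data (arbitrary values; only the normal channel is set, so jerk stays zero and slip cannot trigger):

    import numpy as np
    tl = np.zeros((20, 4, 4, 3))  # 4 control steps x window=5 frames
    tr = np.zeros((20, 4, 4, 3))
    tl[10:, ..., 2] = 0.5  # normal force appears at frame 10
    tr[10:, ..., 2] = 0.5
    print(label_tactile_events(tl, tr, window=5))
    # [0 0 1 0]: rising edge in step 2, then stable contact, which maps back
    # to class 0 by design (see the else branch above)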
94
+ # ---------------------------------------------------------------------------
95
+ # 2. TactileEventDetector (learned, differentiable)
96
+ # ---------------------------------------------------------------------------
97
+
98
+ class TactileEventDetector(nn.Module):
99
+ """
100
+ Encodes a tactile window into a single event embedding vector.
101
+
102
+ Input: [B, 2, 5, 4, 4, 3] (batch, fingers, frames, rows, cols, xyz)
103
+ Output:
104
+ event_embedding: [B, hidden_dim] — injected into ACT transformer
105
+ event_logits: [B, 4] — supervised by rule-based labels
106
+ """
107
+
108
+ NUM_CLASSES = 4
109
+
110
+ def __init__(self, hidden_dim: int = 256, num_frames: int = 5):
111
+ super().__init__()
112
+ self.hidden_dim = hidden_dim
113
+
114
+ # Per-finger spatial encoder: 4x4 grid x 3 channels → feature vector
115
+ # Flatten grid: 4*4*3 = 48 per frame per finger
116
+ input_per_frame = 2 * 48 # both fingers concatenated
117
+
118
+ # Temporal 1D conv over frames
119
+ self.temporal_encoder = nn.Sequential(
120
+ nn.Conv1d(input_per_frame, 128, kernel_size=3, padding=1),
121
+ nn.ReLU(),
122
+ nn.Conv1d(128, hidden_dim, kernel_size=3, padding=1),
123
+ nn.ReLU(),
124
+ )
125
+
126
+ # Pool over time → single vector
127
+ self.pool = nn.AdaptiveAvgPool1d(1)
128
+
129
+ # Project to event embedding + classification head
130
+ self.event_proj = nn.Linear(hidden_dim, hidden_dim)
131
+ self.event_classifier = nn.Linear(hidden_dim, self.NUM_CLASSES)
132
+
133
+ self._init_weights()
134
+
135
+ def _init_weights(self):
136
+ for m in self.modules():
137
+ if isinstance(m, (nn.Linear, nn.Conv1d)):
138
+ nn.init.xavier_uniform_(m.weight)
139
+ if m.bias is not None:
140
+ nn.init.zeros_(m.bias)
141
+
142
+ def forward(self, tactile):
143
+ """
144
+ Args:
145
+ tactile: [B, 2, 5, 4, 4, 3]
146
+
147
+ Returns:
148
+ event_embedding: [B, hidden_dim]
149
+ event_logits: [B, NUM_CLASSES]
150
+ """
151
+ B, n_fingers, n_frames, H, W, C = tactile.shape # n_fingers=2, n_frames=5 (avoids shadowing F = torch.nn.functional)
152
+ # Flatten spatial dims per finger per frame: [B, n_frames, 2*48]
153
+ x = tactile.reshape(B, n_fingers, n_frames, H * W * C) # [B, 2, 5, 48]
154
+ x = x.permute(0, 2, 1, 3).reshape(B, n_frames, -1) # [B, 5, 96]
155
+
156
+ # Conv1d expects [B, C, T]
157
+ x = x.permute(0, 2, 1) # [B, 96, 5]
158
+ x = self.temporal_encoder(x) # [B, hidden_dim, 5]
159
+ x = self.pool(x).squeeze(-1) # [B, hidden_dim]
160
+
161
+ event_embedding = self.event_proj(x) # [B, hidden_dim]
162
+ event_logits = self.event_classifier(x) # [B, 4]
163
+
164
+ return event_embedding, event_logits
165
+
166
+
167
+ def tactile_event_loss(event_logits, event_labels):
168
+ """
169
+ Auxiliary cross-entropy loss for event classification.
170
+
171
+ Args:
172
+ event_logits: [B, 4] from TactileEventDetector
173
+ event_labels: [B] int64 from rule-based labeling
174
+
175
+ Returns:
176
+ scalar loss
177
+ """
178
+ return F.cross_entropy(event_logits, event_labels)
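And a quick shape check for the detector (batch size is arbitrary):

    det = TactileEventDetector(hidden_dim=256)
    x = torch.randn(8, 2, 5, 4, 4, 3)  # [B, fingers, frames, H, W, xyz]
    emb, logits = det(x)
    print(emb.shape, logits.shape)  # torch.Size([8, 256]) torch.Size([8, 4])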
policy/EVA/train_all.sh ADDED
@@ -0,0 +1,22 @@
1
+ #!/bin/bash
2
+ set -e
3
+ cd "$(dirname "$0")"
4
+
5
+ # Step 1: Process data for all tasks
6
+ echo "========== Processing data =========="
7
+ for task in peg_insertion gentle_stack precision_grasp; do
8
+ echo "--- $task ---"
9
+ python process_data.py $task
10
+ done
11
+
12
+ # Step 2: Train EVA on each task
13
+ echo ""
14
+ echo "========== Training EVA =========="
15
+ for task in peg_insertion gentle_stack precision_grasp; do
16
+ echo ""
17
+ echo "====== Training $task ======"
18
+ python imitate_episodes.py --task $task
19
+ done
20
+
21
+ echo ""
22
+ echo "========== All done =========="
policy/EVA/train_config.yaml ADDED
@@ -0,0 +1,25 @@
1
+ # EVA Training Config
2
+ # Usage: cd policy/EVA && python imitate_episodes.py [--task peg_insertion]
3
+
4
+ # Task (override with --task)
5
+ task_name: sim-peg_insertion-default-50
6
+ ckpt_dir: ./checkpoints/peg_insertion_eva
7
+
8
+ # Model
9
+ state_dim: 8
10
+ hidden_dim: 256
11
+ dim_feedforward: 2048
12
+ chunk_size: 20
13
+ kl_weight: 10
14
+ event_weight: 0.1
15
+
16
+ # Training
17
+ batch_size: 128
18
+ lr: 1.0e-4
19
+ num_epochs: 3000
20
+ seed: 1
21
+ save_freq: 500
22
+ temporal_agg: false
23
+
24
+ # Eval
25
+ eval: false
policy/EVA/utils.py ADDED
@@ -0,0 +1,185 @@
1
+ """
2
+ Data loading utilities for EVA.
3
+ Extends ACT's EpisodicDataset with tactile data and event labels.
4
+ """
5
+
6
+ import numpy as np
7
+ import torch
8
+ import os
9
+ import h5py
10
+ from torch.utils.data import DataLoader
11
+
12
+
13
+ class EVAEpisodicDataset(torch.utils.data.Dataset):
14
+ """
15
+ Dataset that returns (image, qpos, action, is_pad, tactile, event_label) tuples.
16
+ Tactile: [2, 5, 4, 4, 3] per timestep.
17
+ Event label: int64 scalar per timestep.
18
+ """
19
+
20
+ def __init__(self, episode_ids, dataset_dir, camera_names, norm_stats, max_action_len):
21
+ super().__init__()
22
+ self.episode_ids = episode_ids
23
+ self.dataset_dir = dataset_dir
24
+ self.camera_names = camera_names
25
+ self.norm_stats = norm_stats
26
+ self.max_action_len = max_action_len
27
+ self.is_sim = None
28
+ self.__getitem__(0) # initialize self.is_sim
29
+
30
+ def __len__(self):
31
+ return len(self.episode_ids)
32
+
33
+ def __getitem__(self, index):
34
+ episode_id = self.episode_ids[index]
35
+ dataset_path = os.path.join(self.dataset_dir, f"episode_{episode_id}.hdf5")
36
+
37
+ with h5py.File(dataset_path, "r") as root:
38
+ is_sim = root.attrs.get("sim", True)
39
+ episode_len = root["/action"].shape[0]
40
+ start_ts = np.random.choice(episode_len)
41
+
42
+ # Observation at start_ts
43
+ qpos = root["/observations/qpos"][start_ts]
44
+ image_dict = {}
45
+ for cam_name in self.camera_names:
46
+ image_dict[cam_name] = root[f"/observations/images/{cam_name}"][start_ts]
47
+
48
+ # Tactile at start_ts: [2, 5, 4, 4, 3]
49
+ tactile = root["/observations/tactile"][start_ts]
50
+
51
+ # Event label at start_ts
52
+ event_label = root["/observations/event_labels"][start_ts]
53
+
54
+ # Actions from start_ts onward
55
+ action = root["/action"][start_ts:]
56
+ action_len = episode_len - start_ts
57
+
58
+ self.is_sim = is_sim
59
+
60
+ # Pad actions
61
+ padded_action = np.zeros((self.max_action_len, action.shape[1]), dtype=np.float32)
62
+ padded_action[:action_len] = action
63
+ is_pad = np.ones(self.max_action_len, dtype=bool)
64
+ is_pad[:action_len] = False
65
+
66
+ # Stack camera images
67
+ all_cam_images = np.stack([image_dict[c] for c in self.camera_names], axis=0)
68
+
69
+ # To tensors
70
+ image_data = torch.from_numpy(all_cam_images)
71
+ qpos_data = torch.from_numpy(qpos).float()
72
+ action_data = torch.from_numpy(padded_action).float()
73
+ is_pad = torch.from_numpy(is_pad).bool()
74
+ tactile_data = torch.from_numpy(tactile).float()
75
+ event_label = torch.tensor(event_label, dtype=torch.long)
76
+
77
+ # NCHW for images
78
+ image_data = torch.einsum("k h w c -> k c h w", image_data)
79
+ image_data = image_data / 255.0
80
+
81
+ # Normalize
82
+ action_data = (action_data - self.norm_stats["action_mean"]) / self.norm_stats["action_std"]
83
+ qpos_data = (qpos_data - self.norm_stats["qpos_mean"]) / self.norm_stats["qpos_std"]
84
+
85
+ return image_data, qpos_data, action_data, is_pad, tactile_data, event_label
86
+
87
+
88
+ def get_norm_stats(dataset_dir, num_episodes):
89
+ """Compute normalization statistics for qpos and action."""
90
+ all_qpos_data = []
91
+ all_action_data = []
92
+
93
+ for episode_idx in range(num_episodes):
94
+ dataset_path = os.path.join(dataset_dir, f"episode_{episode_idx}.hdf5")
95
+ with h5py.File(dataset_path, "r") as root:
96
+ qpos = root["/observations/qpos"][()]
97
+ action = root["/action"][()]
98
+ all_qpos_data.append(torch.from_numpy(qpos))
99
+ all_action_data.append(torch.from_numpy(action))
100
+
101
+ max_qpos_len = max(q.size(0) for q in all_qpos_data)
102
+ max_action_len = max(a.size(0) for a in all_action_data)
103
+
104
+ # Pad to max length
105
+ padded_qpos = []
106
+ for qpos in all_qpos_data:
107
+ if qpos.size(0) < max_qpos_len:
108
+ pad = qpos[-1:].repeat(max_qpos_len - qpos.size(0), 1)
109
+ qpos = torch.cat([qpos, pad], dim=0)
110
+ padded_qpos.append(qpos)
111
+
112
+ padded_action = []
113
+ for action in all_action_data:
114
+ if action.size(0) < max_action_len:
115
+ pad = action[-1:].repeat(max_action_len - action.size(0), 1)
116
+ action = torch.cat([action, pad], dim=0)
117
+ padded_action.append(action)
118
+
119
+ all_qpos_data = torch.stack(padded_qpos)
120
+ all_action_data = torch.stack(padded_action)
121
+
122
+ action_mean = all_action_data.mean(dim=[0, 1], keepdim=True)
123
+ action_std = all_action_data.std(dim=[0, 1], keepdim=True)
124
+ action_std = torch.clip(action_std, 1e-2, np.inf)
125
+
126
+ qpos_mean = all_qpos_data.mean(dim=[0, 1], keepdim=True)
127
+ qpos_std = all_qpos_data.std(dim=[0, 1], keepdim=True)
128
+ qpos_std = torch.clip(qpos_std, 1e-2, np.inf)
129
+
130
+ stats = {
131
+ "action_mean": action_mean.numpy().squeeze(),
132
+ "action_std": action_std.numpy().squeeze(),
133
+ "qpos_mean": qpos_mean.numpy().squeeze(),
134
+ "qpos_std": qpos_std.numpy().squeeze(),
135
+ }
136
+
137
+ return stats, max_action_len
138
+
139
+
140
+ def load_data(dataset_dir, num_episodes, camera_names, batch_size_train, batch_size_val):
141
+ """Load EVA dataset with tactile data."""
142
+ print(f"\nData from: {dataset_dir}\n")
143
+
144
+ train_ratio = 0.8
145
+ shuffled_indices = np.random.permutation(num_episodes)
146
+ train_indices = shuffled_indices[:int(train_ratio * num_episodes)]
147
+ val_indices = shuffled_indices[int(train_ratio * num_episodes):]
148
+
149
+ norm_stats, max_action_len = get_norm_stats(dataset_dir, num_episodes)
150
+
151
+ train_dataset = EVAEpisodicDataset(train_indices, dataset_dir, camera_names, norm_stats, max_action_len)
152
+ val_dataset = EVAEpisodicDataset(val_indices, dataset_dir, camera_names, norm_stats, max_action_len)
153
+
154
+ train_dataloader = DataLoader(
155
+ train_dataset, batch_size=batch_size_train, shuffle=True,
156
+ pin_memory=True, num_workers=1, prefetch_factor=1,
157
+ )
158
+ val_dataloader = DataLoader(
159
+ val_dataset, batch_size=batch_size_val, shuffle=True,
160
+ pin_memory=True, num_workers=1, prefetch_factor=1,
161
+ )
162
+
163
+ return train_dataloader, val_dataloader, norm_stats, train_dataset.is_sim
164
+
165
+
166
+ # Helper functions (same as ACT)
167
+
168
+ def compute_dict_mean(epoch_dicts):
169
+ result = {k: None for k in epoch_dicts[0]}
170
+ num_items = len(epoch_dicts)
171
+ for k in result:
172
+ value_sum = 0
173
+ for epoch_dict in epoch_dicts:
174
+ value_sum += epoch_dict[k]
175
+ result[k] = value_sum / num_items
176
+ return result
177
+
178
+
179
+ def detach_dict(d):
180
+ return {k: v.detach() for k, v in d.items()}
181
+
182
+
183
+ def set_seed(seed):
184
+ torch.manual_seed(seed)
185
+ np.random.seed(seed)
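A quick end-to-end check of the loader outputs (a sketch; the path and counts follow the processed peg_insertion layout used elsewhere in this repo):

    train_loader, val_loader, stats, is_sim = load_data(
        "./processed_data/sim-peg_insertion/default-50",
        num_episodes=50,
        camera_names=["agentview", "eye_in_hand"],
        batch_size_train=8,
        batch_size_val=8,
    )
    image, qpos, action, is_pad, tactile, event = next(iter(train_loader))
    # image: [8, 2, 3, 256, 256], qpos: [8, 8], action: [8, max_action_len, 8],
    # is_pad: [8, max_action_len], tactile: [8, 2, 5, 4, 4, 3], event: [8]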
robosuite/models/assets/grippers/robotiq_gripper_85.xml CHANGED
@@ -62,7 +62,7 @@
62
  <geom type="mesh" group="0" rgba="0.1 0.1 0.1 1" mesh="robotiq_arg2f_85_inner_finger" name="left_inner_finger_collision" solref="0.01 0.25" />
63
  <geom size="0.011 0.003175 0.01875" pos="0 -0.0220203 0.03242" type="box" contype="0" conaffinity="0" group="1" rgba="0.9 0.9 0.9 1" name="left_fingertip_visual" />
64
  <geom size="0.011 0.003175 0.01875" pos="0 -0.0220203 0.03242" type="box" group="0" rgba="0.9 0.9 0.9 1" name="left_fingertip_collision" solref="0.01 0.25" />
65
- <geom size="0.010 0.001 0.01675" pos="0 -0.0245203 0.03242" type="box" group="0" name="left_fingerpad_collision" />
66
  <!-- 4x4 taxel grid on left fingerpad (uSkin-like) -->
67
  <site name="taxel_l_r0c0" type="sphere" size="0.002" pos="-0.0075 -0.0255 0.01986" rgba="0 1 0 0.3" group="4"/>
68
  <site name="taxel_l_r0c1" type="sphere" size="0.002" pos="-0.0025 -0.0255 0.01986" rgba="0 1 0 0.3" group="4"/>
@@ -102,7 +102,7 @@
102
  <geom type="mesh" group="0" rgba="0.1 0.1 0.1 1" mesh="robotiq_arg2f_85_inner_finger" name="right_inner_finger_collision" solref="0.01 0.25" />
103
  <geom size="0.011 0.003175 0.01875" pos="0 -0.0220203 0.03242" type="box" contype="0" conaffinity="0" group="1" rgba="0.9 0.9 0.9 1" name="right_fingertip_visual" />
104
  <geom size="0.011 0.003175 0.01875" pos="0 -0.0220203 0.03242" type="box" group="0" rgba="0.9 0.9 0.9 1" name="right_fingertip_collision" solref="0.01 0.25" />
105
- <geom size="0.010 0.001 0.01675" pos="0 -0.0245203 0.03242" type="box" group="0" name="right_fingerpad_collision" />
106
  <!-- 4x4 taxel grid on right fingerpad (uSkin-like) -->
107
  <site name="taxel_r_r0c0" type="sphere" size="0.002" pos="-0.0075 -0.0255 0.01986" rgba="0 0.5 1 0.3" group="4"/>
108
  <site name="taxel_r_r0c1" type="sphere" size="0.002" pos="-0.0025 -0.0255 0.01986" rgba="0 0.5 1 0.3" group="4"/>
 
62
  <geom type="mesh" group="0" rgba="0.1 0.1 0.1 1" mesh="robotiq_arg2f_85_inner_finger" name="left_inner_finger_collision" solref="0.01 0.25" />
63
  <geom size="0.011 0.003175 0.01875" pos="0 -0.0220203 0.03242" type="box" contype="0" conaffinity="0" group="1" rgba="0.9 0.9 0.9 1" name="left_fingertip_visual" />
64
  <geom size="0.011 0.003175 0.01875" pos="0 -0.0220203 0.03242" type="box" group="0" rgba="0.9 0.9 0.9 1" name="left_fingertip_collision" solref="0.01 0.25" />
65
+ <geom size="0.010 0.001 0.01675" pos="0 -0.0245203 0.03242" type="box" group="0" name="left_fingerpad_collision" friction="2.0 0.5 0.01" condim="4" />
66
  <!-- 4x4 taxel grid on left fingerpad (uSkin-like) -->
67
  <site name="taxel_l_r0c0" type="sphere" size="0.002" pos="-0.0075 -0.0255 0.01986" rgba="0 1 0 0.3" group="4"/>
68
  <site name="taxel_l_r0c1" type="sphere" size="0.002" pos="-0.0025 -0.0255 0.01986" rgba="0 1 0 0.3" group="4"/>
 
102
  <geom type="mesh" group="0" rgba="0.1 0.1 0.1 1" mesh="robotiq_arg2f_85_inner_finger" name="right_inner_finger_collision" solref="0.01 0.25" />
103
  <geom size="0.011 0.003175 0.01875" pos="0 -0.0220203 0.03242" type="box" contype="0" conaffinity="0" group="1" rgba="0.9 0.9 0.9 1" name="right_fingertip_visual" />
104
  <geom size="0.011 0.003175 0.01875" pos="0 -0.0220203 0.03242" type="box" group="0" rgba="0.9 0.9 0.9 1" name="right_fingertip_collision" solref="0.01 0.25" />
105
+ <geom size="0.010 0.001 0.01675" pos="0 -0.0245203 0.03242" type="box" group="0" name="right_fingerpad_collision" friction="2.0 0.5 0.01" condim="4" />
106
  <!-- 4x4 taxel grid on right fingerpad (uSkin-like) -->
107
  <site name="taxel_r_r0c0" type="sphere" size="0.002" pos="-0.0075 -0.0255 0.01986" rgba="0 0.5 1 0.3" group="4"/>
108
  <site name="taxel_r_r0c1" type="sphere" size="0.002" pos="-0.0025 -0.0255 0.01986" rgba="0 0.5 1 0.3" group="4"/>
tactile_data/videos/peg_insertion/episode_00.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf73e0db1834b78ca661a313cbdd105b64a7a4f0209383bb834ad6ddc6adf727
3
+ size 132782352
tactile_data/videos/peg_insertion/episode_01.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cb4e78fa52e70927d38a3a56c6931c2d1207fca024c011fafb743c34180b428
3
+ size 177724829
tactile_data/videos/peg_insertion/episode_02.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:747aaab888aa488f3b8becf1b31ea6cca53eb1083cf145bad1f6a9bdcdf7775a
3
+ size 181730076
tactile_data/videos/peg_insertion/episode_03.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8e46266901d816cdfa838418e4e02e5f2ee604aed222f233b8ba688a9d06cdc
3
+ size 177563752
tactile_data/videos/peg_insertion/episode_04.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6365ce9231745f24285fd4faf2a2ed434b323094ef8fe3ba6e1ca577fba219d1
3
+ size 131992471
tactile_data/videos/peg_insertion/episode_05.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff7af7a443c2a8701a92a3716cee5fba84435e14735fc0219c41c8d6f89450c1
3
+ size 179919535
tactile_data/videos/peg_insertion/episode_06.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13ee0e0dc21187609f98a6db4f03e13ebc2c7536cd25ef7970209d95fe3f8fe7
3
+ size 178115959
tactile_data/videos/peg_insertion/episode_07.hdf5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5345e8d1041633e862644ca962f6ccece0ce695f496a17623773f6e42cd4fea
3
+ size 180681465
tactile_data/videos/peg_insertion/peg_insertion_ep00.mp4 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7e7b9e65b38e27a044e55ffcd7ac6c546263cd85824d6a06449eb425ef60e8ad
3
- size 801077
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3492f2a6a81fb8f93ce90e1bec6962d072ad6c52c3747876b6fb4d3eea34f04
3
+ size 1204351