"""
Contains torch Modules that help deal with inputs consisting of multiple
modalities. This is extremely common when networks must deal with one or
more observation dictionaries, where each input dictionary can have
observation keys of a certain modality and shape.
As an example, an observation could consist of a flat "robot0_eef_pos" observation key,
and a 3-channel RGB "agentview_image" observation key.
"""
import sys
import numpy as np
import textwrap
from copy import deepcopy
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from robomimic.utils.python_utils import extract_class_init_kwargs_from_dict
import robomimic.utils.tensor_utils as TensorUtils
import robomimic.utils.obs_utils as ObsUtils
from robomimic.models.base_nets import Module, Sequential, MLP, RNN_Base, ResNet18Conv, SpatialSoftmax, \
FeatureAggregator
from robomimic.models.obs_core import VisualCore, Randomizer
from robomimic.models.transformers import PositionalEncoding, GPT_Backbone
def obs_encoder_factory(
obs_shapes,
feature_activation=nn.ReLU,
encoder_kwargs=None,
):
"""
Utility function to create an @ObservationEncoder from kwargs specified in config.
Args:
obs_shapes (OrderedDict): a dictionary that maps observation key to
expected shapes for observations.
feature_activation: non-linearity to apply after each obs net - defaults to ReLU. Pass
None to apply no activation.
encoder_kwargs (dict or None): If None, results in default encoder_kwargs being applied. Otherwise, should be
nested dictionary containing relevant per-modality information for encoder networks.
Should be of form:
obs_modality1: dict
feature_dimension: int
core_class: str
core_kwargs: dict
...
...
obs_randomizer_class: str
obs_randomizer_kwargs: dict
...
...
obs_modality2: dict
...
"""
    fuser = None if encoder_kwargs is None else encoder_kwargs.get("rgb", {}).get("fuser", None)
    enc = ObservationEncoder(feature_activation=feature_activation, fuser=fuser)
for k, obs_shape in obs_shapes.items():
obs_modality = ObsUtils.OBS_KEYS_TO_MODALITIES[k]
enc_kwargs = deepcopy(ObsUtils.DEFAULT_ENCODER_KWARGS[obs_modality]) if encoder_kwargs is None else \
deepcopy(encoder_kwargs[obs_modality])
# Sanity check for kwargs in case they don't exist / are None
if enc_kwargs.get("core_kwargs", None) is None:
enc_kwargs["core_kwargs"] = {}
# Add in input shape info
enc_kwargs["core_kwargs"]["input_shape"] = obs_shape
# If group class is specified, then make sure corresponding kwargs only contain relevant kwargs
if enc_kwargs["core_class"] is not None:
enc_kwargs["core_kwargs"] = extract_class_init_kwargs_from_dict(
cls=ObsUtils.OBS_ENCODER_CORES[enc_kwargs["core_class"]],
dic=enc_kwargs["core_kwargs"],
copy=False,
)
        # Set up the observation randomizer(s) for this key
randomizers = []
obs_randomizer_class_list = enc_kwargs["obs_randomizer_class"]
obs_randomizer_kwargs_list = enc_kwargs["obs_randomizer_kwargs"]
if not isinstance(obs_randomizer_class_list, list):
obs_randomizer_class_list = [obs_randomizer_class_list]
if not isinstance(obs_randomizer_kwargs_list, list):
obs_randomizer_kwargs_list = [obs_randomizer_kwargs_list]
for rand_class, rand_kwargs in zip(obs_randomizer_class_list, obs_randomizer_kwargs_list):
rand = None
if rand_class is not None:
rand_kwargs["input_shape"] = obs_shape
rand_kwargs = extract_class_init_kwargs_from_dict(
cls=ObsUtils.OBS_RANDOMIZERS[rand_class],
dic=rand_kwargs,
copy=False,
)
rand = ObsUtils.OBS_RANDOMIZERS[rand_class](**rand_kwargs)
randomizers.append(rand)
input_maps = enc_kwargs.get("input_maps", {})
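        # If another "camera/image/varied_camera" key has already been registered, share its encoder
        # net with this key instead of constructing a new core network.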
if any("camera/image/varied_camera" in s for s in enc.obs_shapes.keys()) and ("camera/image/varied_camera" in k):
existing_varied_cam = [a for a in enc.obs_shapes.keys() if "camera/image/varied_camera" in a][0]
share = existing_varied_cam
net_class = None
net_kwargs = None
else:
share = None
net_class = enc_kwargs["core_class"]
net_kwargs = enc_kwargs["core_kwargs"]
enc.register_obs_key(
name=k,
shape=obs_shape,
input_map=input_maps.get(k, None),
net_class=net_class,
net_kwargs=net_kwargs,
randomizers=randomizers,
share_net_from=share,
)
enc.make()
return enc
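# Hypothetical usage sketch for @obs_encoder_factory (illustrative, not part of the library API).
# It assumes ObsUtils has been initialized so that the example keys below map to modalities, and
# that `encoder_kwargs` is the nested per-modality dict described in the docstring above.
#
#   obs_shapes = OrderedDict(
#       robot0_eef_pos=(3,),
#       agentview_image=(3, 84, 84),
#   )
#   enc = obs_encoder_factory(obs_shapes=obs_shapes, encoder_kwargs=encoder_kwargs)
#   obs_dict = {k: torch.randn(8, *shape) for k, shape in obs_shapes.items()}
#   feats = enc(obs_dict)  # flat features of shape [8, enc.output_shape()[0]]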
class ObservationEncoder(Module):
"""
Module that processes inputs by observation key and then concatenates the processed
observation keys together. Each key is processed with an encoder head network.
Call @register_obs_key to register observation keys with the encoder and then
finally call @make to create the encoder networks.
"""
    def __init__(self, feature_activation=nn.ReLU, fuser=None):
        """
        Args:
            feature_activation: non-linearity to apply after each obs net - defaults to ReLU. Pass
                None to apply no activation.
            fuser (str or None): optional strategy for fusing image features - one of
                "transformer", "perceiver", or None to simply flatten and concatenate all features.
        """
super(ObservationEncoder, self).__init__()
self.obs_shapes = OrderedDict()
self.obs_input_maps = OrderedDict()
self.obs_nets_classes = OrderedDict()
self.obs_nets_kwargs = OrderedDict()
self.obs_share_mods = OrderedDict()
self.obs_nets = nn.ModuleDict()
self.obs_randomizers = nn.ModuleDict()
self.feature_activation = feature_activation
self.fuser = fuser
self.num_images = 0
self._locked = False
def register_obs_key(
self,
name,
shape,
input_map=None,
net_class=None,
net_kwargs=None,
net=None,
randomizers=None,
share_net_from=None,
):
"""
Register an observation key that this encoder should be responsible for.
Args:
name (str): modality name
            shape (int tuple): shape of modality
            input_map (dict or None): if provided, a dictionary that maps the input names expected
                by this key's encoder net to observation keys in the incoming observation dictionary
net_class (str): name of class in base_nets.py that should be used
to process this observation key before concatenation. Pass None to flatten
and concatenate the observation key directly.
net_kwargs (dict): arguments to pass to @net_class
net (Module instance): if provided, use this Module to process the observation key
instead of creating a different net
            randomizers (list of Randomizer instances or None): if provided, use these Modules to augment
                observation keys coming in to the encoder, and possibly augment the processed outputs as well
share_net_from (str): if provided, use the same instance of @net_class
as another observation key. This observation key must already exist in this encoder.
Warning: Note that this does not share the observation key randomizer
"""
assert not self._locked, "ObservationEncoder: @register_obs_key called after @make"
assert name not in self.obs_shapes, "ObservationEncoder: modality {} already exists".format(name)
if "image" in name:
self.num_images += 1
if net is not None:
assert isinstance(net, Module), "ObservationEncoder: @net must be instance of Module class"
assert (net_class is None) and (net_kwargs is None) and (share_net_from is None), \
"ObservationEncoder: @net provided - ignore other net creation options"
if share_net_from is not None:
# share processing with another modality
assert (net_class is None) and (net_kwargs is None)
assert share_net_from in self.obs_shapes
        net_kwargs = deepcopy(net_kwargs) if net_kwargs is not None else {}
        randomizers = randomizers if randomizers is not None else []
        for rand in randomizers:
if rand is not None:
assert isinstance(rand, Randomizer)
if net_kwargs is not None:
# update input shape to visual core
net_kwargs["input_shape"] = rand.output_shape_in(shape)
self.obs_shapes[name] = shape
self.obs_input_maps[name] = input_map
self.obs_nets_classes[name] = net_class
self.obs_nets_kwargs[name] = net_kwargs
self.obs_nets[name] = net
self.obs_randomizers[name] = nn.ModuleList(randomizers)
self.obs_share_mods[name] = share_net_from
def make(self):
"""
Creates the encoder networks and locks the encoder so that more modalities cannot be added.
"""
assert not self._locked, "ObservationEncoder: @make called more than once"
self._create_layers()
self._locked = True
def _create_layers(self):
"""
Creates all networks and layers required by this encoder using the registered modalities.
"""
assert not self._locked, "ObservationEncoder: layers have already been created"
for k in self.obs_shapes:
if self.obs_nets_classes[k] is not None:
# create net to process this modality
self.obs_nets[k] = ObsUtils.OBS_ENCODER_CORES[self.obs_nets_classes[k]](**self.obs_nets_kwargs[k])
elif self.obs_share_mods[k] is not None:
# make sure net is shared with another modality
self.obs_nets[k] = self.obs_nets[self.obs_share_mods[k]]
self.activation = None
if self.feature_activation is not None:
self.activation = self.feature_activation()
if self.fuser == "transformer":
## Define a fuser which takes multiple camera features as [B, sequence of pixels, features]
## and encodes them with a transformer
input_features = self.obs_nets["camera/image/hand_camera_left_image"].feat_shape
# First scales down number of features on each pixel
            self.c1 = nn.Conv1d(in_channels=input_features[1], out_channels=512, kernel_size=1)
            self.c2 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=1)
            layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
            self.fusernetwork = nn.TransformerEncoder(layer, num_layers=6)
            # Finally flatten and apply a linear layer before concatenation with the other low-dim features
            self.l1 = nn.Linear(input_features[0] * self.num_images * 512, 2048)
elif self.fuser == "perceiver":
from transformers import PerceiverModel, PerceiverConfig
## Define a fuser which takes multiple camera features as [B, sequence of pixels, features]
## and encodes them with a transformer
input_features = self.obs_nets["camera/image/hand_camera_left_image"].feat_shape
# Copied all values from here: https://github.com/TRI-ML/vidar/blob/main/vidar/arch/networks/perceiver/DeFiNeNet.py#L102
            self.perceiver_config = PerceiverConfig(
d_latents=512,
d_model=input_features[1],
num_latents=512,
hidden_act='gelu',
hidden_dropout_prob=0.25,
initializer_range=0.02,
layer_norm_eps=1e-12,
num_blocks=1,
num_cross_attention_heads=1,
num_self_attends_per_block=8,
num_self_attention_heads=8,
qk_channels=None,
v_channels=None,
)
            self.fusernetwork = PerceiverModel(self.perceiver_config)
            # Finally flatten and apply a linear layer before concatenation with the other low-dim features
            self.l1 = nn.Linear(512 * 512, 2048)
def final_collation(self, feats):
if self.fuser is None:
## Extremely hacky fix to handle the expected shape of raw language
featsnew = [TensorUtils.flatten(x, begin_axis=1) for k, x in feats.items() if "raw" not in k]
if 'lang_fixed/language_raw' in feats.keys():
featsnew += [torch.zeros(featsnew[0].shape[0], 1).to(featsnew[0].device)]
            return torch.cat(featsnew, dim=-1)
keys_with_images = [a for a in feats.keys() if "image" in a]
keys_without_images = [a for a in feats.keys() if "image" not in a]
non_image_feats = [TensorUtils.flatten(feats[k], begin_axis=1) for k in keys_without_images]
all_image_feats = []
for k in keys_with_images:
all_image_feats.append(feats[k])
if self.fuser == "transformer":
            all_image_feats = torch.cat(all_image_feats, dim=1)
all_image_feats_postconv = self.c2(F.relu(self.c1(all_image_feats.permute(0, 2, 1)))).permute(0, 2, 1)
all_image_feats_posttrans = self.fusernetwork(all_image_feats_postconv)
output = self.l1(TensorUtils.flatten(all_image_feats_posttrans, begin_axis=1))
return torch.cat(non_image_feats + [output], -1)
elif self.fuser == "perceiver":
# Concatenate all embeddings before applying Perceiver block like here:
# https://github.com/TRI-ML/vidar/blob/main/vidar/arch/networks/perceiver/DeFiNeNet.py#L357
            all_image_feats = torch.cat(all_image_feats, dim=1)
all_image_feats_posttrans = self.fusernetwork(all_image_feats).last_hidden_state
output = self.l1(TensorUtils.flatten(all_image_feats_posttrans, begin_axis=1))
# TODO(Ashwin): Ideally should include low-dim features in perceiver input too
return torch.cat(non_image_feats + [output], -1)
else:
raise NotImplementedError("Unsupported fuser")
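    # Shape walkthrough for the "transformer" fuser path above (illustrative; assumes each image
    # backbone emits per-pixel features of shape [P, C], so feats[k] is [B, P, C] per camera):
    #   concat N camera feature maps along dim 1           -> [B, N*P, C]
    #   permute + c1/c2 (1x1 convs over the channel dim)   -> [B, 512, N*P], permuted back to [B, N*P, 512]
    #   TransformerEncoder (batch_first=True)              -> [B, N*P, 512]
    #   flatten + l1                                       -> [B, 2048]
    # The fused 2048-dim image feature is then concatenated with the flattened low-dim features.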
def forward(self, obs_dict):
"""
Processes modalities according to the ordering in @self.obs_shapes. For each
modality, it is processed with a randomizer (if present), an encoder
network (if present), and again with the randomizer (if present), flattened,
and then concatenated with the other processed modalities.
Args:
obs_dict (OrderedDict): dictionary that maps modalities to torch.Tensor
batches that agree with @self.obs_shapes. All modalities in
@self.obs_shapes must be present, but additional modalities
can also be present.
Returns:
feats (torch.Tensor): flat features of shape [B, D]
"""
assert self._locked, "ObservationEncoder: @make has not been called yet"
# ensure all modalities that the encoder handles are present
assert set(self.obs_shapes.keys()).issubset(obs_dict), "ObservationEncoder: {} does not contain all modalities {}".format(
list(obs_dict.keys()), list(self.obs_shapes.keys())
)
# process modalities by order given by @self.obs_shapes
feats = {}
for k in self.obs_shapes:
if self.obs_input_maps[k] is not None:
x = dict()
for input_name, input_obs_key in self.obs_input_maps[k].items():
x[input_name] = obs_dict[input_obs_key]
else:
x = obs_dict[k]
# maybe process encoder input with randomizer
if isinstance(x, dict):
for input_name, input_obs_key in self.obs_input_maps[k].items():
randomizers = self.obs_randomizers[input_obs_key]
for rand in randomizers:
if rand is not None:
x[input_name] = rand.forward_in(x[input_name])
else:
for rand in self.obs_randomizers[k]:
if rand is not None:
x = rand.forward_in(x)
# maybe process with obs net
if self.obs_nets[k] is not None:
x = self.obs_nets[k](x)
if self.activation is not None:
x = self.activation(x)
# maybe process encoder output with randomizer
if isinstance(x, dict):
for input_name, input_obs_key in self.obs_input_maps[k].items():
randomizers = self.obs_randomizers[input_obs_key]
for rand in randomizers:
if rand is not None:
x[input_name] = rand.forward_out(x[input_name])
else:
for rand in self.obs_randomizers[k]:
if rand is not None:
x = rand.forward_out(x)
            # note: flattening is deferred to final_collation(), which handles fuser-specific collation
            feats[k] = x
        # collate (and possibly fuse) all processed features into a single flat tensor
        return self.final_collation(feats)
def output_shape(self, input_shape=None):
"""
Compute the output shape of the encoder.
"""
feat_dim = 0
for k in self.obs_shapes:
# If the fuser is not None for image features, don't naively concatenate flattened shapes
if (self.fuser is not None) and ("image" in k):
continue
feat_shape = self.obs_shapes[k]
for rand in self.obs_randomizers[k]:
if rand is not None:
feat_shape = rand.output_shape_in(feat_shape)
if self.obs_nets[k] is not None:
feat_shape = self.obs_nets[k].output_shape(feat_shape)
for rand in self.obs_randomizers[k]:
if rand is not None:
feat_shape = rand.output_shape_out(feat_shape)
feat_dim += int(np.prod(feat_shape))
if self.fuser is not None:
feat_dim += 2048
return [feat_dim]
def __repr__(self):
"""
Pretty print the encoder.
"""
header = '{}'.format(str(self.__class__.__name__))
msg = ''
for k in self.obs_shapes:
msg += textwrap.indent('\nKey(\n', ' ' * 4)
indent = ' ' * 8
msg += textwrap.indent("name={}\nshape={}\n".format(k, self.obs_shapes[k]), indent)
msg += textwrap.indent("modality={}\n".format(ObsUtils.OBS_KEYS_TO_MODALITIES[k]), indent)
msg += textwrap.indent("randomizer={}\n".format(self.obs_randomizers[k]), indent)
msg += textwrap.indent("net={}\n".format(self.obs_nets[k]), indent)
msg += textwrap.indent("sharing_from={}\n".format(self.obs_share_mods[k]), indent)
msg += textwrap.indent(")", ' ' * 4)
msg += textwrap.indent("\noutput_shape={}".format(self.output_shape()), ' ' * 4)
msg = header + '(' + msg + '\n)'
return msg
class ObservationDecoder(Module):
"""
Module that can generate observation outputs by modality. Inputs are assumed
to be flat (usually outputs from some hidden layer). Each observation output
is generated with a linear layer from these flat inputs. Subclass this
module in order to implement more complex schemes for generating each
modality.
"""
def __init__(
self,
decode_shapes,
input_feat_dim,
):
"""
Args:
decode_shapes (OrderedDict): a dictionary that maps observation key to
expected shape. This is used to generate output modalities from the
input features.
input_feat_dim (int): flat input dimension size
"""
super(ObservationDecoder, self).__init__()
# important: sort observation keys to ensure consistent ordering of modalities
assert isinstance(decode_shapes, OrderedDict)
self.obs_shapes = OrderedDict()
for k in decode_shapes:
self.obs_shapes[k] = decode_shapes[k]
self.input_feat_dim = input_feat_dim
self._create_layers()
def _create_layers(self):
"""
Create a linear layer to predict each modality.
"""
self.nets = nn.ModuleDict()
for k in self.obs_shapes:
layer_out_dim = int(np.prod(self.obs_shapes[k]))
self.nets[k] = nn.Linear(self.input_feat_dim, layer_out_dim)
def output_shape(self, input_shape=None):
"""
Returns output shape for this module, which is a dictionary instead
of a list since outputs are dictionaries.
"""
return { k : list(self.obs_shapes[k]) for k in self.obs_shapes }
def forward(self, feats):
"""
Predict each modality from input features, and reshape to each modality's shape.
"""
output = {}
for k in self.obs_shapes:
out = self.nets[k](feats)
output[k] = out.reshape(-1, *self.obs_shapes[k])
return output
def __repr__(self):
"""Pretty print network."""
header = '{}'.format(str(self.__class__.__name__))
msg = ''
for k in self.obs_shapes:
msg += textwrap.indent('\nKey(\n', ' ' * 4)
indent = ' ' * 8
msg += textwrap.indent("name={}\nshape={}\n".format(k, self.obs_shapes[k]), indent)
msg += textwrap.indent("modality={}\n".format(ObsUtils.OBS_KEYS_TO_MODALITIES[k]), indent)
msg += textwrap.indent("net=({})\n".format(self.nets[k]), indent)
msg += textwrap.indent(")", ' ' * 4)
msg = header + '(' + msg + '\n)'
return msg
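# Hypothetical usage sketch for @ObservationDecoder (illustrative, not part of the library API):
#
#   decoder = ObservationDecoder(
#       decode_shapes=OrderedDict(action=(7,)),
#       input_feat_dim=512,
#   )
#   out = decoder(torch.randn(8, 512))  # out["action"] has shape [8, 7]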
class ObservationGroupEncoder(Module):
"""
    This class allows networks to encode multiple observation dictionaries into a single
    flat vector representation. It does this by assigning each observation
    dictionary (observation group) its own @ObservationEncoder object, concatenating the
    per-group encodings, and passing the result through a small MLP (@self.combine).
The class takes a dictionary of dictionaries, @observation_group_shapes.
Each key corresponds to a observation group (e.g. 'obs', 'subgoal', 'goal')
and each OrderedDict should be a map between modalities and
expected input shapes (e.g. { 'image' : (3, 120, 160) }).
"""
def __init__(
self,
observation_group_shapes,
feature_activation=nn.ReLU,
encoder_kwargs=None,
):
"""
Args:
observation_group_shapes (OrderedDict): a dictionary of dictionaries.
Each key in this dictionary should specify an observation group, and
the value should be an OrderedDict that maps modalities to
expected shapes.
feature_activation: non-linearity to apply after each obs net - defaults to ReLU. Pass
None to apply no activation.
encoder_kwargs (dict or None): If None, results in default encoder_kwargs being applied. Otherwise, should
be nested dictionary containing relevant per-modality information for encoder networks.
Should be of form:
obs_modality1: dict
feature_dimension: int
core_class: str
core_kwargs: dict
...
...
obs_randomizer_class: str
obs_randomizer_kwargs: dict
...
...
obs_modality2: dict
...
"""
super(ObservationGroupEncoder, self).__init__()
# type checking
assert isinstance(observation_group_shapes, OrderedDict)
assert np.all([isinstance(observation_group_shapes[k], OrderedDict) for k in observation_group_shapes])
self.observation_group_shapes = observation_group_shapes
# create an observation encoder per observation group
self.nets = nn.ModuleDict()
for obs_group in self.observation_group_shapes:
self.nets[obs_group] = obs_encoder_factory(
obs_shapes=self.observation_group_shapes[obs_group],
feature_activation=feature_activation,
encoder_kwargs=encoder_kwargs,
)
self.out_size = 512
self.combine = nn.Sequential(
nn.Linear(self.combo_output_shape(), 1024),
nn.ReLU(),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, self.out_size)
)
def forward(self, **inputs):
"""
Process each set of inputs in its own observation group.
Args:
inputs (dict): dictionary that maps observation groups to observation
dictionaries of torch.Tensor batches that agree with
@self.observation_group_shapes. All observation groups in
@self.observation_group_shapes must be present, but additional
observation groups can also be present. Note that these are specified
as kwargs for ease of use with networks that name each observation
stream in their forward calls.
Returns:
outputs (torch.Tensor): flat outputs of shape [B, D]
"""
# ensure all observation groups we need are present
assert set(self.observation_group_shapes.keys()).issubset(inputs), "{} does not contain all observation groups {}".format(
list(inputs.keys()), list(self.observation_group_shapes.keys())
)
outputs = []
# Deterministic order since self.observation_group_shapes is OrderedDict
for obs_group in self.observation_group_shapes:
# pass through encoder
outputs.append(
self.nets[obs_group].forward(inputs[obs_group])
)
combo = torch.cat(outputs, dim=-1)
out = self.combine(combo)
return out
def combo_output_shape(self):
"""
Compute the output shape of this encoder.
"""
feat_dim = 0
for obs_group in self.observation_group_shapes:
# get feature dimension of these keys
feat_dim += self.nets[obs_group].output_shape()[0]
return feat_dim
def output_shape(self):
"""
Compute the output shape of this encoder.
"""
return [self.out_size]
def __repr__(self):
"""Pretty print network."""
header = '{}'.format(str(self.__class__.__name__))
msg = ''
for k in self.observation_group_shapes:
msg += '\n'
indent = ' ' * 4
msg += textwrap.indent("group={}\n{}".format(k, self.nets[k]), indent)
msg = header + '(' + msg + '\n)'
return msg
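# Hypothetical usage sketch for @ObservationGroupEncoder (illustrative, not part of the library API).
# It assumes ObsUtils and `encoder_kwargs` are set up as for @obs_encoder_factory. Each group gets
# its own ObservationEncoder, and the concatenated group features are passed through @combine to a
# fixed-size (512-dim) output.
#
#   group_shapes = OrderedDict(obs=OrderedDict(robot0_eef_pos=(3,), agentview_image=(3, 84, 84)))
#   group_enc = ObservationGroupEncoder(observation_group_shapes=group_shapes, encoder_kwargs=encoder_kwargs)
#   obs = {k: torch.randn(8, *shape) for k, shape in group_shapes["obs"].items()}
#   out = group_enc(obs=obs)  # shape [8, 512] == [8, group_enc.output_shape()[0]]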
class MIMO_MLP(Module):
"""
Extension to MLP to accept multiple observation dictionaries as input and
to output dictionaries of tensors. Inputs are specified as a dictionary of
observation dictionaries, with each key corresponding to an observation group.
This module utilizes @ObservationGroupEncoder to process the multiple input dictionaries and
@ObservationDecoder to generate tensor dictionaries. The default behavior
for encoding the inputs is to process visual inputs with a learned CNN and concatenating
the flat encodings with the other flat inputs. The default behavior for generating
outputs is to use a linear layer branch to produce each modality separately
(including visual outputs).
"""
def __init__(
self,
input_obs_group_shapes,
output_shapes,
layer_dims,
layer_func=nn.Linear,
activation=nn.ReLU,
encoder_kwargs=None,
):
"""
Args:
input_obs_group_shapes (OrderedDict): a dictionary of dictionaries.
Each key in this dictionary should specify an observation group, and
the value should be an OrderedDict that maps modalities to
expected shapes.
output_shapes (OrderedDict): a dictionary that maps modality to
expected shapes for outputs.
layer_dims ([int]): sequence of integers for the MLP hidden layer sizes
layer_func: mapping per MLP layer - defaults to Linear
activation: non-linearity per MLP layer - defaults to ReLU
encoder_kwargs (dict or None): If None, results in default encoder_kwargs being applied. Otherwise, should
be nested dictionary containing relevant per-modality information for encoder networks.
Should be of form:
obs_modality1: dict
feature_dimension: int
core_class: str
core_kwargs: dict
...
...
obs_randomizer_class: str
obs_randomizer_kwargs: dict
...
...
obs_modality2: dict
...
"""
super(MIMO_MLP, self).__init__()
assert isinstance(input_obs_group_shapes, OrderedDict)
assert np.all([isinstance(input_obs_group_shapes[k], OrderedDict) for k in input_obs_group_shapes])
assert isinstance(output_shapes, OrderedDict)
self.input_obs_group_shapes = input_obs_group_shapes
self.output_shapes = output_shapes
self.nets = nn.ModuleDict()
# Encoder for all observation groups.
self.nets["encoder"] = ObservationGroupEncoder(
observation_group_shapes=input_obs_group_shapes,
encoder_kwargs=encoder_kwargs,
)
# flat encoder output dimension
mlp_input_dim = self.nets["encoder"].output_shape()[0]
# intermediate MLP layers
self.nets["mlp"] = MLP(
input_dim=mlp_input_dim,
output_dim=layer_dims[-1],
layer_dims=layer_dims[:-1],
layer_func=layer_func,
activation=activation,
output_activation=activation, # make sure non-linearity is applied before decoder
)
# decoder for output modalities
self.nets["decoder"] = ObservationDecoder(
decode_shapes=self.output_shapes,
input_feat_dim=layer_dims[-1],
)
def output_shape(self, input_shape=None):
"""
Returns output shape for this module, which is a dictionary instead
of a list since outputs are dictionaries.
"""
return { k : list(self.output_shapes[k]) for k in self.output_shapes }
def forward(self, **inputs):
"""
Process each set of inputs in its own observation group.
Args:
inputs (dict): a dictionary of dictionaries with one dictionary per
observation group. Each observation group's dictionary should map
modality to torch.Tensor batches. Should be consistent with
@self.input_obs_group_shapes.
Returns:
outputs (dict): dictionary of output torch.Tensors, that corresponds
to @self.output_shapes
"""
enc_outputs = self.nets["encoder"](**inputs)
mlp_out = self.nets["mlp"](enc_outputs)
return self.nets["decoder"](mlp_out)
def _to_string(self):
"""
Subclasses should override this method to print out info about network / policy.
"""
return ''
def __repr__(self):
"""Pretty print network."""
header = '{}'.format(str(self.__class__.__name__))
msg = ''
indent = ' ' * 4
if self._to_string() != '':
msg += textwrap.indent("\n" + self._to_string() + "\n", indent)
msg += textwrap.indent("\nencoder={}".format(self.nets["encoder"]), indent)
msg += textwrap.indent("\n\nmlp={}".format(self.nets["mlp"]), indent)
msg += textwrap.indent("\n\ndecoder={}".format(self.nets["decoder"]), indent)
msg = header + '(' + msg + '\n)'
return msg
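# Hypothetical usage sketch for @MIMO_MLP (illustrative, not part of the library API; key names and
# shapes are made up):
#
#   net = MIMO_MLP(
#       input_obs_group_shapes=OrderedDict(obs=OrderedDict(robot0_eef_pos=(3,))),
#       output_shapes=OrderedDict(action=(7,)),
#       layer_dims=[1024, 1024],
#       encoder_kwargs=encoder_kwargs,
#   )
#   out = net(obs={"robot0_eef_pos": torch.randn(8, 3)})  # out["action"] -> [8, 7]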
class RNN_MIMO_MLP(Module):
"""
A wrapper class for a multi-step RNN and a per-step MLP and a decoder.
Structure: [encoder -> rnn -> mlp -> decoder]
All temporal inputs are processed by a shared @ObservationGroupEncoder,
followed by an RNN, and then a per-step multi-output MLP.
"""
def __init__(
self,
input_obs_group_shapes,
output_shapes,
mlp_layer_dims,
rnn_hidden_dim,
rnn_num_layers,
rnn_type="LSTM", # [LSTM, GRU]
rnn_kwargs=None,
mlp_activation=nn.ReLU,
mlp_layer_func=nn.Linear,
per_step=True,
encoder_kwargs=None,
):
"""
Args:
input_obs_group_shapes (OrderedDict): a dictionary of dictionaries.
Each key in this dictionary should specify an observation group, and
the value should be an OrderedDict that maps modalities to
expected shapes.
output_shapes (OrderedDict): a dictionary that maps modality to
                expected shapes for outputs.
            mlp_layer_dims ([int]): sequence of integers for the MLP hidden layer sizes
rnn_hidden_dim (int): RNN hidden dimension
rnn_num_layers (int): number of RNN layers
rnn_type (str): [LSTM, GRU]
rnn_kwargs (dict): kwargs for the rnn model
per_step (bool): if True, apply the MLP and observation decoder into @output_shapes
at every step of the RNN. Otherwise, apply them to the final hidden state of the
RNN.
encoder_kwargs (dict or None): If None, results in default encoder_kwargs being applied. Otherwise, should
be nested dictionary containing relevant per-modality information for encoder networks.
Should be of form:
obs_modality1: dict
feature_dimension: int
core_class: str
core_kwargs: dict
...
...
obs_randomizer_class: str
obs_randomizer_kwargs: dict
...
...
obs_modality2: dict
...
"""
super(RNN_MIMO_MLP, self).__init__()
assert isinstance(input_obs_group_shapes, OrderedDict)
assert np.all([isinstance(input_obs_group_shapes[k], OrderedDict) for k in input_obs_group_shapes])
assert isinstance(output_shapes, OrderedDict)
self.input_obs_group_shapes = input_obs_group_shapes
self.output_shapes = output_shapes
self.per_step = per_step
self.nets = nn.ModuleDict()
# Encoder for all observation groups.
self.nets["encoder"] = ObservationGroupEncoder(
observation_group_shapes=input_obs_group_shapes,
encoder_kwargs=encoder_kwargs,
)
# flat encoder output dimension
rnn_input_dim = self.nets["encoder"].output_shape()[0]
# bidirectional RNNs mean that the output of RNN will be twice the hidden dimension
        rnn_is_bidirectional = rnn_kwargs.get("bidirectional", False) if rnn_kwargs is not None else False
num_directions = int(rnn_is_bidirectional) + 1 # 2 if bidirectional, 1 otherwise
rnn_output_dim = num_directions * rnn_hidden_dim
per_step_net = None
self._has_mlp = (len(mlp_layer_dims) > 0)
if self._has_mlp:
self.nets["mlp"] = MLP(
input_dim=rnn_output_dim,
output_dim=mlp_layer_dims[-1],
layer_dims=mlp_layer_dims[:-1],
output_activation=mlp_activation,
layer_func=mlp_layer_func
)
self.nets["decoder"] = ObservationDecoder(
decode_shapes=self.output_shapes,
input_feat_dim=mlp_layer_dims[-1],
)
if self.per_step:
per_step_net = Sequential(self.nets["mlp"], self.nets["decoder"])
else:
self.nets["decoder"] = ObservationDecoder(
decode_shapes=self.output_shapes,
input_feat_dim=rnn_output_dim,
)
if self.per_step:
per_step_net = self.nets["decoder"]
# core network
self.nets["rnn"] = RNN_Base(
input_dim=rnn_input_dim,
rnn_hidden_dim=rnn_hidden_dim,
rnn_num_layers=rnn_num_layers,
rnn_type=rnn_type,
per_step_net=per_step_net,
rnn_kwargs=rnn_kwargs
)
def get_rnn_init_state(self, batch_size, device):
"""
Get a default RNN state (zeros)
Args:
batch_size (int): batch size dimension
device: device the hidden state should be sent to.
Returns:
hidden_state (torch.Tensor or tuple): returns hidden state tensor or tuple of hidden state tensors
depending on the RNN type
"""
return self.nets["rnn"].get_rnn_init_state(batch_size, device=device)
def output_shape(self, input_shape):
"""
Returns output shape for this module, which is a dictionary instead
of a list since outputs are dictionaries.
Args:
input_shape (dict): dictionary of dictionaries, where each top-level key
corresponds to an observation group, and the low-level dictionaries
specify the shape for each modality in an observation dictionary
"""
# infers temporal dimension from input shape
obs_group = list(self.input_obs_group_shapes.keys())[0]
mod = list(self.input_obs_group_shapes[obs_group].keys())[0]
T = input_shape[obs_group][mod][0]
TensorUtils.assert_size_at_dim(input_shape, size=T, dim=0,
msg="RNN_MIMO_MLP: input_shape inconsistent in temporal dimension")
# returns a dictionary instead of list since outputs are dictionaries
return { k : [T] + list(self.output_shapes[k]) for k in self.output_shapes }
def forward(self, rnn_init_state=None, return_state=False, **inputs):
"""
Args:
inputs (dict): a dictionary of dictionaries with one dictionary per
observation group. Each observation group's dictionary should map
modality to torch.Tensor batches. Should be consistent with
@self.input_obs_group_shapes. First two leading dimensions should
be batch and time [B, T, ...] for each tensor.
rnn_init_state: rnn hidden state, initialize to zero state if set to None
return_state (bool): whether to return hidden state
Returns:
outputs (dict): dictionary of output torch.Tensors, that corresponds
to @self.output_shapes. Leading dimensions will be batch and time [B, T, ...]
for each tensor.
rnn_state (torch.Tensor or tuple): return the new rnn state (if @return_state)
"""
for obs_group in self.input_obs_group_shapes:
for k in self.input_obs_group_shapes[obs_group]:
# first two dimensions should be [B, T] for inputs
assert inputs[obs_group][k].ndim - 2 == len(self.input_obs_group_shapes[obs_group][k])
# use encoder to extract flat rnn inputs
rnn_inputs = TensorUtils.time_distributed(inputs, self.nets["encoder"], inputs_as_kwargs=True)
assert rnn_inputs.ndim == 3 # [B, T, D]
if self.per_step:
return self.nets["rnn"].forward(inputs=rnn_inputs, rnn_init_state=rnn_init_state, return_state=return_state)
# apply MLP + decoder to last RNN output
outputs = self.nets["rnn"].forward(inputs=rnn_inputs, rnn_init_state=rnn_init_state, return_state=return_state)
if return_state:
outputs, rnn_state = outputs
assert outputs.ndim == 3 # [B, T, D]
if self._has_mlp:
outputs = self.nets["decoder"](self.nets["mlp"](outputs[:, -1]))
else:
outputs = self.nets["decoder"](outputs[:, -1])
if return_state:
return outputs, rnn_state
return outputs
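    # Hypothetical usage sketch (illustrative; assumes `net` is a constructed RNN_MIMO_MLP with
    # per_step=True, a single "obs" group containing "robot0_eef_pos" of shape (3,), and
    # output_shapes == OrderedDict(action=(7,))):
    #
    #   rnn_state = net.get_rnn_init_state(batch_size=8, device=torch.device("cpu"))
    #   obs = {"robot0_eef_pos": torch.randn(8, 10, 3)}  # [B, T, ...] inputs
    #   out, rnn_state = net(obs=obs, rnn_init_state=rnn_state, return_state=True)
    #   # out["action"] -> [8, 10, 7] (per-step outputs)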
def forward_step(self, rnn_state, **inputs):
"""
Unroll network over a single timestep.
Args:
            inputs (dict): expects same observation groups and modalities as @self.input_obs_group_shapes, with
additional batch dimension (but NOT time), since this is a
single time step.
rnn_state (torch.Tensor): rnn hidden state
Returns:
outputs (dict): dictionary of output torch.Tensors, that corresponds
to @self.output_shapes. Does not contain time dimension.
rnn_state: return the new rnn state
"""
        # ensure that the only extra dimension is batch dim, not temporal dim
        for obs_group in self.input_obs_group_shapes:
            for k in self.input_obs_group_shapes[obs_group]:
                assert inputs[obs_group][k].ndim - 1 == len(self.input_obs_group_shapes[obs_group][k])
        inputs = TensorUtils.to_sequence(inputs)
        outputs, rnn_state = self.forward(
            rnn_init_state=rnn_state,
            return_state=True,
            **inputs,
        )
        if self.per_step:
            # per-step outputs are a dict of [B, T=1, ...] tensors - remove the time dimension
            # (if outputs are not per-step, the time dimension is already reduced)
            outputs = TensorUtils.map_tensor(outputs, lambda x: x[:, 0])
return outputs, rnn_state
def _to_string(self):
"""
Subclasses should override this method to print out info about network / policy.
"""
return ''