import pprint
import warnings
from collections.abc import Collection, Iterable, Iterator, KeysView, Sequence
from copy import deepcopy
from numbers import Number
from types import EllipsisType
from typing import (
    Any,
    Protocol,
    Self,
    TypeVar,
    Union,
    cast,
    overload,
    runtime_checkable,
)

import numpy as np
import torch
from deepdiff import DeepDiff

_SingleIndexType = slice | int | EllipsisType
IndexType = np.ndarray | _SingleIndexType | list[_SingleIndexType] | tuple[_SingleIndexType, ...]
TBatch = TypeVar("TBatch", bound="BatchProtocol")
arr_type = torch.Tensor | np.ndarray


def _is_batch_set(obj: Any) -> bool:
    # Batch set is a list/tuple of dict/Batch objects,
    # or 1-D np.ndarray with object type,
    # where each element is a dict/Batch object
    if isinstance(obj, np.ndarray):  # most common case
        # "for element in obj" will just unpack the first dimension,
        # but obj.tolist() will flatten ndarray of objects
        # so do not use obj.tolist()
        if obj.shape == ():
            return False
        return obj.dtype == object and all(isinstance(element, dict | Batch) for element in obj)
    if (
        isinstance(obj, list | tuple)
        and len(obj) > 0
        and all(isinstance(element, dict | Batch) for element in obj)
    ):
        return True
    return False
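
# Illustrative classification sketch for _is_batch_set (hypothetical inputs):
#   _is_batch_set([{"a": 1}, Batch(b=2)])            -> True
#   _is_batch_set(np.array([{}, {}], dtype=object))  -> True
#   _is_batch_set(np.zeros(3))                       -> False (numeric array)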


def _is_scalar(value: Any) -> bool:
    # check if the value is a scalar
    # 1. python bool object, number object: isinstance(value, Number)
    # 2. numpy scalar: isinstance(value, np.generic)
    # 3. python object rather than dict / Batch / tensor
    # the check of dict / Batch is omitted because this only checks a value.
    # a dict / Batch will eventually check their values
    if isinstance(value, torch.Tensor):
        return value.numel() == 1 and not value.shape
    # np.asanyarray will cause dead loop in some cases
    return np.isscalar(value)
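
# Scalar classification sketch (illustrative): torch.tensor(1.0) counts as a
# scalar (one element, empty shape), while np.array(1.0) does not, because
# np.isscalar returns False for zero-dimensional numpy arrays.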


def _is_number(value: Any) -> bool:
    # isinstance(value, Number) checks python numbers: 1, 1.0, True, etc.
    # isinstance(value, np.number) checks np.int32(1), np.float64(1.0), etc.
    # isinstance(value, np.bool_) checks np.bool_(True), etc.
    # similar to np.isscalar, except that np.isscalar('st') returns True
    # whereas _is_number('st') returns False
    return isinstance(value, Number | np.number | np.bool_)
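
# Classification sketch (illustrative): _is_number(1), _is_number(np.float64(1.0))
# and _is_number(np.bool_(True)) are True; _is_number("st") and
# _is_number(np.array(1)) are False.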


def _to_array_with_correct_type(obj: Any) -> np.ndarray:
    if isinstance(obj, np.ndarray) and issubclass(obj.dtype.type, np.bool_ | np.number):
        return obj  # most often case
    # convert the value to np.ndarray
    # convert to object dtype if neither bool nor number
    # raises an exception if the array's elements are tensors themselves
    try:
        obj_array = np.asanyarray(obj)
    except ValueError:
        obj_array = np.asanyarray(obj, dtype=object)
    if not issubclass(obj_array.dtype.type, np.bool_ | np.number):
        obj_array = obj_array.astype(object)
    if obj_array.dtype == object:
        # scalar ndarray with object dtype is very annoying
        # a=np.array([np.array({}, dtype=object), np.array({}, dtype=object)])
        # a is not array([{}, {}], dtype=object), and a[0]={} results in
        # something very strange:
        # array([{}, array({}, dtype=object)], dtype=object)
        if not obj_array.shape:
            obj_array = obj_array.item(0)
        elif all(isinstance(arr, np.ndarray) for arr in obj_array.reshape(-1)):
            return obj_array  # arrays of various lengths, e.g. np.array([[1], [2, 3], [4, 5, 6]])
        elif any(isinstance(arr, torch.Tensor) for arr in obj_array.reshape(-1)):
            raise ValueError("Numpy arrays of tensors are not supported yet.")
    return obj_array
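
# Conversion sketch (illustrative inputs): numeric input keeps a numeric dtype,
# ragged input falls back to dtype=object, and object arrays that contain
# torch tensors are rejected:
#   _to_array_with_correct_type([1, 2, 3])                        -> int array
#   _to_array_with_correct_type([[1], [2, 3]])                    -> object array
#   _to_array_with_correct_type([torch.zeros(2), torch.zeros(3)]) -> ValueError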


def create_value(
    inst: Any,
    size: int,
    stack: bool = True,
) -> Union["Batch", np.ndarray, torch.Tensor]:
    """Create empty place-holders according to inst's shape.

    :param stack: whether to stack or to concatenate. E.g. if inst has a shape
        of (3, 5) and size = 10, stack=True returns an np.array with shape
        (10, 3, 5), otherwise (10, 5).
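
    A rough sketch of the shape rule (zero-filled, dtype preserved)::

        >>> create_value(np.zeros((3, 5)), 10, stack=True).shape
        (10, 3, 5)
        >>> create_value(np.zeros((3, 5)), 10, stack=False).shape
        (10, 5)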
    """
    has_shape = isinstance(inst, np.ndarray | torch.Tensor)
    is_scalar = _is_scalar(inst)
    if not stack and is_scalar:
        # This branch should never be hit, since Batch.cat_ has already checked for it.
        # Scalar types are not considered here, following numpy, which does not
        # support concatenation of zero-dimensional arrays (scalars).
        raise TypeError(f"cannot concatenate with {inst} which is scalar")
    if has_shape:
        shape = (size, *inst.shape) if stack else (size, *inst.shape[1:])
    if isinstance(inst, np.ndarray):
        target_type = (
            inst.dtype.type if issubclass(inst.dtype.type, np.bool_ | np.number) else object
        )
        return np.full(shape, fill_value=None if target_type == object else 0, dtype=target_type)
    if isinstance(inst, torch.Tensor):
        return torch.full(shape, fill_value=0, device=inst.device, dtype=inst.dtype)
    if isinstance(inst, dict | Batch):
        zero_batch = Batch()
        for key, val in inst.items():
            zero_batch.__dict__[key] = create_value(val, size, stack=stack)
        return zero_batch
    if is_scalar:
        return create_value(np.asarray(inst), size, stack=stack)
    # fall back to object
    return np.array([None for _ in range(size)], object)


def _assert_type_keys(keys: Iterable[str]) -> None:
    assert all(isinstance(key, str) for key in keys), f"keys should all be string, but got {keys}"


def _parse_value(obj: Any) -> Union["Batch", np.ndarray, torch.Tensor] | None:
    if isinstance(obj, Batch):  # most common case
        return obj
    if (
        (isinstance(obj, np.ndarray) and issubclass(obj.dtype.type, np.bool_ | np.number))
        or isinstance(obj, torch.Tensor)
        or obj is None
    ):  # third most common case
        return obj
    if _is_number(obj):  # second most common case, but more time-consuming
        return np.asanyarray(obj)
    if isinstance(obj, dict):
        return Batch(obj)
    if (
        not isinstance(obj, np.ndarray)
        and isinstance(obj, Collection)
        and len(obj) > 0
        and all(isinstance(element, torch.Tensor) for element in obj)
    ):
        try:
            obj = cast(list[torch.Tensor], obj)
            return torch.stack(obj)
        except RuntimeError as exception:
            raise TypeError(
                "Batch does not support non-stackable iterable"
                " of torch.Tensor as unique value yet.",
            ) from exception
    if _is_batch_set(obj):
        obj = Batch(obj)  # list of dict / Batch
    else:
        # None, scalar, normal value list (main case),
        # or an actual list of objects
        try:
            obj = _to_array_with_correct_type(obj)
        except ValueError as exception:
            raise TypeError(
                "Batch does not support heterogeneous list/tuple of tensors as unique value yet.",
            ) from exception
    return obj
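
# Dispatch sketch for _parse_value (illustrative):
#   Batch, torch.Tensor, numeric np.ndarray, None -> returned as-is
#   number                                        -> wrapped into an np.ndarray
#   dict                                          -> wrapped into a Batch
#   collection of stackable tensors               -> torch.stack result
#   list/tuple/object-array of dicts or Batches   -> stacked into a Batch
#   anything else                                 -> _to_array_with_correct_type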


def alloc_by_keys_diff(
    meta: "BatchProtocol",
    batch: "BatchProtocol",
    size: int,
    stack: bool = True,
) -> None:
    """Creates place-holders inside meta for keys that are in batch but not in meta.

    This is mainly an internal method; use it only if you know what you are doing.
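
    A sketch of the effect (hypothetical keys and sizes)::

        >>> meta = Batch(a=np.zeros((4, 2)))
        >>> batch = Batch(a=np.ones((4, 2)), b=np.ones(3))
        >>> alloc_by_keys_diff(meta, batch, size=4)
        >>> meta.b.shape  # zero placeholder allocated for the missing key
        (4, 3)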
    """
    for key in batch.get_keys():
        if key in meta.get_keys():
            if isinstance(meta[key], Batch) and isinstance(batch[key], Batch):
                alloc_by_keys_diff(meta[key], batch[key], size, stack)
            elif isinstance(meta[key], Batch) and meta[key].is_empty():
                meta[key] = create_value(batch[key], size, stack)
        else:
            meta[key] = create_value(batch[key], size, stack)


# Note: This is implemented as a protocol because the interface
# of Batch is always extended by adding new fields. Having a hierarchy of
# protocols building off this one allows for type safety and IDE support despite
# the dynamic nature of Batch
@runtime_checkable
class BatchProtocol(Protocol):
    """The internal data structure in Tianshou.

    Batch is a kind of supercharged array (of temporal data) stored individually in a
    (recursive) dictionary of objects that can be either numpy arrays, torch tensors, or
    batches themselves. It is designed to make it extremely easy to access,
    manipulate and set partial views of the heterogeneous data conveniently.

    For a detailed description, please refer to :ref:`batch_concept`.
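
    A minimal usage sketch (field names are arbitrary examples)::

        >>> import numpy as np
        >>> data = Batch(obs=np.arange(4), info=Batch(done=np.zeros(4)))
        >>> data.shape
        [4]
        >>> data[:2].info.done.shape
        (2,)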
    """

    @property
    def shape(self) -> list[int]:
        ...

    def __setattr__(self, key: str, value: Any) -> None:
        ...

    def __getattr__(self, key: str) -> Any:
        ...

    def __contains__(self, key: str) -> bool:
        ...

    def __getstate__(self) -> dict:
        ...

    def __setstate__(self, state: dict) -> None:
        ...

    @overload
    def __getitem__(self, index: str) -> Any:
        ...

    @overload
    def __getitem__(self, index: IndexType) -> Self:
        ...

    def __getitem__(self, index: str | IndexType) -> Any:
        ...

    def __setitem__(self, index: str | IndexType, value: Any) -> None:
        ...

    def __iadd__(self, other: Self | Number | np.number) -> Self:
        ...

    def __add__(self, other: Self | Number | np.number) -> Self:
        ...

    def __imul__(self, value: Number | np.number) -> Self:
        ...

    def __mul__(self, value: Number | np.number) -> Self:
        ...

    def __itruediv__(self, value: Number | np.number) -> Self:
        ...

    def __truediv__(self, value: Number | np.number) -> Self:
        ...

    def __repr__(self) -> str:
        ...

    def __iter__(self) -> Iterator[Self]:
        ...

    def __eq__(self, other: Any) -> bool:
        ...

    @staticmethod
    def to_numpy(batch: TBatch) -> TBatch:
        """Change all torch.Tensor to numpy.ndarray and return a new Batch."""
        ...

    def to_numpy_(self) -> None:
        """Change all torch.Tensor to numpy.ndarray in-place."""
        ...

    @staticmethod
    def to_torch(
        batch: TBatch,
        dtype: torch.dtype | None = None,
        device: str | int | torch.device = "cpu",
    ) -> TBatch:
        """Change all numpy.ndarray to torch.Tensor and return a new Batch."""
        ...

    def to_torch_(
        self,
        dtype: torch.dtype | None = None,
        device: str | int | torch.device = "cpu",
    ) -> None:
        """Change all numpy.ndarray to torch.Tensor in-place."""
        ...

    def cat_(self, batches: Self | Sequence[dict | Self]) -> None:
        """Concatenate a list of (or one) Batch objects into current batch."""
        ...

    @staticmethod
    def cat(batches: Sequence[dict | TBatch]) -> TBatch:
        """Concatenate a list of Batch object into a single new batch.

        For keys that are not shared across all batches, batches that do not
        have these keys will be padded by zeros with appropriate shapes. E.g.
        ::

            >>> a = Batch(a=np.zeros([3, 4]), common=Batch(c=np.zeros([3, 5])))
            >>> b = Batch(b=np.zeros([4, 3]), common=Batch(c=np.zeros([4, 5])))
            >>> c = Batch.cat([a, b])
            >>> c.a.shape
            (7, 4)
            >>> c.b.shape
            (7, 3)
            >>> c.common.c.shape
            (7, 5)
        """
        ...

    def stack_(self, batches: Sequence[dict | Self], axis: int = 0) -> None:
        """Stack a list of Batch object into current batch."""
        ...

    @staticmethod
    def stack(batches: Sequence[dict | TBatch], axis: int = 0) -> TBatch:
        """Stack a list of Batch object into a single new batch.

        For keys that are not shared across all batches, batches that do not
        have these keys will be padded by zeros. E.g.
        ::

            >>> a = Batch(a=np.zeros([4, 4]), common=Batch(c=np.zeros([4, 5])))
            >>> b = Batch(b=np.zeros([4, 6]), common=Batch(c=np.zeros([4, 5])))
            >>> c = Batch.stack([a, b])
            >>> c.a.shape
            (2, 4, 4)
            >>> c.b.shape
            (2, 4, 6)
            >>> c.common.c.shape
            (2, 4, 5)

        .. note::

            If there are keys that are not shared across all batches, ``stack``
            with ``axis != 0`` is undefined, and will cause an exception.
        """
        ...

    def empty_(self, index: slice | IndexType | None = None) -> Self:
        """Return an empty Batch object with 0 or None filled.

        If "index" is specified, it will only reset the specific indexed-data.
        ::

            >>> data.empty_()
            >>> print(data)
            Batch(
                a: array([[0., 0.],
                          [0., 0.]]),
                b: array([None, None], dtype=object),
            )
            >>> b={'c': [2., 'st'], 'd': [1., 0.]}
            >>> data = Batch(a=[False,  True], b=b)
            >>> data[0] = Batch.empty(data[1])
            >>> data
            Batch(
                a: array([False,  True]),
                b: Batch(
                       c: array([None, 'st']),
                       d: array([0., 0.]),
                   ),
            )
        """
        ...

    @staticmethod
    def empty(batch: TBatch, index: IndexType | None = None) -> TBatch:
        """Return an empty Batch object with 0 or None filled.

        The shape is the same as the given Batch.
        """
        ...

    def update(self, batch: dict | Self | None = None, **kwargs: Any) -> None:
        """Update this batch from another dict/Batch."""
        ...

    def __len__(self) -> int:
        ...

    def is_empty(self, recurse: bool = False) -> bool:
        ...

    def split(
        self,
        size: int,
        shuffle: bool = True,
        merge_last: bool = False,
    ) -> Iterator[Self]:
        """Split whole data into multiple small batches.

        :param size: divide the whole data into batches of the given size, yielding
            a single batch if the length of the data is smaller than "size". A size
            of -1 means the whole batch.
        :param shuffle: randomly shuffle the entire data batch if True,
            otherwise keep the original order. Default to True.
        :param merge_last: merge the last batch into the previous one.
            Default to False.
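
        A quick sketch of the chunking behavior (illustrative sizes)::

            >>> data = Batch(a=np.arange(10))
            >>> [len(chunk) for chunk in data.split(3, shuffle=False)]
            [3, 3, 3, 1]
            >>> [len(chunk) for chunk in data.split(3, shuffle=False, merge_last=True)]
            [3, 3, 4]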
        """
        ...

    def to_dict(self, recurse: bool = True) -> dict[str, Any]:
        ...

    def to_list_of_dicts(self) -> list[dict[str, Any]]:
        ...


class Batch(BatchProtocol):
    """See :class:`~tianshou.data.batch.BatchProtocol`."""

    __doc__ = BatchProtocol.__doc__

    def __init__(
        self,
        batch_dict: dict
        | BatchProtocol
        | Sequence[dict | BatchProtocol]
        | np.ndarray
        | None = None,
        copy: bool = False,
        **kwargs: Any,
    ) -> None:
        if copy:
            batch_dict = deepcopy(batch_dict)
        if batch_dict is not None:
            if isinstance(batch_dict, dict | BatchProtocol):
                _assert_type_keys(batch_dict.keys())
                for batch_key, obj in batch_dict.items():
                    self.__dict__[batch_key] = _parse_value(obj)
            elif _is_batch_set(batch_dict):
                batch_dict = cast(Sequence[dict | BatchProtocol], batch_dict)
                self.stack_(batch_dict)
        if len(kwargs) > 0:
            # TODO: that's a rather weird pattern, is it really needed?
            # Feels like kwargs could be just merged into batch_dict in the beginning
            self.__init__(kwargs, copy=copy)  # type: ignore

    def to_dict(self, recursive: bool = True) -> dict[str, Any]:
        result = {}
        for k, v in self.__dict__.items():
            if recursive and isinstance(v, Batch):
                v = v.to_dict(recursive=recursive)
            result[k] = v
        return result

    def get_keys(self) -> KeysView:
        return self.__dict__.keys()

    def to_list_of_dicts(self) -> list[dict[str, Any]]:
        return [entry.to_dict() for entry in self]

    def __setattr__(self, key: str, value: Any) -> None:
        """Set self.key = value."""
        self.__dict__[key] = _parse_value(value)

    def __getattr__(self, key: str) -> Any:
        """Return self.key. The "Any" return type is needed for mypy."""
        return getattr(self.__dict__, key)

    def __contains__(self, key: str) -> bool:
        """Return key in self."""
        return key in self.__dict__

    def __getstate__(self) -> dict[str, Any]:
        """Pickling interface.

        Only the actual data are serialized for both efficiency and simplicity.
        """
        state = {}
        for batch_key, obj in self.items():
            if isinstance(obj, Batch):
                state[batch_key] = obj.__getstate__()
            else:
                state[batch_key] = obj
        return state

    def __setstate__(self, state: dict[str, Any]) -> None:
        """Unpickling interface.

        At this point, self is an empty Batch instance that has not been
        initialized, so it can safely be initialized by the pickle state.
        """
        self.__init__(**state)  # type: ignore

    @overload
    def __getitem__(self, index: str) -> Any:
        ...

    @overload
    def __getitem__(self, index: IndexType) -> Self:
        ...

    def __getitem__(self, index: str | IndexType) -> Any:
        """Return self[index]."""
        if isinstance(index, str):
            return self.__dict__[index]
        batch_items = self.items()
        if len(batch_items) > 0:
            new_batch = Batch()
            for batch_key, obj in batch_items:
                if isinstance(obj, Batch) and obj.is_empty():
                    new_batch.__dict__[batch_key] = Batch()
                else:
                    new_batch.__dict__[batch_key] = obj[index]
            return new_batch
        raise IndexError("Cannot access item from empty Batch object.")

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, self.__class__):
            return False

        this_batch_no_torch_tensor: Batch = Batch.to_numpy(self)
        other_batch_no_torch_tensor: Batch = Batch.to_numpy(other)
        this_dict = this_batch_no_torch_tensor.to_dict(recursive=True)
        other_dict = other_batch_no_torch_tensor.to_dict(recursive=True)

        return not DeepDiff(this_dict, other_dict)

    def __iter__(self) -> Iterator[Self]:
        # TODO: empty batch raises an error on len and needs separate treatment, that's probably not a good idea
        if len(self.__dict__) == 0:
            yield from []
        else:
            for i in range(len(self)):
                yield self[i]

    def __setitem__(self, index: str | IndexType, value: Any) -> None:
        """Assign value to self[index]."""
        value = _parse_value(value)
        if isinstance(index, str):
            self.__dict__[index] = value
            return
        if not isinstance(value, Batch):
            raise ValueError(
                "Batch does not supported tensor assignment. "
                "Use a compatible Batch or dict instead.",
            )
        if not set(value.keys()).issubset(self.__dict__.keys()):
            raise ValueError("Creating keys is not supported by item assignment.")
        for key, val in self.items():
            try:
                self.__dict__[key][index] = value[key]
            except KeyError:
                if isinstance(val, Batch):
                    self.__dict__[key][index] = Batch()
                elif isinstance(val, torch.Tensor) or (
                    isinstance(val, np.ndarray) and issubclass(val.dtype.type, np.bool_ | np.number)
                ):
                    self.__dict__[key][index] = 0
                else:
                    self.__dict__[key][index] = None

    def __iadd__(self, other: Self | Number | np.number) -> Self:
        """Algebraic addition with another Batch instance in-place."""
        if isinstance(other, Batch):
            for (batch_key, obj), value in zip(
                self.__dict__.items(),
                other.__dict__.values(),
                strict=True,
            ):  # TODO are keys consistent?
                if isinstance(obj, Batch) and obj.is_empty():
                    continue
                self.__dict__[batch_key] += value
            return self
        if _is_number(other):
            for batch_key, obj in self.items():
                if isinstance(obj, Batch) and obj.is_empty():
                    continue
                self.__dict__[batch_key] += other
            return self
        raise TypeError("Only addition of Batch or number is supported.")

    def __add__(self, other: Self | Number | np.number) -> Self:
        """Algebraic addition with another Batch instance out-of-place."""
        return deepcopy(self).__iadd__(other)

    def __imul__(self, value: Number | np.number) -> Self:
        """Algebraic multiplication with a scalar value in-place."""
        assert _is_number(value), "Only multiplication by a number is supported."
        for batch_key, obj in self.__dict__.items():
            if isinstance(obj, Batch) and obj.is_empty():
                continue
            self.__dict__[batch_key] *= value
        return self

    def __mul__(self, value: Number | np.number) -> Self:
        """Algebraic multiplication with a scalar value out-of-place."""
        return deepcopy(self).__imul__(value)

    def __itruediv__(self, value: Number | np.number) -> Self:
        """Algebraic division with a scalar value in-place."""
        assert _is_number(value), "Only division by a number is supported."
        for batch_key, obj in self.__dict__.items():
            if isinstance(obj, Batch) and obj.is_empty():
                continue
            self.__dict__[batch_key] /= value
        return self

    def __truediv__(self, value: Number | np.number) -> Self:
        """Algebraic division with a scalar value out-of-place."""
        return deepcopy(self).__itruediv__(value)

    def __repr__(self) -> str:
        """Return str(self)."""
        self_str = self.__class__.__name__ + "(\n"
        flag = False
        for batch_key, obj in self.__dict__.items():
            rpl = "\n" + " " * (6 + len(batch_key))
            obj_name = pprint.pformat(obj).replace("\n", rpl)
            self_str += f"    {batch_key}: {obj_name},\n"
            flag = True
        if flag:
            self_str += ")"
        else:
            self_str = self.__class__.__name__ + "()"
        return self_str

    @staticmethod
    def to_numpy(batch: TBatch) -> TBatch:
        batch_dict = deepcopy(batch)
        for batch_key, obj in batch_dict.items():
            if isinstance(obj, torch.Tensor):
                batch_dict.__dict__[batch_key] = obj.detach().cpu().numpy()
            elif isinstance(obj, Batch):
                obj = Batch.to_numpy(obj)
                batch_dict.__dict__[batch_key] = obj

        return batch_dict

    def to_numpy_(self) -> None:
        for batch_key, obj in self.items():
            if isinstance(obj, torch.Tensor):
                self.__dict__[batch_key] = obj.detach().cpu().numpy()
            elif isinstance(obj, Batch):
                obj.to_numpy_()

    @staticmethod
    def to_torch(
        batch: TBatch,
        dtype: torch.dtype | None = None,
        device: str | int | torch.device = "cpu",
    ) -> TBatch:
        new_batch = Batch(batch, copy=True)
        new_batch.to_torch_(dtype=dtype, device=device)

        return new_batch  # type: ignore[return-value]

    def to_torch_(
        self,
        dtype: torch.dtype | None = None,
        device: str | int | torch.device = "cpu",
    ) -> None:
        if not isinstance(device, torch.device):
            device = torch.device(device)

        for batch_key, obj in self.items():
            if isinstance(obj, torch.Tensor):
                if (
                    (dtype is not None and obj.dtype != dtype)
                    or obj.device.type != device.type
                    or device.index != obj.device.index
                ):
                    if dtype is not None:
                        self.__dict__[batch_key] = obj.type(dtype).to(device)
                    else:
                        self.__dict__[batch_key] = obj.to(device)
            elif isinstance(obj, Batch):
                obj.to_torch_(dtype, device)
            else:
                # ndarray or scalar
                if not isinstance(obj, np.ndarray):
                    obj = np.asanyarray(obj)
                obj = torch.from_numpy(obj).to(device)
                if dtype is not None:
                    obj = obj.type(dtype)
                self.__dict__[batch_key] = obj

    def __cat(self, batches: Sequence[dict | Self], lens: list[int]) -> None:
        """Private method for Batch.cat_.

        ::

            >>> a = Batch(a=np.random.randn(3, 4))
            >>> x = Batch(a=a, b=np.random.randn(4, 4))
            >>> y = Batch(a=Batch(a=Batch()), b=np.random.randn(4, 4))

        If we want to concatenate x and y, we want to pad y.a.a with zeros.
        Without ``lens`` as a hint, when we concatenate x.a and y.a, we would
        not be able to know how to pad y.a. So ``Batch.cat_`` should compute
        the ``lens`` to give ``Batch.__cat`` a hint.
        ::

            >>> ans = Batch.cat([x, y])
            >>> # this is equivalent to the following line
            >>> ans = Batch(); ans.__cat([x, y], lens=[3, 4])
            >>> # where lens is equal to [len(x), len(y)]
        """
        # partial keys will be padded by zeros
        # with the shape of [len, rest_shape]
        sum_lens = [0]
        for len_ in lens:
            sum_lens.append(sum_lens[-1] + len_)
        # collect non-empty keys
        keys_map = [
            {
                batch_key
                for batch_key, obj in batch.items()
                if not (isinstance(obj, Batch) and obj.is_empty())
            }
            for batch in batches
        ]
        keys_shared = set.intersection(*keys_map)
        values_shared = [[batch[key] for batch in batches] for key in keys_shared]
        for key, shared_value in zip(keys_shared, values_shared, strict=True):
            if all(isinstance(element, dict | Batch) for element in shared_value):
                batch_holder = Batch()
                batch_holder.__cat(shared_value, lens=lens)
                self.__dict__[key] = batch_holder
            elif all(isinstance(element, torch.Tensor) for element in shared_value):
                self.__dict__[key] = torch.cat(shared_value)
            else:
                # cat Batch(a=np.zeros((3, 4))) and Batch(a=Batch(b=Batch()))
                # will fail here
                self.__dict__[key] = _to_array_with_correct_type(np.concatenate(shared_value))
        keys_total = set.union(*[set(batch.keys()) for batch in batches])
        keys_reserve_or_partial = set.difference(keys_total, keys_shared)
        # keys that are reserved in all batches
        keys_reserve = set.difference(keys_total, set.union(*keys_map))
        # keys that occur only in some batches, but not all
        keys_partial = keys_reserve_or_partial.difference(keys_reserve)
        for key in keys_reserve:
            # reserved keys
            self.__dict__[key] = Batch()
        for key in keys_partial:
            for i, batch in enumerate(batches):
                if key not in batch.__dict__:
                    continue
                value = batch.get(key)
                if isinstance(value, Batch) and value.is_empty():
                    continue
                try:
                    self.__dict__[key][sum_lens[i] : sum_lens[i + 1]] = value
                except KeyError:
                    self.__dict__[key] = create_value(value, sum_lens[-1], stack=False)
                    self.__dict__[key][sum_lens[i] : sum_lens[i + 1]] = value

    def cat_(self, batches: BatchProtocol | Sequence[dict | BatchProtocol]) -> None:
        if isinstance(batches, BatchProtocol | dict):
            batches = [batches]
        # check input format
        batch_list = []
        for batch in batches:
            if isinstance(batch, dict):
                if len(batch) > 0:
                    batch_list.append(Batch(batch))
            elif isinstance(batch, Batch):
                # x.is_empty() means that x is Batch() and should be ignored
                if not batch.is_empty():
                    batch_list.append(batch)
            else:
                raise ValueError(f"Cannot concatenate {type(batch)} in Batch.cat_")
        if len(batch_list) == 0:
            return
        batches = batch_list
        try:
            # x.is_empty(recurse=True) here means x is a nested empty batch
            # like Batch(a=Batch), and we have to treat it as length zero and
            # keep it.
            lens = [0 if batch.is_empty(recurse=True) else len(batch) for batch in batches]
        except TypeError as exception:
            raise ValueError(
                "Batch.cat_ meets an exception. Maybe because there is any "
                f"scalar in {batches} but Batch.cat_ does not support the "
                "concatenation of scalar.",
            ) from exception
        if not self.is_empty():
            batches = [self, *list(batches)]
            lens = [0 if self.is_empty(recurse=True) else len(self), *lens]
        self.__cat(batches, lens)

    @staticmethod
    def cat(batches: Sequence[dict | TBatch]) -> TBatch:
        batch = Batch()
        batch.cat_(batches)
        return batch  # type: ignore

    def stack_(self, batches: Sequence[dict | BatchProtocol], axis: int = 0) -> None:
        # check input format
        batch_list = []
        for batch in batches:
            if isinstance(batch, dict):
                if len(batch) > 0:
                    batch_list.append(Batch(batch))
            elif isinstance(batch, Batch):
                # x.is_empty() means that x is Batch() and should be ignored
                if not batch.is_empty():
                    batch_list.append(batch)
            else:
                raise ValueError(f"Cannot concatenate {type(batch)} in Batch.stack_")
        if len(batch_list) == 0:
            return
        batches = batch_list
        if not self.is_empty():
            batches = [self, *batches]
        # collect non-empty keys
        keys_map = [
            {
                batch_key
                for batch_key, obj in batch.items()
                if not (isinstance(obj, BatchProtocol) and obj.is_empty())
            }
            for batch in batches
        ]
        keys_shared = set.intersection(*keys_map)
        values_shared = [[batch[key] for batch in batches] for key in keys_shared]
        for shared_key, value in zip(keys_shared, values_shared, strict=True):
            # second most common case
            if all(isinstance(element, torch.Tensor) for element in value):
                self.__dict__[shared_key] = torch.stack(value, axis)
            # third most common case
            elif all(isinstance(element, BatchProtocol | dict) for element in value):
                self.__dict__[shared_key] = Batch.stack(value, axis)
            else:  # most common case: np.ndarray
                try:
                    self.__dict__[shared_key] = _to_array_with_correct_type(np.stack(value, axis))
                except ValueError:
                    warnings.warn(
                        "You are using tensors with different shape,"
                        " fallback to dtype=object by default.",
                    )
                    self.__dict__[shared_key] = np.array(value, dtype=object)
        # all the keys
        keys_total = set.union(*[set(batch.keys()) for batch in batches])
        # keys that are reserved in all batches
        keys_reserve = set.difference(keys_total, set.union(*keys_map))
        # keys that are either partial or reserved
        keys_reserve_or_partial = set.difference(keys_total, keys_shared)
        # keys that occur only in some batches, but not all
        keys_partial = keys_reserve_or_partial.difference(keys_reserve)
        if keys_partial and axis != 0:
            raise ValueError(
                f"Stack of Batch with non-shared keys {keys_partial} is only "
                f"supported with axis=0, but got axis={axis}!",
            )
        for key in keys_reserve:
            # reserved keys
            self.__dict__[key] = Batch()
        for key in keys_partial:
            for i, batch in enumerate(batches):
                if key not in batch.__dict__:
                    continue
                value = batch.get(key)
                # TODO: fix code/annotations s.t. the ignores can be removed
                if (
                    isinstance(value, BatchProtocol)  # type: ignore
                    and value.is_empty()  # type: ignore
                ):
                    continue  # type: ignore
                try:
                    self.__dict__[key][i] = value
                except KeyError:
                    self.__dict__[key] = create_value(value, len(batches))
                    self.__dict__[key][i] = value

    @staticmethod
    def stack(batches: Sequence[dict | TBatch], axis: int = 0) -> TBatch:
        batch = Batch()
        batch.stack_(batches, axis)
        # can't cast to a generic type, so we have to ignore the type here
        return batch  # type: ignore

    def empty_(self, index: slice | IndexType | None = None) -> Self:
        for batch_key, obj in self.items():
            if isinstance(obj, torch.Tensor):  # most common case
                self.__dict__[batch_key][index] = 0
            elif obj is None:
                continue
            elif isinstance(obj, np.ndarray):
                if obj.dtype == object:
                    self.__dict__[batch_key][index] = None
                else:
                    self.__dict__[batch_key][index] = 0
            elif isinstance(obj, Batch):
                self.__dict__[batch_key].empty_(index=index)
            else:  # scalar value
                warnings.warn(
                    "You are calling Batch.empty on a NumPy scalar, "
                    "which may cause undefined behaviors.",
                )
                if _is_number(obj):
                    self.__dict__[batch_key] = obj.__class__(0)
                else:
                    self.__dict__[batch_key] = None
        return self

    @staticmethod
    def empty(batch: TBatch, index: IndexType | None = None) -> TBatch:
        return deepcopy(batch).empty_(index)

    def update(self, batch: dict | Self | None = None, **kwargs: Any) -> None:
        if batch is None:
            self.update(kwargs)
            return
        for batch_key, obj in batch.items():
            self.__dict__[batch_key] = _parse_value(obj)
        if kwargs:
            self.update(kwargs)

    def __len__(self) -> int:
        """Return len(self)."""
        lens = []
        for obj in self.__dict__.values():
            # TODO: causes inconsistent behavior between batches containing empty
            #  batches and batches containing empty sequences of another type. Remove,
            #  but only after Buffer and Collectors have been improved to no longer
            #  rely on this
            if isinstance(obj, Batch) and obj.is_empty(recurse=True):
                continue
            if hasattr(obj, "__len__") and (isinstance(obj, Batch) or obj.ndim > 0):
                lens.append(len(obj))
            else:
                raise TypeError(f"Object {obj} in {self} has no len()")
        if not lens:
            return 0
        return min(lens)
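
    # Length semantics sketch (illustrative): len(Batch(a=np.zeros((3, 5)),
    # b=np.zeros((4, 5)))) == 3, i.e. the minimum first-dimension length over
    # all fields that have one.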

    def is_empty(self, recurse: bool = False) -> bool:
        """Test if a Batch is empty.

        If ``recurse=True``, it further tests the values of the object; else
        it only tests the existence of any key.

        ``b.is_empty(recurse=True)`` is mainly used to distinguish
        ``Batch(a=Batch(a=Batch()))`` and ``Batch(a=1)``. They both raise
        exceptions when applied to ``len()``, but the former can be used in
        ``cat``, while the latter is a scalar and cannot be used in ``cat``.

        Another usage is in ``__len__``, where we have to skip checking the
        length of recursively empty Batch.
        ::

            >>> Batch().is_empty()
            True
            >>> Batch(a=Batch(), b=Batch(c=Batch())).is_empty()
            False
            >>> Batch(a=Batch(), b=Batch(c=Batch())).is_empty(recurse=True)
            True
            >>> Batch(d=1).is_empty()
            False
            >>> Batch(a=np.float64(1.0)).is_empty()
            False
        """
        if len(self.__dict__) == 0:
            return True
        if not recurse:
            return False
        return all(
            False if not isinstance(obj, Batch) else obj.is_empty(recurse=True)
            for obj in self.values()
        )

    @property
    def shape(self) -> list[int]:
        """Return self.shape."""
        if self.is_empty():
            return []
        data_shape = []
        for obj in self.__dict__.values():
            try:
                data_shape.append(list(obj.shape))
            except AttributeError:
                data_shape.append([])
        return (
            list(map(min, zip(*data_shape, strict=False))) if len(data_shape) > 1 else data_shape[0]
        )
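
    # Shape semantics sketch (illustrative): Batch(a=np.zeros((2, 3)),
    # b=np.zeros((2, 7))).shape == [2, 3], the element-wise minimum over
    # the per-field shapes.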

    def split(
        self,
        size: int,
        shuffle: bool = True,
        merge_last: bool = False,
    ) -> Iterator[Self]:
        length = len(self)
        if size == -1:
            size = length
        assert size >= 1  # size can be greater than length; then the whole batch is returned
        indices = np.random.permutation(length) if shuffle else np.arange(length)
        merge_last = merge_last and length % size > 0
        for idx in range(0, length, size):
            if merge_last and idx + size + size >= length:
                yield self[indices[idx:]]
                break
            yield self[indices[idx : idx + size]]