# mypy: allow-untyped-defs
from typing import Any, Iterable, NamedTuple, Optional, overload, Sequence, Tuple, Union

from typing_extensions import Self

from torch import Tensor

from torch._prims_common import DeviceLikeType
from torch.types import _dtype

# Backing NamedTuple for PackedSequence: `data` holds the packed values,
# `batch_sizes` gives the number of sequences still active at each time step,
# and the optional index tensors record the batch (un)sorting permutations.
class PackedSequence_(NamedTuple):
    data: Tensor
    batch_sizes: Tensor
    sorted_indices: Optional[Tensor]
    unsorted_indices: Optional[Tensor]

# Internal helper: returns None if `optional` is None, otherwise fn(optional).
def bind(optional: Any, fn: Any): ...

# Holds the data and batch_sizes of a packed sequence, as produced by
# pack_padded_sequence() and pack_sequence(); accepted directly by RNN modules.
class PackedSequence(PackedSequence_):
    def __new__(
        cls,
        data: Tensor,
        batch_sizes: Optional[Tensor] = ...,
        sorted_indices: Optional[Tensor] = ...,
        unsorted_indices: Optional[Tensor] = ...,
    ) -> Self: ...
    def pin_memory(self: Self) -> Self: ...
    def cuda(self: Self, *args: Any, **kwargs: Any) -> Self: ...
    def cpu(self: Self) -> Self: ...
    def double(self: Self) -> Self: ...
    def float(self: Self) -> Self: ...
    def half(self: Self) -> Self: ...
    def long(self: Self) -> Self: ...
    def int(self: Self) -> Self: ...
    def short(self: Self) -> Self: ...
    def char(self: Self) -> Self: ...
    def byte(self: Self) -> Self: ...
    @overload
    def to(
        self: Self,
        dtype: _dtype,
        non_blocking: bool = False,
        copy: bool = False,
    ) -> Self: ...
    @overload
    def to(
        self: Self,
        device: Optional[DeviceLikeType] = None,
        dtype: Optional[_dtype] = None,
        non_blocking: bool = False,
        copy: bool = False,
    ) -> Self: ...
    @overload
    def to(
        self: Self,
        other: Tensor,
        non_blocking: bool = False,
        copy: bool = False,
    ) -> Self: ...
    @property
    def is_cuda(self) -> bool: ...
    def is_pinned(self) -> bool: ...

# Internal helper: inverts a permutation tensor (None is passed through).
def invert_permutation(permutation: Optional[Tensor]): ...
# Packs a padded Tensor of variable-length sequences; `lengths` must be sorted
# in decreasing order unless enforce_sorted=False.
def pack_padded_sequence(
    input: Tensor,
    lengths: Tensor,
    batch_first: bool = ...,
    enforce_sorted: bool = ...,
) -> PackedSequence: ...
# Inverse of pack_padded_sequence(): returns the padded Tensor together with
# the original sequence lengths.
def pad_packed_sequence(
    sequence: PackedSequence,
    batch_first: bool = ...,
    padding_value: float = ...,
    total_length: Optional[int] = ...,
) -> Tuple[Tensor, ...]: ...
# Pads a list of variable-length Tensors with `padding_value` into one Tensor.
def pad_sequence(
    sequences: Union[Tensor, Iterable[Tensor]],
    batch_first: bool = False,
    padding_value: float = ...,
) -> Tensor: ...
# Packs a list of (unpadded) variable-length Tensors into a PackedSequence.
def pack_sequence(
    sequences: Sequence[Tensor],
    enforce_sorted: bool = ...,
) -> PackedSequence: ...
# Builds a PackedSequence directly from its raw fields (internal constructor helper).
def get_packed_sequence(
    data: Tensor,
    batch_sizes: Optional[Tensor],
    sorted_indices: Optional[Tensor],
    unsorted_indices: Optional[Tensor],
) -> PackedSequence: ...
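
# Illustrative usage (a sketch, not part of the stub): the typical flow is
# pad_sequence -> pack_padded_sequence -> RNN -> pad_packed_sequence. The
# tensor shapes and the LSTM below are assumptions chosen for demonstration.
#
#   import torch
#   import torch.nn as nn
#   from torch.nn.utils.rnn import (
#       pack_padded_sequence,
#       pad_packed_sequence,
#       pad_sequence,
#   )
#
#   seqs = [torch.randn(5, 8), torch.randn(3, 8), torch.randn(2, 8)]
#   lengths = torch.tensor([5, 3, 2])                 # sorted in decreasing order
#   padded = pad_sequence(seqs, batch_first=True)     # shape (3, 5, 8)
#   packed = pack_padded_sequence(padded, lengths, batch_first=True)
#
#   lstm = nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
#   packed_out, _ = lstm(packed)                      # RNN modules accept PackedSequence
#   output, out_lengths = pad_packed_sequence(packed_out, batch_first=True)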