danieldk (HF Staff) committed
Commit 7b77420 · Parent: 98affba
Files changed (48) — a note on the build-variant layout and a loading sketch follow the file list.
  1. build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py +46 -0
  2. build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_cxy6p3o2latjs.abi3.so +3 -0
  3. build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py +9 -0
  4. build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py +84 -0
  5. build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/__init__.py +46 -0
  6. build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_deformable_detr_esifsbuexbtbw.abi3.so +3 -0
  7. build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_ops.py +9 -0
  8. build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/layers.py +84 -0
  9. build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/__init__.py +46 -0
  10. build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_cuzn3o54ku5iq.abi3.so +3 -0
  11. build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py +9 -0
  12. build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/layers.py +84 -0
  13. build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/__init__.py +46 -0
  14. build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_gom2c5vfrl2ic.abi3.so +3 -0
  15. build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py +9 -0
  16. build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/layers.py +84 -0
  17. build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/__init__.py +46 -0
  18. build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_deformable_detr_a7sajsuqrick6.abi3.so +3 -0
  19. build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_ops.py +9 -0
  20. build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/layers.py +84 -0
  21. build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/__init__.py +46 -0
  22. build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_tyogxwmtolvok.abi3.so +3 -0
  23. build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py +9 -0
  24. build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/layers.py +84 -0
  25. build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py +46 -0
  26. build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_5kxpyt5yogkv2.abi3.so +3 -0
  27. build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py +9 -0
  28. build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/layers.py +84 -0
  29. build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/__init__.py +46 -0
  30. build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_titoehueyfqjg.abi3.so +3 -0
  31. build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py +9 -0
  32. build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/layers.py +84 -0
  33. build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py +46 -0
  34. build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_imqt5tuqtmyt4.abi3.so +3 -0
  35. build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_ops.py +9 -0
  36. build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/layers.py +84 -0
  37. build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/__init__.py +46 -0
  38. build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_qbnaho3zp2d3o.abi3.so +3 -0
  39. build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py +9 -0
  40. build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/layers.py +84 -0
  41. build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/__init__.py +46 -0
  42. build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_5oxft6tr6jbvu.abi3.so +3 -0
  43. build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py +9 -0
  44. build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/layers.py +84 -0
  45. build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/__init__.py +46 -0
  46. build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_deformable_detr_po264mz2i2ffg.abi3.so +3 -0
  47. build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_ops.py +9 -0
  48. build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/layers.py +84 -0
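Each build directory above encodes the Torch release (torch25/torch26), the C++ ABI (cxx11 or cxx98), the CUDA toolkit (cu118–cu126) and the platform (x86_64-linux); every variant ships the same Python wrappers plus a shared object compiled for that combination. As a minimal sketch (not part of this commit), this is roughly how such prebuilt variants are consumed through the Hugging Face kernels library; the repository id is an assumption:

# Minimal sketch, assuming this repo is published for use with the `kernels` package.
from kernels import get_kernel

# Repository id is an assumption, not taken from this commit.
deformable_detr = get_kernel("kernels-community/deformable-detr")
# The loaded module mirrors the package added here, e.g.
#   deformable_detr.ms_deform_attn_forward(...)
#   deformable_detr.layers.MultiScaleDeformableAttention()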
build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py ADDED
@@ -0,0 +1,46 @@
+ from typing import List
+ import torch
+
+ from ._ops import ops
+ from . import layers
+
+
+ def ms_deform_attn_backward(
+     value: torch.Tensor,
+     spatial_shapes: torch.Tensor,
+     level_start_index: torch.Tensor,
+     sampling_loc: torch.Tensor,
+     attn_weight: torch.Tensor,
+     grad_output: torch.Tensor,
+     im2col_step: int,
+ ) -> List[torch.Tensor]:
+     return ops.ms_deform_attn_backward(
+         value,
+         spatial_shapes,
+         level_start_index,
+         sampling_loc,
+         attn_weight,
+         grad_output,
+         im2col_step,
+     )
+
+
+ def ms_deform_attn_forward(
+     value: torch.Tensor,
+     spatial_shapes: torch.Tensor,
+     level_start_index: torch.Tensor,
+     sampling_loc: torch.Tensor,
+     attn_weight: torch.Tensor,
+     im2col_step: int,
+ ) -> torch.Tensor:
+     return ops.ms_deform_attn_forward(
+         value,
+         spatial_shapes,
+         level_start_index,
+         sampling_loc,
+         attn_weight,
+         im2col_step,
+     )
+
+
+ __all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
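ms_deform_attn_forward and ms_deform_attn_backward are thin wrappers around the compiled ops. A minimal smoke test could look like the sketch below; the tensor shapes follow the upstream Deformable DETR convention and are an assumption, not something stated in this commit, and the ops require a CUDA device.

# Hedged sketch: calling the forward wrapper with dummy CUDA tensors.
# Assumed shapes (Deformable DETR convention):
#   value          (batch, num_keys, heads, head_dim)
#   spatial_shapes (levels, 2), level_start_index (levels,)
#   sampling_loc   (batch, queries, heads, levels, points, 2)
#   attn_weight    (batch, queries, heads, levels, points)
import torch
from deformable_detr import ms_deform_attn_forward  # assumes a matching build dir is importable

batch, queries, heads, head_dim, points = 2, 100, 8, 32, 4
spatial_shapes = torch.tensor([[32, 32], [16, 16]], dtype=torch.long, device="cuda")
level_start_index = torch.tensor([0, 32 * 32], dtype=torch.long, device="cuda")
levels = spatial_shapes.size(0)
num_keys = int(spatial_shapes.prod(1).sum())  # 32*32 + 16*16

value = torch.rand(batch, num_keys, heads, head_dim, device="cuda")
sampling_loc = torch.rand(batch, queries, heads, levels, points, 2, device="cuda")
attn_weight = torch.rand(batch, queries, heads, levels, points, device="cuda")
attn_weight = attn_weight / attn_weight.sum(dim=(-2, -1), keepdim=True)

out = ms_deform_attn_forward(
    value, spatial_shapes, level_start_index, sampling_loc, attn_weight, 64
)
print(out.shape)  # expected: (batch, queries, heads * head_dim)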
build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_cxy6p3o2latjs.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cf71a0243675c22ba3207a6f895a907b0699f964575088e054220cea5e2fb2e
+ size 5870376
build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_cxy6p3o2latjs
+ ops = torch.ops._deformable_detr_cxy6p3o2latjs
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_cxy6p3o2latjs::{op_name}"
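The extension module name is hashed per build variant so several builds can coexist in one process without clashing in the torch.ops registry. A short, purely illustrative use of the helper above:

# Hedged sketch: composing a fully qualified op name for this build's namespace.
from deformable_detr._ops import ops, add_op_namespace_prefix

print(add_op_namespace_prefix("ms_deform_attn_forward"))
# -> "_deformable_detr_cxy6p3o2latjs::ms_deform_attn_forward"
print(ops.ms_deform_attn_forward)  # the same op, bound under torch.ops for this variant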
build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py ADDED
@@ -0,0 +1,84 @@
+ from typing import List, Union, Tuple
+
+ from torch import Tensor
+ from torch.autograd import Function
+ from torch.autograd.function import once_differentiable
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class MultiScaleDeformableAttentionFunction(Function):
+     @staticmethod
+     def forward(
+         context,
+         value: Tensor,
+         value_spatial_shapes: Tensor,
+         value_level_start_index: Tensor,
+         sampling_locations: Tensor,
+         attention_weights: Tensor,
+         im2col_step: int,
+     ):
+         context.im2col_step = im2col_step
+         output = ops.ms_deform_attn_forward(
+             value,
+             value_spatial_shapes,
+             value_level_start_index,
+             sampling_locations,
+             attention_weights,
+             context.im2col_step,
+         )
+         context.save_for_backward(
+             value,
+             value_spatial_shapes,
+             value_level_start_index,
+             sampling_locations,
+             attention_weights,
+         )
+         return output
+
+     @staticmethod
+     @once_differentiable
+     def backward(context, grad_output):
+         (
+             value,
+             value_spatial_shapes,
+             value_level_start_index,
+             sampling_locations,
+             attention_weights,
+         ) = context.saved_tensors
+         grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+             value,
+             value_spatial_shapes,
+             value_level_start_index,
+             sampling_locations,
+             attention_weights,
+             grad_output,
+             context.im2col_step,
+         )
+
+         return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+ class MultiScaleDeformableAttention(nn.Module):
+     def forward(
+         self,
+         value: Tensor,
+         value_spatial_shapes: Tensor,
+         value_spatial_shapes_list: List[Tuple],
+         level_start_index: Tensor,
+         sampling_locations: Tensor,
+         attention_weights: Tensor,
+         im2col_step: int,
+     ):
+         return MultiScaleDeformableAttentionFunction.apply(
+             value,
+             value_spatial_shapes,
+             level_start_index,
+             sampling_locations,
+             attention_weights,
+             im2col_step,
+         )
+
+
+ __all__ = ["MultiScaleDeformableAttention"]
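MultiScaleDeformableAttention is a thin nn.Module front end over the autograd Function above; value_spatial_shapes_list is accepted for interface compatibility but not forwarded to the kernel, and backward returns None for the index and im2col_step arguments, which are not differentiable. A hedged usage sketch, reusing the assumed shapes from the earlier example:

# Hedged sketch: gradients flow back to value, sampling_locations and attention_weights.
# Shapes (and leaving the attention weights unnormalized) are assumptions for brevity.
import torch
from deformable_detr.layers import MultiScaleDeformableAttention

attn = MultiScaleDeformableAttention()
spatial_shapes = torch.tensor([[32, 32], [16, 16]], dtype=torch.long, device="cuda")
level_start_index = torch.tensor([0, 32 * 32], dtype=torch.long, device="cuda")
num_keys, queries, heads, head_dim, points = 32 * 32 + 16 * 16, 100, 8, 32, 4

value = torch.rand(2, num_keys, heads, head_dim, device="cuda", requires_grad=True)
sampling_locations = torch.rand(2, queries, heads, 2, points, 2, device="cuda", requires_grad=True)
attention_weights = torch.rand(2, queries, heads, 2, points, device="cuda", requires_grad=True)

out = attn(
    value,
    spatial_shapes,
    [(32, 32), (16, 16)],  # value_spatial_shapes_list: kept for API parity, unused by the CUDA path
    level_start_index,
    sampling_locations,
    attention_weights,
    im2col_step=64,
)
out.sum().backward()  # populates .grad on the three differentiable inputs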
build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/__init__.py ADDED
(Diff body omitted: the 46 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py shown above.)
build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_deformable_detr_esifsbuexbtbw.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79dce2e84e09fb2a5bf1b47441b226343494807687d8829f141682af9b78e361
+ size 5856160
build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_esifsbuexbtbw
+ ops = torch.ops._deformable_detr_esifsbuexbtbw
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_esifsbuexbtbw::{op_name}"
build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/layers.py ADDED
(Diff body omitted: the 84 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py shown above.)
build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/__init__.py ADDED
(Diff body omitted: the 46 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py shown above.)
build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_cuzn3o54ku5iq.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:825c7cb6f9a4350bdcdffa4383d7a527d5fa7b0d9d83222f5d1e72f1c6087841
+ size 5841688
build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_cuzn3o54ku5iq
+ ops = torch.ops._deformable_detr_cuzn3o54ku5iq
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_cuzn3o54ku5iq::{op_name}"
build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/layers.py ADDED
(Diff body omitted: the 84 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py shown above.)
build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/__init__.py ADDED
(Diff body omitted: the 46 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py shown above.)
build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_gom2c5vfrl2ic.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbe4c67fc885df711581660f72d86dbd0a237c7f106308e55a484725c88e9927
+ size 5863312
build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_gom2c5vfrl2ic
+ ops = torch.ops._deformable_detr_gom2c5vfrl2ic
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_gom2c5vfrl2ic::{op_name}"
build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/layers.py ADDED
(Diff body omitted: the 84 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py shown above.)
build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/__init__.py ADDED
(Diff body omitted: the 46 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py shown above.)
build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_deformable_detr_a7sajsuqrick6.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56b4c64eb7931a6f580bd5b806eae1aea43b3bb8c0f115d5d202f151974a5e7b
+ size 5853280
build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_a7sajsuqrick6
+ ops = torch.ops._deformable_detr_a7sajsuqrick6
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_a7sajsuqrick6::{op_name}"
build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/layers.py ADDED
(Diff body omitted: the 84 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py shown above.)
build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/__init__.py ADDED
(Diff body omitted: the 46 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py shown above.)
build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_tyogxwmtolvok.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7eef07a96ddf574e5b1e07476089a62659a70faa33c82fc79987c54fecb2711f
+ size 5834712
build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_tyogxwmtolvok
+ ops = torch.ops._deformable_detr_tyogxwmtolvok
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_tyogxwmtolvok::{op_name}"
build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/layers.py ADDED
(Diff body omitted: the 84 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py shown above.)
build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py ADDED
(Diff body omitted: the 46 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py shown above.)
build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_5kxpyt5yogkv2.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1c5bb5376002363e2008eb6db64ebe0c9f6c31f9a635b7420ddfb46dce16b02
+ size 5870352
build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_5kxpyt5yogkv2
+ ops = torch.ops._deformable_detr_5kxpyt5yogkv2
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_5kxpyt5yogkv2::{op_name}"
build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/layers.py ADDED
(Diff body omitted: the 84 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py shown above.)
build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/__init__.py ADDED
(Diff body omitted: the 46 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py shown above.)
build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_titoehueyfqjg.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76b74d4bdbb1f562474b987fd23430d12b9f033183198f35a7dfd21fcc8ce4e1
+ size 5837664
build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_titoehueyfqjg
+ ops = torch.ops._deformable_detr_titoehueyfqjg
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_titoehueyfqjg::{op_name}"
build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/layers.py ADDED
(Diff body omitted: the 84 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py shown above.)
build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py ADDED
(Diff body omitted: the 46 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py shown above.)
build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_imqt5tuqtmyt4.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1acd032c2f3bc530872e0839d8bec8950b01668c913539a2e14008a1e652560f
+ size 5944608
build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_imqt5tuqtmyt4
+ ops = torch.ops._deformable_detr_imqt5tuqtmyt4
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_imqt5tuqtmyt4::{op_name}"
build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/layers.py ADDED
(Diff body omitted: the 84 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py shown above.)
build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/__init__.py ADDED
(Diff body omitted: the 46 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py shown above.)
build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_qbnaho3zp2d3o.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9e5074a5afdb137688e20182cf4c9f7cbb1e8a69651c08a570076aeedc8c76b
+ size 5863320
build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_qbnaho3zp2d3o
+ ops = torch.ops._deformable_detr_qbnaho3zp2d3o
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_qbnaho3zp2d3o::{op_name}"
build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/layers.py ADDED
(Diff body omitted: the 84 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py shown above.)
build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/__init__.py ADDED
(Diff body omitted: the 46 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py shown above.)
build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_5oxft6tr6jbvu.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd4d0f47c165b9ce95c0328cb7a52e331e4c698746ea8e4d43c7d09c193e34bd
+ size 5834720
build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_5oxft6tr6jbvu
+ ops = torch.ops._deformable_detr_5oxft6tr6jbvu
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_5oxft6tr6jbvu::{op_name}"
build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/layers.py ADDED
(Diff body omitted: the 84 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py shown above.)
build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/__init__.py ADDED
(Diff body omitted: the 46 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py shown above.)
build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_deformable_detr_po264mz2i2ffg.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:129844ba533ee201cd3f2bb0e17a354ee8aa35176c10896454926485acdacdac
+ size 5945760
build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _deformable_detr_po264mz2i2ffg
+ ops = torch.ops._deformable_detr_po264mz2i2ffg
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_deformable_detr_po264mz2i2ffg::{op_name}"
build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/layers.py ADDED
(Diff body omitted: the 84 added lines are identical to build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py shown above.)