Thastp committed
Commit ea77e61 · verified · 1 Parent(s): ab2b851

Upload model

Files changed (3)
  1. config.json +0 -1
  2. configuration_rf_detr.py +0 -4
  3. modeling_rf_detr.py +2 -5
config.json CHANGED
@@ -11,7 +11,6 @@
   "ca_nheads": 16,
   "dec_layers": 3,
   "dec_n_points": 2,
-  "device": "cpu",
   "encoder": "dinov2_windowed_small",
   "gradient_checkpointing": false,
   "group_detr": 13,
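With this commit the checkpoint's config.json no longer pins "device": "cpu", so device placement is decided by whoever loads the model rather than by the serialized config. A minimal loading sketch, assuming the repo's configuration_rf_detr.py and modeling_rf_detr.py are importable locally; the checkpoint directory path is hypothetical:

    import torch

    from configuration_rf_detr import RFDetrConfig               # custom config class from this repo
    from modeling_rf_detr import RFDetrModelForObjectDetection   # custom model class from this repo

    # Placement is now the caller's job; pick whatever device is available.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    checkpoint_dir = "./rf-detr-checkpoint"  # hypothetical path to the downloaded files
    config = RFDetrConfig.from_pretrained(checkpoint_dir)
    model = RFDetrModelForObjectDetection.from_pretrained(checkpoint_dir, config=config)
    model.to(device).eval()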
configuration_rf_detr.py CHANGED
@@ -7,8 +7,6 @@ from optimum.utils import DummyVisionInputGenerator
 
 ### modified from https://github.com/roboflow/rf-detr/blob/main/rfdetr/config.py
 
-DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
-
 class RFDetrConfig(PretrainedConfig):
     model_type = 'rf-detr'
 
@@ -25,7 +23,6 @@ class RFDetrConfig(PretrainedConfig):
         amp: bool = True,
         num_classes: int = 90,
         num_queries: int = 300,
-        device: Literal["cpu", "cuda", "mps"] = DEVICE,
         resolution: int = 560,
         group_detr: int = 13,
         gradient_checkpointing: bool = False,
@@ -41,7 +38,6 @@ class RFDetrConfig(PretrainedConfig):
         self.layer_norm = layer_norm
         self.amp = amp
         self.num_classes = num_classes
-        self.device = device
         self.resolution = resolution
         self.group_detr = group_detr
         self.gradient_checkpointing = gradient_checkpointing
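The module-level DEVICE auto-detection and the device argument on RFDetrConfig are gone, so the config is now device-agnostic. Callers that relied on the old default can reproduce the same cuda, then mps, then cpu priority themselves; a small sketch (the pick_device helper name is mine, not part of the repo):

    import torch

    def pick_device() -> str:
        # Same priority the removed DEVICE expression used: cuda, then mps, then cpu.
        if torch.cuda.is_available():
            return "cuda"
        if torch.backends.mps.is_available():
            return "mps"
        return "cpu"

    device = torch.device(pick_device())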
modeling_rf_detr.py CHANGED
@@ -41,7 +41,6 @@ class RFDetrModelForObjectDetection(PreTrainedModel):
             layer_norm = config.layer_norm,
             amp = config.amp,
             num_classes = config.num_classes,
-            device = config.device,
             resolution = config.resolution,
             group_detr = config.group_detr,
             gradient_checkpointing = config.gradient_checkpointing,
@@ -112,16 +111,14 @@ class RFDetrModelForObjectDetection(PreTrainedModel):
         wr = self.config.resolution / float(w)
 
         for label in labels:
-            boxes = label["boxes"].to(device=self.config.device, dtype=torch.float32)
+            boxes = label["boxes"]
             # resize boxes to model's resolution
             boxes[:, [0, 2]] *= wr
             boxes[:, [1, 3]] *= hr
             # normalize to [0, 1] by model's resolution
             boxes[:] /= self.config.resolution
             label["boxes"] = boxes
-            if "labels" in label:
-                label["labels"] = label["labels"].to(self.config.device)
-
+
     ### modified from https://github.com/roboflow/rf-detr/blob/develop/rfdetr/models/backbone/dinov2_with_windowed_attn.py
     def _onnx_interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
         """