nph4rd committed on
Commit
c86af64
·
verified ·
1 Parent(s): 286ea0b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -20,14 +20,15 @@ PROCESSOR_IDS = {
20
  "Model 2 (WaveUI 896)": "google/paligemma-3b-pt-896"
21
  }
22
 
 
 
 
23
  # Load models and processors
24
  models = {name: PaliGemmaForConditionalGeneration.from_pretrained(model_id).eval().to(device)
25
  for name, model_id in MODEL_IDS.items()}
26
  processors = {name: PaliGemmaProcessor.from_pretrained(processor_id)
27
  for name, processor_id in PROCESSOR_IDS.items()}
28
 
29
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
30
-
31
  ###### Transformers Inference
32
  @spaces.GPU
33
  def infer(
@@ -62,7 +63,7 @@ def parse_segmentation(input_image, input_text, model_choice):
62
  obj['name'] or '',
63
  )
64
  for obj in objs
65
- if 'mask' in obj or 'xyxy' in obj
66
  ],
67
  )
68
  has_annotations = bool(annotated_img[1])
@@ -108,7 +109,6 @@ with gr.Blocks(css="style.css") as demo:
108
  outputs=seg_outputs,
109
  )
110
 
111
-
112
  _SEGMENT_DETECT_RE = re.compile(
113
  r'(.*?)' +
114
  r'<loc(\d{4})>' * 4 + r'\s*' +
 
20
  "Model 2 (WaveUI 896)": "google/paligemma-3b-pt-896"
21
  }
22
 
23
+ # Device configuration
24
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
25
+
26
  # Load models and processors
27
  models = {name: PaliGemmaForConditionalGeneration.from_pretrained(model_id).eval().to(device)
28
  for name, model_id in MODEL_IDS.items()}
29
  processors = {name: PaliGemmaProcessor.from_pretrained(processor_id)
30
  for name, processor_id in PROCESSOR_IDS.items()}
31
 
 
 
32
  ###### Transformers Inference
33
  @spaces.GPU
34
  def infer(
 
63
  obj['name'] or '',
64
  )
65
  for obj in objs
66
+ if 'mask' in obj or 'xyxy'
67
  ],
68
  )
69
  has_annotations = bool(annotated_img[1])
 
109
  outputs=seg_outputs,
110
  )
111
 
 
112
  _SEGMENT_DETECT_RE = re.compile(
113
  r'(.*?)' +
114
  r'<loc(\d{4})>' * 4 + r'\s*' +