dwb2023 committed · verified
Commit 4f58ff6 · 1 Parent(s): 36c990c

Update app.py

Files changed (1):
  1. app.py +0 -37
app.py CHANGED
@@ -1,4 +1,3 @@
-import os
 import spaces
 import gradio as gr
 from transformers import AutoProcessor, AutoModelForCausalLM
@@ -15,16 +14,6 @@ import matplotlib.patches as patches
 import random
 import numpy as np
 
-from transformers import AutoProcessor, AutoModelForCausalLM
-from detectron2.config import get_cfg
-from detectron2.engine import DefaultPredictor
-from detectron2 import model_zoo
-from detectron2.utils.visualizer import Visualizer, ColorMode
-from detectron2.data import MetadataCatalog
-import cv2
-import numpy as np
-import matplotlib.pyplot as plt
-
 import subprocess
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
@@ -140,22 +129,6 @@ single_task_list =[
     'Object Detection'
 ]
 
-# Detectron2 configuration
-cfg = get_cfg()
-cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
-cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
-cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
-cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
-predictor = DefaultPredictor(cfg)
-
-def process_image_with_detectron2(image_name):
-    image_path = os.path.join(example_image_dir, image_name)
-    image = cv2.imread(image_path)
-    outputs = predictor(image)
-    v = Visualizer(image[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
-    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
-    return Image.fromarray(out.get_image()[:, :, ::-1])
-
 with gr.Blocks(theme="sudeepshouche/minimalist") as demo:
     gr.Markdown("## 🧬OmniScience - building teams of fine tuned VLM models for diagnosis and detection 🔧")
     gr.Markdown("- 🔬Florence-2 Model Proof of Concept, focusing on Object Detection <OD> tasks.")
@@ -198,16 +171,6 @@ with gr.Blocks(theme="sudeepshouche/minimalist") as demo:
 
         submit_btn.click(process_image, [input_img, task_prompt, model_selector], [output_text, output_img])
 
-    with gr.Tab(label="Segmentation"):
-        with gr.Row():
-            with gr.Column():
-                input_img = gr.Dropdown(choices=example_images, label="Input Picture", value=example_images[0])
-                submit_btn = gr.Button(value="Submit")
-            with gr.Column():
-                output_img = gr.Image(label="Output Image")
-
-        submit_btn.click(process_segmentation, inputs=[input_img], outputs=[output_img])
-
     gr.Markdown("## 🚀Other Cool Stuff:")
     gr.Markdown("- [Florence 2 Whitepaper](https://arxiv.org/pdf/2311.06242) - how I found out about the Roboflow 100 and the BCCD dataset. Turns out this nugget was from the original [Florence whitepaper](https://arxiv.org/pdf/2111.11432) but useful all the same!")
     gr.Markdown("- [Roboflow YouTube Video on Florence 2 fine-tuning](https://youtu.be/i3KjYgxNH6w?si=x1ZMg9hsNe25Y19-&t=1296) - bookmarked an 🧠insightful trade-off analysis of various VLMs.")
 
 
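For reference, the segmentation path removed by this commit depended on definitions elsewhere in app.py (notably `example_image_dir`) and on a detectron2 install. A self-contained sketch of what it did, assuming detectron2, OpenCV, and torch are available and that `example_image_dir` is a local folder of demo images (assumptions for illustration, not the app's exact code):

```python
import os
import cv2
import torch
from PIL import Image
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer

# Mask R-CNN (R50-FPN, 3x schedule) from the detectron2 model zoo.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # drop low-confidence detections
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
predictor = DefaultPredictor(cfg)

example_image_dir = "examples"  # assumption: folder of bundled demo images

def process_image_with_detectron2(image_name: str) -> Image.Image:
    """Run instance segmentation on one example image and return the overlay."""
    image = cv2.imread(os.path.join(example_image_dir, image_name))  # BGR
    outputs = predictor(image)
    # Visualizer expects RGB, so flip the channel order before drawing.
    v = Visualizer(image[:, :, ::-1],
                   MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    return Image.fromarray(out.get_image())  # get_image() already returns RGB
```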