Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -69,17 +69,17 @@ qwen_model = Qwen2VLForConditionalGeneration.from_pretrained(
|
|
69 |
torch_dtype=torch.float16
|
70 |
).to("cuda").eval()
|
71 |
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
trust_remote_code=True,
|
77 |
torch_dtype=torch.bfloat16
|
78 |
).to("cuda").eval()
|
79 |
|
80 |
# Main Inference Function
|
81 |
@spaces.GPU
|
82 |
-
def model_inference(message, history, use_rolmocr):
|
83 |
text = message["text"].strip()
|
84 |
files = message.get("files", [])
|
85 |
|
@@ -125,10 +125,10 @@ def model_inference(message, history, use_rolmocr):
|
|
125 |
messages = [{"role": "user", "content": content}]
|
126 |
|
127 |
# Select processor and model
|
128 |
-
if use_rolmocr:
|
129 |
-
processor = rolmocr_processor
|
130 |
-
model = rolmocr_model
|
131 |
-
model_name = "RolmOCR"
|
132 |
else:
|
133 |
processor = qwen_processor
|
134 |
model = qwen_model
|
@@ -176,7 +176,7 @@ demo = gr.ChatInterface(
|
|
176 |
stop_btn="Stop Generation",
|
177 |
multimodal=True,
|
178 |
cache_examples=False,
|
179 |
-
additional_inputs=[gr.Checkbox(label="Use RolmOCR", value=True, info="Check to use RolmOCR, uncheck to use Qwen2VL OCR")],
|
180 |
)
|
181 |
|
182 |
-
demo.launch(debug=True)
|
|
|
69 |
torch_dtype=torch.float16
|
70 |
).to("cuda").eval()
|
71 |
|
72 |
+
DOCSCOPEOCR_MODEL_ID = "prithivMLmods/docscopeOCR-7B-050425-exp"
|
73 |
+
docscopeocr_processor = AutoProcessor.from_pretrained(DOCSCOPEOCR_MODEL_ID, trust_remote_code=True)
|
74 |
+
docscopeocr_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
|
75 |
+
DOCSCOPEOCR_MODEL_ID,
|
76 |
trust_remote_code=True,
|
77 |
torch_dtype=torch.bfloat16
|
78 |
).to("cuda").eval()
|
79 |
|
80 |
# Main Inference Function
|
81 |
@spaces.GPU
|
82 |
+
def model_inference(message, history, use_docscopeocr):
|
83 |
text = message["text"].strip()
|
84 |
files = message.get("files", [])
|
85 |
|
|
|
125 |
messages = [{"role": "user", "content": content}]
|
126 |
|
127 |
# Select processor and model
|
128 |
+
if use_docscopeocr:
|
129 |
+
processor = docscopeocr_processor
|
130 |
+
model = docscopeocr_model
|
131 |
+
model_name = "DocScopeOCR"
|
132 |
else:
|
133 |
processor = qwen_processor
|
134 |
model = qwen_model
|
|
|
176 |
stop_btn="Stop Generation",
|
177 |
multimodal=True,
|
178 |
cache_examples=False,
|
179 |
+
additional_inputs=[gr.Checkbox(label="Use DocScopeOCR", value=True, info="Check to use DocScopeOCR, uncheck to use Qwen2VL OCR")],
|
180 |
)
|
181 |
|
182 |
+
demo.launch(debug=True, ssr_mode=False)
|