Kabilash10 committed on
Commit
339e0fe
·
verified ·
1 Parent(s): 661934e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -1
app.py CHANGED
@@ -1,3 +1,36 @@
1
  import gradio as gr
 
 
 
2
 
3
- gr.load("models/Falconsai/medical_summarization").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from PIL import Image
3
+ import requests
4
+ import io
5
 
6
# Function to perform OCR using the Qwen model via the HF Inference API
def perform_ocr(image):
    """Run OCR on an uploaded image using the Qwen-VL Inference API.

    Parameters
    ----------
    image : PIL.Image.Image
        Image supplied by the Gradio `gr.Image(type="pil")` input.

    Returns
    -------
    str
        The extracted text on success, otherwise an error message
        containing the API's response body.
    """
    import os  # local import: token is read from the Space's secret store

    # BUG FIX: JPEG cannot encode an alpha channel or palette mode, so
    # RGBA/P uploads (e.g. PNG screenshots) crashed on `image.save`.
    if image.mode not in ("RGB", "L"):
        image = image.convert("RGB")

    # Serialize the image to JPEG bytes for the HTTP upload.
    buffered = io.BytesIO()
    image.save(buffered, format="JPEG")
    image_bytes = buffered.getvalue()

    # API URL for Qwen OCR model
    api_url = "https://api-inference.huggingface.co/models/Qwen/Qwen-VL"
    # BUG FIX: `HF` was an undefined global (NameError on every call).
    # Read the token from the environment — the standard Spaces secret
    # pattern (set a secret named "HF" in the Space settings).
    hf_token = os.environ.get("HF", "")
    headers = {"Authorization": f"Bearer {hf_token}"}

    # Make the request; bound the wait so the UI cannot hang forever.
    response = requests.post(
        api_url, headers=headers, files={"file": image_bytes}, timeout=60
    )

    # Check if the request was successful
    if response.status_code == 200:
        result = response.json()
        # NOTE(review): 'text' field assumed from the API response format —
        # confirm against the model's actual Inference API schema.
        return result['text']
    else:
        return "Error: " + response.text
26
+
27
# Gradio interface: image in, OCR text out.
with gr.Blocks() as demo:
    gr.Markdown("## OCR with Qwen Model")

    # Widgets: an uploader, a trigger button, and a read-only result box.
    uploaded_image = gr.Image(type="pil", label="Upload Image")
    run_button = gr.Button("Perform OCR")
    extracted_text = gr.Textbox(label="Extracted Text", lines=10)

    # Wire the button click to the OCR handler.
    run_button.click(perform_ocr, inputs=uploaded_image, outputs=extracted_text)

demo.launch()