leilaaaaa committed on
Commit
b582acf
·
verified ·
1 Parent(s): 5293441
Files changed (1) hide show
  1. app.py +73 -0
app.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from PIL import Image
3
+ import io
4
+ import base64
5
+ import requests # For making API requests
6
+
7
+ # Function to encode image as base64
8
def image_to_base64(image):
    """Serialize an image to a base64-encoded PNG string.

    Args:
        image: A PIL Image (or any object exposing a compatible ``save`` method).

    Returns:
        str: Base64 text of the PNG-encoded image bytes.
    """
    with io.BytesIO() as png_buffer:
        image.save(png_buffer, format="PNG")
        raw_bytes = png_buffer.getvalue()
    return base64.b64encode(raw_bytes).decode("utf-8")
13
+
14
+ # Function to interact with LLAVA model
15
def chat_with_llava(image, question):
    """Send a medical image plus a question to the LLAVA chat API and return its answer.

    Args:
        image: PIL image to analyze (converted to a base64 PNG for the request).
        question: Free-text medical question about the image.

    Returns:
        str: The model's answer, or a human-readable error message on failure.
    """
    try:
        # Convert image to base64 for transport in the JSON payload.
        image_b64 = image_to_base64(image)

        # Instruct the model to answer in the persona of a medical doctor.
        doctor_prompt = (
            "You are a highly experienced and knowledgeable medical doctor. "
            "Please analyze the provided medical image and give a detailed medical explanation in response to the following question.\n\n"
            f"Question: {question}\n"
            "Please include relevant medical terminology and explanations in your response."
        )

        # URL for the chat API endpoint.
        api_url = "https://api.ollama.com/chat"

        # NOTE(review): replace 'YOUR_API_KEY' with a real key — ideally loaded
        # from an environment variable rather than hard-coded in source.
        headers = {
            "Authorization": "Bearer YOUR_API_KEY",
            "Content-Type": "application/json",
        }

        payload = {
            "model": "rohithbojja/llava-med-v1.6",
            "messages": [
                {
                    "role": "user",
                    "content": doctor_prompt,
                    "image": image_b64,
                }
            ],
        }

        # timeout= prevents the UI worker from hanging forever when the API is
        # unreachable — requests has NO default timeout.
        response = requests.post(api_url, json=payload, headers=headers, timeout=60)
        # Surface HTTP errors (4xx/5xx) directly instead of failing below with
        # an opaque KeyError on the error body.
        response.raise_for_status()
        response_data = response.json()

        # Guarded access: a malformed body yields a clear message, not a KeyError.
        message = response_data.get("message")
        if not isinstance(message, dict) or "content" not in message:
            return f"Error occurred: unexpected API response: {response_data!r}"
        return message["content"]

    except Exception as e:
        # Broad catch is deliberate: this is a UI callback, so any failure is
        # rendered as text in the output box instead of crashing the interface.
        return f"Error occurred: {str(e)}"
58
+
59
+ # Create a Gradio interface
60
# Gradio UI wiring. The gr.inputs / gr.outputs namespaces were deprecated in
# Gradio 3.0 and removed in 4.x, and gr.outputs.Textbox never accepted a
# `placeholder` argument — the original calls fail on any current Gradio
# install. Top-level component classes (gr.Image, gr.Textbox) are the
# supported equivalents and accept the same options.
iface = gr.Interface(
    fn=chat_with_llava,
    inputs=[
        gr.Image(type="pil", label="Upload Medical Image"),
        gr.Textbox(lines=2, label="Ask a medical question about the image"),
    ],
    outputs=gr.Textbox(label="Response", placeholder="Model response will appear here..."),
    title="LLAVA Model - Medical Image and Question",
    description="Upload a medical image and ask a specific question about the image for a medical description.",
)

# Launch only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    iface.launch()