Hammad712 committed
Commit f01141e · verified · 1 Parent(s): 39fd5bb

Update app.py

Files changed (1)
  1. app.py +89 -37
app.py CHANGED
@@ -5,49 +5,91 @@ from transformers import ViTForImageClassification, ViTImageProcessor
 import logging
 import base64
 from io import BytesIO
+from groq import Groq  # Import the Groq client for Deepseek R1 API
 
-# Setup logging
+# ------------------ Setup Logging ------------------
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
-# Load the model and feature extractor from Hugging Face
+# ------------------ Load the ViT Model ------------------
 repository_id = "EnDevSols/brainmri-vit-model"
 model = ViTForImageClassification.from_pretrained(repository_id)
 feature_extractor = ViTImageProcessor.from_pretrained(repository_id)
 
-# Function to perform inference
+# ------------------ ViT Inference Function ------------------
 def predict(image):
-    # Load and preprocess the image
+    """
+    Given an image, perform inference using the ViT model to detect brain tumor.
+    Returns a human-readable diagnosis string.
+    """
+    # Convert to RGB and preprocess the image
     image = image.convert("RGB")
     inputs = feature_extractor(images=image, return_tensors="pt")
-
-    # Move the inputs to the appropriate device
+
+    # Set the device (GPU if available)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     model.to(device)
     inputs = {k: v.to(device) for k, v in inputs.items()}
-
-    # Perform inference
+
+    # Perform inference without gradient computation
     with torch.no_grad():
         outputs = model(**inputs)
-
-    # Get the predicted label
+
+    # Get the predicted label and map to a diagnosis
     logits = outputs.logits
     predicted_label = logits.argmax(-1).item()
-
-    # Map the label to "No" or "Yes"
     label_map = {0: "No", 1: "Yes"}
     diagnosis = label_map[predicted_label]
-
-    # Return a complete statement
+
     if diagnosis == "Yes":
         return "The diagnosis indicates that you have a brain tumor."
     else:
         return "The diagnosis indicates that you do not have a brain tumor."
 
-# Custom CSS
-def set_css(style):
-    st.markdown(f"<style>{style}</style>", unsafe_allow_html=True)
+# ------------------ Deepseek R1 Assistance Function ------------------
+def get_assistance_from_deepseek(diagnosis_text):
+    """
+    Given the diagnosis from the ViT model, call the Deepseek R1 model via the Groq API
+    to get additional recommendations and next steps.
+    """
+    # Instantiate the Groq client with the provided API key
+    client = Groq(api_key="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")
+
+    # Construct a prompt that includes the diagnosis and asks for detailed guidance
+    prompt = (
+        f"Based on the following diagnosis: '{diagnosis_text}', please provide next steps and "
+        "recommendations for the patient. Include whether to consult a specialist, if further tests "
+        "are needed, and any other immediate actions or lifestyle recommendations."
+    )
+
+    messages = [
+        {
+            "role": "system",
+            "content": "You are a helpful medical assistant providing guidance after a brain tumor diagnosis."
+        },
+        {"role": "user", "content": prompt}
+    ]
+
+    # Create the completion using the Deepseek R1 model (non-streaming for simplicity)
+    completion = client.chat.completions.create(
+        model="deepseek-r1-distill-llama-70b",
+        messages=messages,
+        temperature=0.6,
+        max_completion_tokens=4096,
+        top_p=0.95,
+        stream=False,
+        stop=None,
+    )
+
+    # Extract the response text. (Depending on the API response format, adjust as needed.)
+    try:
+        assistance_text = completion.choices[0].message.content
+    except AttributeError:
+        # Fallback in case the structure is different
+        assistance_text = completion.choices[0].text
+
+    return assistance_text
 
-# Combined dark mode styles
+# ------------------ Custom CSS for Styling ------------------
 combined_css = """
 .main, .sidebar .sidebar-content { background-color: #1c1c1c; color: #f0f2f6; }
 .block-container { padding: 1rem 2rem; background-color: #333; border-radius: 10px; box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.5); }
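Note on this hunk: the Groq API key is committed in plain text, so it is exposed to anyone who can read the repo and should be revoked. A safer pattern is to load it from Streamlit's secrets store or an environment variable. A minimal sketch, assuming a `GROQ_API_KEY` variable has been configured; the variable name and helper function are illustrative, not part of this commit:

```python
import os

from groq import Groq

def make_groq_client() -> Groq:
    # "GROQ_API_KEY" is an assumed variable name, not something defined in this commit.
    api_key = os.environ.get("GROQ_API_KEY")
    if not api_key:
        raise RuntimeError("GROQ_API_KEY environment variable is not set")
    return Groq(api_key=api_key)
```

`get_assistance_from_deepseek` could then call `make_groq_client()` instead of embedding the key (on Streamlit Cloud, `st.secrets` is the equivalent place to keep it).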
@@ -68,14 +110,6 @@ combined_css = """
 .black-white-text {
     color: black;
 }
-.small-input .stTextInput>div>input {
-    height: 2rem;
-    font-size: 0.9rem;
-}
-.small-file-uploader .stFileUploader>div>div {
-    height: 2rem;
-    font-size: 0.9rem;
-}
 .custom-text {
     font-size: 1.2rem;
     color: #feb47b;
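A separate, optional improvement that this commit does not make: Streamlit re-executes the whole script on every widget interaction, so the module-level `from_pretrained` calls above reload the ViT weights on each rerun. Caching the load is the usual fix. A sketch using Streamlit's `st.cache_resource` decorator; the wrapper function name is hypothetical:

```python
import streamlit as st
from transformers import ViTForImageClassification, ViTImageProcessor

@st.cache_resource  # keeps one loaded copy per process, shared across reruns
def load_vit(repo_id: str = "EnDevSols/brainmri-vit-model"):
    model = ViTForImageClassification.from_pretrained(repo_id)
    processor = ViTImageProcessor.from_pretrained(repo_id)
    return model, processor

model, feature_extractor = load_vit()
```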
@@ -85,33 +119,51 @@
 }
 """
 
-# Streamlit application
+# ------------------ Streamlit App Configuration ------------------
 st.set_page_config(layout="wide")
-
 st.markdown(f"<style>{combined_css}</style>", unsafe_allow_html=True)
 
-st.markdown('<div class="title"><span class="colorful-text">Brain MRI</span> <span class="black-white-text">Tumor Detection</span></div>', unsafe_allow_html=True)
-st.markdown('<div class="custom-text">Upload an MRI image to detect brain tumor</div>', unsafe_allow_html=True)
+# App Title and Description
+st.markdown(
+    '<div class="title"><span class="colorful-text">Brain MRI</span> <span class="black-white-text">Tumor Detection</span></div>',
+    unsafe_allow_html=True
+)
+st.markdown(
+    '<div class="custom-text">Upload an MRI image to detect a brain tumor and receive next steps and recommendations.</div>',
+    unsafe_allow_html=True
+)
 
-# Uploading image
-uploaded_file = st.file_uploader("Choose an image...", type="jpg")
+# ------------------ Image Upload Section ------------------
+uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
 
 if uploaded_file is not None:
     image = Image.open(uploaded_file)
 
-    # Resize the image for display
+    # Resize image for display purposes
     resized_image = image.resize((150, 150))
 
-    # Convert image to base64
+    # Convert image to base64 for HTML display
     buffered = BytesIO()
     resized_image.save(buffered, format="JPEG")
     img_str = base64.b64encode(buffered.getvalue()).decode()
 
-    # Display the image in the center
-    st.markdown(f"<div style='text-align: center;'><img src='data:image/jpeg;base64,{img_str}' alt='Uploaded Image' width='300'></div>", unsafe_allow_html=True)
+    # Display the uploaded image in the center
+    st.markdown(
+        f"<div style='text-align: center;'><img src='data:image/jpeg;base64,{img_str}' alt='Uploaded Image' width='300'></div>",
+        unsafe_allow_html=True
+    )
 
     st.write("")
-    st.write("Result...")
+    st.write("Processing the image...")
 
+    # ------------------ Step 1: Get Diagnosis from the ViT Model ------------------
     diagnosis = predict(image)
+    st.markdown("### Diagnosis:")
     st.write(diagnosis)
+
+    # ------------------ Step 2: Get Further Assistance from Deepseek R1 ------------------
+    with st.spinner("Fetching additional guidance based on your diagnosis..."):
+        assistance = get_assistance_from_deepseek(diagnosis)
+
+    st.markdown("### Next Steps and Recommendations:")
+    st.write(assistance)
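Finally, a quick way to smoke-test the new two-step pipeline without launching the UI. A sketch, assuming it runs next to app.py with valid Groq credentials configured and a local test image available (the filename is illustrative); importing app.py executes its module-level code, which Streamlit tolerates outside a session (it warns rather than fails, and the upload branch is skipped because `st.file_uploader` returns None):

```python
from PIL import Image

# Importing app runs its module-level code (model load, st.* calls).
from app import predict, get_assistance_from_deepseek

image = Image.open("sample_mri.jpg")  # hypothetical local test image

diagnosis = predict(image)
print("Diagnosis:", diagnosis)

# This makes a real Groq API call and will fail without valid credentials.
print("Guidance:", get_assistance_from_deepseek(diagnosis))
```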