import gradio as gr
import tensorflow as tf
import pdfplumber
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import timm
import torch
import pandas as pd
# Load pre-trained zero-shot model for text classification (using PyTorch for compatibility)
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli", framework="pt")
# Pre-trained ResNet50 model for X-ray or image analysis using Timm
image_model = timm.create_model('resnet50', pretrained=True)
image_model.eval()
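# --- Illustrative only (not wired into the UI below) ---------------------------
# The ResNet-50 above is loaded but never called in this app. The helper below is
# a minimal sketch of how it could score an uploaded image, assuming a NumPy
# array such as gr.Image(type="numpy") would supply; the function name and the
# preprocessing choices are assumptions, not part of the original code.
def analyze_image_with_resnet(input_image):
    import numpy as np
    from PIL import Image
    from timm.data import resolve_data_config, create_transform

    # Build the preprocessing pipeline that matches the pretrained weights
    config = resolve_data_config({}, model=image_model)
    transform = create_transform(**config)
    img = Image.fromarray(np.uint8(input_image)).convert("RGB")
    tensor = transform(img).unsqueeze(0)  # add batch dimension

    with torch.no_grad():
        probs = torch.softmax(image_model(tensor), dim=1)
    top_prob, top_idx = probs.max(dim=1)
    # ResNet-50 here is an ImageNet classifier, so this reports a generic class
    # index rather than a medical finding.
    return f"Top ImageNet class index: {top_idx.item()} (confidence: {top_prob.item():.2%})"
# -------------------------------------------------------------------------------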
from tensorflow import keras
from tensorflow.keras.layers import TFSMLayer
# Load the model as a layer (in the SavedModel format)
#eye_model = TFSMLayer('model.h5')
# Patient database
patients_db = []
# Disease details for medical report analyzer
disease_details = {
    "anemia": {"medication": "Iron supplements", "precaution": "Eat iron-rich foods", "doctor": "Hematologist"},
    "viral infection": {"medication": "Antiviral drugs", "precaution": "Stay hydrated", "doctor": "Infectious Disease Specialist"},
    "liver disease": {"medication": "Hepatoprotective drugs", "precaution": "Avoid alcohol", "doctor": "Hepatologist"},
    "diabetes": {"medication": "Metformin or insulin", "precaution": "Monitor sugar levels", "doctor": "Endocrinologist"},
}
# Passwords
doctor_password = "doctor123"
# Loading the custom model for consultation with the doctor
try:
    # Force using the slow tokenizer for compatibility
    tokenizer = AutoTokenizer.from_pretrained("ahmed-7124/NeuraMedAW", use_fast=False)
except Exception as e:
    # Retrying the identical call would fail the same way, so log and re-raise
    print(f"Tokenizer error: {e}")
    raise
model = AutoModelForCausalLM.from_pretrained("ahmed-7124/NeuraMedAW")
def consult_doctor(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=100)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
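# Example call (illustrative; the prompt below is an assumption, not from the app):
# consult_doctor("What precautions should a newly diagnosed diabetic take?")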
# Functions for the app
def register_patient(name, age, gender, password):
    patient_id = len(patients_db) + 1
    patients_db.append({
        "ID": patient_id,
        "Name": name,
        "Age": age,
        "Gender": gender,
        "Password": password,
        "Diagnosis": "",
        "Medications": "",
        "Precautions": "",
        "Doctor": "",
    })
    return f"✅ Patient {name} registered successfully. Patient ID: {patient_id}"
def analyze_or_extract_report(patient_id, pdf=None, report_text=None):
    if pdf:
        # Extract text from the PDF (extract_text() can return None for image-only pages)
        with pdfplumber.open(pdf.name) as pdf_file:
            report_text = "".join([page.extract_text() or "" for page in pdf_file.pages])
    if not report_text:
        return "❌ Please provide a report text or upload a PDF."
    # Analyze the report
    candidate_labels = list(disease_details.keys())
    result = classifier(report_text, candidate_labels)
    diagnosis = result['labels'][0]
    # Update the patient's record
    medication = disease_details[diagnosis]['medication']
    precaution = disease_details[diagnosis]['precaution']
    doctor = disease_details[diagnosis]['doctor']
    for patient in patients_db:
        if patient['ID'] == patient_id:
            patient.update(Diagnosis=diagnosis, Medications=medication, Precautions=precaution, Doctor=doctor)
    return (f"📋 Diagnosis: {diagnosis}\n💊 Medication: {medication}\n"
            f"⚠️ Precaution: {precaution}\n👩‍⚕️ Recommended Doctor: {doctor}")
# def extract_pdf_report(pdf):
# text = ""
# with pdfplumber.open(pdf.name) as pdf_file:
# for page in pdf_file.pages:
# text += page.extract_text()
# return text
#
# def predict_eye_disease(input_image):
# input_image = tf.image.resize(input_image, [224, 224]) / 255.0
# input_image = tf.expand_dims(input_image, 0)
# predictions = eye_model(input_image)
# labels = ['Cataract', 'Conjunctivitis', 'Glaucoma', 'Normal']
# confidence_scores = {labels[i]: round(predictions[i] * 100, 2) for i in range(len(labels))}
# if confidence_scores['Normal'] > 50:
# return f"Congrats! No disease detected. Confidence: {confidence_scores['Normal']}%"
# return "\n".join([f"{label}: {confidence}%" for label, confidence in confidence_scores.items()])
def doctor_space(patient_id):
    for patient in patients_db:
        if patient["ID"] == patient_id:
            return f"⚠️ Precautions: {patient['Precautions']}\n👩‍⚕️ Recommended Doctor: {patient['Doctor']}"
    return "❌ Patient not found. Please check the ID."
def pharmacist_space(patient_id):
    for patient in patients_db:
        if patient["ID"] == patient_id:
            return f"💊 Medications: {patient['Medications']}"
    return "❌ Patient not found. Please check the ID."
def patient_dashboard(patient_id, password):
    for patient in patients_db:
        if patient["ID"] == patient_id and patient["Password"] == password:
            return (f"🩺 Name: {patient['Name']}\n"
                    f"📋 Diagnosis: {patient['Diagnosis']}\n"
                    f"💊 Medications: {patient['Medications']}\n"
                    f"⚠️ Precautions: {patient['Precautions']}\n"
                    f"👩‍⚕️ Recommended Doctor: {patient['Doctor']}")
    return "❌ Access Denied: Invalid ID or Password."
def doctor_dashboard(password):
    if password != doctor_password:
        return "❌ Access Denied: Incorrect Password"
    if not patients_db:
        return "No patient records available."
    details = []
    for patient in patients_db:
        details.append(f"🩺 Name: {patient['Name']}\n"
                       f"📋 Diagnosis: {patient['Diagnosis']}\n"
                       f"💊 Medications: {patient['Medications']}\n"
                       f"⚠️ Precautions: {patient['Precautions']}\n"
                       f"👩‍⚕️ Recommended Doctor: {patient['Doctor']}")
    return "\n\n".join(details)
# Gradio Interfaces
registration_interface = gr.Interface(
    fn=register_patient,
    inputs=[
        gr.Textbox(label="Patient Name"),
        gr.Number(label="Age"),
        gr.Radio(label="Gender", choices=["Male", "Female", "Other"]),
        gr.Textbox(label="Set Password", type="password"),
    ],
    outputs="text",
)
#pdf_extraction_interface = gr.Interface(
# fn=extract_pdf_report,
# inputs=gr.File(label="Upload PDF Report"),
# outputs="text",
#)
# report_analysis_interface = gr.Interface(
# fn=analyze_report,
# inputs=[
# gr.Number(label="Patient ID"),
# gr.Textbox(label="Report Text"),
# ],
# outputs="text",
# )
# Unified Gradio Interface
analyze_report_interface = gr.Interface(
    fn=analyze_or_extract_report,
    inputs=[
        gr.Number(label="Patient ID"),
        gr.File(label="Upload PDF Report"),  # Removed optional=True
        gr.Textbox(label="Report Text (Optional)"),
    ],
    outputs="text",
)
# eye_disease_interface = gr.Interface(
# fn=predict_eye_disease,
# inputs=gr.Image(label="Upload an Eye Image", type="numpy"),
# outputs="text",
# )
doctor_space_interface = gr.Interface(
    fn=doctor_space,
    inputs=gr.Number(label="Patient ID"),
    outputs="text",
)
pharmacist_space_interface = gr.Interface(
    fn=pharmacist_space,
    inputs=gr.Number(label="Patient ID"),
    outputs="text",
)
patient_dashboard_interface = gr.Interface(
    fn=patient_dashboard,
    inputs=[
        gr.Number(label="Patient ID"),
        gr.Textbox(label="Password", type="password"),
    ],
    outputs="text",
)
doctor_dashboard_interface = gr.Interface(
    fn=doctor_dashboard,
    inputs=gr.Textbox(label="Doctor Password", type="password"),
    outputs="text",
)
consult_doctor_interface = gr.Interface(
    fn=consult_doctor,
    inputs=gr.Textbox(label="Enter Your Query for the Doctor"),
    outputs="text",
)
# Gradio App Layout
with gr.Blocks() as app:
    gr.Markdown("# Medico GPT")
    with gr.Tab("Patient Registration"):
        registration_interface.render()
    # with gr.Tab("Analyze Medical Report"):
    #     report_analysis_interface.render()
    with gr.Tab("Analyze Medical Report"):
        analyze_report_interface.render()
    # The standalone PDF-extraction interface is commented out above (extraction is
    # handled by the unified analyzer), so its tab is disabled here as well to
    # avoid a NameError at startup.
    # with gr.Tab("Extract PDF Report"):
    #     pdf_extraction_interface.render()
    # with gr.Tab("Ophthalmologist Space"):
    #     eye_disease_interface.render()
    with gr.Tab("Doctor Space"):
        doctor_space_interface.render()
    with gr.Tab("Pharmacist Space"):
        pharmacist_space_interface.render()
    with gr.Tab("Patient Dashboard"):
        patient_dashboard_interface.render()
    with gr.Tab("Doctor Dashboard"):
        doctor_dashboard_interface.render()
    with gr.Tab("Doctor Consult"):
        consult_doctor_interface.render()
# Launch the app
app.launch(share=True)