Upload 16 files
- 15class_99.8.h5 +3 -0
- 17class_98.1.h5 +3 -0
- 44class_96.5.h5 +3 -0
- README (1).md +13 -0
- README (2).md +12 -0
- README.md +6 -6
- alzheimer_99.5.h5 +3 -0
- app.py +129 -0
- gitattributes +35 -0
- gitattributes (1) +34 -0
- gitattributes (2) +35 -0
- model3-86%.pkl +3 -0
- multi_weight.pth +3 -0
- requirements (1).txt +7 -0
- requirements (2).txt +5 -0
- requirements.txt +4 -0
15class_99.8.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2766c6c89e510b22653077e844530f15a61adffc2801a1a7b84135be4250664
+size 121419384
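Each of the weight files in this commit (.h5, .pkl, .pth) is stored as a Git LFS pointer rather than the binary itself: the pointer records the LFS spec version, the SHA-256 object id of the real blob, and its size in bytes. A minimal sketch of checking a downloaded blob against the pointer above; the local path is hypothetical:

```python
import hashlib
import os

# Values copied from the 15class_99.8.h5 pointer shown above
EXPECTED_OID = "c2766c6c89e510b22653077e844530f15a61adffc2801a1a7b84135be4250664"
EXPECTED_SIZE = 121419384

def verify_lfs_blob(path: str) -> bool:
    """Compare a locally downloaded file against the LFS pointer's size and oid."""
    if os.path.getsize(path) != EXPECTED_SIZE:
        return False
    sha = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            sha.update(chunk)
    return sha.hexdigest() == EXPECTED_OID

# Hypothetical path to the resolved (non-pointer) file, e.g. after `git lfs pull`
print(verify_lfs_blob("15class_99.8.h5"))
```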
17class_98.1.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b0ee74f9ed304d6c528717e7fd2b1385a6df761b2d3a79d7c1a665b2a68df48
+size 121425528
44class_96.5.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:050a428ad49f450b0baa6a7bf6689e61d6877af23289dfd7dfb816ca8e4b7b54
+size 117140352
README (1).md
ADDED
@@ -0,0 +1,13 @@
+---
+title: EyeDiseaseClassifier
+emoji: 👁
+colorFrom: purple
+colorTo: gray
+sdk: gradio
+sdk_version: 3.16.2
+app_file: app.py
+pinned: true
+license: apache-2.0
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
README (2).md
ADDED
@@ -0,0 +1,12 @@
+---
+title: Skin Disease Classifier
+emoji: 👁
+colorFrom: gray
+colorTo: purple
+sdk: streamlit
+sdk_version: 1.41.1
+app_file: app.py
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
README.md
CHANGED
@@ -1,10 +1,10 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
-sdk:
-sdk_version:
+title: Brain Tumor And Alzheimer Detection
+emoji: 😻
+colorFrom: yellow
+colorTo: pink
+sdk: streamlit
+sdk_version: 1.34.0
 app_file: app.py
 pinned: false
 ---
alzheimer_99.5.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:278c4b20e67ce5bc2235c389b6bc96b6ab2708e087ff91114743fbbcc88b29e8
+size 117136616
app.py
ADDED
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+"""app.ipynb
+
+Automatically generated by Colab.
+
+Original file is located at
+    https://colab.research.google.com/drive/1sjyLFLqBccpUzaUi4eyyP3NYE3gDtHfs
+"""
+
+!pip install gradio
+import gradio as gr
+from fastai.vision.all import load_learner
+from PIL import Image
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision import transforms
+
+# Model paths for all disease types
+model_path_skin_disease = 'multi_weight.pth'  # Skin Disease Model
+model_path_brain_tumor = 'brain_tumor_model.pkl'
+model_path_alzheimers = 'alzheimers_model.pkl'
+model_path_eye_disease = 'eye_disease_model.pkl'
+
+# Load models
+skin_disease_model = torch.load(model_path_skin_disease)  # For Skin Disease model
+brain_tumor_model = load_learner(model_path_brain_tumor)
+alzheimers_model = load_learner(model_path_alzheimers)
+eye_disease_model = load_learner(model_path_eye_disease)
+
+# Diagnosis Map for Skin Disease Model
+DIAGNOSIS_MAP = {
+    0: 'Melanoma',
+    1: 'Melanocytic nevus',
+    2: 'Basal cell carcinoma',
+    3: 'Actinic keratosis',
+    4: 'Benign keratosis',
+    5: 'Dermatofibroma',
+    6: 'Vascular lesion',
+    7: 'Squamous cell carcinoma',
+    8: 'Unknown'
+}
+
+# Image Preprocessing for Skin Disease Model
+transform = transforms.Compose([
+    transforms.Resize(256),
+    transforms.CenterCrop(224),
+    transforms.ToTensor(),
+    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+])
+
+# Skin Disease Prediction Function
+def predict_skin_disease(img: Image.Image):
+    img_tensor = transform(img).unsqueeze(0)
+    with torch.no_grad():
+        outputs = skin_disease_model(img_tensor)
+        probs = F.softmax(outputs, dim=1)
+        top_probs, top_idxs = torch.topk(probs, 3, dim=1)  # top 3 predictions
+
+    predictions = []
+    for prob, idx in zip(top_probs[0], top_idxs[0]):
+        label = DIAGNOSIS_MAP.get(idx.item(), "Unknown")
+        confidence = prob.item() * 100
+        predictions.append(f"{label}: {confidence:.2f}%")
+
+    return "\n".join(predictions)
+
+# Brain Tumor Prediction Function
+def predict_brain_tumor(image):
+    pred, _, prob = brain_tumor_model.predict(image)
+    return f"Prediction: {pred}, Probability: {prob.max():.2f}"
+
+# Alzheimer's Prediction Function
+def predict_alzheimers(image):
+    pred, _, prob = alzheimers_model.predict(image)
+    return f"Prediction: {pred}, Probability: {prob.max():.2f}"
+
+# Eye Disease Prediction Function
+def predict_eye_disease(image):
+    pred, _, prob = eye_disease_model.predict(image)
+    return f"Prediction: {pred}, Probability: {prob.max():.2f}"
+
+# Gradio Interface Function
+def main():
+    # Image input component
+    image_input = gr.inputs.Image(shape=(224, 224), image_mode='RGB')
+
+    # Dropdown to choose disease type
+    model_choice = gr.inputs.Dropdown(choices=[
+        "Skin Disease", "Brain Tumor", "Alzheimer's Detection", "Eye Disease"],
+        label="Select Disease Type")
+
+    # Gradio tabs for each category
+    with gr.Blocks() as demo:
+        gr.Markdown("# Medical Image Classifier Dashboard")
+
+        with gr.Tab("Skin Disease Prediction"):
+            with gr.Column():
+                gr.Markdown("Upload a skin lesion image for diagnosis prediction.")
+                image_input_skin = gr.Image(type="pil", label="Upload Skin Lesion Image")
+                output_skin = gr.Textbox(label="Prediction Results")
+                image_input_skin.change(predict_skin_disease, inputs=image_input_skin, outputs=output_skin)
+
+        with gr.Tab("Brain Tumor Prediction"):
+            with gr.Column():
+                gr.Markdown("Upload a brain scan image for tumor classification.")
+                image_input_brain = gr.Image(type="pil", label="Upload Brain Scan Image")
+                output_brain = gr.Textbox(label="Prediction Results")
+                image_input_brain.change(predict_brain_tumor, inputs=image_input_brain, outputs=output_brain)
+
+        with gr.Tab("Alzheimer's Prediction"):
+            with gr.Column():
+                gr.Markdown("Upload a brain image for Alzheimer's detection.")
+                image_input_alz = gr.Image(type="pil", label="Upload Alzheimer's Image")
+                output_alz = gr.Textbox(label="Prediction Results")
+                image_input_alz.change(predict_alzheimers, inputs=image_input_alz, outputs=output_alz)
+
+        with gr.Tab("Eye Disease Prediction"):
+            with gr.Column():
+                gr.Markdown("Upload an image for eye disease classification.")
+                image_input_eye = gr.Image(type="pil", label="Upload Eye Disease Image")
+                output_eye = gr.Textbox(label="Prediction Results")
+                image_input_eye.change(predict_eye_disease, inputs=image_input_eye, outputs=output_eye)
+
+        demo.launch()
+
+# Run the Gradio app
+if __name__ == "__main__":
+    main()
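As committed, app.py carries a few Colab leftovers: the `!pip install gradio` shell line is a syntax error in a plain .py module, the `gr.inputs.Image` / `gr.inputs.Dropdown` components created at the top of `main()` use a deprecated API and are never attached to the Blocks layout, and the three `load_learner` paths (brain_tumor_model.pkl, alzheimers_model.pkl, eye_disease_model.pkl) do not appear among the files in this upload (which contains model3-86%.pkl, multi_weight.pth, and the .h5 weights). A minimal sketch of a cleaned-up single-tab variant, assuming multi_weight.pth holds a full serialized torch module as the original `torch.load` call implies; the other tabs would follow the same pattern once matching learner files exist:

```python
import gradio as gr
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms

# Assumption: multi_weight.pth stores a complete serialized nn.Module
# (this mirrors the plain torch.load call in the committed app.py).
skin_disease_model = torch.load("multi_weight.pth", map_location="cpu")
skin_disease_model.eval()

DIAGNOSIS_MAP = {
    0: "Melanoma", 1: "Melanocytic nevus", 2: "Basal cell carcinoma",
    3: "Actinic keratosis", 4: "Benign keratosis", 5: "Dermatofibroma",
    6: "Vascular lesion", 7: "Squamous cell carcinoma", 8: "Unknown",
}

transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

def predict_skin_disease(img: Image.Image) -> str:
    """Return the top-3 diagnoses with confidences for one lesion image."""
    img_tensor = transform(img.convert("RGB")).unsqueeze(0)
    with torch.no_grad():
        probs = F.softmax(skin_disease_model(img_tensor), dim=1)
    top_probs, top_idxs = torch.topk(probs, 3, dim=1)
    return "\n".join(
        f"{DIAGNOSIS_MAP.get(i.item(), 'Unknown')}: {p.item() * 100:.2f}%"
        for p, i in zip(top_probs[0], top_idxs[0])
    )

with gr.Blocks() as demo:
    gr.Markdown("# Medical Image Classifier Dashboard")
    with gr.Tab("Skin Disease Prediction"):
        image_input = gr.Image(type="pil", label="Upload Skin Lesion Image")
        output = gr.Textbox(label="Prediction Results")
        image_input.change(predict_skin_disease, inputs=image_input, outputs=output)

if __name__ == "__main__":
    demo.launch()
```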
gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
gitattributes (1)
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
gitattributes (2)
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
model3-86%.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f5301aa2069ff72bc835793ed4bc00de99ed6314f41bb2cd24ce8c6039a81e4
+size 126268015
multi_weight.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79a52c72fc2442a3e5a178c2b47b307b701206f75226b1f0aa6241478a745a66
+size 19614586
requirements (1).txt
ADDED
@@ -0,0 +1,7 @@
+fastai==2.7.10
+fastbook==0.0.29
+Flask==2.2.2
+Flask-Cors==3.0.10
+gradio==3.0
+torch==1.13.1
+torchvision==0.14.1
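requirements (1).txt pins the Gradio/fastai environment used by app.py. For the .pkl weights, loading follows fastai's `load_learner`/`predict` pattern; a minimal sketch, assuming model3-86%.pkl (the only .pkl in this upload) was exported with `learn.export()` — note that app.py itself points at differently named .pkl files:

```python
from fastai.vision.all import PILImage, load_learner

# Assumption: model3-86%.pkl is an exported fastai Learner; the pairing of this
# particular file with the snippet is illustrative, not stated in the commit.
learner = load_learner("model3-86%.pkl")

img = PILImage.create("example_scan.jpg")  # hypothetical input image
pred, pred_idx, probs = learner.predict(img)
print(f"Prediction: {pred}, Probability: {probs.max():.2f}")
```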
requirements (2).txt
ADDED
@@ -0,0 +1,5 @@
+streamlit==1.25.0
+torch==2.0.1
+torchvision==0.15.2
+efficientnet-pytorch==0.7.1
+Pillow==9.5.0
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+protobuf==3.20.3
+tensorflow==2.12.0
+streamlit==1.22.0
+streamlit-option-menu==0.3.12
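requirements.txt pins TensorFlow 2.12 alongside Streamlit, which matches the Keras-style .h5 weight files in this upload. A minimal sketch of loading one of them for inference, assuming the file is a full Keras model saved with `model.save()` and that simple [0, 1] rescaling is the expected preprocessing (neither is recorded in this commit):

```python
import numpy as np
import tensorflow as tf
from PIL import Image

# Assumption: the .h5 file is a complete Keras model with a single image input.
model = tf.keras.models.load_model("alzheimer_99.5.h5")
_, height, width, _ = model.input_shape  # e.g. (None, H, W, 3)

def predict_class(path: str) -> int:
    """Return the index of the most likely class for one image."""
    img = Image.open(path).convert("RGB").resize((width, height))
    batch = np.expand_dims(np.asarray(img, dtype=np.float32) / 255.0, axis=0)
    probs = model.predict(batch, verbose=0)[0]
    return int(np.argmax(probs))

print(predict_class("example_scan.jpg"))  # hypothetical input image
```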