Initial Commit
- .gitattributes +1 -0
- Dockerfile +14 -0
- app.py +36 -0
- backend.py +43 -0
- finetune_v1_weights.keras +3 -0
- model.py +43 -0
- requirement.txt +6 -0
- utils.py +43 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+finetune_v1_weights.keras filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,14 @@
+# Start from the official Python base image
+FROM python:3.9
+
+# Copy all files from the local directory into the image
+COPY . .
+
+# Set the root directory as the working directory
+WORKDIR /
+
+# Install the requirements (note: the file in this repo is requirement.txt)
+RUN pip install --no-cache-dir -r ./requirement.txt
+
+# Launch the server
+CMD ["uvicorn", "backend:app", "--host", "0.0.0.0", "--port", "7860"]
app.py
ADDED
@@ -0,0 +1,36 @@
+import streamlit as st
+import requests
+import json
+from io import BytesIO
+
+st.title('COVID-19 Prediction')
+st.subheader('Provide a chest X-ray of the lungs and check whether it indicates COVID-19, another disease, or a normal result.')
+
+# Take image input
+img_file_buffer = st.file_uploader("Upload your chest X-ray")
+
+if img_file_buffer is not None:
+    # Retrieve the file contents as bytes
+    img_bytes = img_file_buffer.getvalue()
+
+    # Create a multipart payload
+    files = {"x_ray_image": BytesIO(img_bytes)}
+
+    # NOTE: the Dockerfile launches the backend on port 7860; adjust if needed
+    URL = "http://127.0.0.1:8000/get_prediction"
+
+    with st.spinner("Waiting for model response..."):
+        st.write("Came in spinner")  # debug output
+        resp = requests.post(
+            URL,
+            files=files,
+        )
+
+    st.write("Executed response")  # debug output
+
+    if resp.content:
+        resp_data = json.loads(resp.content.decode('utf-8'))
+        st.write(resp_data)
+    else:
+        st.write(resp.__dict__)
+
backend.py
ADDED
@@ -0,0 +1,43 @@
+import os
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
+import tensorflow as tf
+from fastapi import FastAPI, UploadFile, File
+from utils import load_image, preprocess_image, predict
+from model import get_model
+import json
+
+app = FastAPI()
+
+MODEL_WEIGHT_PATH = './finetune_v1_weights.keras'
+model = get_model(MODEL_WEIGHT_PATH)
+
+@app.get("/")
+def tester():
+    return {
+        "status": "Hello World"
+    }
+
+@app.post("/get_prediction")
+async def get_prediction(x_ray_image: UploadFile = File(...)):
+
+    # Load the image, i.e. convert the raw bytes to a PIL Image (uint8)
+    image = load_image(await x_ray_image.read())
+
+    # Preprocess the image to make it compatible with the model
+    image = preprocess_image(image)
+
+    # Retrieve the model prediction
+    prediction = predict(image, model)
+
+    print("Model Predicted: \n", prediction)
+
+    return {
+        'prediction': json.dumps(prediction)
+    }
+
+@app.post("/test")
+def test():
+    return {
+        "status": 10
+    }
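For reference, a minimal client sketch for the /get_prediction endpoint. It assumes the backend is reachable on the Dockerfile's port (7860), and test_xray.png is a placeholder file name; note the double decode, since the endpoint wraps the prediction dict in json.dumps.

import requests
import json

# Hypothetical client sketch -- not part of the commit.
# Assumes the backend is running locally on the Dockerfile's port.
URL = "http://127.0.0.1:7860/get_prediction"

with open("test_xray.png", "rb") as f:  # placeholder file name
    resp = requests.post(URL, files={"x_ray_image": f})

# The endpoint returns {'prediction': json.dumps(...)}, so decode twice
payload = resp.json()
prediction = json.loads(payload["prediction"])
print(prediction["label"], prediction["pred_probs"])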
finetune_v1_weights.keras
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dd820011318ac7e47051f31034a5da91aca452f28deb4aba1916cc7a5704997
+size 135810768
model.py
ADDED
@@ -0,0 +1,43 @@
+import keras
+from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
+
+
+def create_finetune_model(input_shape, regs, n_classes, drop_rate=0.1):
+    model = keras.models.Sequential()
+
+    model.add(Conv2D(32, kernel_size=(3, 3), input_shape=(*input_shape, 1), kernel_regularizer=regs, padding='same', activation='relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+    model.add(Dropout(drop_rate))
+
+    model.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu', kernel_regularizer=regs))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+    model.add(Dropout(drop_rate))
+
+    model.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu', kernel_regularizer=regs))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+    model.add(Dropout(drop_rate))
+
+    model.add(Conv2D(256, kernel_size=(3, 3), padding='same', activation='relu', kernel_regularizer=regs))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+    model.add(Dropout(drop_rate))
+
+    model.add(Flatten())
+
+    model.add(Dense(512, activation='relu', kernel_regularizer=regs))
+    model.add(Dropout(drop_rate))
+
+    model.add(Dense(n_classes, activation='softmax', kernel_regularizer=regs))
+
+    return model
+
+def get_model(weight_file):
+    regularizer = keras.regularizers.L1(0.0001)
+    model = create_finetune_model((256, 256), regularizer, 3)
+    model.compile(
+        loss='categorical_crossentropy',
+        optimizer='adam'
+    )
+
+    model.load_weights(weight_file)
+
+    return model
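As a sanity check on this architecture: the four 2x2 poolings reduce a 256x256 input to 16x16, so Flatten yields 16 * 16 * 256 = 65536 features, and the Dense(512) layer alone holds about 33.6M parameters. At float32 the total (~33.9M parameters) roughly matches the ~135 MB weight file above. A quick sketch to confirm, assuming the keras package from requirement.txt:

import keras
from model import create_finetune_model

# Build the same architecture and print its layer shapes and parameter count
model = create_finetune_model((256, 256), keras.regularizers.L1(0.0001), 3)
model.summary()  # total params should be ~33.9M, i.e. ~135 MB in float32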
requirement.txt
ADDED
@@ -0,0 +1,6 @@
+streamlit
+fastapi
+uvicorn[standard]
+tensorflow==2.15.0
+pillow
+numpy
utils.py
ADDED
@@ -0,0 +1,42 @@
+import numpy as np
+import tensorflow as tf
+from PIL import Image
+from io import BytesIO
+
+MODEL_IMAGE_WIDTH = 256
+MODEL_IMAGE_HEIGHT = 256
+
+def load_image(img_data):
+    image = Image.open(BytesIO(img_data))
+
+    return image
+
+def preprocess_image(image):
+    # Resize the image to the model's input size
+    image = image.resize((MODEL_IMAGE_WIDTH, MODEL_IMAGE_HEIGHT))
+
+    # Convert it to grayscale if it is not already
+    image = image.convert('L')
+
+    return image
+
+def predict(image, model):
+    # Convert the image to a numpy array
+    image = np.array(image)
+
+    # Add an extra channel dimension at the end
+    image = np.expand_dims(image, axis=-1)
+
+    # Also add one dimension at the front to make it a single-image batch
+    batch_img = np.expand_dims(image, axis=0)
+
+    print("Batch Image shape: ", batch_img.shape)
+
+    # Get the prediction from the model
+    pred_probs = model.predict(batch_img)[0]
+    label = np.argmax(pred_probs, axis=-1)
+
+    return {
+        'pred_probs': pred_probs.tolist(),
+        'label': int(label)
+    }
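A quick shape walk-through of preprocess_image and predict, using a synthetic image in place of an uploaded X-ray (a sketch; the 512x512 RGB input is an arbitrary example):

import numpy as np
from PIL import Image

# Synthetic stand-in for an uploaded X-ray
img = Image.fromarray(np.random.randint(0, 255, (512, 512, 3), dtype=np.uint8))

img = img.resize((256, 256)).convert('L')  # as in preprocess_image
arr = np.array(img)                        # (256, 256)
arr = np.expand_dims(arr, axis=-1)         # (256, 256, 1) -- channel dim
batch = np.expand_dims(arr, axis=0)        # (1, 256, 256, 1) -- batch dim
print(batch.shape)                         # matches the model's (*input_shape, 1)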