Upload 3 files
Browse files- DockerFile +20 -0
- app/streamlit_app.py +55 -0
- requirements.txt +5 -0
DockerFile
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Use an official Python runtime as a parent image.
# NOTE(review): the original base was python:3.8-slim, but the pinned
# dependencies in requirements.txt (pandas==2.2.1, streamlit==1.32.2)
# require Python >= 3.9, so `pip install` could never succeed on 3.8
# (which is also past end-of-life). 3.11-slim satisfies all pins.
FROM python:3.11-slim

# Set the working directory in the container.
WORKDIR /usr/src/app

# Copy only the dependency manifest first so the pip-install layer is
# cached until requirements.txt actually changes.
COPY requirements.txt .

# Install pinned dependencies; --no-cache-dir keeps the image small.
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the app's source code from the host into the image.
COPY . .

# Streamlit serves on port 8501 by default; document it for `docker run -p`.
EXPOSE 8501

# Run the Streamlit app, binding to all interfaces so the container
# port is reachable from outside the container.
CMD ["streamlit", "run", "app/streamlit_app.py", "--server.port=8501", "--server.address=0.0.0.0"]
|
app/streamlit_app.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import pandas as pd
|
3 |
+
import requests
|
4 |
+
|
5 |
+
# --- Streamlit page setup ---
st.title("Penguin Species Predictor")
|
7 |
+
|
8 |
+
# Fetch and display model details
def fetch_model_details(model_id):
    """Fetch metadata for *model_id* from the FastAPI backend and render it.

    On a 200 response, writes a "Selected Model Details" section listing
    each key/value pair of the first entry in the API's ``model`` list.
    On a non-200 response or any network failure, shows an error banner
    instead of crashing the Streamlit script run.
    """
    try:
        # Bound the wait so a dead/slow backend can't hang the page forever;
        # the original call had no timeout and would block indefinitely.
        response = requests.get(
            f"https://render-fastapi-ku5n.onrender.com/model/?model_id={model_id}",
            timeout=10,
        )
    except requests.RequestException:
        st.error("Failed to fetch model details.")
        return

    if response.status_code == 200:
        # API shape observed here: {"model": [ {...details...} ]} — the
        # first element holds the selected model's details.
        model_details = response.json()["model"][0]
        st.write("### Selected Model Details")
        for key, value in model_details.items():
            st.write(f"{key}: {value}")
    else:
        st.error("Failed to fetch model details.")
|
18 |
+
|
19 |
+
# --- Model selection ---
# Label shown in the dropdown -> backend model id.
_MODEL_CHOICES = {
    "Model 1": 101,
    "Model 2": 102,
}
selected_label = st.selectbox("Select a Model", options=list(_MODEL_CHOICES.keys()))
model_id = _MODEL_CHOICES[selected_label]

# Show the backend's metadata for whichever model was picked.
fetch_model_details(model_id)

# --- Feature inputs ---
st.write("## Enter Penguin Features")
bill_length_mm = st.number_input("Bill Length (mm)", min_value=0.0, format="%.2f")
bill_depth_mm = st.number_input("Bill Depth (mm)", min_value=0.0, format="%.2f")
flipper_length_mm = st.number_input("Flipper Length (mm)", min_value=0.0, format="%.2f")
body_mass_g = st.number_input("Body Mass (g)", min_value=0.0, format="%.2f")
|
36 |
+
|
37 |
+
# Predict button
if st.button("Predict"):
    # Build the POST body for the backend's /predict endpoint.
    payload = {
        # UI ids are 101/102; presumably the API expects 1-based model
        # ids, hence the -100 adjustment — TODO confirm against backend.
        "model_id": model_id - 100,
        "bill_length_mm": bill_length_mm,
        "bill_depth_mm": bill_depth_mm,
        "flipper_length_mm": flipper_length_mm,
        "body_mass_g": body_mass_g,
    }
    try:
        # Bound the wait so a slow/offline backend can't hang the page;
        # the original call had no timeout and an unreachable host raised
        # an unhandled exception into the Streamlit error screen.
        response = requests.post(
            "https://render-fastapi-ku5n.onrender.com/predict/",
            json=payload,
            timeout=10,
        )
    except requests.RequestException as exc:
        st.error(f"Failed to make prediction. Error: {exc}")
    else:
        if response.status_code == 200:
            # Processing and displaying the prediction result.
            prediction = response.json()["prediction"]
            st.write(f"## Predicted Penguin Species: {prediction}")
        else:
            # Handling failed prediction attempts.
            st.error(f"Failed to make prediction. Status code: {response.status_code} Response: {response.text}")
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
# Runtime dependencies for the Streamlit penguin-predictor app.
# NOTE(review): `docker` and `seaborn` are never imported by
# app/streamlit_app.py — confirm they are actually needed before
# keeping them (they bloat the image built by the Dockerfile).
docker==7.0.0
seaborn==0.13.2
pandas==2.2.1
streamlit==2.31.0
|