Spaces:
Sleeping
Sleeping
File size: 1,606 Bytes
53267d3 5112b0f cdc3dbf a2b18be 53267d3 a2b18be 53267d3 a2b18be 5112b0f cdc3dbf a2b18be 53267d3 a2b18be 5112b0f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
import gradio as gr
from fastapi import FastAPI

from risk_model import predict_risk, retrain_model, get_history_df

# Create FastAPI instance — the ASGI app Hugging Face Spaces will serve.
fastapi_app = FastAPI()

# Gradio UI blocks: temperature/duration inputs, predict + retrain buttons,
# and a history table that is refreshed after every prediction.
gradio_app = gr.Blocks()
with gradio_app:
    gr.Markdown("## 🔥 Heating Mantle Safety Risk Predictor")
    with gr.Row():
        temp = gr.Number(label="Max Temperature (°C)", value=100)
        duration = gr.Number(label="Duration (min)", value=30)
    with gr.Row():
        predict_btn = gr.Button("🔍 Predict")
        retrain_btn = gr.Button("🔁 Retrain Model")
    result = gr.Textbox(label="Risk Prediction")
    score = gr.Textbox(label="Confidence (%)")
    retrain_output = gr.Textbox(label="Retrain Status")
    history_table = gr.Dataframe(
        headers=["Temperature", "Duration", "Risk", "Confidence"],
        label="📈 Prediction History",
    )

    def classify(temp, duration):
        """Validate inputs, run the risk model, and format results for the UI.

        Returns a (risk_label, confidence_text, history_df) tuple matching
        the [result, score, history_table] outputs wired below.
        """
        # Reject non-positive values before touching the model.
        if temp <= 0 or duration <= 0:
            return "Invalid Input", "Use values > 0", get_history_df()
        risk, confidence = predict_risk(temp, duration)
        # Traffic-light marker: Low -> green, Moderate -> orange, else red.
        emoji = "🟢" if risk == "Low" else "🟠" if risk == "Moderate" else "🔴"
        return f"{emoji} {risk}", f"{confidence}%", get_history_df()

    predict_btn.click(
        classify, inputs=[temp, duration], outputs=[result, score, history_table]
    )
    retrain_btn.click(retrain_model, outputs=[retrain_output])


# Optional test route — simple health check on the FastAPI root.
@fastapi_app.get("/")
def root():
    return {"message": "Heating Mantle FastAPI is working"}


# ✅ Hugging Face requires this: a module-level `app` ASGI object, here the
# FastAPI app with the Gradio UI mounted under /predict-ui.
app = gr.mount_gradio_app(fastapi_app, gradio_app, path="/predict-ui")
|