import gradio as gr
import uvicorn
from fastapi import FastAPI
from transformers import pipeline
# Load the model (this runs only once!)
generator = pipeline("text2text-generation", model="LahiruProjects/recipe-generator-flan-t5")
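# Quick sanity check (illustrative only; output quality depends on the fine-tuned model,
# and max_length=256 is an assumed value to leave room for multi-step output):
#   print(generator("Create a simple pancake recipe", max_length=256)[0]["generated_text"])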
# Function to generate recipe steps from the user's constraints
def generate_recipe(name, ingredients, calories, time):
    # Normalise the comma-separated ingredient list (strip stray whitespace around items)
    ingredient_list = ", ".join(item.strip() for item in ingredients.split(","))
    prompt = f"""Create a step-by-step recipe for "{name}" using these ingredients: {ingredient_list}.
Keep it under {calories} calories and make sure it's ready in less than {time} minutes."""
    result = generator(prompt)
    return result[0]["generated_text"]
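# Example call (illustrative; the exact wording of the generated steps will vary by model):
#   generate_recipe("Veggie Pasta", "pasta, tomatoes, garlic, olive oil", 400, 30)
#   -> "Step 1: Boil the pasta. Step 2: ..."  (shape of the output, not a guaranteed result)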
# Gradio interface
iface = gr.Interface(
    fn=generate_recipe,
    inputs=[
        gr.Textbox(label="Recipe Name"),
        gr.Textbox(label="Ingredients (comma-separated)"),
        gr.Number(label="Max Calories", value=400),
        gr.Number(label="Max Cooking Time (minutes)", value=30),
    ],
    outputs="text",
    title="🍳 Recipe Generator (FLAN-T5)",
    description="Generate a step-by-step recipe based on ingredients, a calorie limit, and a time limit."
)
# FastAPI integration
app = FastAPI()

# JSON API route that mirrors the Gradio interface.
# Expected request body: a JSON array whose first element is the list of inputs,
# i.e. [[name, ingredients, calories, time]].
@app.post("/api/predict")
async def predict(data: list):
    name, ingredients, calories, time = data[0]
    result = generate_recipe(name, ingredients, calories, time)
    return {"data": [result]}
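# Example request (illustrative; host and port assume the local Uvicorn setup below):
#   curl -X POST http://localhost:7860/api/predict \
#        -H "Content-Type: application/json" \
#        -d '[["Veggie Pasta", "pasta, tomatoes, garlic, olive oil", 400, 30]]'
# Expected response shape: {"data": ["...generated recipe steps..."]}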
# Mount the Gradio UI on the FastAPI app so a single Uvicorn server serves both
# the interface (at "/") and the JSON endpoint (at "/api/predict").
app = gr.mount_gradio_app(app, iface, path="/")

if __name__ == "__main__":
    # Note: calling iface.launch() here would block, so the FastAPI routes would
    # never start; instead everything is served through one Uvicorn process.
    uvicorn.run(app, host="0.0.0.0", port=7860)