Hammad712 committed on
Commit a488b5b · verified · 1 Parent(s): 6e1a70b

Upload app.py

Files changed (1)
  1. app.py +198 -0
app.py ADDED
@@ -0,0 +1,198 @@
+ from fastapi import FastAPI, UploadFile, HTTPException
+ from fastapi.responses import JSONResponse
+ from pydantic import BaseModel, Field
+ import base64
+ from groq import Groq
+ import shutil
+ import os
+
+ app = FastAPI()
+
+ # Initialize the Groq client; the API key is read from the GROQ_API_KEY
+ # environment variable instead of being hard-coded in the repository.
+ client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
+
+ # Pydantic models for input validation
+ class RecipeRequest(BaseModel):
+     meal_name: str = Field(..., description="Name of the meal to generate a recipe for")
+
+ class IngredientAlternativesRequest(BaseModel):
+     ingredients: list[str] = Field(..., description="List of ingredients to suggest alternatives for")
+     dietary_restrictions: str = Field(..., description="Dietary restrictions, if any")
+     allergies: list[str] = Field(..., description="List of allergies to consider")
+
+
+ # Helper function to encode an image file to base64
+ def encode_image(image_path: str) -> str:
+     try:
+         with open(image_path, "rb") as image_file:
+             return base64.b64encode(image_file.read()).decode("utf-8")
+     except FileNotFoundError:
+         raise HTTPException(status_code=404, detail="Image not found.")
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Error encoding image: {str(e)}")
+
+
+ # POST /infer (image inference route)
+ @app.post("/infer")
+ async def infer_image(file: UploadFile):
+     """
+     Perform inference on an uploaded image and return the identified dish and ingredients.
+     """
+     temp_image_path = f"temp_{file.filename}"
+     try:
+         # Save the uploaded image to a temporary file
+         with open(temp_image_path, "wb") as buffer:
+             shutil.copyfileobj(file.file, buffer)
+
+         # Encode the image to base64
+         base64_image = encode_image(temp_image_path)
+
+         # Send the question and image data to the Groq vision model
+         chat_completion = client.chat.completions.create(
+             messages=[
+                 {
+                     "role": "user",
+                     "content": [
+                         {
+                             "type": "text",
+                             "text": "What are the ingredients used in this dish? Do not add any explanation; just write the name of the dish and then the ingredients used.",
+                         },
+                         {
+                             "type": "image_url",
+                             "image_url": {
+                                 "url": f"data:image/jpeg;base64,{base64_image}",
+                             },
+                         },
+                     ],
+                 }
+             ],
+             model="llama-3.2-11b-vision-preview",
+         )
+
+         # Return the model's answer
+         result = chat_completion.choices[0].message.content
+         return JSONResponse(status_code=200, content={"result": result})
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Error during inference: {str(e)}")
+     finally:
+         # Clean up the temporary image file even if inference fails
+         if os.path.exists(temp_image_path):
+             os.remove(temp_image_path)
+
+
+ # POST /recipes (recipe generation route)
+ @app.post("/recipes")
+ async def generate_recipe(request: RecipeRequest):
+     """
+     Generate a recipe based on the meal name.
+     """
+     try:
+         recipe = Generate_recipe(request.meal_name)
+         return JSONResponse(status_code=200, content={"recipe": recipe})
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Error generating recipe: {str(e)}")
+
+
+ # Helper function to generate a recipe for a given meal name
+ def Generate_recipe(meal_name: str) -> str:
+     prompt = f"""
+     You are a recipe-creating agent. Your task is to create a recipe based on the meal name provided by the user.
+     The recipe should be detailed and include the following information:
+
+     - A list of ingredients required for the meal.
+     - Step-by-step cooking instructions.
+     - Approximate preparation and cooking time.
+     - Serving suggestions or tips for best results.
+
+     Please process the user's meal name and create the appropriate recipe.
+
+     Meal Name: {meal_name}
+     """
+
+     # Create a streaming chat completion request with the llama-3.1-70b-versatile model
+     completion = client.chat.completions.create(
+         model="llama-3.1-70b-versatile",
+         messages=[
+             {"role": "system", "content": prompt},
+             {"role": "user", "content": f"Meal Name: {meal_name}"},
+         ],
+         temperature=1,
+         max_tokens=2048,
+         top_p=1,
+         stream=True,
+         stop=None,
+     )
+
+     # Stream and collect the response from the model
+     recipe_result = ""
+     for chunk in completion:
+         recipe_result += chunk.choices[0].delta.content or ""
+
+     return recipe_result
+
+
+ # POST /ingredients/alternatives (ingredient alternatives route)
+ @app.post("/ingredients/alternatives")
+ async def suggest_ingredient_alternatives(request: IngredientAlternativesRequest):
+     """
+     Suggest alternatives for specific ingredients based on dietary restrictions and allergies.
+     """
+     try:
+         alternatives = Suggest_ingredient_alternatives(
+             request.ingredients,
+             request.dietary_restrictions,
+             request.allergies,
+         )
+         return JSONResponse(status_code=200, content={"alternatives": alternatives})
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Error suggesting ingredient alternatives: {str(e)}")
+
+
+ # Helper function to suggest ingredient alternatives based on user input
+ def Suggest_ingredient_alternatives(ingredients: list[str], dietary_restrictions: str, allergies: list[str]) -> str:
+     alternative_suggestions = ""
+
+     # Ask the model for a substitute for each ingredient in turn
+     for ingredient in ingredients:
+         prompt = f"""
+         You are an ingredient substitution agent. Your task is to suggest alternatives for specific ingredients based on the user's input, particularly for biryani recipes.
+
+         Please take the following into account:
+         - If the user has dietary restrictions, suggest substitutes that align with their needs (e.g., vegan, gluten-free, etc.).
+         - Consider the following allergies and suggest safe alternatives: {', '.join(allergies)}.
+         - The alternative should be commonly available, and you should provide options if multiple substitutes exist.
+         - Explain how the suggested alternative will affect the recipe in terms of taste, texture, or cooking time.
+
+         Ingredient: {ingredient}
+         Dietary Restrictions: {dietary_restrictions}
+         Allergies: {', '.join(allergies)}
+         """
+
+         # Create a streaming chat completion request with the llama-3.1-70b-versatile model
+         completion = client.chat.completions.create(
+             model="llama-3.1-70b-versatile",
+             messages=[
+                 {"role": "system", "content": prompt},
+                 {"role": "user", "content": f"Ingredient: {ingredient}, Dietary Restrictions: {dietary_restrictions}, Allergies: {', '.join(allergies)}"},
+             ],
+             temperature=1,
+             max_tokens=1024,
+             top_p=1,
+             stream=True,
+             stop=None,
+         )
+
+         # Collect the streamed response for this ingredient
+         suggestion = ""
+         for chunk in completion:
+             suggestion += chunk.choices[0].delta.content or ""
+
+         # Append the suggestion to the combined result
+         alternative_suggestions += f"Substitute suggestions for {ingredient}:\n{suggestion}\n{'-' * 50}\n"
+
+     return alternative_suggestions
+
+
+ # Root endpoint to check if the API is running
+ @app.get("/")
+ async def root():
+     return {"message": "API is running!"}