Hammad712 committed
Commit 91ff21f · verified · 1 Parent(s): b2c3d9b

Update app.py

Files changed (1): app.py +49 -132
app.py CHANGED
@@ -10,8 +10,25 @@ from fastapi.responses import HTMLResponse
 import os
 import base64
 from groq import Groq
+import faiss
+import pickle
+import torch
+from transformers import CLIPProcessor, CLIPModel
+from PIL import Image
+
+# Load the FAISS index
+index = faiss.read_index("knowledge_base.faiss")
+
+# Load the titles metadata
+with open("titles.pkl", "rb") as f:
+    titles = pickle.load(f)
+
+# Load CLIP model and processor on CPU
+model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to("cpu")
+processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+
 # Initialize Groq client
-client = Groq(api_key='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')
+client = Groq(api_key='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')
 
 # MongoDB connection setup
 def get_mongo_client():
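Note: knowledge_base.faiss and titles.pkl are prerequisites that this commit loads but does not create. A minimal offline-indexing sketch, assuming the reference dish photos sit in a hypothetical reference_dishes/ folder, the dish title is taken from each file name, and a flat L2 index is used; the CLIP checkpoint matches the one loaded above:

# Offline indexing sketch (not part of this commit); folder name, file-name
# convention, and index type are assumptions.
import os
import pickle

import faiss
import numpy as np
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to("cpu")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

titles, vectors = [], []
for name in sorted(os.listdir("reference_dishes")):
    image = Image.open(os.path.join("reference_dishes", name)).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        vectors.append(model.get_image_features(**inputs).numpy()[0])
    titles.append(os.path.splitext(name)[0])  # dish title from the file name

index = faiss.IndexFlatL2(vectors[0].shape[0])  # 512-dim for ViT-B/32
index.add(np.stack(vectors).astype("float32"))
faiss.write_index(index, "knowledge_base.faiss")

with open("titles.pkl", "wb") as f:
    pickle.dump(titles, f)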
@@ -56,17 +73,38 @@ class Recipe(BaseModel):
     directions: List[str]
 
 
+class get_recipe_name(BaseModel):
+    recipe_name: List[str]
+    ingredients: List[List[str]]
+
 # Data model for LLM to generate
 class Alternative_Ingredient(BaseModel):
     name: str
     quantity: str
 
-
 class Alternative_Recipe(BaseModel):
     recipe_name: str
     alternative_ingredients: List[Alternative_Ingredient]
     alternative_directions: List[str]
 
+# Function for finding the most similar image
+def find_similar_image(image_path, threshold=30.0):
+    # Load and preprocess the input image
+    image = Image.open(image_path).convert("RGB")
+    inputs = processor(images=image, return_tensors="pt")
+
+    # Generate embedding for the input image on CPU
+    with torch.no_grad():
+        image_features = model.get_image_features(**inputs).numpy()  # No need for .cpu()
+
+    # Perform similarity search in FAISS
+    distances, indices = index.search(image_features, k=1)  # Search for the most similar embedding
+
+    # Check if the closest match meets the threshold
+    if distances[0][0] < threshold:
+        return titles[indices[0][0]]
+    else:
+        return "Not Found"
 def get_recipe(recipe_name: str) -> Recipe:
     chat_completion = client.chat.completions.create(
         messages=[
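A short usage sketch for find_similar_image (the path below is hypothetical). If the index is a flat L2 index, FAISS returns squared Euclidean distances, so the threshold of 30.0 is compared against a squared distance; whether that cut-off is reasonable depends on how the index was built.

# Usage sketch; returns a title from titles.pkl or "Not Found" when the
# nearest neighbour is farther than the threshold.
title = find_similar_image("uploads/sample_dish.jpg", threshold=30.0)
print(title)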
@@ -81,7 +119,7 @@ def get_recipe(recipe_name: str) -> Recipe:
                 "content": f"Fetch a recipe for {recipe_name}",
             },
         ],
-        model="llama-3.2-90b-text-preview",
+        model="llama3-8b-8192",
         temperature=0,
         # Streaming is not supported in JSON mode
         stream=False,
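Both text call sites switch from llama-3.2-90b-text-preview to llama3-8b-8192 while keeping Groq's JSON mode. For context, a minimal self-contained sketch of that pattern: the Recipe model and system prompt here are simplified stand-ins (the file's actual prompt and model fields are outside this hunk), and the sketch reads the API key from the GROQ_API_KEY environment variable instead of hard-coding it.

# JSON-mode sketch with a simplified schema; not the app's exact prompt.
import json
from typing import List

from groq import Groq
from pydantic import BaseModel

class Recipe(BaseModel):  # simplified stand-in for the app's Recipe model
    recipe_name: str
    ingredients: List[str]
    directions: List[str]

client = Groq()  # picks up GROQ_API_KEY from the environment

completion = client.chat.completions.create(
    model="llama3-8b-8192",
    messages=[
        {
            "role": "system",
            "content": "You are a recipe database that outputs recipes in JSON.\n"
                       f"The JSON object must use the schema: {json.dumps(Recipe.model_json_schema(), indent=2)}",
        },
        {"role": "user", "content": "Fetch a recipe for chicken karhai"},
    ],
    temperature=0,
    stream=False,  # streaming is not supported in JSON mode
    response_format={"type": "json_object"},
)
recipe = Recipe.model_validate_json(completion.choices[0].message.content)
print(recipe.recipe_name)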
@@ -91,9 +129,6 @@ def get_recipe(recipe_name: str) -> Recipe:
     return Recipe.model_validate_json(chat_completion.choices[0].message.content)
 
 
-
-
-
 def Suggest_ingredient_alternatives(recipe_name: str, dietary_restrictions: str, allergies: List) -> Alternative_Recipe:
     chat_completion = client.chat.completions.create(
         messages=[
@@ -121,7 +156,7 @@ def Suggest_ingredient_alternatives(recipe_name: str, dietary_restrictions: str,
                 Allergies: {', '.join(allergies)}""",
             },
         ],
-        model="llama-3.2-90b-text-preview",
+        model="llama3-8b-8192",
         temperature=0,
         # Streaming is not supported in JSON mode
         stream=False,
@@ -130,123 +165,8 @@ def Suggest_ingredient_alternatives(recipe_name: str, dietary_restrictions: str,
     )
     return Alternative_Recipe.model_validate_json(chat_completion.choices[0].message.content)
 
-
-def get_status(content):
-    chat_completion = client.chat.completions.create(
-        messages=[
-            {
-                "role": "system",
-                "content": """Your are an expert agent to status yes if any kind of recipe dish present in explanation other no
-
-                Json output format:
-                {'status':return'yes' if any dish present in expalantion return 'no' if not dish present in image}
-                """,
-            },
-            {
-                "role": "user",
-                "content": f"Image Explanation {content}",
-            },
-        ],
-        model="llama3-groq-70b-8192-tool-use-preview",
-        temperature=0,
-        # Streaming is not supported in JSON mode
-        stream=False,
-        # Enable JSON mode by setting the response format
-        response_format={"type": "json_object"},
-    )
-    return chat_completion.choices[0].message.content
-
-# Function to encode the image
-def encode_image(image_path):
-    with open(image_path, "rb") as image_file:
-        return base64.b64encode(image_file.read()).decode('utf-8')
-
-def explain_image(base64_image):
-    text_query = '''
-    explain the image.
-    '''
-    chat_completion = client.chat.completions.create(
-        messages=[
-            {
-                "role": "user",
-                "content": [
-                    {"type": "text", "text": text_query},
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": f"data:image/jpeg;base64,{base64_image}",
-                        },
-                    },
-                ],
-
-            }
-        ],
-        model="llama-3.2-90b-vision-preview")
-    return chat_completion.choices[0].message.content
-
-
-class get_recipe_name(BaseModel):
-    recipe_name: List[str]
-    ingredients: List[List[str]]
-
-
-def generate_recipe_name(base64_image):
-    # Example of how the JSON should look to make it clearer
-    example_json_structure = {
-        "recipe_name": "Chicken Karhai",
-        "ingredients": [
-            "chicken",
-            "tomatoes",
-            "onions",
-            "ginger",
-            "garlic",
-            "green chilies",
-            "yogurt",
-            "cumin seeds",
-            "coriander powder",
-            "red chili powder",
-            "turmeric powder",
-            "garam masala",
-            "fresh coriander leaves",
-            "oil",
-            "salt"
-        ]
-    }
-
-    # Generating the query prompt to ask for ingredients
-    text_query = f'''What are the ingredients used in these dishes? Do not add any explanation, just write the names of the ingredients in proper JSON according to the following format:
-    The JSON object must follow this schema:
-    {json.dumps(get_recipe_name.model_json_schema(), indent=2)}
-
-    Example format:
-    {json.dumps(example_json_structure, indent=2)}
-
-    Write the name of the dish and then list the ingredients used for each recipe, focusing on traditional Pakistani ingredients and terminology.
-    '''
-
-    chat_completion = client.chat.completions.create(
-        messages=[
-            {
-                "role": "user",
-                "content": [
-                    {"type": "text", "text": text_query},
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": f"data:image/jpeg;base64,{base64_image}",
-                        },
-                    },
-                ],
-
-            }
-        ],
-        response_format={"type": "json_object"},
-        model="llama-3.2-90b-vision-preview")
-    return json.loads(chat_completion.choices[0].message.content)
-
 app = FastAPI()
 
-
 @app.post("/get_recipe/{token}")
 async def get_recipe_response(token: str, recipe_user: RecipeData):
     user = user_collection.find_one({"token": token})
@@ -297,18 +217,10 @@ async def upload_image(token: str, file: UploadFile = File(...)):
     with open(file_path, "wb") as buffer:
         buffer.write(await file.read())
 
-    # Getting the base64 string
-    base64_image = encode_image(file_path)
-
-    status = get_status(explain_image(base64_image))
-    status_json = json.loads(status)
-    if status_json['status'].lower() == 'no':
-        response = {"recipe_name": [], 'ingredients': []}
-    else:
-        response = generate_recipe_name(base64_image)
+    result = find_similar_image(file_path, threshold=30.0)
 
     return {
-        "Response": response
+        "Response": result
     }
 
 
@@ -353,3 +265,8 @@ async def check_credentials(user: UserData):
         "last_name": existing_user["last_name"],
         "token": token,
     }
+
+
+@app.get("/")
+async def root():
+    return {"message": "API is up and running!"}
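A quick way to exercise the new root health-check route is FastAPI's TestClient (it needs the httpx package); note that importing app also runs the module-level setup, so the FAISS index, titles.pkl, and CLIP model must be present.

# Health-check sketch using FastAPI's TestClient.
from fastapi.testclient import TestClient

from app import app  # importing app loads the FAISS index and CLIP model

test_client = TestClient(app)
response = test_client.get("/")
assert response.status_code == 200
assert response.json() == {"message": "API is up and running!"}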
 
 
 
 
 
 