Update app.py
app.py CHANGED

@@ -1,5 +1,3 @@
-import eventlet
-
 import pandas as pd
 import json
 from PIL import Image
@@ -58,6 +56,9 @@ app.config['SECRET_KEY'] = SECRET_KEY
 # Initialize LLM
 llm = ChatGroq(model="llama-3.1-8b-instant", temperature=0, max_tokens=1024, max_retries=2)
 
+# JSON response LLM
+json_llm = ChatGroq(model="llama-3.1-70b-versatile", temperature=0, max_tokens=1024, max_retries=2, model_kwargs={"response_format": {"type": "json_object"}})
+
 # Initialize Router
 router = ChatGroq(model="llama-3.2-3b-preview", temperature=0, max_tokens=1024, max_retries=2, model_kwargs={"response_format": {"type": "json_object"}})
 
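For reference, a minimal sketch of how a JSON-mode ChatGroq model like the new `json_llm` can be invoked and its reply parsed; the prompt string below is illustrative and not part of app.py:

```python
# Minimal sketch, assuming langchain_groq is installed and GROQ_API_KEY is set in the environment.
import json
from langchain_groq import ChatGroq
from langchain_core.messages import HumanMessage

json_llm = ChatGroq(
    model="llama-3.1-70b-versatile",
    temperature=0,
    max_tokens=1024,
    max_retries=2,
    model_kwargs={"response_format": {"type": "json_object"}},
)

# With response_format set to json_object, the model is constrained to return a single JSON
# object, so the raw message content can be parsed directly.
reply = json_llm.invoke([HumanMessage(content='List two salad toppings as JSON under a "toppings" key.')])
print(json.loads(reply.content))  # e.g. {"toppings": ["celery", "red onion"]}
```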
@@ -199,7 +200,6 @@ def answer_generator(formated_input, session_id):
 JSON object as its value. The JSON object should have ingredient and its measurement as key-value pairs. Similarly if user asked for nutritional information then the output should have 'header' key with header text and 'nutrients' key
 with a JSON object og nutrient and its content as key-value pairs. Similarly if the user query asks for recipe instructions then JSON output should include 'header key with header text and
 'instructions' key with a list of instructions as its value.
-
 Following are the output formats for some cases:
 1. if user query asks for all recipe information, then output should be of following format:
 {
@@ -225,14 +225,11 @@ def answer_generator(formated_input, session_id):
 header: header text,
 recipe_instructions: List of recipe instructions,
 }
-
 4. if user query asks for recipe instructions information, then output should be of following format:
 {
 header: header text,
 recipe_instructions: List of recipe instructions,
 }
-
-
 Additional Instructions:
 - Precision and Personalization: Always aim to provide precise, personalized, and relevant information to users based on both the provided context and their specific queries.
 - Clarity and Coherence: Ensure all responses are clear, well-structured, and easy to understand, facilitating a seamless user experience.
@@ -240,8 +237,6 @@ def answer_generator(formated_input, session_id):
 - Dynamic Adaptation: Adapt your responses dynamically based on whether the context is relevant to the user's current request, ensuring optimal use of available information.
 - Don't mention about the context in the response, format the answer in a natural and friendly way.
 
-Context:
-{context}
 """
 qa_prompt = ChatPromptTemplate.from_messages(
 [
@@ -266,6 +261,132 @@ def answer_generator(formated_input, session_id):
 return response
 
 
+def json_answer_generator(user_query, context):
+    system_prompt = """
+    Given a recipe context in JSON format, respond to user queries by extracting and returning the requested information in JSON format with an additional `"header"` key containing a response starter. Use the following rules:
+    1. **Recipe Information Extraction**:
+       - If the user query explicitly requests specific recipe data (e.g., ingredients, nutrients, or instructions), return only those JSON objects from the provided recipe context.
+       - For example, if the user asks, “What are the ingredients?” or “Show me the nutrient details,” your output should be limited to only the requested JSON objects (e.g., `recipe_ingredients`, `recipe_nutrients`).
+       - Include `"header": "Here is the information you requested:"` at the start of each response.
+    2. **Multiple Information Points**:
+       - If a user query asks for more than one piece of information, return each requested JSON object from the recipe context in a combined JSON response.
+       - For example, if the query is “Give me the ingredients and instructions,” the output should include both `recipe_ingredients` and `recipe_instructions` objects.
+       - Include `"header": "Here is the information you requested:"` at the start of each response.
+    3. **Non-Specific Recipe Information**:
+       - If the query does not directly refer to recipe data but instead asks for a general response based on the context, return a JSON object with a single key `"content"` and a descriptive response as its value.
+       - Include `"header": "Here is a suggestion based on the recipe:"` as the response starter.
+       - For example, if the query is “How can I use this recipe for a healthy lunch?” return a response like:
+       ```json
+       {
+           "header": "Here is a suggestion based on the recipe:",
+           "content": "This Asian Potato Salad with Seven Minute Egg is a nutritious and light option, ideal for a balanced lunch. It provides protein and essential nutrients with low calories."
+       }
+       ```
+    **Example Context**:
+    ```json
+    {
+        "recipe_name": "Asian Potato Salad with Seven Minute Egg",
+        "recipe_time": 0,
+        "recipe_yields": "4 servings",
+        "recipe_ingredients": [
+            "2 1/2 cup Multi-Colored Fingerling Potato",
+            "3/4 cup Celery",
+            "1/4 cup Red Onion",
+            "2 tablespoon Fresh Parsley",
+            "1/3 cup Mayonnaise",
+            "1 tablespoon Chili Garlic Sauce",
+            "1 teaspoon Hoisin Sauce",
+            "1 splash Soy Sauce",
+            "to taste Salt",
+            "to taste Ground Black Pepper",
+            "4 Egg"
+        ],
+        "recipe_instructions": "Fill a large stock pot with water. Add the Multi-Colored Fingerling Potato...",
+        "recipe_image": "https://www.sidechef.com/recipe/eeeeeceb-493e-493d-8273-66c800821b13.jpg?d=1408x1120",
+        "blogger": "sidechef.com",
+        "recipe_nutrients": {
+            "calories": "80 calories",
+            "proteinContent": "2.1 g",
+            "fatContent": "6.2 g",
+            "carbohydrateContent": "3.9 g",
+            "fiberContent": "0.5 g",
+            "sugarContent": "0.4 g",
+            "sodiumContent": "108.0 mg",
+            "saturatedFatContent": "1.2 g",
+            "transFatContent": "0.0 g",
+            "cholesterolContent": "47.4 mg",
+            "unsaturatedFatContent": "3.8 g"
+        },
+        "tags": [
+            "Salad",
+            "Lunch",
+            "Brunch",
+            "Appetizers",
+            "Side Dish",
+            "Budget-Friendly",
+            "Vegetarian",
+            "Pescatarian",
+            "Eggs",
+            "Potatoes",
+            "Easy",
+            "Dairy-Free",
+            "Shellfish-Free",
+            "Entertaining",
+            "Fish-Free",
+            "Peanut-Free",
+            "Tree Nut-Free",
+            "Sugar-Free",
+            "Global",
+            "Tomato-Free",
+            "Stove",
+            ""
+        ],
+        "id_": "0000001"
+    }
+    **Example Query & Output**:
+    **Query**: "What are the ingredients and calories?"
+    **Output**:
+    ```json
+    {
+        "header": "Here is the information you requested:",
+        "recipe_ingredients": [
+            "2 1/2 cup Multi-Colored Fingerling Potato",
+            "3/4 cup Celery",
+            "1/4 cup Red Onion",
+            "2 tablespoon Fresh Parsley",
+            "1/3 cup Mayonnaise",
+            "1 tablespoon Chili Garlic Sauce",
+            "1 teaspoon Hoisin Sauce",
+            "1 splash Soy Sauce",
+            "to taste Salt",
+            "to taste Ground Black Pepper",
+            "4 Egg"
+        ],
+        "recipe_nutrients": {
+            "calories": "80 calories"
+        }
+    }
+    Try to format the output as JSON object with key value pairs.
+    """
+
+    formatted_input = f"""
+    User Query: {user_query}
+    Recipe data as Context:
+    {context}
+    """
+    response = json_llm.invoke(
+        [SystemMessage(content=system_prompt)]
+        + [
+            HumanMessage(
+                content=formatted_input
+            )
+        ]
+    )
+    res = json.loads(response.content)
+    return res
+
+
+
 
 ### Router
 import json
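A hedged usage sketch for the `json_answer_generator` added above; the recipe dict here is illustrative, whereas in app.py the context comes from the session's stored recipe data:

```python
# Illustrative call only; `recipe` is a made-up stand-in for the stored recipe context.
import json

recipe = {
    "recipe_name": "Asian Potato Salad with Seven Minute Egg",
    "recipe_nutrients": {"calories": "80 calories", "proteinContent": "2.1 g"},
}
answer = json_answer_generator("What are the calories?", json.dumps(recipe))
# Expected shape per the system prompt: a dict with a "header" key plus the requested objects.
print(answer.get("header"), answer.get("recipe_nutrients"))
```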
@@ -274,10 +395,8 @@ from langchain_core.messages import HumanMessage, SystemMessage
 def router_node(query):
     # Prompt
     router_instructions = """You are an expert at determining the appropriate task for a user’s question based on chat history and the current query context. You have two available tasks:
-
 1. Retrieval: Fetch information based on user's chat history and current query.
 2. Recommendation/Suggestion: Recommend recipes to users based on the query.
-
 Return a JSON response with a single key named “task” indicating either “retrieval” or “recommendation” based on your decision.
 """
     response = router.invoke(
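The hunk cuts off inside router.invoke; a hypothetical sketch of how the router's JSON reply could be reduced to a task string (the parsing step is an assumption, not shown in this diff):

```python
# Hypothetical helper for illustration; `router` is the JSON-mode model initialized above.
import json
from langchain_core.messages import HumanMessage, SystemMessage

def route_task(query: str) -> str:
    instructions = (
        "Return a JSON object with a single key 'task' whose value is "
        "either 'retrieval' or 'recommendation'."
    )
    reply = router.invoke([SystemMessage(content=instructions), HumanMessage(content=query)])
    # The router runs in JSON mode, so its content is a JSON object like {"task": "retrieval"}.
    return json.loads(reply.content).get("task", "retrieval")
```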
@@ -307,7 +426,6 @@ def recommendation_node(query):
 "recipe_nutrients": JSON object with key value pairs such as "protein: 10g",
 "tags": list of tags related to recipe
 } \n
-
 Here is the example of an recipe json object from the JSON data: \n
 {
 "recipe_name": "Asian Potato Salad with Seven Minute Egg",
@@ -357,13 +475,10 @@ def recommendation_node(query):
 "Shellfish-Free"
 ]
 } \n
-
 Based on the user query, provide a Python function to filter the JSON data. The output of the function should be a list of json objects. \n
-
 Recipe filtering instructions:
 - If a user asked for the highest nutrient recipe such as "high protein or high calories" then filtered recipes should be the top highest recipes from all the recipes with high nutrient.
 - sort or rearrange recipes based which recipes are more appropriate for the user.
-
 Your output instructions:
 - The function name should be filter_recipes. The input to the function should be file name.
 - The length of output recipes should not be more than 6.
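For context, a hypothetical example of the kind of filter_recipes function this prompt asks the model to generate; the field names follow the recipe JSON shown above, and the "high protein" ranking rule is only an assumption:

```python
import json

def filter_recipes(file_name):
    """Example only: rank recipes by protein content and return at most 6."""
    with open(file_name, "r") as f:
        recipes = json.load(f)  # assumed to be a list of recipe JSON objects

    def protein_grams(recipe):
        value = recipe.get("recipe_nutrients", {}).get("proteinContent", "0 g")
        return float(value.split()[0])

    recipes.sort(key=protein_grams, reverse=True)
    return recipes[:6]  # the prompt caps the output at 6 recipes
```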
@@ -412,12 +527,9 @@ def answer_formatter_node(question, context):
 2. Ensure your response is clear and concise.
 3. Mention only details related to the recipe, including the recipe name, instructions, nutrients, yield, ingredients, and image.
 4. Do not include any information that is not related to the recipe context.
-
 Please format an answer based on the following user question and context provided:
-
 User Question:
 {question}
-
 Context:
 {context}
 """
@@ -444,7 +556,7 @@ def get_answer(image=[], message='', sessionID='abc123'):
 'input': message,
 'context': data
 }
-response =
+response = json_answer_generator(message, data)
 except Exception as e:
 print(e)
 response = {'content':"An error occurred while processing your request."}
@@ -462,7 +574,7 @@ def get_answer(image=[], message='', sessionID='abc123'):
 'input': message,
 'context': CURR_CONTEXT
 }
-response =
+response = json_answer_generator(message, data)
 
 return response
 
@@ -533,7 +645,7 @@ def handle_message(data):
 'context': json.dumps(context)
 }
 # Invoke question_answer_chain and stream the response
-response =
+response = json_answer_generator(message, context)
 emit('response', response, room=session_id)
 
 except Exception as e:
@@ -546,21 +658,18 @@ def handle_message(data):
 else:
 message = data['message']
 task = router_node(message)
-print(task)
 if task == 'retrieval':
 formated_input = {
 'input': message,
 'context': json.dumps(CURR_CONTEXT)
 }
-response =
+response = json_answer_generator(message, context)
 emit('response', response, room=session_id)
 else:
 response = recommendation_node(message)
-print(response)
 # response = answer_formatter_node(message, recipes)
 if response is None:
 response = {'content':"An error occurred while processing your request."}
-
 emit('json_response', response, room=session_id)
 session_store.pop(session_id, None)
 
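Because the recommendation path emits structured JSON on a separate 'json_response' event, a client has to listen for both events. A hedged sketch of a python-socketio client; the 'message' event name and the server URL are assumptions, since the handler registration is not shown in this diff:

```python
# Sketch only: the event names 'response' and 'json_response' come from the emit() calls
# above, while 'message' and the URL are assumed for illustration.
import socketio

sio = socketio.Client()

@sio.on("response")
def on_response(data):
    print("chat response:", data)

@sio.on("json_response")
def on_json_response(data):
    # Structured recipe JSON: a header plus recipe fields, or a recommendation list.
    print("structured response:", data)

sio.connect("http://localhost:5000")
sio.emit("message", {"message": "Suggest a high protein salad"})
sio.wait()
```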
@@ -605,4 +714,4 @@ def index_view():
 
 # Main function to run the app
 if __name__ == '__main__':
-    socketio.run(app, debug=
+    socketio.run(app, debug=True)
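Since `import eventlet` is removed at the top of the file, Flask-SocketIO falls back to whatever async mode it can find (eventlet or gevent if installed, otherwise threading). A minimal sketch of making that choice explicit, assuming the app and SocketIO objects are created roughly as below (app.py's actual setup is not shown in this diff):

```python
# Sketch only; not app.py's actual SocketIO initialization.
from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app, async_mode="threading")  # explicit fallback instead of eventlet

if __name__ == "__main__":
    socketio.run(app, debug=True)
```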