Ritesh-hf committed
Commit f3af235 · 2 Parent(s): 3746b33 766b4e0

Merge branch 'main' of hf.co:spaces/Ritesh-hf/Nutrigenics-chatbot

Files changed (5)
  1. .gitattributes +2 -0
  2. app.py +79 -46
  3. my_recipes.json +3 -0
  4. recipes.json +3 -0
  5. requirements.txt +1 -1
.gitattributes CHANGED
@@ -36,3 +36,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  datasets/ filter=lfs diff=lfs merge=lfs -text
  datasets/sidechef/my_recipes.json filter=lfs diff=lfs merge=lfs -text
  datasets/sidechef/recipes.json filter=lfs diff=lfs merge=lfs -text
+ my_recipes.json filter=lfs diff=lfs merge=lfs -text
+ recipes.json filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -1,10 +1,7 @@
  import pandas as pd
- import json
  from PIL import Image
  import numpy as np
-
  import os
- from pathlib import Path

  import torch
  import torch.nn.functional as F
@@ -17,23 +14,24 @@ from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStr
  import gradio as gr
  import spaces

- from langchain_core.output_parsers import StrOutputParser
- from langchain_core.prompts import ChatPromptTemplate
- from langchain_groq import ChatGroq
+ from langchain.chains import ConversationChain
  from langchain_community.chat_message_histories import ChatMessageHistory
  from langchain_core.runnables import RunnableWithMessageHistory
  from langchain_core.output_parsers import StrOutputParser
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_groq import ChatGroq

  from dotenv import load_dotenv

+ import json
  from openai import OpenAI

-
  # GROQ_API_KEY = os.getenv("GROQ_API_KEY")
  load_dotenv(".env")
  USER_AGENT = os.getenv("USER_AGENT")
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+ SECRET_KEY = os.getenv("SECRET_KEY")

  # Set environment variables
  os.environ['USER_AGENT'] = USER_AGENT
@@ -41,7 +39,6 @@ os.environ["GROQ_API_KEY"] = GROQ_API_KEY
  os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
  os.environ["TOKENIZERS_PARALLELISM"] = 'true'

-
  # Initialize LLM
  llm = ChatGroq(model="llama-3.1-8b-instant", temperature=0, max_tokens=1024, max_retries=2)

@@ -132,7 +129,7 @@ tar_img_feats = torch.cat(tar_img_feats, dim=0)

  class Chat:

- def __init__(self, model, transform, dataframe, tar_img_feats, device='cuda:0', stopping_criteria=None):
+ def __init__(self, model, transform, dataframe, tar_img_feats, device='cuda', stopping_criteria=None):
  self.device = device
  self.model = model
  self.transform = transform
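The default device for Chat changes from 'cuda:0' to 'cuda' here; elsewhere in the diff the device is already chosen with torch.cuda.is_available(). A minimal illustrative sketch of that same fallback pattern (not part of the commit itself), useful if the Space runs on CPU hardware:

    import torch

    # Pick CUDA when present, otherwise fall back to CPU; 'cuda' addresses the
    # current default GPU, which is why 'cuda:0' is no longer hard-coded.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)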
@@ -173,17 +170,27 @@ chat = Chat(model,transform,df,tar_img_feats, device)
  print("Chat Initialized !")


+
+ import secrets
+ import string
+
+ def generate_session_key():
+ characters = string.ascii_letters + string.digits
+ session_key = ''.join(secrets.choice(characters) for _ in range(8))
+ return session_key
+
+
  def answer_generator(formated_input, session_id):
  # QA system prompt and chain
  qa_system_prompt = """
- You are an AI assistant developed by Nutrigenics AI, specializing in intelligent recipe information retrieval and recipe suggestions. Your purpose is to help users by recommending recipes, providing detailed nutritional values, listing ingredients, offering step-by-step cooking instructions, and filtering recipes based on provide context ans user query.
+ You are an AI assistant developed by Nutrigenics AI, specializing in intelligent recipe information retrieval and recipe suggestions. Your purpose is to help users by recommending recipes, providing detailed nutritional values, listing ingredients, offering step-by-step cooking instructions, and filtering recipes based on context and user queries.
  Operational Guidelines:
  1. Input Structure:
  - Context: You may receive contextual information related to recipes, such as specific data sets, user preferences, dietary restrictions, or previously selected dishes.
  - User Query: Users will pose questions or requests related to recipes, nutritional information, ingredient substitutions, cooking instructions, and more.
  2. Response Strategy:
  - Utilize Provided Context: If the context contains relevant information that addresses the user's query, base your response on this provided data to ensure accuracy and relevance.
- - Respond to User Query Directly: If the context does not contain the necessary information to answer the user's query, kindly state that you do not have require information.
+ - Respond to User Query Directly: If the context does not contain the necessary information to answer the user's query, kindly state that you do not have the required information.
  Core Functionalities:
  - Nutritional Information: Accurately provide nutritional values for each recipe, including calories, macronutrients (proteins, fats, carbohydrates), and essential vitamins and minerals, using contextual data when available.
  - Ingredient Details: List all ingredients required for recipes, including substitute options for dietary restrictions or ingredient availability, utilizing context when relevant.
@@ -191,10 +198,11 @@ def answer_generator(formated_input, session_id):
  - Recipe Recommendations: Suggest dishes based on user preferences, dietary restrictions, available ingredients, and contextual data if provided.
  Additional Instructions:
  - Precision and Personalization: Always aim to provide precise, personalized, and relevant information to users based on both the provided context and their specific queries.
- - Clarity and Coherence: Ensure that all responses are clear, well-structured, and easy to understand, facilitating a seamless user experience.
- - Substitute Suggestions: When suggesting ingredient substitutes, consider user preferences and dietary restrictions outlined in the context or user query.
+ - Clarity and Coherence: Ensure all responses are clear, well-structured, and easy to understand, facilitating a seamless user experience.
+ - Substitute Suggestions: Consider user preferences and dietary restrictions outlined in the context or user query when suggesting ingredient substitutes.
  - Dynamic Adaptation: Adapt your responses dynamically based on whether the context is relevant to the user's current request, ensuring optimal use of available information.
- Don't mention about context in the response, format the answer in a natural and friendly way.
+ - Don't mention about the context in the response, format the answer in a natural and friendly way.
+
  Context:
  {context}
  """
@@ -230,8 +238,8 @@ def router_node(query):
  # Prompt
  router_instructions = """You are an expert at determining the appropriate task for a user’s question based on chat history and the current query context. You have two available tasks:

- 1. Retrieval: Fetch information based on user's chat history and current query.
- 2. Recommendation/Suggestion: Recommend recipes to users based on the query.
+ 1. Retrieval: Fetch information based on the user's chat history and current query.
+ 2. Recommendation/Suggestion: Recommend user recipes based on the query.

  Return a JSON response with a single key named “task” indicating either “retrieval” or “recommendation” based on your decision.
  """
@@ -256,14 +264,14 @@ def recommendation_node(query):
  "recipe_time": integer,
  "recipe_yields": string,
  "recipe_ingredients": list of ingredients,
- "recipe_instructions": list of instruections,
+ "recipe_instructions": list of instructions,
  "recipe_image": string,
  "blogger": string,
- "recipe_nutrients": JSON object with key value pairs such as "protein: 10g",
- "tags": list of tags related to recipe
+ "recipe_nutrients": JSON object with key-value pairs such as "protein: 10g",
+ "tags": list of tags related to a recipe
  } \n

- Here is the example of an recipe json object from the JSON data: \n
+ Here is the example of a recipe JSON object from the JSON data: \n
  {
  "recipe_name": "Asian Potato Salad with Seven Minute Egg",
  "recipe_time": 0,
@@ -313,19 +321,19 @@ def recommendation_node(query):
  ]
  } \n

- Based on the user query, provide a Python function to filter the JSON data. The output of the function should be a list of json objects. \n
+ Based on the user query, provide a Python function to filter the JSON data. The output of the function should be a list of JSON objects. \n

  Recipe filtering instructions:
- - If a user asked for the highest nutrient recipe such as "high protein or high calories" then filtered recipes should be the top highest recipes from all the recipes with high nutrient.
- - sort or rearrange recipes based which recipes are more appropriate for the user.
+ - If a user asked for the highest nutrient recipe such as "high protein or high calories" then filtered recipes should be the top highest recipes from all the recipes with high nutrients.
+ - sort or rearrange recipes based on which recipes are more appropriate for the user.

  Your output instructions:
- - The function name should be filter_recipes. The input to the function should be file name.
+ - The function name should be filter_recipes. The input to the function should be the file name.
  - The length of output recipes should not be more than 6.
- - Only give me output function. Do not call the function.
- - Give the python function as a key named "code" in a json format.
- - Do not include any other text with the output, only give python code.
- - If you do not follow the above given instructions, the chat may be terminated.
+ - Only give me the output function. Do not call the function.
+ - Give the Python function as a key named "code" in a JSON format.
+ - Do not include any other text with the output, only give Python code.
+ - If you do not follow the above-given instructions, the chat may be terminated.
  """
  max_tries = 3
  while True:
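The prompt above asks the model to return a filter_recipes function (under a "code" key) that takes a file name and yields at most six recipe objects. No generated function appears in the commit itself, so the following is only an illustrative sketch of that contract, assuming recipe_nutrients holds values like the "protein: 10g" example shown above:

    import json

    def filter_recipes(file_name):
        # Illustrative sketch only: return the six highest-protein recipes.
        with open(file_name, "r") as f:
            recipes = json.load(f)

        def protein_grams(recipe):
            # Assumes entries such as {"protein": "10g"} in recipe_nutrients.
            value = str(recipe.get("recipe_nutrients", {}).get("protein", "0"))
            digits = "".join(ch for ch in value if ch.isdigit() or ch == ".")
            try:
                return float(digits)
            except ValueError:
                return 0.0

        return sorted(recipes, key=protein_grams, reverse=True)[:6]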
@@ -383,11 +391,14 @@ def answer_formatter_node(question, context):
  return res

  CURR_CONTEXT = ''
+ CURR_SESSION_KEY = generate_session_key()

- # @spaces.GPU
+ @spaces.GPU
  def get_answer(image=[], message='', sessionID='abc123'):
  global CURR_CONTEXT
- if len(image) > 0:
+ global CURR_SESSION_KEY
+ sessionID = CURR_SESSION_KEY
+ if image is not None:
  try:
  # Process the image and message here
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -403,21 +414,21 @@ def get_answer(image=[], message='', sessionID='abc123'):
  except Exception as e:
  print(e)
  response = {'content':"An error occurred while processing your request."}
- elif len(image) == 0 and message is not None:
- print("I am here")
+ elif (image is None) and (message is not None):
  task = router_node(message)
- if task == 'retrieval':
+ print(task)
+ if task == 'recommendation':
  recipes = recommendation_node(message)
  if not recipes:
- response = {'content':"An error occurred while processing your request."}
- response = answer_formatter_node(message, recipes)
+ response = {'content': "An error occurred while processing your request."}
+ else:
+ response = answer_formatter_node(message, recipes)
  else:
  formated_input = {
  'input': message,
  'context': CURR_CONTEXT
  }
  response = answer_generator(formated_input, session_id=sessionID)
-
  return response

  import json
@@ -432,8 +443,9 @@ session_store = {}
  def handle_message(data):
  global session_store
  global CURR_CONTEXT
+ global CURR_SESSION_KEY
+ session_id = CURR_SESSION_KEY
  context = "No data available"
- session_id = request.sid
  if session_id not in session_store:
  session_store[session_id] = {'image_data': b"", 'message': None, 'image_received': False}

@@ -448,7 +460,7 @@

  except Exception as e:
  print(f"Error processing image chunk: {str(e)}")
- return
+ return "An error occurred while receiving the image chunk."

  if session_store[session_id]['image_data'] or session_store[session_id]['message']:
  try:
@@ -470,10 +482,11 @@ def handle_message(data):
  }
  # Invoke question_answer_chain and stream the response
  response = answer_generator(formated_input, session_id=session_id)
+ return response

  except Exception as e:
  print(f"Error processing image or message: {str(e)}")
- return
+ return "An error occurred while processing your request."
  finally:
  # Clear session data after processing
  session_store.pop(session_id, None)
@@ -487,15 +500,15 @@ def handle_message(data):
  'context': json.dumps(CURR_CONTEXT)
  }
  response = answer_generator(formated_input, session_id=session_id)
+ session_store.pop(session_id, None)
+ return response
  else:
  response = recommendation_node(message)
  # response = answer_formatter_node(message, recipes)
  if response is None:
  response = {'content':"An error occurred while processing your request."}
-
- session_store.pop(session_id, None)
-
-
+ session_store.pop(session_id, None)
+ return response

  import requests
  from PIL import Image
@@ -519,17 +532,37 @@ def download_image_to_numpy(url):
  raise Exception(f"Failed to download image. Status code: {response.status_code}")

  def handle_message(data):
+ global CURR_SESSION_KEY
+ session_id = CURR_SESSION_KEY
  img_url = data['img_url']
  message = data['message']
  image_array = download_image_to_numpy(img_url)
- response = get_answer(image=image_array, message=message)
+ response = get_answer(image=image_array, message=message, sessionID=session_id)
  return response


+
+ # @spaces.GPU
+ def respond_to_user(image, message):
+ # Process the image and message here
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ chat = Chat(model,transform,df,tar_img_feats, device)
+ chat.encode_image(image)
+ data = chat.ask()
+ formated_input = {
+ 'input': message,
+ 'context': data
+ }
+ try:
+ response = answer_generator(formated_input, session_id="123cnedc")
+ except Exception as e:
+ response = {'content':"An error occurred while processing your request."}
+ return response
+
  iface = gr.Interface(
- fn=respond_to_user,
+ fn=get_answer,
  inputs=[gr.Image(), gr.Textbox(label="Ask Query")],
- outputs=[gr.Textbox(label="Nutrition-GPT"), gr.JSON(label="context")],
+ outputs=[gr.Textbox(label="Nutrition-GPT")],
  title="Nutrition-GPT Demo",
  description="Upload an food image and ask queries!",
  css=".component-12 {background-color: red}",
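With fn=get_answer, gr.Interface passes the image and textbox values positionally as the first two parameters, so sessionID keeps its default (and the new code overrides it with CURR_SESSION_KEY anyway). Below is a minimal, self-contained sketch of the same wiring with a stand-in function; fake_get_answer is hypothetical and not part of the commit:

    import gradio as gr
    import numpy as np

    def fake_get_answer(image, message, sessionID="abc123"):
        # Stand-in with the same parameter shape as get_answer in app.py.
        shape = None if image is None else np.asarray(image).shape
        return f"session={sessionID}, image_shape={shape}, query={message!r}"

    demo = gr.Interface(
        fn=fake_get_answer,
        inputs=[gr.Image(), gr.Textbox(label="Ask Query")],
        outputs=[gr.Textbox(label="Nutrition-GPT")],
    )

    if __name__ == "__main__":
        demo.launch()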
 
my_recipes.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c06976151fba792ef9589980ba3773dec8529d7f3f9d25ce0db599aab3b7efe1
+ size 20836308
recipes.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c04c9e8ba0e463d0bc09077b466085b6cbe98b1428a21f74299115b9c3c54b51
+ size 23644165
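Both my_recipes.json and recipes.json are committed as Git LFS pointer files; the three lines above are the pointer, not the JSON itself, so the real content only exists after `git lfs pull`. A hedged sketch of a guard the app could use before json.load (the load_json_checking_lfs helper is hypothetical, not from the commit):

    import json

    def load_json_checking_lfs(path):
        # Hypothetical guard: fail loudly if only the LFS pointer was checked out.
        with open(path, "r", encoding="utf-8", errors="replace") as f:
            head = f.read(100)
        if head.startswith("version https://git-lfs.github.com/spec/"):
            raise RuntimeError(f"{path} is a Git LFS pointer; run `git lfs pull` first.")
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)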
requirements.txt CHANGED
@@ -12,4 +12,4 @@ gradio
  langchain
  langchain-community
  langchain-groq
- openai
+ openai