Yoxas committed on
Commit
3e1f1d3
·
verified ·
1 Parent(s): e7abd03

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -15
app.py CHANGED
@@ -6,6 +6,10 @@ import faiss
6
  import torch
7
  import json
8
  import spaces
 
 
 
 
9
 
10
  # Load CSV data
11
  data = pd.read_csv('RBDx10kstats.csv')
@@ -15,7 +19,7 @@ def safe_json_loads(x):
15
  try:
16
  return np.array(json.loads(x), dtype=np.float32) # Ensure the array is of type float32
17
  except json.JSONDecodeError as e:
18
- print(f"Error decoding JSON: {e}")
19
  return np.array([], dtype=np.float32) # Return an empty array or handle it as appropriate
20
 
21
  # Apply the safe_json_loads function to the embedding column
@@ -59,20 +63,29 @@ def embed_question(question, model, tokenizer):
59
  # Function to retrieve the relevant document and generate a response
60
@spaces.GPU(duration=120)
def retrieve_and_generate(question):
    """Answer *question* by retrieving the closest abstract and running QA.

    Embeds the question, looks up the single nearest document in the
    FAISS index, and feeds that row's 'Abstract' column to the QA
    pipeline.

    Args:
        question: Natural-language question string.

    Returns:
        The answer string produced by the QA model, or a fallback
        message when the index returns no neighbour.
    """
    # Embed the question with the shared encoder.
    question_embedding = embed_question(question, model, tokenizer)

    # Top-1 nearest-neighbour search in the FAISS index.
    _, indices = gpu_index.search(question_embedding, k=1)

    # FAISS reports -1 when it cannot return a neighbour; without this
    # guard, data.iloc[-1] would silently select the *last* row.
    best = indices[0][0]
    if best < 0:
        return "No relevant document found."

    # Retrieve the most relevant document and run extractive QA on its
    # abstract.
    relevant_doc = data.iloc[best]
    context = relevant_doc['Abstract']
    response = qa_model(question=question, context=context)

    return response['answer']
 
 
 
 
 
 
 
 
 
76
 
77
  # Create a Gradio interface
78
  interface = gr.Interface(
 
6
  import torch
7
  import json
8
  import spaces
9
import logging

# Emit debug-level records so each retrieval/QA step can be traced.
logging.basicConfig(level=logging.DEBUG)
13
 
14
  # Load CSV data
15
  data = pd.read_csv('RBDx10kstats.csv')
 
19
  try:
20
  return np.array(json.loads(x), dtype=np.float32) # Ensure the array is of type float32
21
  except json.JSONDecodeError as e:
22
+ logging.error(f"Error decoding JSON: {e}")
23
  return np.array([], dtype=np.float32) # Return an empty array or handle it as appropriate
24
 
25
  # Apply the safe_json_loads function to the embedding column
 
63
  # Function to retrieve the relevant document and generate a response
64
@spaces.GPU(duration=120)
def retrieve_and_generate(question):
    """Answer *question* via retrieval-augmented extractive QA.

    Embeds the question, finds the nearest document in the FAISS index,
    and runs the QA pipeline over that row's 'Abstract' column. Any
    failure is logged with its traceback and reported as a friendly
    message instead of propagating to the Gradio UI.

    Args:
        question: Natural-language question string.

    Returns:
        The QA model's answer string, or an error/fallback message.
    """
    # Lazy %-style args avoid formatting work when DEBUG is disabled.
    logging.debug("Received question: %s", question)
    try:
        # Embed the question with the shared encoder.
        question_embedding = embed_question(question, model, tokenizer)
        logging.debug("Question embedding: %s", question_embedding)

        # Top-1 nearest-neighbour search in the FAISS index.
        _, indices = gpu_index.search(question_embedding, k=1)
        logging.debug("Indices found: %s", indices)

        # FAISS reports -1 when it cannot return a neighbour; without
        # this guard, data.iloc[-1] would silently select the last row.
        best = indices[0][0]
        if best < 0:
            return "No relevant document found."

        # Retrieve the most relevant document and run extractive QA.
        relevant_doc = data.iloc[best]
        logging.debug("Relevant document: %s", relevant_doc)

        context = relevant_doc['Abstract']
        response = qa_model(question=question, context=context)
        logging.debug("Response: %s", response)

        return response['answer']
    except Exception:
        # logging.exception records the full traceback; logging.error
        # with just the message would discard it.
        logging.exception("Error during retrieval and generation")
        return "An error occurred. Please try again."
89
 
90
  # Create a Gradio interface
91
  interface = gr.Interface(