Yoxas committed on
Commit
55ff761
·
verified ·
1 Parent(s): 08987a7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -11
app.py CHANGED
def embed_question(question, model, tokenizer):
    """Encode *question* as a float32 vector: mean-pooled BERT last hidden states."""
    try:
        encoded = tokenizer(question, return_tensors='pt').to(device)
        with torch.no_grad():
            hidden = model(**encoded)
        # Average over the token axis, then hand back a CPU float32 array.
        return hidden.last_hidden_state.mean(dim=1).cpu().numpy().astype(np.float32)
    except Exception as e:
        logging.error(f"Error embedding question: {e}")
        raise
def retrieve_and_generate(question):
    """Answer *question*: embed it, fetch the nearest document, run extractive QA."""
    try:
        # Embed the question
        question_embedding = embed_question(question, model, tokenizer)
        logging.debug(f"Question embedding: {question_embedding}")

        # Search in FAISS index
        _, indices = gpu_index.search(question_embedding, k=1)
        logging.debug(f"Indices found: {indices}")

        # Retrieve the most relevant document
        doc = data.iloc[indices[0][0]]
        logging.debug(f"Relevant document: {doc}")

        # Use the QA model to generate the answer
        answer_context = doc['Abstract']
        response = qa_model(question=question, context=answer_context)
        logging.debug(f"Response: {response}")
        return response['answer']
    except Exception as e:
        logging.error(f"Error during retrieval and generation: {e}")
        return "An error occurred. Please try again."
 
def embed_question(question, model, tokenizer):
    """Embed a question string with mean-pooled BERT hidden states.

    Args:
        question: User question text to encode.
        model: BERT encoder (already placed on `device` — module-level global).
        tokenizer: Tokenizer producing PyTorch tensors (`return_tensors='pt'`).

    Returns:
        numpy float32 array, the mean of the last hidden states over the token
        axis (float32 to match the FAISS index dtype).

    Raises:
        Exception: re-raised after logging if tokenization or the forward
            pass fails, so the caller can handle it.
    """
    try:
        inputs = tokenizer(question, return_tensors='pt').to(device)
        # Lazy %-style args: the (potentially large) repr is only built when
        # DEBUG logging is actually enabled, unlike an eager f-string.
        logging.debug("Tokenized inputs: %s", inputs)
        with torch.no_grad():  # inference only — skip autograd bookkeeping
            outputs = model(**inputs)
        embedding = outputs.last_hidden_state.mean(dim=1).cpu().numpy().astype(np.float32)
        logging.debug("Question embedding: %s", embedding)
        return embedding
    except Exception as e:
        logging.error("Error embedding question: %s", e)
        raise
 
74
  try:
75
  # Embed the question
76
  question_embedding = embed_question(question, model, tokenizer)
 
77
 
78
  # Search in FAISS index
79
+ try:
80
+ _, indices = gpu_index.search(question_embedding, k=1)
81
+ logging.debug(f"Indices found: {indices}")
82
+ except Exception as e:
83
+ logging.error(f"Error during FAISS search: {e}")
84
+ return "An error occurred during search. Please try again."
85
 
86
  # Retrieve the most relevant document
87
+ try:
88
+ relevant_doc = data.iloc[indices[0][0]]
89
+ logging.debug(f"Relevant document: {relevant_doc}")
90
+ except Exception as e:
91
+ logging.error(f"Error retrieving document: {e}")
92
+ return "An error occurred while retrieving the document. Please try again."
93
 
94
  # Use the QA model to generate the answer
95
+ try:
96
+ context = relevant_doc['Abstract']
97
+ response = qa_model(question=question, context=context)
98
+ logging.debug(f"Response: {response}")
99
+ return response['answer']
100
+ except Exception as e:
101
+ logging.error(f"Error generating answer: {e}")
102
+ return "An error occurred while generating the answer. Please try again."
103
  except Exception as e:
104
  logging.error(f"Error during retrieval and generation: {e}")
105
  return "An error occurred. Please try again."