DeepVen committed on
Commit
484a648
·
1 Parent(s): 22083e2

Upload Index.py

Browse files
Files changed (1) hide show
  1. Index.py +10 -2
Index.py CHANGED
@@ -181,7 +181,8 @@ def _check_if_db_exists(db_path: str) -> bool:
181
  def _load_embeddings_from_db(
182
  db_present: bool,
183
  domain: str,
184
- path: str = "sentence-transformers/all-MiniLM-L6-v2",
 
185
  ):
186
  # Create embeddings model with content support
187
  embeddings = Embeddings({"path": path, "content": True})
@@ -218,20 +219,27 @@ def _search(query, extractor, question=None):
218
  # llm_chain = LLMChain(prompt=prompt, llm=extractor)
219
 
220
  # return {"question": question, "answer": llm_chain.run(question)}
 
221
  return extractor([("answer", query, _prompt(question), False)])[0][1]
222
 
223
 
224
  @app.get("/rag")
225
  def rag(domain: str, question: str):
 
226
  db_exists = _check_if_db_exists(db_path=f"{os.getcwd()}/index/{domain}/documents")
227
  print(db_exists)
 
 
 
 
 
228
  # if db_exists:
229
  embeddings = _load_embeddings_from_db(db_exists, domain)
230
  # Create extractor instance
231
  #extractor = Extractor(embeddings, "google/flan-t5-base")
232
  #extractor = Extractor(embeddings, "TheBloke/Llama-2-7B-GGUF")
233
  print("before calling extractor")
234
- extractor = Extractor(embeddings, "google/flan-t5-base")
235
  # llm = HuggingFaceHub(
236
  # repo_id="google/flan-t5-xxl",
237
  # model_kwargs={"temperature": 1, "max_length": 1000000},
 
181
  def _load_embeddings_from_db(
182
  db_present: bool,
183
  domain: str,
184
+ #path: str = "sentence-transformers/all-MiniLM-L6-v2",
185
+ path: str = "sentence-transformers/nli-mpnet-base-v2",
186
  ):
187
  # Create embeddings model with content support
188
  embeddings = Embeddings({"path": path, "content": True})
 
219
  # llm_chain = LLMChain(prompt=prompt, llm=extractor)
220
 
221
  # return {"question": question, "answer": llm_chain.run(question)}
222
+ print(extractor([("answer", query, _prompt(question), False)])[0][1])
223
  return extractor([("answer", query, _prompt(question), False)])[0][1]
224
 
225
 
226
  @app.get("/rag")
227
  def rag(domain: str, question: str):
228
+ print()
229
  db_exists = _check_if_db_exists(db_path=f"{os.getcwd()}/index/{domain}/documents")
230
  print(db_exists)
231
+
232
+ bool_value = _check_if_db_exists(db_path=f"{os.getcwd()}/index/{domain}/documents")
233
+ print(bool_value)
234
+
235
+
236
  # if db_exists:
237
  embeddings = _load_embeddings_from_db(db_exists, domain)
238
  # Create extractor instance
239
  #extractor = Extractor(embeddings, "google/flan-t5-base")
240
  #extractor = Extractor(embeddings, "TheBloke/Llama-2-7B-GGUF")
241
  print("before calling extractor")
242
+ extractor = Extractor(embeddings, "distilbert-base-cased-distilled-squad")
243
  # llm = HuggingFaceHub(
244
  # repo_id="google/flan-t5-xxl",
245
  # model_kwargs={"temperature": 1, "max_length": 1000000},