Gainward777 committed on
Commit
46a38fc
·
verified ·
1 Parent(s): 8cdeb29

Update llm/utils.py

Browse files
Files changed (1) hide show
  1. llm/utils.py +3 -7
llm/utils.py CHANGED
@@ -20,8 +20,6 @@ API_TOKEN=os.getenv("TOKEN")
20
  # Initialize langchain LLM chain
21
  def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vdb,
22
  thold=0.8, progress=gr.Progress()):
23
- #global VDB
24
- #global THOLD
25
 
26
  llm = HuggingFaceEndpoint(
27
  huggingfacehub_api_token = API_TOKEN,
@@ -36,12 +34,10 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vdb,
36
  output_key='answer',
37
  return_messages=True
38
  )
39
-
40
- #VDB=vdb
41
- #THOLD=thold
42
  qa_chain = ConversationalRetrievalChain.from_llm(
43
  llm,
44
- retriever=CustomRetriever(vectorstore=vdb, thold=thold),#RetrieverWithScores(),
45
  chain_type="stuff",
46
  memory=memory,
47
  return_source_documents=True,
@@ -76,7 +72,7 @@ def postprocess(response):
76
  #Here should be a binary classification model.
77
  if not "I don't know" in result:
78
  for doc in response['source_documents']:
79
- file_doc="\n\nFile: " + doc.metadata["source"]
80
  page="\nPage: " + str(doc.metadata["page"])
81
  content="\nFragment: " + doc.page_content.strip()
82
  result+=file_doc+page+content
 
20
  # Initialize langchain LLM chain
21
  def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vdb,
22
  thold=0.8, progress=gr.Progress()):
 
 
23
 
24
  llm = HuggingFaceEndpoint(
25
  huggingfacehub_api_token = API_TOKEN,
 
34
  output_key='answer',
35
  return_messages=True
36
  )
37
+
 
 
38
  qa_chain = ConversationalRetrievalChain.from_llm(
39
  llm,
40
+ retriever=CustomRetriever(vectorstore=vdb, thold=thold),
41
  chain_type="stuff",
42
  memory=memory,
43
  return_source_documents=True,
 
72
  #Here should be a binary classification model.
73
  if not "I don't know" in result:
74
  for doc in response['source_documents']:
75
+ file_doc="\n\nFile: " + doc.metadata["source"].split('/')[-1]
76
  page="\nPage: " + str(doc.metadata["page"])
77
  content="\nFragment: " + doc.page_content.strip()
78
  result+=file_doc+page+content