apahilaj committed on
Commit
b11ca01
·
1 Parent(s): d57785a

added history

Browse files
Files changed (1) hide show
  1. app.py +33 -27
app.py CHANGED
@@ -1,30 +1,16 @@
1
  import gradio as gr
2
  import pandas as pd
3
  from langchain.embeddings import HuggingFaceEmbeddings
4
- from langchain.vectorstores import Chroma, faiss
5
- from langchain_community.llms import HuggingFaceEndpoint, HuggingFaceHub
6
- from langchain.chains import LLMChain
7
- from langchain_community.document_loaders.csv_loader import CSVLoader
8
  from langchain_community.document_loaders import PyPDFLoader
9
- from langchain.text_splitter import CharacterTextSplitter
10
- from langchain_community.document_loaders import TextLoader
11
- from langchain_community import vectorstores
12
  from langchain.prompts import PromptTemplate
13
- from langchain.chains import RetrievalQA
14
  from langchain.memory import ConversationBufferMemory
15
- from langchain.chains import ConversationalRetrievalChain
16
- from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
17
- from langchain.vectorstores import DocArrayInMemorySearch
18
- from langchain.document_loaders import TextLoader
19
- from langchain.chains import RetrievalQA, ConversationalRetrievalChain
20
- from langchain.memory import ConversationBufferMemory
21
- from langchain.chat_models import ChatOpenAI
22
- from langchain.document_loaders import TextLoader
23
- from langchain.document_loaders import PyPDFLoader
24
- import panel as pn
25
- import param
26
- import re
27
  import os
 
28
 
29
  api_token = os.environ.get('HUGGINGFACEHUB_API_TOKEN')
30
 
@@ -67,19 +53,39 @@ def load_db(file, k):
67
  retriever=retriever,
68
  return_source_documents=True,
69
  return_generated_question=True,
 
70
  )
71
 
72
  return qa
73
 
74
def greet(question, pdf_file):
    """Answer *question* about *pdf_file* using a freshly built retrieval chain.

    Builds the QA chain via ``load_db`` on every call (no history is kept),
    then extracts only the text following the "Helpful Answer:" marker from
    the raw model completion.
    """
    chain = load_db(pdf_file, 3)
    result = chain.invoke({"question": question, "chat_history": []})
    # Strip the prompt scaffolding; keep just the model's final answer.
    found = re.search(r'Helpful Answer:(.*)', result['answer'])
    if not found:
        return "No helpful answer found."
    return found.group(1).strip()
 
 
 
 
 
 
83
 
84
# Expose the single-turn QA handler through a minimal Gradio UI:
# a text question plus an uploaded file in, a plain-text answer out.
iface = gr.Interface(fn=greet, inputs=["text", "file"], outputs="text")
iface.launch(share=True)
 
 
 
1
  import gradio as gr
2
  import pandas as pd
3
  from langchain.embeddings import HuggingFaceEmbeddings
4
+ from langchain.vectorstores import faiss
5
+ from langchain_community.llms import HuggingFaceHub
6
+ from langchain.chains import ConversationalRetrievalChain, LLMChain
 
7
  from langchain_community.document_loaders import PyPDFLoader
8
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
9
+ from langchain import vectorstores
 
10
  from langchain.prompts import PromptTemplate
 
11
  from langchain.memory import ConversationBufferMemory
 
 
 
 
 
 
 
 
 
 
 
 
12
  import os
13
+ import re
14
 
15
  api_token = os.environ.get('HUGGINGFACEHUB_API_TOKEN')
16
 
 
53
  retriever=retriever,
54
  return_source_documents=True,
55
  return_generated_question=True,
56
+ memory=memory,
57
  )
58
 
59
  return qa
60
 
61
def chat(input_text, pdf_file):
    """Run one conversational turn over *pdf_file* and return a formatted reply.

    NOTE(review): this depends on a module-level ``memory`` object exposing
    ``.memory`` and ``.update(...)``. The stock langchain
    ``ConversationBufferMemory`` does not provide that interface — confirm
    what ``memory`` actually is where it is defined.
    NOTE(review): ``load_db`` is rebuilt on every call; presumably cheap
    enough here, but verify for large PDFs.
    """
    qa = load_db(pdf_file, 3)

    # First turn ever: seed the history with an opening exchange.
    if not memory.memory:
        response = qa.invoke({"question": "Hi, how can I help you today?", "chat_history": []})
        memory.update(response["chat_history"])

    response = qa.invoke({"question": input_text, "chat_history": memory.memory})

    # Keep only the text after the "Helpful Answer:" marker, if present.
    found = re.search(r'Helpful Answer:(.*)', response['answer'])
    helpful_answer = found.group(1).strip() if found else "No helpful answer found."

    # Record this turn so later calls see it in the running history.
    memory.update([(input_text, helpful_answer)])

    # Adjacent f-strings concatenate into the same single output string.
    return (
        f"Question: {input_text}\n"
        f"Answer: {helpful_answer}\n"
        f"Generated Question: {response['generated_question']}\n"
        f"Source Documents: {response['source_documents']}"
    )
85
+
86
+
87
+
88
# Wire the conversational handler into a Gradio UI and publish it with a
# shareable public link.
iface = gr.Interface(fn=chat, inputs=["text", "file"], outputs="text")
iface.launch(share=True)