nicoladisabato committed on
Commit
84bd684
·
1 Parent(s): 96c0d53

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -9
app.py CHANGED
@@ -4,11 +4,13 @@ from langchain.chat_models import ChatOpenAI
4
  from langchain.embeddings import OpenAIEmbeddings
5
  from langchain.vectorstores import FAISS
6
  from langchain.chains import RetrievalQA
 
7
  from langchain.prompts import PromptTemplate
8
  from langchain.document_loaders import WebBaseLoader, AsyncHtmlLoader
9
  from langchain.document_transformers import Html2TextTransformer
10
  from langchain.callbacks import get_openai_callback
11
  from langchain.text_splitter import RecursiveCharacterTextSplitter
 
12
 
13
  import asyncio
14
  from langchain.docstore.document import Document
@@ -28,19 +30,33 @@ input_url = st.text_input("Inserisci url:")
28
  question = st.text_area("Chiedi pure:")
29
 
30
  #generate the main prompt
31
- prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
32
-
33
 
34
  {context}
35
 
36
- Question: {question}
37
- Answer:
 
 
 
 
38
  """
39
 
40
- PROMPT = PromptTemplate(
41
- template=prompt_template, input_variables=["context", "question"]
 
 
 
 
 
 
 
 
 
 
42
  )
43
 
 
44
  if st.button("Invia", type="primary"):
45
 
46
  loader = AsyncHtmlLoader(input_url)
@@ -65,15 +81,26 @@ if st.button("Invia", type="primary"):
65
  retriever = vectordb.as_retriever(search_kwargs={"k": 3})
66
  llm = ChatOpenAI(model_name = "gpt-3.5-turbo", temperature=0)
67
  relevant_docs = retriever.get_relevant_documents(question)
68
- qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, chain_type_kwargs={"prompt": PROMPT})
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  # Write answer and sources
71
  answer = st.empty()
72
 
73
  with get_openai_callback() as cb:
74
  #run the chain and generate response
75
- response = qa(question)
76
  print(cb)
77
 
78
- answer.write(response['result'])
79
  st.write(relevant_docs)
 
4
  from langchain.embeddings import OpenAIEmbeddings
5
  from langchain.vectorstores import FAISS
6
  from langchain.chains import RetrievalQA
7
+ from langchain.chains import ConversationalRetrievalChain
8
  from langchain.prompts import PromptTemplate
9
  from langchain.document_loaders import WebBaseLoader, AsyncHtmlLoader
10
  from langchain.document_transformers import Html2TextTransformer
11
  from langchain.callbacks import get_openai_callback
12
  from langchain.text_splitter import RecursiveCharacterTextSplitter
13
+ from langchain.memory import ConversationBufferWindowMemory
14
 
15
  import asyncio
16
  from langchain.docstore.document import Document
 
30
  question = st.text_area("Chiedi pure:")
31
 
32
  #generate the main prompt
33
+ prompt_template = """Instruction: You are a website agent that is talking with a human. Use only the chat history and the following information:
 
34
 
35
  {context}
36
 
37
+ to answer in a helpful manner to the question. If you don't know the answer - say that you don't know.
38
+ Keep your replies short, compassionate and informative.
39
+ {chat_history}
40
+
41
+ ### Input: {question}
42
+ ### Response:
43
  """
44
 
45
+
46
+ prompt = PromptTemplate(
47
+ template=prompt_template, input_variables=["context", "question", "chat_history"]
48
+ )
49
+
50
+
51
+ memory = ConversationBufferWindowMemory(
52
+ memory_key="chat_history",
53
+ ai_prefix="### Response",
54
+ human_prefix="### Input",
55
+ output_key="answer",
56
+ return_messages=True
57
  )
58
 
59
+
60
  if st.button("Invia", type="primary"):
61
 
62
  loader = AsyncHtmlLoader(input_url)
 
81
  retriever = vectordb.as_retriever(search_kwargs={"k": 3})
82
  llm = ChatOpenAI(model_name = "gpt-3.5-turbo", temperature=0)
83
  relevant_docs = retriever.get_relevant_documents(question)
84
+
85
+
86
+ chain = ConversationalRetrievalChain.from_llm(
87
+ llm,
88
+ chain_type='stuff',
89
+ retriever=retriever,
90
+ memory=memory,
91
+ combine_docs_chain_kwargs={"prompt": prompt},
92
+ verbose=True
93
+ )
94
+
95
+ #qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, chain_type_kwargs={"prompt": prompt})
96
 
97
  # Write answer and sources
98
  answer = st.empty()
99
 
100
  with get_openai_callback() as cb:
101
  #run the chain and generate response
102
+ response = chain(question)
103
  print(cb)
104
 
105
+ answer.write(response["answer"])
106
  st.write(relevant_docs)