nickmuchi committed on
Commit c83187d · 1 Parent(s): 1123b8e

Update app.py

Files changed (1)
  1. app.py +69 -13
app.py CHANGED
@@ -1,16 +1,31 @@
 from langchain.prompts.prompt import PromptTemplate
 from langchain.llms import OpenAIChat
-from langchain.chains import ChatVectorDBChain
+from langchain.chains import ConversationalRetrievalChain, LLMChain
+from langchain.chains.question_answering import load_qa_chain
 from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceInstructEmbeddings
-from langchain.callbacks.base import CallbackManager
-from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+from langchain.callbacks import StdOutCallbackHandler
+from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
 from langchain.vectorstores import FAISS
+from langchain.memory import ConversationBufferMemory
 import os
 from typing import Optional, Tuple
 import gradio as gr
 import pickle
 from threading import Lock
 
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    SystemMessagePromptTemplate,
+    AIMessagePromptTemplate,
+    HumanMessagePromptTemplate,
+)
+from langchain.schema import (
+    AIMessage,
+    HumanMessage,
+    SystemMessage
+)
+
+from langchain.prompts import PromptTemplate
+
 prefix_messages = [{"role": "system", "content": "You are a helpful assistant that is very good at answering questions about investments using the information given."}]
 
 model_options = {'all-mpnet-base-v2': "sentence-transformers/all-mpnet-base-v2",
@@ -18,6 +33,38 @@ model_options = {'all-mpnet-base-v2': "sentence-transformers/all-mpnet-base-v2",
 
 model_options_list = list(model_options.keys())
 
+memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
+
+def load_prompt():
+
+    system_template = """Use only the following pieces of context, which have been scraped from a website, to answer the user's question accurately.
+Do not use any information not provided in the website context.
+If you don't know the answer, just say 'There is no relevant answer in the Investor Website';
+don't try to make up an answer.
+
+ALWAYS return a "SOURCES" part in your answer.
+The "SOURCES" part should be a reference to the source of the document from which you got your answer.
+
+Remember, do not reference any information not given in the context.
+If the answer is not available in the given context, just say 'There is no relevant answer in the website content'.
+
+Follow the below format when answering:
+
+Question: {question}
+SOURCES: [xyz]
+
+Begin!
+----------------
+{context}"""
+
+    messages = [
+        SystemMessagePromptTemplate.from_template(system_template),
+        HumanMessagePromptTemplate.from_template("{question}")
+    ]
+    prompt = ChatPromptTemplate.from_messages(messages)
+
+    return prompt
+
 def load_vectorstore(model):
     '''load embeddings and vectorstore'''
 
@@ -64,15 +111,24 @@ QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
 
 
 def get_chain(vectorstore):
-    llm = OpenAIChat(streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True, temperature=0,\
-                     prefix_messages=prefix_messages,model_name='gpt-4-0613')
-    qa_chain = ChatVectorDBChain.from_llm(
-        llm,
-        vectorstore,
-        qa_prompt=QA_PROMPT,
-        condense_question_prompt=CONDENSE_QUESTION_PROMPT,
-    )
-    return qa_chain
+    llm = OpenAIChat(streaming=True,
+                     callbacks=[StdOutCallbackHandler()],
+                     verbose=True,
+                     temperature=0,
+                     model_name='gpt-4-0613')
+
+    # condense each follow-up question into a standalone question, then answer it over the retrieved documents
+    question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
+    doc_chain = load_qa_chain(llm=llm, chain_type="stuff", prompt=load_prompt())
+
+    chain = ConversationalRetrievalChain(retriever=vectorstore.as_retriever(search_kwargs={"k": 3}),
+                                         question_generator=question_generator,
+                                         combine_docs_chain=doc_chain,
+                                         memory=memory,
+                                         return_source_documents=True,
+                                         get_chat_history=lambda h: h)
+
+    return chain
 
 def load_chain():
     chain = get_chain(vectorstore)
@@ -93,7 +149,7 @@ class ChatWrapper:
             # Set OpenAI key
             # chain = get_chain(vectorstore)
             # Run chain and append input.
-            output = chain({"question": inp, "chat_history": history})["answer"]
+            output = chain({"question": inp})["answer"]
             history.append((inp, output))
         except Exception as e:
             raise e
 
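The new module-level ConversationBufferMemory is what lets the chain be called without an explicit chat_history. A minimal sketch of its behavior in isolation (not part of the commit; the sample question and answer are made up): output_key='answer' tells the memory which output field to record, which matters here because return_source_documents=True makes the chain return more than one output key.

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')

# Record one question/answer turn, mirroring what the chain does after each call.
memory.save_context({"question": "What is the dividend policy?"},
                    {"answer": "The company targets a 30% payout ratio."})

# return_messages=True yields HumanMessage/AIMessage objects rather than a single string.
print(memory.load_memory_variables({})["chat_history"])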
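The prompt that load_prompt assembles can be inspected directly. A short sketch (illustrative values, not from the commit) showing that ChatPromptTemplate.format_messages fills {context} and {question} and returns the system and human messages the "stuff" chain sends to the model:

prompt = load_prompt()
messages = prompt.format_messages(
    context="Q3 revenue grew 12% year over year. SOURCES: investor-site/q3-report",
    question="How fast did revenue grow?",
)
for message in messages:
    # one SystemMessage (instructions + context) and one HumanMessage (the question)
    print(type(message).__name__, message.content[:80])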
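The rewired get_chain returns a ConversationalRetrievalChain that condenses each follow-up question, retrieves the top three documents from FAISS, and stuffs them into the prompt above. A minimal driver sketch (assuming a vectorstore has already been built and OPENAI_API_KEY is set; the question is illustrative), exercising the chain the same way ChatWrapper now does:

vectorstore = load_vectorstore(model_options_list[0])
chain = get_chain(vectorstore)

# With memory attached, only the new question is passed in;
# chat_history is injected from ConversationBufferMemory.
result = chain({"question": "What was revenue growth last quarter?"})
print(result["answer"])

# return_source_documents=True also exposes the retrieved documents.
for doc in result["source_documents"]:
    print(doc.metadata.get("source"))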