Namitg02 committed on
Commit 1e0339f · verified · 1 Parent(s): fe4f2dd

Update app.py

Files changed (1)
  1. app.py +32 -2
app.py CHANGED
@@ -28,8 +28,38 @@ vectordb = Chroma.from_documents(
     persist_directory=persist_directory
 )
 
-
 retriever = vectordb.as_retriever()
 
 import gradio as gr
-gr.load("models/HuggingFaceH4/zephyr-7b-beta").launch()
+gr.load("models/HuggingFaceH4/zephyr-7b-beta").launch()
+
+#docs_ss = vectordb.similarity_search(question,k=3)
+
+
+#qa_chain = RetrievalQA.from_chain_type(
+#    models/HuggingFaceH4/zephyr-7b-beta,
+#    retriever=vectordb.as_retriever()
+#)
+
+from langchain.prompts import PromptTemplate
+
+template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer.
+{You are a helpful dietician}
+Question: {question}
+Helpful Answer:"""
+
+QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
+
+
+from langchain.chains import ConversationalRetrievalChain
+#qa_chain = RetrievalQA.from_chain_type(models/HuggingFaceH4/zephyr-7b-beta,retriever=vectordb.as_retriever(),chain_type_kwargs={"prompt": QA_CHAIN_PROMPT})
+
+
+from langchain.memory import ConversationBufferMemory
+memory = ConversationBufferMemory(
+    memory_key="chat_history",
+    return_messages=True
+)
+
+retriever=vectordb.as_retriever()
+qa = ConversationalRetrievalChain.from_llm(models/HuggingFaceH4/zephyr-7b-beta,retriever=retriever,memory=memory)
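
Note that, as committed, the final ConversationalRetrievalChain.from_llm(...) call passes the bare token models/HuggingFaceH4/zephyr-7b-beta rather than an LLM object, and the prompt template defines no {context} placeholder, so this revision will not run as-is; the gr.load(...) line also launches a standalone demo of the Hub model that is not connected to the chain. Below is a minimal sketch of how the same chain could be built so it actually runs. It is not part of the commit: the HuggingFaceHub wrapper, the temperature/max_new_tokens settings, the reworded template, and the example question are assumptions, and depending on the installed LangChain version these imports may live under langchain_community instead of langchain.

# Hypothetical, runnable variant of the chain built in this commit (not part of the diff).
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

# Wrap the Hub model in an LLM object; from_llm() needs an LLM, not a repo path.
# Requires HUGGINGFACEHUB_API_TOKEN to be set in the environment.
llm = HuggingFaceHub(
    repo_id="HuggingFaceH4/zephyr-7b-beta",
    model_kwargs={"temperature": 0.5, "max_new_tokens": 256},  # assumed settings
)

# Same intent as the committed template, but with a {context} placeholder so the
# retrieved documents are injected, and the persona folded into plain text.
template = """You are a helpful dietician. Use the following pieces of context to answer
the question at the end. If you don't know the answer, just say that you don't know,
don't try to make up an answer. Use three sentences maximum and keep the answer concise.
Always say "thanks for asking!" at the end of the answer.
{context}
Question: {question}
Helpful Answer:"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(template)

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

qa = ConversationalRetrievalChain.from_llm(
    llm,
    retriever=vectordb.as_retriever(),  # vectordb is built earlier in app.py
    memory=memory,
    combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT},
)

# Example turn; the chat history accumulates in memory, and the reply is in result["answer"].
result = qa({"question": "What should I eat to increase my protein intake?"})  # assumed example query
print(result["answer"])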