Update app.py
app.py
CHANGED
@@ -33,7 +33,7 @@ vectordb = Chroma.from_documents(
 #pass_input_placeholder = st.empty()
 
 #from langchain_community.output_parsers.rail_parser import GuardrailsOutputParser
-from langchain.prompts import PromptTemplate
+#from langchain.prompts import PromptTemplate
 
 #template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer.
 #{You are a helpful dietician}
@@ -50,16 +50,16 @@ from langchain.prompts import PromptTemplate
 #)
 
 question = "How can I reverse Diabetes?"
-print("template")
+#print("template")
 
 retriever = vectordb.as_retriever(
     search_type="similarity", search_kwargs={"k": 2}
 )
 
-from langchain.chains import RetrievalQA
+#from langchain.chains import RetrievalQA
 from langchain_core.prompts import ChatPromptTemplate
 
-
+from langchain.chains.retrieval import create_retrieval_chain
 from langchain.chains.combine_documents import create_stuff_documents_chain
 #from langchain import hub
 
@@ -68,7 +68,7 @@ READER_MODEL="HuggingFaceH4/zephyr-7b-beta"
 #HuggingFaceH4/zephyr-7b-beta
 #READER_MODEL=Ollama(model="meta-llama/Meta-Llama-Guard-2-8B")
 #qa = ConversationalRetrievalChain.from_llm(llm=READER_MODEL,retriever=retriever,memory=memory)
-qa = RetrievalQA.from_chain_type(llm=READER_MODEL,chain_type="map_reduce",retriever=retriever,verbose=True)
+#qa = RetrievalQA.from_chain_type(llm=READER_MODEL,chain_type="map_reduce",retriever=retriever,verbose=True)
 
 #retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
 
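This commit comments out the older RetrievalQA chain and imports create_retrieval_chain alongside create_stuff_documents_chain, which suggests a migration to the newer LangChain retrieval API. Below is a minimal sketch (not the committed code) of how those imports could be wired together: `retriever`, the question string, and the "HuggingFaceH4/zephyr-7b-beta" repo id come from the diff above, while the `llm` variable and the HuggingFaceEndpoint wrapper are assumptions, since app.py only defines READER_MODEL as a plain string.

# Hedged sketch only: one way the newly imported chain builders could be used.
# The HuggingFaceEndpoint LLM wrapper below is an assumption; `retriever` and
# `question` are taken from the diff above.
from langchain_huggingface import HuggingFaceEndpoint
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.retrieval import create_retrieval_chain

llm = HuggingFaceEndpoint(repo_id="HuggingFaceH4/zephyr-7b-beta")  # assumed reader model wrapper

# The prompt must expose {context} for the stuffed documents and {input} for the question.
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful dietician. Answer using only the context below.\n\n{context}"),
    ("human", "{input}"),
])

combine_docs_chain = create_stuff_documents_chain(llm, prompt)    # stuff retrieved docs into the prompt
qa_chain = create_retrieval_chain(retriever, combine_docs_chain)  # retrieve, then answer

result = qa_chain.invoke({"input": question})
print(result["answer"])

If app.py adopts this wiring, qa_chain.invoke would take over the role of the map_reduce RetrievalQA call that this commit comments out.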