Update app.py
app.py
CHANGED
@@ -33,21 +33,22 @@ vectordb = Chroma.from_documents(
 #pass_input_placeholder = st.empty()
 
 #from langchain_community.output_parsers.rail_parser import GuardrailsOutputParser
-
+from langchain.prompts import PromptTemplate
 
-
-
-
-
+template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer.
+{You are a helpful dietician}
+Question: {question}
+Helpful Answer:"""
 
 #QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
 
-
-
-
-
-
-
+
+from langchain.chains import ConversationalRetrievalChain
+from langchain.memory import ConversationBufferMemory
+memory = ConversationBufferMemory(
+    memory_key="chat_history",
+    return_messages=True
+)
 
 question = "How can I reverse Diabetes?"
 #print("template")
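Note on this hunk (a reviewer-style aside, not part of the commit): in a LangChain PromptTemplate, curly braces mark input variables, so the literal {You are a helpful dietician} line would be parsed as a variable name rather than read as an instruction. The second hunk already switches the placeholder to {context}; if the dietician persona is still wanted, it is usually written as plain text in the template, roughly as in the sketch below (the exact wording is an assumption).

# Sketch only, not committed code: persona folded into the template text,
# leaving {context} and {question} as the only template variables.
from langchain.prompts import PromptTemplate

template = """You are a helpful dietician. Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer.
{context}
Question: {question}
Helpful Answer:"""

QA_CHAIN_PROMPT = PromptTemplate(
    input_variables=["context", "question"],
    template=template,
)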
@@ -62,26 +63,49 @@ from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
 from langchain_core.messages import SystemMessage
 from langchain_core.prompts import HumanMessagePromptTemplate
 from langchain_core.prompts import ChatPromptTemplate
+from langchain.prompts import PromptTemplate
+
 print("check1")
 
-
-
-
-
-
-
-
-
-
-
+
+template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer.
+{context}
+Question: {question}
+Helpful Answer:"""
+
+QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=template)
+
+
+#qa_chat_prompt = ChatPromptTemplate.from_messages(
+#    [
+#        SystemMessage(
+#            content=(
+#                "You are a Diabetes eductaor that provide advice to patients."
+#            )
+#        ),
+#        HumanMessagePromptTemplate.from_template("{context}"),
+#    ]
+#)
 
 llm_model = "deepset/roberta-base-squad2"
 from transformers import AutoTokenizer
-tokenizer = AutoTokenizer.from_pretrained(llm_model)
+#tokenizer = AutoTokenizer.from_pretrained(llm_model)
 #question = "How can I reverse diabetes?"
 
 print("check2")
-
+qa = ConversationalRetrievalChain.from_llm(
+    llm_model,
+    retriever=retriever,
+    memory=memory,
+    chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
+)
+
+question = "How can I reverse diabetes?"
+result = qa({"question": question})
+print("result")
+#result['answer']
+
+#pipe = pipeline(model = llm_model, tokenizer = tokenizer, task = "text-generation", temperature=0.2)
 #"question-answering", "conversational"
 
 print("check3")
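Note on the new chain (again a sketch, not the committed code): ConversationalRetrievalChain.from_llm expects an LLM object as its first argument, while this hunk passes the model-name string llm_model, and it takes the QA prompt through combine_docs_chain_kwargs rather than chain_type_kwargs (the latter belongs to RetrievalQA.from_chain_type). Also, deepset/roberta-base-squad2 is an extractive question-answering checkpoint, whereas HuggingFacePipeline (already imported in app.py) wraps a generative text-generation or text2text-generation pipeline. A minimal wiring under those assumptions, with google/flan-t5-base as a stand-in generative model and vectordb as the Chroma store created earlier in the file:

# Sketch only: assumes vectordb (Chroma), memory (ConversationBufferMemory) and
# QA_CHAIN_PROMPT are defined as in the diff above; the model name is a stand-in.
from transformers import pipeline
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.chains import ConversationalRetrievalChain

pipe = pipeline("text2text-generation", model="google/flan-t5-base", max_new_tokens=256)
llm = HuggingFacePipeline(pipeline=pipe)   # wrap the generative pipeline as a LangChain LLM

retriever = vectordb.as_retriever()        # retriever over the Chroma vector store

qa = ConversationalRetrievalChain.from_llm(
    llm,                                   # an LLM object, not the model-name string
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT},
)

result = qa({"question": "How can I reverse diabetes?"})
print(result["answer"])                    # chat history accumulates in memory

With memory attached, repeated calls carry the conversation history automatically, so follow-up questions can rely on earlier turns without restating them.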
|