Update app.py
app.py
CHANGED
@@ -89,12 +89,14 @@ QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"],templat
 
 llm_model = "deepset/roberta-base-squad2"
 from transformers import AutoTokenizer
-
+tokenizer = AutoTokenizer.from_pretrained(llm_model)
 #question = "How can I reverse diabetes?"
 
+pipe = pipeline(model = llm_model, tokenizer = tokenizer, task = "text-generation", temperature=0.2)
+
 print("check2")
 qa = ConversationalRetrievalChain.from_llm(
-
+    pipe,
     retriever=retriever,
     memory=memory,
     chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
@@ -105,7 +107,6 @@ result = qa({"question": question})
 print("result")
 #result['answer']
 
-#pipe = pipeline(model = llm_model, tokenizer = tokenizer, task = "text-generation", temperature=0.2)
 #"question-answering", "conversational"
 
 print("check3")
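Note on the wiring in this commit: ConversationalRetrievalChain.from_llm expects a LangChain LLM object rather than a raw transformers pipeline, and deepset/roberta-base-squad2 is an extractive-QA encoder, so building a "text-generation" pipeline from it will fail to load. Below is a minimal sketch of a working hookup, not the committed code: it assumes classic LangChain with its HuggingFacePipeline wrapper, substitutes a generative model (google/flan-t5-base is only an illustrative choice), and reuses the retriever, memory, QA_CHAIN_PROMPT, and question objects defined earlier in app.py.

from transformers import AutoTokenizer, pipeline
from langchain.llms import HuggingFacePipeline
from langchain.chains import ConversationalRetrievalChain

# Assumption: an illustrative generative model stands in for the
# extractive-QA checkpoint used in the commit.
llm_model = "google/flan-t5-base"
tokenizer = AutoTokenizer.from_pretrained(llm_model)
pipe = pipeline(
    task="text2text-generation",  # flan-t5 is a seq2seq model
    model=llm_model,
    tokenizer=tokenizer,
    max_new_tokens=256,
)

# Wrap the raw pipeline so LangChain can treat it as an LLM.
llm = HuggingFacePipeline(pipeline=pipe)

# In classic LangChain the prompt for this chain is passed via
# combine_docs_chain_kwargs; chain_type_kwargs is the RetrievalQA spelling.
# retriever, memory, and QA_CHAIN_PROMPT are assumed defined earlier in app.py.
qa = ConversationalRetrievalChain.from_llm(
    llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT},
)
result = qa({"question": question})

With this wrapping in place, the chain_type_kwargs argument kept in the commit would raise an unexpected-keyword error; switching it to combine_docs_chain_kwargs is the smaller of the two fixes the diff still needs.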