Update app.py
app.py CHANGED
@@ -90,16 +90,19 @@ QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"],templat
 llm_model = "deepset/roberta-base-squad2"
 from transformers import AutoTokenizer
 tokenizer = AutoTokenizer.from_pretrained(llm_model)
+from transformers import AutoModelForCausalLM
+model = AutoModelForCausalLM.from_pretrained(llm_model)
+
 #question = "How can I reverse diabetes?"
 
 #pipe = pipeline(model = llm_model, tokenizer = tokenizer, task = "text-generation", temperature=0.2)
 
 from langchain.chains.question_answering import load_qa_chain
 
-pipe = load_qa_chain(llm=llm_model,tokenizer =tokenizer, chain_type="map_reduce")
+#pipe = load_qa_chain(llm=llm_model,tokenizer =tokenizer, chain_type="map_reduce")
 print("check2")
 qa = ConversationalRetrievalChain.from_llm(
-
+    model,
     retriever=retriever,
     memory=memory,
     chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
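Note on the hunk above: it passes a raw transformers model object straight into ConversationalRetrievalChain.from_llm, and deepset/roberta-base-squad2 is an extractive question-answering checkpoint rather than a causal LM, so AutoModelForCausalLM.from_pretrained will not load it. A minimal sketch of the more conventional wiring follows; it assumes retriever, memory, and QA_CHAIN_PROMPT are defined earlier in app.py (the hunk header shows QA_CHAIN_PROMPT is), and the google/flan-t5-base checkpoint and the context string are illustrative stand-ins, not part of this commit:

from transformers import pipeline

# deepset/roberta-base-squad2 is trained for extractive QA, so it runs under
# the "question-answering" pipeline task, not AutoModelForCausalLM:
qa_pipe = pipeline("question-answering", model="deepset/roberta-base-squad2")
print(qa_pipe(
    question="How can I reverse diabetes?",
    # Placeholder context for illustration only:
    context="Weight loss and dietary changes can put type 2 diabetes into remission.",
))

# ConversationalRetrievalChain.from_llm expects a LangChain LLM wrapper, e.g.
# HuggingFacePipeline around a generative pipeline (flan-t5-base is only an
# illustrative substitute for the model in this commit):
from langchain.llms import HuggingFacePipeline
from langchain.chains import ConversationalRetrievalChain

gen_pipe = pipeline("text2text-generation", model="google/flan-t5-base",
                    max_new_tokens=256)
llm = HuggingFacePipeline(pipeline=gen_pipe)

# retriever, memory, and QA_CHAIN_PROMPT are assumed defined earlier in app.py.
# from_llm takes the custom prompt via combine_docs_chain_kwargs; the
# chain_type_kwargs argument used in the hunk belongs to
# RetrievalQA.from_chain_type, not to this constructor.
qa = ConversationalRetrievalChain.from_llm(
    llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT},
)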