Update app.py
app.py
CHANGED
@@ -78,17 +78,19 @@ qa_chat_prompt = ChatPromptTemplate.from_messages(
 llm_model = "deepset/roberta-base-squad2"
 from transformers import AutoTokenizer
 tokenizer = AutoTokenizer.from_pretrained(llm_model)
-
+question = "How can I reverse diabetes?"
 print("check2")
 pipe = pipeline(model = llm_model, tokenizer = tokenizer, task = "question-answering", temperature=0.2)
 #"question-answering", "conversational"

 print("check3")
-chain = pipe(
+chain = pipe(question = question,context = retriever)
+#(question = question, context = context)
 print("check3A")
 import gradio as gr
 #ragdemo = gr.load("models/HuggingFaceH4/zephyr-7b-beta")
 ragdemo = gr.Interface.from_pipeline(chain)
+print(chain)
 print("check4")
 ragdemo.launch()
 print("check5")
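
Note on this hunk: as committed, chain = pipe(question = question, context = retriever) hands the question-answering pipeline a retriever object where it expects a plain-text context string, and gr.Interface.from_pipeline(chain) then receives the pipeline's output dict rather than the pipeline object itself, so building the interface will likely fail. The temperature argument also has no effect on an extractive QA pipeline, which selects answer spans rather than sampling. Below is a minimal sketch of the intended wiring, assuming retriever is the LangChain retriever built earlier in app.py (visible in the hunk header's qa_chat_prompt context); the get_relevant_documents call and the join step are assumptions, not part of the commit.

from transformers import AutoTokenizer, pipeline
import gradio as gr

llm_model = "deepset/roberta-base-squad2"
tokenizer = AutoTokenizer.from_pretrained(llm_model)
pipe = pipeline(task="question-answering", model=llm_model, tokenizer=tokenizer)

question = "How can I reverse diabetes?"

# The QA pipeline needs a context string, so flatten the retrieved
# documents to text first (assumed LangChain retriever from earlier in app.py).
docs = retriever.get_relevant_documents(question)
context = " ".join(doc.page_content for doc in docs)

# Returns a dict with "answer", "score", "start", "end".
answer = pipe(question=question, context=context)
print(answer)

# from_pipeline wraps the pipeline object itself, not one of its outputs.
ragdemo = gr.Interface.from_pipeline(pipe)
ragdemo.launch()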