Update app.py
app.py CHANGED

```diff
@@ -62,6 +62,7 @@ from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
 from langchain_core.messages import SystemMessage
 from langchain_core.prompts import HumanMessagePromptTemplate
 from langchain_core.prompts import ChatPromptTemplate
+print("check1")
 
 qa_chat_prompt = ChatPromptTemplate.from_messages(
     [
@@ -75,12 +76,15 @@ qa_chat_prompt = ChatPromptTemplate.from_messages(
 )
 
 llm_model = "HuggingFaceH4/zephyr-7b-beta"
+print("check2")
 pipe = pipeline(task="text-generation",model = llm_model, retriever = retriever,chat = qa_chat_prompt)
-
+print("check3")
 
 #chain = qa_chat_prompt | pipe
 
 import gradio as gr
 #ragdemo = gr.load("models/HuggingFaceH4/zephyr-7b-beta")
 ragdemo = gr.Interface.from_pipeline(pipe)
-
+print("check4")
+ragdemo.launch(debug=True)
+print("check5")
```