Update app.py
app.py CHANGED
@@ -1,10 +1,8 @@
 import argparse
 import os
-
 import gradio as gr
 from loguru import logger
 from similarities import BertSimilarity
-
 from chatpdf import ChatPDF
 
 pwd_path = os.path.abspath(os.path.dirname(__file__))
@@ -25,9 +23,9 @@ if __name__ == '__main__':
     parser.add_argument("--server_port", type=int, default=8082)
     parser.add_argument("--share", action='store_true', help="share model")
     args = parser.parse_args()
-
     logger.info(args)
-
+
+    # Inicializar el modelo
     sim_model = BertSimilarity(model_name_or_path=args.sim_model_name, device=args.device)
     model = ChatPDF(
         similarity_model=sim_model,
@@ -43,21 +41,17 @@ if __name__ == '__main__':
     )
     logger.info(f"chatpdf model: {model}")
 
-    def predict_stream(message, history):
+    # Función para generar respuesta (sin yield)
+    def predict(message, history):
         history_format = []
         for human, assistant in history:
             history_format.append([human, assistant])
         model.history = history_format
-        for chunk in model.predict_stream(message):
-            yield chunk
-
-    def predict(message, history):
-        logger.debug(message)
         response, reference_results = model.predict(message)
         r = response + "\n\n" + '\n'.join(reference_results)
-
-        return r
+        return r, history_format
 
+    # Crear el chatbot
     chatbot_stream = gr.Chatbot(
         height=600,
         avatar_images=(
@@ -66,24 +60,28 @@ if __name__ == '__main__':
         ),
         bubble_full_width=False
     )
-
-    title = "ChatPDF Zonia"
+
+    title = " ChatPDF Zonia "
     css = """.toast-wrap { display: none !important } """
     examples = ['Puede hablarme del PNL?', 'Introducción a la PNL']
 
-    #
-
-
-
+    # Crear la interfaz sin utilizar la función que usa yield
+    chat_interface = gr.Interface(
+        fn=predict,
+        inputs=[gr.Textbox(lines=4, placeholder="Ask me question", scale=7), gr.State()],
+        outputs=[chatbot_stream, gr.State()],
         title=title,
-        chatbot=chatbot_stream,
         css=css,
         examples=examples,
         theme='soft',
     ).queue()  # Asegúrate de habilitar la cola aquí
 
     with gr.Blocks() as demo:
-
+        chat_interface.render()
 
-    #
-    demo.queue().launch(
+    # Lanzar la aplicación con `.queue()`
+    demo.queue().launch(
+        server_name=args.server_name,
+        server_port=args.server_port,
+        share=args.share
+    )
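
The commit drops the generator-based handler and instead threads the conversation history through a gr.State input/output pair on a plain gr.Interface. As a side note on that wiring (not part of the commit itself): a gr.State input arrives as its current value, which is None on the very first call unless a default is supplied, and whatever the function returns in the matching gr.State output slot is stored and handed back on the next call. Below is a minimal, self-contained sketch of that round trip; the echo_with_memory function, component labels, and title are illustrative only and do not come from the ChatPDF code.

import gradio as gr

def echo_with_memory(message, history):
    # gr.State() starts out as None, so guard the first call.
    history = history or []
    history.append(message)
    reply = f"You have sent {len(history)} message(s). Last: {message}"
    # The second return value is written back into the gr.State output
    # and passed to this function again on the next call.
    return reply, history

demo = gr.Interface(
    fn=echo_with_memory,
    inputs=[gr.Textbox(lines=2, placeholder="Say something"), gr.State()],
    outputs=[gr.Textbox(label="reply"), gr.State()],
    title="gr.State round-trip sketch",
)

if __name__ == "__main__":
    demo.queue().launch()

Note that because gr.State() defaults to None, the history argument may be None on the first message; the history or [] guard above is one way to handle that.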