Spaces:
DSv7
- app.py +30 -39
- requirements.txt +1 -1
app.py
CHANGED
@@ -14,57 +14,52 @@ tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Llama
 print("Cargando modelo (puede tardar varios minutos)...")
 model = AutoModelForCausalLM.from_pretrained(
     "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
-    device_map="auto",
-    torch_dtype=torch.float16
+    device_map="auto",
+    torch_dtype=torch.float16  # if on GPU; on CPU => float32
 )
 model.eval()

 def respond(
-    message,
+    message: str,
     history: list[tuple[str, str]],
     system_message: str,
     max_tokens: int,
     temperature: float,
     top_p: float,
 ):
-    """
-    prompt = f"[SYSTEM] {system_message}\n"
+    # Only add the system_message if there is no history yet:
+    prompt = ""
+    if not history:
+        prompt += f"[SYSTEM] {system_message}\n"
+
+    # Append the conversation history
     for (usr, bot) in history:
-        if bot:
-            prompt += f"[ASSISTANT] {bot}\n"
-    prompt += f"[USER] {message}\n[ASSISTANT]"
+        prompt += f"[USER] {usr}\n"
+        prompt += f"[ASSISTANT] {bot}\n"

-    streamer = TextIteratorStreamer(
-        tokenizer,
-        skip_special_tokens=True
-    )
+    # Append the new turn
+    prompt += f"[USER] {message}\n[ASSISTANT]"
+
+    streamer = TextIteratorStreamer(tokenizer=tokenizer, skip_special_tokens=True)
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

-    generation_kwargs = dict(
-        **inputs,
-        streamer=streamer,
-        max_new_tokens=max_tokens,
-        temperature=temperature,
-        top_p=top_p,
-        do_sample=True,
-    )
+    generation_kwargs = {
+        "inputs": inputs["input_ids"],
+        "attention_mask": inputs["attention_mask"],
+        "streamer": streamer,
+        "max_new_tokens": max_tokens,
+        "temperature": temperature,
+        "top_p": top_p,
+        "do_sample": True,
+    }
+
+    # Launch generation in a Python thread
     generation_thread = threading.Thread(
         target=model.generate,
         kwargs=generation_kwargs
     )
     generation_thread.start()

-    # Read tokens as they are generated and send them to Gradio (yield)
     output_text = ""
     for new_token in streamer:
         output_text += new_token
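The hunk ends inside the token loop, so the diff does not show how respond() hands text back to Gradio. In this pattern respond() is a generator: model.generate() runs in the background thread, the main thread iterates the streamer, and each partial string is yielded so gr.ChatInterface can update the reply live. A minimal, self-contained sketch of that pattern (generic names; skip_prompt=True is an addition that the committed code does not set):

import threading
from transformers import TextIteratorStreamer

def stream_reply(mdl, tok, prompt: str, max_new_tokens: int = 128):
    # skip_prompt=True drops the echoed prompt from the stream (assumption, not in the commit)
    streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)
    inputs = tok(prompt, return_tensors="pt").to(mdl.device)
    threading.Thread(
        target=mdl.generate,
        kwargs=dict(
            inputs=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            streamer=streamer,
            max_new_tokens=max_new_tokens,
            do_sample=True,
        ),
    ).start()
    text = ""
    for chunk in streamer:   # blocks until the generation thread produces decoded text
        text += chunk
        yield text           # the UI re-renders the assistant message with each partial string

generate() blocks until the whole answer is done, which is why it runs in a thread while the streamer is consumed on the main thread; without the thread the loop would only start after generation finished.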
@@ -76,19 +71,15 @@ demo = gr.ChatInterface(
         gr.Textbox(
             label="Mensaje del sistema",
             value=(
-                "Eres Juan, un asistente virtual en español
-                "
-                "pueden tener dificultades cognitivas o escribir frases confusas. "
-                "Provee explicaciones simples, procura entender la intención del usuario "
-                "aunque la frase esté mal escrita, y mantén siempre un tono amable."
+                "Eres Juan, un asistente virtual en español, muy paciente "
+                "y empático con usuarios que puedan tener dificultades cognitivas."
             ),
         ),
-        gr.Slider(1,
-        gr.Slider(0.1,
-        gr.Slider(0.1, 1.0, 0.
+        gr.Slider(1, 1024, 128, 1, label="Máxima cantidad de tokens"),  # lowered to 128
+        gr.Slider(0.1, 2.0, 0.7, 0.1, label="Temperatura"),  # narrowed the range
+        gr.Slider(0.1, 1.0, 0.9, 0.05, label="Top-p (nucleus)"),
     ],
 )

 if __name__ == "__main__":
-    print("Iniciando servidor Gradio...")
     demo.launch()
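This hunk only shows the components inside the input list; the enclosing call, visible in the hunk header as demo = gr.ChatInterface(, is what routes them into respond(). A hedged sketch of that wiring, assuming additional_inputs is the keyword the list belongs to (only the opening call and the closing "]," appear in the diff) and with the long system message elided:

import gradio as gr

demo = gr.ChatInterface(
    respond,  # the streaming generator patched in the first hunk
    additional_inputs=[
        gr.Textbox(label="Mensaje del sistema", value="Eres Juan, ..."),
        gr.Slider(minimum=1, maximum=1024, value=128, step=1, label="Máxima cantidad de tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperatura"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-p (nucleus)"),
    ],
)

Gradio passes the current values of these components to respond() after (message, history), in list order, which matches the system_message, max_tokens, temperature, top_p parameters from the first hunk.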
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
 torch>=2.0
-transformers
+transformers>=4.28
 accelerate
 gradio==5.0.1
 requests
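The new floor on transformers presumably exists because app.py now imports TextIteratorStreamer, and the streaming classes were introduced around the 4.28 release; an unpinned install on an older cached image would fail at import time. A hedged fail-fast check one could place near the top of app.py (not part of this commit; packaging is already a transformers dependency):

import transformers
from packaging import version

# Assumption: TextIteratorStreamer needs transformers >= 4.28, hence the pin in requirements.txt.
if version.parse(transformers.__version__) < version.parse("4.28"):
    raise RuntimeError("This Space expects transformers>=4.28 (TextIteratorStreamer).")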