Update handler.py
handler.py  CHANGED  (+21 -23)
@@ -15,23 +15,21 @@ class EndpointHandler:
         # Prompt personalizado para guiar al modelo
         input_text = (
             f"""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                ]
-            }}
+            Genera un JSON válido con estas especificaciones:
+            - Cada objeto tiene una clave 'id' y un valor 'value'.
+            - Opciones para 'id': firstName, lastName, jobTitle, address, email, phone, notes, roleFunction.
+            - Si 'id' es address, email o phone, debe incluir subclaves: MOBILE, WORK, PERSONAL, MAIN, OTHER.
+            - 'roleFunction' debe ser una de estas: BUYER, SELLER, SUPPLIER, PARTNER, COLLABORATOR, PROVIDER, CUSTOMER.
+            Ejemplo:
+            Entrada: "Contacté a Juan Pérez, Gerente de Finanzas."
+            Salida esperada:
+            {{
+                "values": [
+                    {{"id": "firstName", "value": "Juan"}},
+                    {{"id": "lastName", "value": "Pérez"}},
+                    {{"id": "jobTitle", "value": "Gerente de Finanzas"}}
+                ]
+            }}
             Procesa este texto: "{data['inputs']}"
             """)
         # Imprimir el texto generado para el prompt
@@ -41,13 +39,13 @@ class EndpointHandler:
 
     def inference(self, tokens):
         generate_kwargs = {
-            "max_length":
-            "num_beams":
-            "do_sample":
+            "max_length": 1500,
+            "num_beams": 7,
+            "do_sample": False,
             "temperature": 0.1,
-            "top_k":
-            "top_p": 0.
-            "repetition_penalty": 2.
+            "top_k": 10,
+            "top_p": 0.7,
+            "repetition_penalty": 2.8
         }
         with torch.no_grad():
             outputs = self.model.generate(**tokens, **generate_kwargs)
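A note on the new decoding settings: with "do_sample": False and num_beams set to 7, generate runs beam search, so the sampling-only flags (temperature, top_k, top_p) have no effect on the output; recent versions of transformers warn that they are ignored in this mode. Below is a minimal sketch of the same kwargs applied to a generic seq2seq checkpoint — the checkpoint name and the standalone setup are placeholders, not code from this repository.

# Sketch, not from the commit: the new generate_kwargs applied to a generic
# seq2seq model. The checkpoint name is a placeholder for the repository's own model.
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

checkpoint = "google/flan-t5-base"  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

tokens = tokenizer('Procesa este texto: "Contacté a Juan Pérez, Gerente de Finanzas."',
                   return_tensors="pt")
generate_kwargs = {
    "max_length": 1500,         # upper bound on the generated sequence length
    "num_beams": 7,             # beam search width
    "do_sample": False,         # deterministic decoding; temperature/top_k/top_p are ignored
    "repetition_penalty": 2.8,  # discourages the beams from repeating spans
}
with torch.no_grad():
    outputs = model.generate(**tokens, **generate_kwargs)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))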
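For the prompt change in the first hunk: the model is asked to return a JSON object with a "values" list of {id, value} pairs, sub-keyed MOBILE/WORK/PERSONAL/MAIN/OTHER objects for address, email and phone, and a fixed set of roleFunction values. A sketch of how that output could be checked downstream follows; the field names come from the prompt above, but the validate_values helper is hypothetical and not part of the commit.

# Hypothetical post-processing sketch: parse the generated text and keep only
# the entries that match the schema described in the prompt.
import json

ALLOWED_IDS = {"firstName", "lastName", "jobTitle", "address", "email",
               "phone", "notes", "roleFunction"}
NESTED_IDS = {"address", "email", "phone"}   # values given as sub-keyed objects
ALLOWED_SUBKEYS = {"MOBILE", "WORK", "PERSONAL", "MAIN", "OTHER"}
ALLOWED_ROLES = {"BUYER", "SELLER", "SUPPLIER", "PARTNER", "COLLABORATOR",
                 "PROVIDER", "CUSTOMER"}

def validate_values(generated_text):
    """Return the entries of the 'values' list that follow the prompt's spec."""
    data = json.loads(generated_text)  # raises ValueError if the model emitted invalid JSON
    valid = []
    for item in data.get("values", []):
        key, value = item.get("id"), item.get("value")
        if key not in ALLOWED_IDS:
            continue
        if key in NESTED_IDS and isinstance(value, dict) and not set(value) <= ALLOWED_SUBKEYS:
            continue
        if key == "roleFunction" and value not in ALLOWED_ROLES:
            continue
        valid.append(item)
    return valid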
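Finally, a rough sketch of how the pieces touched by this commit typically sit inside a Hugging Face Inference Endpoints custom handler. Only data['inputs'], inference() and the generate_kwargs appear in the diff; the checkpoint loading, tokenization and decoding steps below are assumptions, as is the choice of a seq2seq model class.

# Sketch of the surrounding handler, with assumptions marked. Inference Endpoints
# instantiate EndpointHandler(path) once and call it with {"inputs": ...} payloads.
from typing import Any, Dict, List

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


class EndpointHandler:
    def __init__(self, path: str = ""):
        # 'path' is the model repository directory provided by the endpoint
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(path)  # assumption: seq2seq checkpoint
        self.model.eval()

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
        # The commit builds the longer JSON-spec prompt shown in the diff; abbreviated here.
        input_text = f"Procesa este texto: \"{data['inputs']}\""
        tokens = self.tokenizer(input_text, return_tensors="pt")
        outputs = self.inference(tokens)
        generated = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return [{"generated_text": generated}]

    def inference(self, tokens):
        generate_kwargs = {"max_length": 1500, "num_beams": 7,
                           "do_sample": False, "repetition_penalty": 2.8}
        with torch.no_grad():
            return self.model.generate(**tokens, **generate_kwargs)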