ZeusCabanas committed on
Commit 939a60b
1 Parent(s): 1d90bff

fixes 5

Files changed (1): app.py +18 -19
app.py CHANGED
@@ -1,50 +1,49 @@
import gradio as gr
from huggingface_hub import InferenceClient

- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
+ # Initialize the client with the desired model
client = InferenceClient("AuriLab/gpt-bi-instruct-cesar")

+ # Define the stop sequences that will end generation
+ stop_sequences = [".", "?", ".\n", "\n\n"]

- def respond(
-     message,
-     history: list[tuple[str, str]],
- ):
+ def respond(message, history: list[tuple[str, str]]):
+     # Build the message history for the conversation
    messages = [{"role": "system", "content": "Gpt-Bi zara, AuriLabsek sortutako assitente digitala."}]
-
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
-
    messages.append({"role": "user", "content": message})

    response = ""
-
-     for message in client.chat_completion(
+     # Start generation in streaming mode
+     for token_msg in client.chat_completion(
        messages,
-         max_tokens=60,
        stream=True,
        temperature=0.7,
        presence_penalty=1.5,
        top_p=0.85,
    ):
-         token = message.choices[0].delta.content
-
+         token = token_msg.choices[0].delta.content
        response += token
-         yield response
+
+         # Check whether the response ends with any of the stop sequences
+         for stop_seq in stop_sequences:
+             if response.endswith(stop_seq):
+                 # Optionally, strip the trailing stop sequence
+                 response = response[:-len(stop_seq)]
+                 yield response
+                 return  # Stop generation
+
+         yield response

- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
+ # Configure the Gradio chat interface
demo = gr.ChatInterface(
    respond,
    title="Demo GPT-BI Instruct",
)

-
if __name__ == "__main__":
    demo.launch()
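
Note on the streaming loop added above: with `client.chat_completion(..., stream=True)`, `delta.content` is an optional field and may be None on some chunks (for example a role-only delta), in which case the unguarded `response += token` would raise a TypeError. Below is a minimal, self-contained sketch of the same stop-sequence truncation pattern with that guard added; `fake_stream` is a hypothetical stand-in for the client, used here only so the snippet runs without network access.

# Sketch of respond()'s stop-sequence truncation, assuming delta.content may be None.
stop_sequences = [".", "?", ".\n", "\n\n"]

def fake_stream():
    # Hypothetical stand-in for client.chat_completion(..., stream=True):
    # yields token strings plus a None chunk, as a role-only delta might.
    yield from [None, "Kaixo", ",", " zer", " moduz", "?"]

def stream_with_stops():
    response = ""
    for token in fake_stream():
        if not token:  # guard against None deltas
            continue
        response += token
        for stop_seq in stop_sequences:
            if response.endswith(stop_seq):
                # Trim the trailing stop sequence and end the stream early.
                yield response[:-len(stop_seq)]
                return
        yield response

if __name__ == "__main__":
    for partial in stream_with_stops():
        print(partial)  # prints growing prefixes, ending at "Kaixo, zer moduz"

One caveat of the endswith check: tokens arrive incrementally, so the bare "." fires as soon as a period arrives; the multi-character sequences ".\n" and "\n\n" can only match if the punctuation and newline land in the same token.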