ZeusCabanas commited on
Commit
7fb6a75
·
1 Parent(s): 18fbc58
Files changed (1) hide show
  1. app.py +39 -45
app.py CHANGED
@@ -1,55 +1,49 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- from typing import List, Tuple, Dict
4
 
 
 
 
5
  client = InferenceClient("AuriLab/gpt-bi-instruct-cesar")
6
 
7
def format_messages(history: List[Tuple[str, str]], system_message: str, user_message: str) -> List[Dict[str, str]]:
    """Build a chat-completion message list from a system prompt, prior turns, and a new user message.

    Each ``history`` entry is a (user, assistant) pair; ``None`` halves are skipped.
    All contents are coerced to ``str`` so non-string payloads cannot break the API call.
    """
    roles = ("user", "assistant")
    messages = [{"role": "system", "content": system_message}]
    for turn in history:
        for idx, part in enumerate(turn):
            if part is not None:
                # Even positions within a turn are the user side, odd the assistant side.
                messages.append({"role": roles[idx % 2], "content": str(part)})
    messages.append({"role": "user", "content": str(user_message)})
    return messages
17
-
18
async def respond(message: str, history: List[Tuple[str, str]]) -> str:
    """Stream a chat completion for *message*, yielding the accumulated reply text.

    NOTE(review): despite the ``-> str`` annotation this is an async *generator*
    (it yields); ``AsyncGenerator[str, None]`` would be more accurate — confirm.
    """
    # Fixed generation settings for the demo UI.
    system_message = "You are a helpful AI assistant."
    max_tokens = 1000
    temperature = 0.7
    top_p = 0.85

    messages = format_messages(history, system_message, message)
    response = ""

    try:
        # NOTE(review): ``async for`` assumes an async streaming client; the
        # module-level ``client`` is a synchronous InferenceClient — verify an
        # AsyncInferenceClient is intended here.
        async for msg in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # Some stream chunks carry no text delta (e.g. role-only chunks).
            if hasattr(msg.choices[0].delta, 'content'):
                token = msg.choices[0].delta.content
                if token is not None:
                    response += token
                    # Yield the running total so the UI can render progressively.
                    yield response

        if not response:  # Handle empty response case
            yield "No response generated."

    except Exception as e:
        # Surface the failure to the UI rather than crashing the app.
        yield f"Error: {str(e)}"
46
-
47
# Update the ChatInterface to use async function
# Gradio chat UI wired to the streaming `respond` function; example prompts are in Basque.
demo = gr.ChatInterface(
    fn=respond,
    title="Demo GPT-BI instruct",
    examples=["nola duzu izena?", "Nola egiten duzu?"]
)


if __name__ == "__main__":
    # Launch the app locally; share=False disables the public Gradio link.
    demo.launch(share=False)
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
 
4
+ """
5
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
+ """
7
  client = InferenceClient("AuriLab/gpt-bi-instruct-cesar")
8
 
9
+
10
def respond(
    message,
    history: list[tuple[str, str]],
):
    """Stream a chat reply for *message* given prior (user, assistant) turns.

    Yields the accumulated response text after each streamed token so the
    Gradio UI can render the reply progressively.
    """
    messages = [{"role": "system", "content": "Gpt-Bi zara, AuriLabsek sortutako assitente digitala."}]

    # Replay prior turns, skipping empty halves (e.g. a turn still awaiting a reply).
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""
    # Loop variable renamed from `message` so it no longer shadows the parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=200,
        stream=True,
        temperature=0.7,
        # Fix: was top_p=50 — top_p is a nucleus-sampling probability and must
        # be in (0, 1]; values >= 1 are rejected by the Inference API.
        top_p=0.95,
    ):
        token = chunk.choices[0].delta.content
        # Streamed deltas may carry no content (role-only or final chunks);
        # `response += None` would raise TypeError, so skip those.
        if token:
            response += token
            yield response
37
+
38
+
39
+ """
40
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
41
+ """
 
 
 
 
 
42
  demo = gr.ChatInterface(
43
+ respond,
44
+ title="GPT-BI Instruct",
 
45
  )
46
 
47
+
48
if __name__ == "__main__":
    # Launch the Gradio app when run as a script.
    demo.launch()