BICORP committed · Commit 9db2371 · verified · 1 Parent(s): 1191c5f

Update app.py

Files changed (1)
  1. app.py +15 -25
app.py CHANGED
@@ -1,10 +1,6 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-
 # Default client with the first model
 client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
 
@@ -14,12 +10,12 @@ def switch_client(model_name: str):
 
 def respond(
     message,
-    history: list[tuple[str, str]],
+    history: list[dict],
     system_message,
     max_tokens,
     temperature,
     top_p,
-    model_name  # Add this parameter for model selection
+    model_name
 ):
     # Switch client based on model selection
     global client
@@ -28,26 +24,22 @@ def respond(
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
+        messages.append({"role": val['role'], "content": val['content']})
 
     messages.append({"role": "user", "content": message})
 
-    response = ""
-
-    for message in client.chat_completion(
+    # Get the response from the model
+    response = client.chat_completion(
         messages,
         max_tokens=max_tokens,
-        stream=True,
         temperature=temperature,
         top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
+    )
 
-        response += token
-        yield response
+    # Extract the content from the response
+    final_response = response.choices[0].message['content']
+
+    return final_response
 
 # Model names and their pseudonyms
 model_choices = [
@@ -60,7 +52,7 @@ pseudonyms = [model[1] for model in model_choices]
 # Function to handle model selection and pseudonyms
 def respond_with_pseudonym(
     message,
-    history: list[tuple[str, str]],
+    history: list[dict],
     system_message,
     max_tokens,
     temperature,
@@ -71,16 +63,14 @@ def respond_with_pseudonym(
     model_name = next(model[0] for model in model_choices if model[1] == selected_pseudonym)
 
     # Call the existing respond function
-    response = list(respond(message, history, system_message, max_tokens, temperature, top_p, model_name))
+    response = respond(message, history, system_message, max_tokens, temperature, top_p, model_name)
 
     # Add pseudonym at the end of the response
-    response[-1] += f"\n\n[Response generated by: {selected_pseudonym}]"
+    response += f"\n\n[Response generated by: {selected_pseudonym}]"
 
     return response
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
+# Gradio Chat Interface
 demo = gr.ChatInterface(
     respond_with_pseudonym,
     additional_inputs=[
@@ -99,4 +89,4 @@ demo = gr.ChatInterface(
 )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
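
For context, this update replaces the token-by-token streaming generator with a single blocking `chat_completion` call, and moves the chat history from `(user, assistant)` tuples to OpenAI-style role/content dicts. A minimal sketch of the new call pattern, assuming access to the Hugging Face Inference API; the system prompt, history turns, and sampling values below are placeholder examples, not part of the commit:

    from huggingface_hub import InferenceClient

    client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

    # History in the messages format the updated respond() expects
    history = [
        {"role": "user", "content": "Hi!"},
        {"role": "assistant", "content": "Hello! How can I help?"},
    ]

    # Build the prompt: system message, prior turns, then the new user message
    messages = [{"role": "system", "content": "You are a friendly chatbot."}]
    for val in history:
        messages.append({"role": val['role'], "content": val['content']})
    messages.append({"role": "user", "content": "What does this Space do?"})

    # One blocking call returns the whole completion at once (no stream=True)
    response = client.chat_completion(
        messages,
        max_tokens=128,
        temperature=0.7,
        top_p=0.95,
    )
    print(response.choices[0].message['content'])

The trade-off of dropping `stream=True` is that the ChatInterface no longer renders partial text while the model generates; the reply appears only once the full completion returns.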