BICORP committed on
Commit
cda6239
·
verified ·
1 Parent(s): fbbfdac

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -2
app.py CHANGED
@@ -4,8 +4,13 @@ from huggingface_hub import InferenceClient
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
 
 
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
 
9
 
10
  def respond(
11
  message,
@@ -14,7 +19,12 @@ def respond(
14
  max_tokens,
15
  temperature,
16
  top_p,
 
17
  ):
 
 
 
 
18
  messages = [{"role": "system", "content": system_message}]
19
 
20
  for val in history:
@@ -39,12 +49,45 @@ def respond(
39
  response += token
40
  yield response
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
  """
44
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
  """
46
  demo = gr.ChatInterface(
47
- respond,
48
  additional_inputs=[
49
  gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
@@ -56,9 +99,9 @@ demo = gr.ChatInterface(
56
  step=0.05,
57
  label="Top-p (nucleus sampling)",
58
  ),
 
59
  ],
60
  )
61
 
62
-
63
  if __name__ == "__main__":
64
  demo.launch()
 
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
+
8
+ # Default client with the first model
9
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
10
 
# Build a client for the repo id the user picked in the model dropdown.
def switch_client(model_name: str):
    """Return a fresh InferenceClient bound to *model_name*."""
    selected_client = InferenceClient(model_name)
    return selected_client
 
15
  def respond(
16
  message,
 
19
  max_tokens,
20
  temperature,
21
  top_p,
22
+ model_name # Add this parameter for model selection
23
  ):
24
+ # Switch client based on model selection
25
+ global client
26
+ client = switch_client(model_name)
27
+
28
  messages = [{"role": "system", "content": system_message}]
29
 
30
  for val in history:
 
49
  response += token
50
  yield response
51
 
52
+ # Adding the model name at the end of the response
53
+ yield f"\n\n[Response generated by model: {model_name}]"
54
+
# (repo_id, display_name) pairs for the models offered in the UI.
model_choices = [
    ("HuggingFaceH4/zephyr-7b-beta", "Lake [Test]"),
    ("google/mt5-base", "Lake 1 Base"),
    ("google/mt5-large", "Lake 1 Advanced")
]

# Repo ids only — these are the values the dropdown shows and returns.
model_names = [repo_id for repo_id, _ in model_choices]
+
# Chat handler passed to gr.ChatInterface: streams `respond`'s output and
# appends the selected model's display name to the final message.
def respond_with_model_name(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    model_name
):
    """Stream the chat response, then append the model's display name.

    Takes the same arguments as `respond`, plus `model_name` — the repo id
    chosen in the model dropdown. Yields partial response strings so the
    Gradio UI keeps streaming; the last yield carries the attribution line.
    """
    # Map the repo id to its friendly display name; fall back to the raw
    # repo id instead of raising KeyError for an unknown selection.
    model_display_name = dict(model_choices).get(model_name, model_name)

    # Re-yield each partial response from `respond` instead of
    # materializing the generator with list(...): that blocked streaming,
    # returned a list (not a string) to the UI, and raised IndexError on
    # response[-1] whenever `respond` yielded nothing.
    last_chunk = ""
    for last_chunk in respond(
        message, history, system_message, max_tokens, temperature, top_p, model_name
    ):
        yield last_chunk

    # Final yield: whatever was streamed last, plus the attribution footer.
    yield f"{last_chunk}\n\n[Response generated by: {model_display_name}]"
85
 
86
  """
87
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
88
  """
89
  demo = gr.ChatInterface(
90
+ respond_with_model_name,
91
  additional_inputs=[
92
  gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
93
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
 
99
  step=0.05,
100
  label="Top-p (nucleus sampling)",
101
  ),
102
+ gr.Dropdown(model_names, label="Select Model", value=model_names[0]) # Model selection dropdown
103
  ],
104
  )
105
 
 
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()