BICORP committed
Commit 1c91ea3 · verified · 1 parent: cda6239

Update app.py

Files changed (1):
app.py +12 -15
app.py CHANGED
@@ -49,37 +49,34 @@ def respond(
         response += token
         yield response
 
-    # Adding the model name at the end of the response
-    yield f"\n\n[Response generated by model: {model_name}]"
-
-# Model names and their custom display names
+# Model names and their pseudonyms
 model_choices = [
     ("HuggingFaceH4/zephyr-7b-beta", "Lake [Test]"),
     ("google/mt5-base", "Lake 1 Base"),
     ("google/mt5-large", "Lake 1 Advanced")
 ]
 
-# Convert model choices into just the model names for the dropdown
-model_names = [model[0] for model in model_choices]
+# Convert pseudonyms to model names for the dropdown
+pseudonyms = [model[1] for model in model_choices]
 
-# Function to handle model selection and display name for the model
-def respond_with_model_name(
+# Function to handle model selection and pseudonyms
+def respond_with_pseudonym(
     message,
     history: list[tuple[str, str]],
     system_message,
     max_tokens,
     temperature,
     top_p,
-    model_name
+    selected_pseudonym
 ):
-    # Find the display name of the selected model
-    model_display_name = dict(model_choices)[model_name]
+    # Find the actual model name from the pseudonym
+    model_name = next(model[0] for model in model_choices if model[1] == selected_pseudonym)
 
     # Call the existing respond function
     response = list(respond(message, history, system_message, max_tokens, temperature, top_p, model_name))
 
-    # Add model name at the end of the response
-    response[-1] += f"\n\n[Response generated by: {model_display_name}]"
+    # Add pseudonym at the end of the response
+    response[-1] += f"\n\n[Response generated by: {selected_pseudonym}]"
 
     return response
 
@@ -87,7 +84,7 @@ def respond_with_model_name(
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
 demo = gr.ChatInterface(
-    respond_with_model_name,
+    respond_with_pseudonym,
     additional_inputs=[
         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
@@ -99,7 +96,7 @@ demo = gr.ChatInterface(
             step=0.05,
             label="Top-p (nucleus sampling)",
         ),
-        gr.Dropdown(model_names, label="Select Model", value=model_names[0])  # Model selection dropdown
+        gr.Dropdown(pseudonyms, label="Select Model", value=pseudonyms[0])  # Pseudonym selection dropdown
     ],
 )
 
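
Side note on the change: respond streams cumulative strings (response += token; yield response), so response[-1] in the wrapper is the complete reply before the pseudonym footer is appended. The lookup next(model[0] for model in model_choices if model[1] == selected_pseudonym) raises StopIteration if the pseudonym is unknown; since the dropdown only offers known pseudonyms this should not happen in practice, but a reverse dictionary makes the mapping explicit and fails with a clearer KeyError. A minimal sketch under that assumption (PSEUDONYM_TO_MODEL and resolve_model are illustrative names, not part of this commit, and pseudonyms are assumed unique):

# Hypothetical helper, not in the commit: reverse map from display pseudonym to real model id
PSEUDONYM_TO_MODEL = {pseudonym: model_id for model_id, pseudonym in model_choices}

def resolve_model(selected_pseudonym: str) -> str:
    # An unknown pseudonym raises KeyError here instead of StopIteration
    return PSEUDONYM_TO_MODEL[selected_pseudonym]

# Example: resolve_model("Lake 1 Base") returns "google/mt5-base"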