Commit f37a5b8 · committed by admin · 1 parent: 51b4629

add 3 params

Files changed (1): app.py (+62, -6)
app.py CHANGED
@@ -2,7 +2,17 @@ import gradio as gr
 from openai import OpenAI


-def predict(message, history, system_prompt, model, api_url, api_key):
+def predict(
+    message,
+    history,
+    system_prompt,
+    model,
+    api_url,
+    api_key,
+    max_tk,
+    temp,
+    top_p,
+):
     # Format history with a given chat template
     msgs = [{"role": "system", "content": system_prompt}]
     for user, assistant in history:
@@ -15,7 +25,9 @@ def predict(message, history, system_prompt, model, api_url, api_key):
     response = client.chat.completions.create(
         model=model,
         messages=msgs,
-        temperature=0.3,
+        max_tokens=max_tk,
+        temperature=temp,
+        top_p=top_p,
         stream=False,
     ).to_dict()["choices"][0]["message"]["content"]

@@ -25,7 +37,16 @@ def predict(message, history, system_prompt, model, api_url, api_key):
     return response


-def deepseek(message, history, model, api_key, system_prompt):
+def deepseek(
+    message,
+    history,
+    model,
+    api_key,
+    system_prompt,
+    max_tk,
+    temp,
+    top_p,
+):
     response = predict(
         message,
         history,
@@ -33,6 +54,9 @@ def deepseek(message, history, model, api_key, system_prompt):
         model,
         "https://api.deepseek.com",
         api_key,
+        max_tk,
+        temp,
+        top_p,
     )
     outputs = []
     for new_token in response:
@@ -40,7 +64,16 @@ def deepseek(message, history, model, api_key, system_prompt):
     yield "".join(outputs)


-def kimi(message, history, model, api_key, system_prompt):
+def kimi(
+    message,
+    history,
+    model,
+    api_key,
+    system_prompt,
+    max_tk,
+    temp,
+    top_p,
+):
     response = predict(
         message,
         history,
@@ -48,6 +81,9 @@ def kimi(message, history, model, api_key, system_prompt):
         model,
         "https://api.moonshot.cn/v1",
         api_key,
+        max_tk,
+        temp,
+        top_p,
     )
     outputs = []
     for new_token in response:
@@ -74,10 +110,20 @@ if __name__ == "__main__":
                 "You are a useful assistant. first recognize user request and then reply carfuly and thinking",
                 label="System prompt",
             )
+            ds_maxtk = gr.Slider(0, 32000, 10000, label="Max new tokens")
+            ds_temp = gr.Slider(0, 1, 0.3, label="Temperature")
+            ds_topp = gr.Slider(0, 1, 0.95, label="Top P sampling")

             gr.ChatInterface(
                 deepseek,
-                additional_inputs=[ds_model, ds_key, ds_sys],
+                additional_inputs=[
+                    ds_model,
+                    ds_key,
+                    ds_sys,
+                    ds_maxtk,
+                    ds_temp,
+                    ds_topp,
+                ],
             )

         with gr.Tab("Kimi"):
@@ -96,10 +142,20 @@ if __name__ == "__main__":
                 "You are a useful assistant. first recognize user request and then reply carfuly and thinking",
                 label="System prompt",
             )
+            kimi_maxtk = gr.Slider(0, 32000, 10000, label="Max new tokens")
+            kimi_temp = gr.Slider(0, 1, 0.3, label="Temperature")
+            kimi_topp = gr.Slider(0, 1, 0.95, label="Top P sampling")

             gr.ChatInterface(
                 kimi,
-                additional_inputs=[kimi_model, kimi_key, kimi_sys],
+                additional_inputs=[
+                    kimi_model,
+                    kimi_key,
+                    kimi_sys,
+                    kimi_maxtk,
+                    kimi_temp,
+                    kimi_topp,
+                ],
             )

     demo.queue().launch()
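
For orientation, a minimal usage sketch of the updated predict() under the assumptions of this commit: gr.ChatInterface passes the additional_inputs values to the handler in list order after (message, history), so the three new sliders arrive as max_tk, temp, and top_p, which predict() forwards to the OpenAI-compatible endpoint as max_tokens, temperature, and top_p. The model name and key below are placeholders, not values taken from this repo.

    # Hypothetical direct call to the new predict() signature; all values are illustrative.
    reply = predict(
        message="Hello",
        history=[],                        # list of (user, assistant) pairs
        system_prompt="You are a useful assistant.",
        model="deepseek-chat",             # placeholder model name
        api_url="https://api.deepseek.com",
        api_key="sk-...",                  # placeholder key
        max_tk=10000,                      # forwarded as max_tokens
        temp=0.3,                          # forwarded as temperature
        top_p=0.95,                        # forwarded as top_p
    )
    print(reply)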