Fix
Files changed:
- .pre-commit-config.yaml +64 -0
- .python-version +1 -0
- .vscode/settings.json +30 -0
- app.py +32 -496
- app_cerebras.py +17 -0
- app_claude.py +19 -0
- app_fireworks.py +16 -0
- app_gemini.py +18 -0
- app_groq.py +23 -0
- app_hf.py +28 -0
- app_hyperbolic.py +23 -0
- app_mistral.py +25 -0
- app_nvidia.py +50 -0
- app_openai.py +32 -0
- app_perplexity.py +21 -0
- app_qwen.py +23 -0
- app_sambanova.py +22 -0
- app_together.py +49 -0
- app_xai.py +16 -0
- pyproject.toml +89 -0
- requirements.txt +323 -14
- utils.py +33 -0
- uv.lock +0 -0
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,64 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-executables-have-shebangs
      - id: check-json
      - id: check-merge-conflict
      - id: check-shebang-scripts-are-executable
      - id: check-toml
      - id: check-yaml
      - id: end-of-file-fixer
      - id: mixed-line-ending
        args: ["--fix=lf"]
      - id: requirements-txt-fixer
      - id: trailing-whitespace
  - repo: https://github.com/myint/docformatter
    rev: v1.7.5
    hooks:
      - id: docformatter
        args: ["--in-place"]
  - repo: https://github.com/pycqa/isort
    rev: 5.13.2
    hooks:
      - id: isort
        args: ["--profile", "black"]
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.13.0
    hooks:
      - id: mypy
        args: ["--ignore-missing-imports"]
        additional_dependencies:
          [
            "types-python-slugify",
            "types-requests",
            "types-PyYAML",
            "types-pytz",
          ]
  - repo: https://github.com/psf/black
    rev: 24.10.0
    hooks:
      - id: black
        language_version: python3.10
        args: ["--line-length", "119"]
  - repo: https://github.com/charliermarsh/ruff-pre-commit
    rev: v0.7.4
    hooks:
      - id: ruff
  - repo: https://github.com/kynan/nbstripout
    rev: 0.8.1
    hooks:
      - id: nbstripout
        args:
          [
            "--extra-keys",
            "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
          ]
  - repo: https://github.com/nbQA-dev/nbQA
    rev: 1.9.1
    hooks:
      - id: nbqa-black
      - id: nbqa-pyupgrade
        args: ["--py37-plus"]
      - id: nbqa-isort
        args: ["--float-to-top"]
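Once this config lands in the repo, contributors would typically activate the hooks with `pre-commit install` (so they run on every commit) and can apply them to the whole tree with `pre-commit run --all-files`.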
.python-version
ADDED
@@ -0,0 +1 @@
3.10
.vscode/settings.json
ADDED
@@ -0,0 +1,30 @@
{
    "editor.formatOnSave": true,
    "files.insertFinalNewline": false,
    "[python]": {
        "editor.defaultFormatter": "ms-python.black-formatter",
        "editor.formatOnType": true,
        "editor.codeActionsOnSave": {
            "source.organizeImports": "explicit"
        }
    },
    "[jupyter]": {
        "files.insertFinalNewline": false
    },
    "black-formatter.args": [
        "--line-length=119"
    ],
    "isort.args": ["--profile", "black"],
    "flake8.args": [
        "--max-line-length=119"
    ],
    "ruff.lint.args": [
        "--line-length=119"
    ],
    "notebook.output.scrolling": true,
    "notebook.formatOnCellExecution": true,
    "notebook.formatOnSave.enabled": true,
    "notebook.codeActionsOnSave": {
        "source.organizeImports": "explicit"
    }
}
app.py
CHANGED
@@ -1,516 +1,52 @@

Removed: fourteen provider-SDK imports (gemini_gradio, openai_gradio, anthropic_gradio, sambanova_gradio, xai_gradio, hyperbolic_gradio, perplexity_gradio, mistral_gradio, fireworks_gradio, cerebras_gradio, groq_gradio, together_gradio, nvidia_gradio, dashscope_gradio), an update_model helper whose visible tail was `new_interface = create_interface(new_model, src_registry, **kwargs)` followed by `new_interface.render()`, and the inline body of every provider tab, each consisting of a gr.Dropdown of model choices, a gr.Column container built with create_interface, and a .change handler wired to update_model. The model lists, default models, registries, and accept_token/multimodal flags are unchanged; they now live in the per-provider app_*.py modules added below.

The new app.py:

import gradio as gr

from app_claude import demo as demo_claude
from app_fireworks import demo as demo_fireworks
from app_gemini import demo as demo_gemini
from app_groq import demo as demo_groq
from app_hf import demo as demo_hf
from app_hyperbolic import demo as demo_hyperbolic
from app_mistral import demo as demo_mistral
from app_nvidia import demo as demo_nvidia
from app_openai import demo as demo_openai
from app_perplexity import demo as demo_perplexity
from app_qwen import demo as demo_qwen
from app_sambanova import demo as demo_sambanova
from app_together import demo as demo_together
from app_xai import demo as demo_grok

with gr.Blocks(fill_height=True) as demo:
    with gr.Tab("Meta Llama"):
        demo_sambanova.render()
        gr.Markdown(
            "**Note:** You need to use a SambaNova API key from [SambaNova Cloud](https://cloud.sambanova.ai/)."
        )
    with gr.Tab("Gemini"):
        demo_gemini.render()
    with gr.Tab("ChatGPT"):
        demo_openai.render()
    with gr.Tab("Claude"):
        demo_claude.render()
    with gr.Tab("Grok"):
        demo_grok.render()
    with gr.Tab("Hugging Face"):
        demo_hf.render()
    with gr.Tab("Groq"):
        demo_groq.render()
    with gr.Tab("Hyperbolic"):
        demo_hyperbolic.render()
    with gr.Tab("Qwen"):
        demo_qwen.render()
    with gr.Tab("Perplexity"):
        demo_perplexity.render()
    with gr.Tab("Mistral"):
        demo_mistral.render()
    with gr.Tab("Fireworks"):
        demo_fireworks.render()
    with gr.Tab("Together"):
        demo_together.render()
    with gr.Tab("NVIDIA"):
        demo_nvidia.render()

if __name__ == "__main__":
    demo.launch(ssr_mode=False)
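For context, the rewritten app.py leans on one Gradio feature: a gr.Blocks built in one module can be embedded inside another by calling .render() within the parent's layout context. A minimal standalone sketch of that pattern (illustrative names only, not part of the commit):

import gradio as gr

# A Blocks defined once (in a real setup, in another module).
with gr.Blocks() as child:
    gr.Markdown("Defined once, rendered wherever needed.")

with gr.Blocks() as parent:
    with gr.Tab("Child tab"):
        child.render()  # embeds the child Blocks inside this tab

if __name__ == "__main__":
    parent.launch()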
app_cerebras.py
ADDED
@@ -0,0 +1,17 @@
import cerebras_gradio

from utils import get_app

demo = get_app(
    models=[
        "llama3.1-8b",
        "llama3.1-70b",
        "llama3.1-405b",
    ],
    default_model="llama3.1-70b",
    registry=cerebras_gradio.registry,
    accept_token=True,
)

if __name__ == "__main__":
    demo.launch()
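The remaining per-provider modules follow this same template: import the provider's registry, list its models with a default, and expose a module-level demo. They differ only in the model catalog and in whether accept_token or multimodal is set.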
app_claude.py
ADDED
@@ -0,0 +1,19 @@
import anthropic_gradio

from utils import get_app

demo = get_app(
    models=[
        "claude-3-5-sonnet-20241022",
        "claude-3-5-haiku-20241022",
        "claude-3-opus-20240229",
        "claude-3-sonnet-20240229",
        "claude-3-haiku-20240307",
    ],
    default_model="claude-3-5-sonnet-20241022",
    registry=anthropic_gradio.registry,
    accept_token=True,
)

if __name__ == "__main__":
    demo.launch()
app_fireworks.py
ADDED
@@ -0,0 +1,16 @@
import fireworks_gradio

from utils import get_app

demo = get_app(
    models=[
        "f1-preview",
        "f1-mini-preview",
    ],
    default_model="f1-preview",
    registry=fireworks_gradio.registry,
    accept_token=False,
)

if __name__ == "__main__":
    demo.launch()
app_gemini.py
ADDED
@@ -0,0 +1,18 @@
import gemini_gradio

from utils import get_app

demo = get_app(
    models=[
        "gemini-1.5-flash",
        "gemini-1.5-flash-8b",
        "gemini-1.5-pro",
        "gemini-exp-1114",
    ],
    default_model="gemini-1.5-pro",
    registry=gemini_gradio.registry,
    accept_token=False,
)

if __name__ == "__main__":
    demo.launch()
app_groq.py
ADDED
@@ -0,0 +1,23 @@
import groq_gradio

from utils import get_app

demo = get_app(
    models=[
        "llama3-groq-8b-8192-tool-use-preview",
        "llama3-groq-70b-8192-tool-use-preview",
        "llama-3.2-1b-preview",
        "llama-3.2-3b-preview",
        "llama-3.2-11b-text-preview",
        "llama-3.2-90b-text-preview",
        "mixtral-8x7b-32768",
        "gemma2-9b-it",
        "gemma-7b-it",
    ],
    default_model="llama3-groq-70b-8192-tool-use-preview",
    registry=groq_gradio.registry,
    accept_token=False,
)

if __name__ == "__main__":
    demo.launch()
app_hf.py
ADDED
@@ -0,0 +1,28 @@
from utils import get_app

demo = get_app(
    models=[
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/Qwen2.5-72B-Instruct",
        "meta-llama/Llama-3.1-70B-Instruct",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "meta-llama/Llama-3.1-8B-Instruct",
        "google/gemma-2-9b-it",
        "mistralai/Mistral-7B-v0.1",
        "meta-llama/Llama-2-7b-chat-hf",
        "meta-llama/Llama-3.2-3B-Instruct",
        "meta-llama/Llama-3.2-1B-Instruct",
        "Qwen/Qwen2.5-1.5B-Instruct",
        "microsoft/Phi-3.5-mini-instruct",
        "HuggingFaceTB/SmolLM2-1.7B-Instruct",
        "google/gemma-2-2b-it",
        "meta-llama/Llama-3.2-3B",
        "meta-llama/Llama-3.2-1B",
        "openai-community/gpt2",
    ],
    default_model="HuggingFaceTB/SmolLM2-1.7B-Instruct",
    registry="models",
)

if __name__ == "__main__":
    demo.launch()
app_hyperbolic.py
ADDED
@@ -0,0 +1,23 @@
import hyperbolic_gradio

from utils import get_app

demo = get_app(
    models=[
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "meta-llama/Llama-3.2-3B-Instruct",
        "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "meta-llama/Meta-Llama-3.1-70B-Instruct",
        "meta-llama/Meta-Llama-3-70B-Instruct",
        "NousResearch/Hermes-3-Llama-3.1-70B",
        "Qwen/Qwen2.5-72B-Instruct",
        "deepseek-ai/DeepSeek-V2.5",
        "meta-llama/Meta-Llama-3.1-405B-Instruct",
    ],
    default_model="Qwen/Qwen2.5-Coder-32B-Instruct",
    registry=hyperbolic_gradio.registry,
    accept_token=False,
)

if __name__ == "__main__":
    demo.launch()
app_mistral.py
ADDED
@@ -0,0 +1,25 @@
import mistral_gradio

from utils import get_app

demo = get_app(
    models=[
        "mistral-large-latest",
        "pixtral-large-latest",
        "ministral-3b-latest",
        "ministral-8b-latest",
        "mistral-small-latest",
        "codestral-latest",
        "mistral-embed",
        "mistral-moderation-latest",
        "pixtral-12b-2409",
        "open-mistral-nemo",
        "open-codestral-mamba",
    ],
    default_model="pixtral-large-latest",
    registry=mistral_gradio.registry,
    accept_token=False,
)

if __name__ == "__main__":
    demo.launch()
app_nvidia.py
ADDED
@@ -0,0 +1,50 @@
import nvidia_gradio

from utils import get_app

demo = get_app(
    models=[
        "nvidia/llama3-chatqa-1.5-70b",
        "nvidia/llama3-chatqa-1.5-8b",
        "nvidia-nemotron-4-340b-instruct",
        "meta/llama-3.1-70b-instruct",
        "meta/codellama-70b",
        "meta/llama2-70b",
        "meta/llama3-8b",
        "meta/llama3-70b",
        "mistralai/codestral-22b-instruct-v0.1",
        "mistralai/mathstral-7b-v0.1",
        "mistralai/mistral-large-2-instruct",
        "mistralai/mistral-7b-instruct",
        "mistralai/mistral-7b-instruct-v0.3",
        "mistralai/mixtral-8x7b-instruct",
        "mistralai/mixtral-8x22b-instruct",
        "mistralai/mistral-large",
        "google/gemma-2b",
        "google/gemma-7b",
        "google/gemma-2-2b-it",
        "google/gemma-2-9b-it",
        "google/gemma-2-27b-it",
        "google/codegemma-1.1-7b",
        "google/codegemma-7b",
        "google/recurrentgemma-2b",
        "google/shieldgemma-9b",
        "microsoft/phi-3-medium-128k-instruct",
        "microsoft/phi-3-medium-4k-instruct",
        "microsoft/phi-3-mini-128k-instruct",
        "microsoft/phi-3-mini-4k-instruct",
        "microsoft/phi-3-small-128k-instruct",
        "microsoft/phi-3-small-8k-instruct",
        "qwen/qwen2-7b-instruct",
        "databricks/dbrx-instruct",
        "deepseek-ai/deepseek-coder-6.7b-instruct",
        "upstage/solar-10.7b-instruct",
        "snowflake/arctic",
    ],
    default_model="meta/llama-3.1-70b-instruct",
    registry=nvidia_gradio.registry,
    accept_token=True,
)

if __name__ == "__main__":
    demo.launch()
app_openai.py
ADDED
@@ -0,0 +1,32 @@
import openai_gradio

from utils import get_app

demo = get_app(
    models=[
        "gpt-4o-2024-11-20",
        "gpt-4o",
        "gpt-4o-2024-08-06",
        "gpt-4o-2024-05-13",
        "chatgpt-4o-latest",
        "gpt-4o-mini",
        "gpt-4o-mini-2024-07-18",
        "o1-preview",
        "o1-preview-2024-09-12",
        "o1-mini",
        "o1-mini-2024-09-12",
        "gpt-4-turbo",
        "gpt-4-turbo-2024-04-09",
        "gpt-4-turbo-preview",
        "gpt-4-0125-preview",
        "gpt-4-1106-preview",
        "gpt-4",
        "gpt-4-0613",
    ],
    default_model="gpt-4o-2024-11-20",
    registry=openai_gradio.registry,
    accept_token=False,
)

if __name__ == "__main__":
    demo.launch()
app_perplexity.py
ADDED
@@ -0,0 +1,21 @@
import perplexity_gradio

from utils import get_app

demo = get_app(
    models=[
        "llama-3.1-sonar-large-128k-online",
        "llama-3.1-sonar-small-128k-online",
        "llama-3.1-sonar-huge-128k-online",
        "llama-3.1-sonar-small-128k-chat",
        "llama-3.1-sonar-large-128k-chat",
        "llama-3.1-8b-instruct",
        "llama-3.1-70b-instruct",
    ],
    default_model="llama-3.1-sonar-large-128k-online",
    registry=perplexity_gradio.registry,
    accept_token=True,
)

if __name__ == "__main__":
    demo.launch()
app_qwen.py
ADDED
@@ -0,0 +1,23 @@
import dashscope_gradio

from utils import get_app

demo = get_app(
    models=[
        "qwen-turbo-latest",
        "qwen-turbo",
        "qwen-plus",
        "qwen-max",
        "qwen1.5-110b-chat",
        "qwen1.5-72b-chat",
        "qwen1.5-32b-chat",
        "qwen1.5-14b-chat",
        "qwen1.5-7b-chat",
    ],
    default_model="qwen-turbo-latest",
    registry=dashscope_gradio.registry,
    accept_token=False,
)

if __name__ == "__main__":
    demo.launch()
app_sambanova.py
ADDED
@@ -0,0 +1,22 @@
import sambanova_gradio

from utils import get_app

demo = get_app(
    models=[
        "Meta-Llama-3.2-1B-Instruct",
        "Meta-Llama-3.2-3B-Instruct",
        "Llama-3.2-11B-Vision-Instruct",
        "Llama-3.2-90B-Vision-Instruct",
        "Meta-Llama-3.1-8B-Instruct",
        "Meta-Llama-3.1-70B-Instruct",
        "Meta-Llama-3.1-405B-Instruct",
    ],
    default_model="Llama-3.2-90B-Vision-Instruct",
    registry=sambanova_gradio.registry,
    accept_token=False,
    multimodal=True,
)

if __name__ == "__main__":
    demo.launch()
app_together.py
ADDED
@@ -0,0 +1,49 @@
import together_gradio

from utils import get_app

demo = get_app(
    models=[
        "meta-llama/Llama-Vision-Free",
        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
        "meta-llama/Meta-Llama-3-70B-Instruct-Lite",
        "meta-llama/Llama-3-8b-chat-hf",
        "meta-llama/Llama-3-70b-chat-hf",
        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "microsoft/WizardLM-2-8x22B",
        "google/gemma-2-27b-it",
        "google/gemma-2-9b-it",
        "databricks/dbrx-instruct",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "mistralai/Mixtral-8x22B-Instruct-v0.1",
        "Qwen/Qwen2.5-7B-Instruct-Turbo",
        "Qwen/Qwen2.5-72B-Instruct-Turbo",
        "Qwen/Qwen2-72B-Instruct",
        "deepseek-ai/deepseek-llm-67b-chat",
        "google/gemma-2b-it",
        "Gryphe/MythoMax-L2-13b",
        "meta-llama/Llama-2-13b-chat-hf",
        "mistralai/Mistral-7B-Instruct-v0.1",
        "mistralai/Mistral-7B-Instruct-v0.2",
        "mistralai/Mistral-7B-Instruct-v0.3",
        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
        "togethercomputer/StripedHyena-Nous-7B",
        "upstage/SOLAR-10.7B-Instruct-v1.0",
    ],
    default_model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
    registry=together_gradio.registry,
    accept_token=False,
    multimodal=True,
)

if __name__ == "__main__":
    demo.launch()
app_xai.py
ADDED
@@ -0,0 +1,16 @@
import xai_gradio

from utils import get_app

demo = get_app(
    models=[
        "grok-beta",
        "grok-vision-beta",
    ],
    default_model="grok-vision-beta",
    registry=xai_gradio.registry,
    accept_token=False,
)

if __name__ == "__main__":
    demo.launch()
pyproject.toml
ADDED
@@ -0,0 +1,89 @@
[project]
name = "anychat"
version = "0.1.0"
description = ""
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
    "anthropic-gradio",
    "cerebras-gradio",
    "dashscope-gradio",
    "fireworks-gradio",
    "gemini-gradio>=0.0.1",
    "gradio>=5.6.0",
    "groq-gradio>=0.0.2",
    "hyperbolic-gradio>=0.0.4",
    "mistral-gradio>=0.0.2",
    "nvidia-gradio",
    "openai-gradio>=0.0.4",
    "perplexity-gradio>=0.0.1",
    "sambanova-gradio>=0.1.9",
    "together-gradio>=0.0.1",
    "xai-gradio>=0.0.2",
]

[tool.uv.sources]
anthropic-gradio = { git = "https://github.com/AK391/anthropic-gradio.git" }
fireworks-gradio = { git = "https://github.com/AK391/fireworks-ai-gradio.git" }
cerebras-gradio = { git = "https://github.com/gradio-app/cerebras_gradio.git" }
nvidia-gradio = { git = "https://github.com/AK391/nvidia-gradio.git" }
dashscope-gradio = { git = "https://github.com/AK391/dashscope-gradio.git" }

[tool.ruff]
# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
select = ["E", "F"]
ignore = ["E501"]  # line too long (black is taking care of this)
line-length = 119
fixable = [
    "A",
    "B",
    "C",
    "D",
    "E",
    "F",
    "G",
    "I",
    "N",
    "Q",
    "S",
    "T",
    "W",
    "ANN",
    "ARG",
    "BLE",
    "COM",
    "DJ",
    "DTZ",
    "EM",
    "ERA",
    "EXE",
    "FBT",
    "ICN",
    "INP",
    "ISC",
    "NPY",
    "PD",
    "PGH",
    "PIE",
    "PL",
    "PT",
    "PTH",
    "PYI",
    "RET",
    "RSE",
    "RUF",
    "SIM",
    "SLF",
    "TCH",
    "TID",
    "TRY",
    "UP",
    "YTT",
]

[tool.isort]
profile = "black"
line_length = 119

[tool.black]
line-length = 119
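The pinned requirements.txt below is derived from this file; per its own header, it can be regenerated after a dependency change with `uv pip compile pyproject.toml -o requirements.txt`.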
requirements.txt
CHANGED
@@ -1,14 +1,323 @@

Removed: the previous hand-maintained 14-line list (its only legible remnant is a truncated `git+https://github.com/AK391/` entry), replaced by the compiled pin list:

# This file was autogenerated by uv via the following command:
#    uv pip compile pyproject.toml -o requirements.txt
aiofiles==23.2.1
    # via gradio
annotated-types==0.7.0
    # via pydantic
anthropic==0.39.0
    # via anthropic-gradio
anthropic-gradio @ git+https://github.com/AK391/anthropic-gradio.git@34e5622031487ea207073177b4ec1f3067936295
    # via anychat (pyproject.toml)
anyio==4.6.2.post1
    # via
    #   anthropic
    #   cerebras-cloud-sdk
    #   gradio
    #   groq
    #   httpx
    #   openai
    #   starlette
cachetools==5.5.0
    # via google-auth
cerebras-cloud-sdk==1.12.1
    # via cerebras-gradio
cerebras-gradio @ git+https://github.com/gradio-app/cerebras_gradio.git@0135ea37048281ca4fae2d6a90311be3b38954ae
    # via anychat (pyproject.toml)
certifi==2024.8.30
    # via
    #   httpcore
    #   httpx
    #   requests
charset-normalizer==3.4.0
    # via requests
click==8.1.7
    # via
    #   typer
    #   uvicorn
dashscope-gradio @ git+https://github.com/AK391/dashscope-gradio.git@3a8bab36087cbf5efcde17c695a10e0229784db4
    # via anychat (pyproject.toml)
distro==1.9.0
    # via
    #   anthropic
    #   cerebras-cloud-sdk
    #   groq
    #   openai
eval-type-backport==0.2.0
    # via mistralai
exceptiongroup==1.2.2
    # via anyio
fastapi==0.115.5
    # via gradio
ffmpy==0.4.0
    # via gradio
filelock==3.16.1
    # via huggingface-hub
fireworks-gradio @ git+https://github.com/AK391/fireworks-ai-gradio.git@b85f85bfe777a7ec953c8b996536e1d1cd47ae08
    # via anychat (pyproject.toml)
fsspec==2024.10.0
    # via
    #   gradio-client
    #   huggingface-hub
gemini-gradio==0.0.1
    # via anychat (pyproject.toml)
google-ai-generativelanguage==0.6.10
    # via google-generativeai
google-api-core==2.23.0
    # via
    #   google-ai-generativelanguage
    #   google-api-python-client
    #   google-generativeai
google-api-python-client==2.154.0
    # via google-generativeai
google-auth==2.36.0
    # via
    #   google-ai-generativelanguage
    #   google-api-core
    #   google-api-python-client
    #   google-auth-httplib2
    #   google-generativeai
google-auth-httplib2==0.2.0
    # via google-api-python-client
google-generativeai==0.8.3
    # via gemini-gradio
googleapis-common-protos==1.66.0
    # via
    #   google-api-core
    #   grpcio-status
gradio==5.6.0
    # via
    #   anychat (pyproject.toml)
    #   anthropic-gradio
    #   cerebras-gradio
    #   dashscope-gradio
    #   fireworks-gradio
    #   gemini-gradio
    #   groq-gradio
    #   hyperbolic-gradio
    #   mistral-gradio
    #   nvidia-gradio
    #   openai-gradio
    #   perplexity-gradio
    #   sambanova-gradio
    #   together-gradio
    #   xai-gradio
gradio-client==1.4.3
    # via gradio
groq==0.12.0
    # via groq-gradio
groq-gradio==0.0.2
    # via anychat (pyproject.toml)
grpcio==1.68.0
    # via
    #   google-api-core
    #   grpcio-status
grpcio-status==1.68.0
    # via google-api-core
h11==0.14.0
    # via
    #   httpcore
    #   uvicorn
httpcore==1.0.7
    # via httpx
httplib2==0.22.0
    # via
    #   google-api-python-client
    #   google-auth-httplib2
httpx==0.27.2
    # via
    #   anthropic
    #   cerebras-cloud-sdk
    #   gradio
    #   gradio-client
    #   groq
    #   mistralai
    #   openai
    #   safehttpx
huggingface-hub==0.26.2
    # via
    #   gradio
    #   gradio-client
hyperbolic-gradio==0.0.4
    # via anychat (pyproject.toml)
idna==3.10
    # via
    #   anyio
    #   httpx
    #   requests
jinja2==3.1.4
    # via gradio
jiter==0.7.1
    # via
    #   anthropic
    #   openai
jsonpath-python==1.0.6
    # via mistralai
markdown-it-py==3.0.0
    # via rich
markupsafe==2.1.5
    # via
    #   gradio
    #   jinja2
mdurl==0.1.2
    # via markdown-it-py
mistral-gradio==0.0.2
    # via anychat (pyproject.toml)
mistralai==1.2.3
    # via mistral-gradio
mypy-extensions==1.0.0
    # via typing-inspect
numpy==2.1.3
    # via
    #   gradio
    #   pandas
nvidia-gradio @ git+https://github.com/AK391/nvidia-gradio.git@735cc0ba06afb44eeef789d8c0c35b1fc61fee16
    # via anychat (pyproject.toml)
openai==1.55.0
    # via
    #   dashscope-gradio
    #   fireworks-gradio
    #   hyperbolic-gradio
    #   nvidia-gradio
    #   openai-gradio
    #   perplexity-gradio
    #   sambanova-gradio
    #   together-gradio
    #   xai-gradio
openai-gradio==0.0.4
    # via anychat (pyproject.toml)
orjson==3.10.11
    # via gradio
packaging==24.2
    # via
    #   gradio
    #   gradio-client
    #   huggingface-hub
pandas==2.2.3
    # via gradio
perplexity-gradio==0.0.1
    # via anychat (pyproject.toml)
pillow==11.0.0
    # via gradio
proto-plus==1.25.0
    # via
    #   google-ai-generativelanguage
    #   google-api-core
protobuf==5.28.3
    # via
    #   google-ai-generativelanguage
    #   google-api-core
    #   google-generativeai
    #   googleapis-common-protos
    #   grpcio-status
    #   proto-plus
pyasn1==0.6.1
    # via
    #   pyasn1-modules
    #   rsa
pyasn1-modules==0.4.1
    # via google-auth
pydantic==2.10.0
    # via
    #   anthropic
    #   cerebras-cloud-sdk
    #   fastapi
    #   google-generativeai
    #   gradio
    #   groq
    #   mistralai
    #   openai
pydantic-core==2.27.0
    # via pydantic
pydub==0.25.1
    # via gradio
pygments==2.18.0
    # via rich
pyparsing==3.2.0
    # via httplib2
python-dateutil==2.8.2
    # via
    #   mistralai
    #   pandas
python-multipart==0.0.12
    # via gradio
pytz==2024.2
    # via pandas
pyyaml==6.0.2
    # via
    #   gradio
    #   huggingface-hub
requests==2.32.3
    # via
    #   google-api-core
    #   huggingface-hub
rich==13.9.4
    # via typer
rsa==4.9
    # via google-auth
ruff==0.7.4
    # via gradio
safehttpx==0.1.1
    # via gradio
sambanova-gradio==0.1.9
    # via anychat (pyproject.toml)
semantic-version==2.10.0
    # via gradio
shellingham==1.5.4
    # via typer
six==1.16.0
    # via python-dateutil
sniffio==1.3.1
    # via
    #   anthropic
    #   anyio
    #   cerebras-cloud-sdk
    #   groq
    #   httpx
    #   openai
starlette==0.41.3
    # via
    #   fastapi
    #   gradio
together-gradio==0.0.1
    # via anychat (pyproject.toml)
tomlkit==0.12.0
    # via gradio
tqdm==4.67.0
    # via
    #   google-generativeai
    #   huggingface-hub
    #   openai
typer==0.13.1
    # via gradio
typing-extensions==4.12.2
    # via
    #   anthropic
    #   anyio
    #   cerebras-cloud-sdk
    #   fastapi
    #   google-generativeai
    #   gradio
    #   gradio-client
    #   groq
    #   huggingface-hub
    #   openai
    #   pydantic
    #   pydantic-core
    #   rich
    #   typer
    #   typing-inspect
    #   uvicorn
typing-inspect==0.9.0
    # via mistralai
tzdata==2024.2
    # via pandas
uritemplate==4.1.1
    # via google-api-python-client
urllib3==2.2.3
    # via requests
uvicorn==0.32.1
    # via gradio
websockets==12.0
    # via gradio-client
xai-gradio==0.0.2
    # via anychat (pyproject.toml)
utils.py
ADDED
@@ -0,0 +1,33 @@
from typing import Callable

import gradio as gr


def get_app(
    models: list[str],
    default_model: str,
    registry: Callable,
    accept_token: bool = False,
    **kwargs,
) -> gr.Blocks:
    def update_model(new_model: str) -> list[gr.Column]:
        return [gr.Column(visible=model_name == new_model) for model_name in models]

    with gr.Blocks() as demo:
        model = gr.Dropdown(label="Select Model", choices=models, value=default_model)

        columns = []
        for model_name in models:
            with gr.Column(visible=model_name == default_model) as column:
                gr.load(name=model_name, src=registry, accept_token=accept_token, **kwargs)
            columns.append(column)

        model.change(
            fn=update_model,
            inputs=model,
            outputs=columns,
            api_name=False,
            queue=False,
        )

    return demo
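Note the design choice in get_app: it pre-builds one hidden gr.Column per model and toggles visibility in the change handler, instead of tearing down and rebuilding the interface as the old update_model did. A standalone sketch of the same show/hide mechanism, with generic placeholder contents rather than gr.load (illustration only, not part of the commit):

import gradio as gr

OPTIONS = ["alpha", "beta", "gamma"]  # hypothetical option names

with gr.Blocks() as demo:
    selector = gr.Dropdown(label="Select Option", choices=OPTIONS, value=OPTIONS[0])

    # One pre-built column per option; only the default starts visible.
    columns = []
    for name in OPTIONS:
        with gr.Column(visible=(name == OPTIONS[0])) as col:
            gr.Markdown(f"Panel for **{name}**")
        columns.append(col)

    # Returning component constructors acts as an update in Gradio 4+:
    # only the column matching the new selection stays visible.
    selector.change(
        fn=lambda new: [gr.Column(visible=(name == new)) for name in OPTIONS],
        inputs=selector,
        outputs=columns,
    )

if __name__ == "__main__":
    demo.launch()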
uv.lock
ADDED
The diff for this file is too large to render.