akhaliq (HF Staff) committed
Commit 082d9d1 · 1 Parent(s): 4f0f41b
.gitattributes DELETED
@@ -1,35 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore CHANGED
@@ -1,5 +1,3 @@
- .gradio/
-
  # Byte-compiled / optimized / DLL files
  __pycache__/
  *.py[cod]
@@ -21,16 +19,18 @@ lib64/
  parts/
  sdist/
  var/
- wheels/
- share/python-wheels/
  *.egg-info/
  .installed.cfg
  *.egg
  MANIFEST

+ # Virtual environments
+ venv/
+ env/
+ ENV/
+ .venv/
+
  # PyInstaller
- # Usually these files are written by a python script from a template
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
  *.manifest
  *.spec

@@ -48,115 +48,34 @@ htmlcov/
  nosetests.xml
  coverage.xml
  *.cover
- *.py,cover
  .hypothesis/
  .pytest_cache/
- cover/
-
- # Translations
- *.mo
- *.pot
-
- # Django stuff:
- *.log
- local_settings.py
- db.sqlite3
- db.sqlite3-journal
-
- # Flask stuff:
- instance/
- .webassets-cache
-
- # Scrapy stuff:
- .scrapy
-
- # Sphinx documentation
- docs/_build/
-
- # PyBuilder
- .pybuilder/
- target/

  # Jupyter Notebook
  .ipynb_checkpoints

- # IPython
- profile_default/
- ipython_config.py
-
  # pyenv
- # For a library or package, you might want to ignore these files since the code is
- # intended to run in multiple environments; otherwise, check them in:
- # .python-version
-
- # pipenv
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
- # install all needed dependencies.
- #Pipfile.lock
-
- # poetry
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
- # This is especially recommended for binary packages to ensure reproducibility, and is more
- # commonly ignored for libraries.
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
- #poetry.lock
-
- # pdm
- # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
- #pdm.lock
- # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
- # in version control.
- # https://pdm.fming.dev/#use-with-ide
- .pdm.toml
-
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
- __pypackages__/
-
- # Celery stuff
- celerybeat-schedule
- celerybeat.pid
-
- # SageMath parsed files
- *.sage.py
-
- # Environments
- .env
- .venv
- env/
- venv/
- ENV/
- env.bak/
- venv.bak/
-
- # Spyder project settings
- .spyderproject
- .spyproject
-
- # Rope project settings
- .ropeproject
-
- # mkdocs documentation
- /site
+ .python-version

  # mypy
  .mypy_cache/
  .dmypy.json
- dmypy.json

  # Pyre type checker
  .pyre/

- # pytype static type analyzer
- .pytype/
-
- # Cython debug symbols
- cython_debug/
-
- # PyCharm
- # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
- # and can be added to the global gitignore or merged into this file. For a more nuclear
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
- #.idea/
+ # Gradio cache
+ log/
+ logs/
+
+ # System files
+ .DS_Store
+ Thumbs.db
+
+ # Lock files
+ uv.lock
+ poetry.lock
+ Pipfile.lock
+
+ # VSCode
+ .vscode/
.pre-commit-config.yaml DELETED
@@ -1,32 +0,0 @@
- repos:
-   - repo: https://github.com/pre-commit/pre-commit-hooks
-     rev: v5.0.0
-     hooks:
-       - id: check-executables-have-shebangs
-       - id: check-json
-       - id: check-merge-conflict
-       - id: check-shebang-scripts-are-executable
-       - id: check-toml
-       - id: check-yaml
-       - id: end-of-file-fixer
-       - id: mixed-line-ending
-         args: ["--fix=lf"]
-       - id: requirements-txt-fixer
-       - id: trailing-whitespace
-   - repo: https://github.com/astral-sh/ruff-pre-commit
-     rev: v0.8.6
-     hooks:
-       - id: ruff
-         args: ["--fix"]
-   - repo: https://github.com/pre-commit/mirrors-mypy
-     rev: v1.14.1
-     hooks:
-       - id: mypy
-         args: ["--ignore-missing-imports"]
-         additional_dependencies:
-           [
-             "types-python-slugify",
-             "types-requests",
-             "types-PyYAML",
-             "types-pytz",
-           ]
.python-version DELETED
@@ -1 +0,0 @@
- 3.10
.vscode/extensions.json DELETED
@@ -1,8 +0,0 @@
- {
-   "recommendations": [
-     "ms-python.python",
-     "charliermarsh.ruff",
-     "streetsidesoftware.code-spell-checker",
-     "tamasfe.even-better-toml"
-   ]
- }
.vscode/settings.json DELETED
@@ -1,16 +0,0 @@
- {
-   "editor.formatOnSave": true,
-   "files.insertFinalNewline": false,
-   "[python]": {
-     "editor.defaultFormatter": "charliermarsh.ruff",
-     "editor.formatOnType": true,
-     "editor.codeActionsOnSave": {
-       "source.fixAll.ruff": "explicit"
-     }
-   },
-   "[jupyter]": {
-     "files.insertFinalNewline": false
-   },
-   "notebook.output.scrolling": true,
-   "notebook.formatOnSave.enabled": true
- }
README.md CHANGED
@@ -10,4 +10,83 @@ pinned: false
  disable_embedding: true
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+ # Anycoder - AI Code Generation with Hugging Face Inference
+
+ An ultra-clean AI-powered code generation application using Hugging Face inference providers. Minimal files for maximum simplicity.
+
+ ## Features
+
+ - **Hugging Face Models**: Uses DeepSeek-V3-0324 via Novita provider
+ - **Modern UI**: Built with Gradio and ModelScope Studio components
+ - **Code Generation**: Generates working code based on user requirements
+ - **Live Preview**: Renders generated HTML code in real-time
+ - **History Management**: Keeps track of conversation history
+ - **Streaming**: Real-time code generation with streaming responses
+
+ ## Project Structure
+
+ ```
+ anycoder/
+ ├── app.py          # Main application (everything included)
+ ├── app.css         # Basic styling
+ ├── pyproject.toml  # Dependencies
+ └── README.md       # This file
+ ```
+
+ ## Setup
+
+ 1. Set your Hugging Face API token:
+ ```bash
+ export HF_TOKEN="your_huggingface_token_here"
+ ```
+
+ 2. Install dependencies:
+ ```bash
+ uv sync
+ ```
+
+ 3. Run the application:
+ ```bash
+ uv run python app.py
+ ```
+
+ ## Usage
+
+ 1. Enter your application requirements in the text area
+ 2. Click "send" to generate code
+ 3. View the generated code in the code drawer
+ 4. See the live preview in the sandbox area
+ 5. Use example cards for quick prompts
+
+ ## Code Example
+
+ ```python
+ import os
+ from huggingface_hub import InferenceClient
+
+ client = InferenceClient(
+     provider="novita",
+     api_key=os.environ["HF_TOKEN"],
+     bill_to="huggingface"
+ )
+
+ completion = client.chat.completions.create(
+     model="deepseek-ai/DeepSeek-V3-0324",
+     messages=[
+         {
+             "role": "user",
+             "content": "Create a simple todo app"
+         }
+     ],
+ )
+ ```
+
+ ## Architecture
+
+ The application uses:
+ - **Gradio**: For the web interface
+ - **Hugging Face Hub**: For model inference
+ - **ModelScope Studio**: For UI components
+ - **Streaming**: For real-time code generation
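
For reference, the same endpoint can also be consumed incrementally, which is the pattern the new app.py below relies on for its live preview. A minimal streaming sketch, assuming the same `HF_TOKEN` environment and `huggingface_hub` dependency as the README example (illustrative, not part of the committed files):

```python
import os

from huggingface_hub import InferenceClient

client = InferenceClient(
    provider="novita",
    api_key=os.environ["HF_TOKEN"],
    bill_to="huggingface",
)

# stream=True yields partial chunks as they arrive instead of one final
# message; app.py accumulates these chunks to update the UI in real time.
stream = client.chat.completions.create(
    model="deepseek-ai/DeepSeek-V3-0324",
    messages=[{"role": "user", "content": "Create a simple todo app"}],
    stream=True,
)
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
```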
app.css ADDED
@@ -0,0 +1,59 @@
+ /* Basic styling for the coder application */
+
+ .left_header {
+   text-align: center;
+   margin-bottom: 20px;
+ }
+
+ .left_header h1 {
+   margin-top: 10px;
+   color: #333;
+ }
+
+ .right_panel {
+   background: #f5f5f5;
+   border-radius: 8px;
+   padding: 20px;
+   height: 100%;
+ }
+
+ .render_header {
+   display: flex;
+   gap: 8px;
+   margin-bottom: 15px;
+ }
+
+ .header_btn {
+   width: 12px;
+   height: 12px;
+   border-radius: 50%;
+   background: #ff5f56;
+ }
+
+ .header_btn:nth-child(2) {
+   background: #ffbd2e;
+ }
+
+ .header_btn:nth-child(3) {
+   background: #27ca3f;
+ }
+
+ .right_content {
+   display: flex;
+   align-items: center;
+   justify-content: center;
+   height: 800px;
+ }
+
+ .html_content {
+   width: 100%;
+   height: 920px;
+   border: none;
+   border-radius: 8px;
+   background: white;
+ }
+
+ .history_chatbot {
+   max-height: 960px;
+   overflow-y: auto;
+ }
app.py CHANGED
@@ -1,54 +1,292 @@
- from app_huggingface import demo as demo_huggingface
- from app_gemini_coder import demo as demo_gemini
- from utils import get_app
- import gradio as gr
-
- # Create mapping of providers to their code snippets
- PROVIDER_SNIPPETS = {
-     "Hugging Face": """
- import gradio as gr
- import ai_gradio
- gr.load(
-     name='huggingface:deepseek-ai/DeepSeek-R1',
-     src=ai_gradio.registry,
-     coder=True,
-     provider="together"
- ).launch()""",
-     "Gemini Coder": """
+ import os
+ import re
+ from http import HTTPStatus
+ from typing import Dict, List, Optional, Tuple
+ import base64
+
  import gradio as gr
- import ai_gradio
- gr.load(
-     name='gemini:gemini-2.5-pro-exp-03-25',
-     src=ai_gradio.registry,
-     coder=True,
-     provider="together"
- ).launch()
- """,
- }
- # Create mapping of providers to their demos
- PROVIDERS = {
-     "Hugging Face": demo_huggingface,
-     "Gemini Coder": demo_gemini,
- }
-
- # Modified get_app implementation
- demo = gr.Blocks()
- with demo:
-
-     provider_dropdown = gr.Dropdown(choices=list(PROVIDERS.keys()), value="Hugging Face", label="Select code snippet")
-     code_display = gr.Code(label="Provider Code Snippet", language="python", value=PROVIDER_SNIPPETS["Hugging Face"])
-
-     def update_code(provider):
-         return PROVIDER_SNIPPETS.get(provider, "Code snippet not available")
-
-     provider_dropdown.change(fn=update_code, inputs=[provider_dropdown], outputs=[code_display])
-
-     selected_demo = get_app(
-         models=list(PROVIDERS.keys()),
-         default_model="Hugging Face",
-         src=PROVIDERS,
-         dropdown_label="Select Provider",
-     )
+ from huggingface_hub import InferenceClient
+
+ import modelscope_studio.components.base as ms
+ import modelscope_studio.components.legacy as legacy
+ import modelscope_studio.components.antd as antd
+
+ # Configuration
+ SystemPrompt = """You are a helpful coding assistant. You help users create applications by generating code based on their requirements.
+ When asked to create an application, you should:
+ 1. Understand the user's requirements
+ 2. Generate clean, working code
+ 3. Provide HTML output when appropriate for web applications
+ 4. Include necessary comments and documentation
+ 5. Ensure the code is functional and follows best practices
+
+ Always respond with code that can be executed or rendered directly.
+
+ Always output only the HTML code inside a ```html ... ``` code block, and do not include any explanations or extra text."""
+
+ DEMO_LIST = [
+     {
+         "title": "Todo App",
+         "description": "Create a simple todo application with add, delete, and mark as complete functionality"
+     },
+     {
+         "title": "Calculator",
+         "description": "Build a basic calculator with addition, subtraction, multiplication, and division"
+     },
+     {
+         "title": "Weather Dashboard",
+         "description": "Create a weather dashboard that displays current weather information"
+     },
+     {
+         "title": "Chat Interface",
+         "description": "Build a chat interface with message history and user input"
+     },
+     {
+         "title": "E-commerce Product Card",
+         "description": "Create a product card component for an e-commerce website"
+     },
+     {
+         "title": "Login Form",
+         "description": "Build a responsive login form with validation"
+     },
+     {
+         "title": "Dashboard Layout",
+         "description": "Create a dashboard layout with sidebar navigation and main content area"
+     },
+     {
+         "title": "Data Table",
+         "description": "Build a data table with sorting and filtering capabilities"
+     }
+ ]
+
+ # HF Inference Client
+ YOUR_API_TOKEN = os.getenv('HF_TOKEN')
+ client = InferenceClient(
+     provider="novita",
+     api_key=YOUR_API_TOKEN,
+     bill_to="huggingface"
+ )
+
+ History = List[Tuple[str, str]]
+ Messages = List[Dict[str, str]]
+
+ def history_to_messages(history: History, system: str) -> Messages:
+     messages = [{'role': 'system', 'content': system}]
+     for h in history:
+         messages.append({'role': 'user', 'content': h[0]})
+         messages.append({'role': 'assistant', 'content': h[1]})
+     return messages
+
+ def messages_to_history(messages: Messages) -> Tuple[str, History]:
+     assert messages[0]['role'] == 'system'
+     history = []
+     for q, r in zip(messages[1::2], messages[2::2]):
+         history.append([q['content'], r['content']])
+     return history
+
+ def remove_code_block(text):
+     # Try to match code blocks with language markers
+     patterns = [
+         r'```(?:html|HTML)\n([\s\S]+?)\n```', # Match ```html or ```HTML
+         r'```\n([\s\S]+?)\n```', # Match code blocks without language markers
+         r'```([\s\S]+?)```' # Match code blocks without line breaks
+     ]
+     for pattern in patterns:
+         match = re.search(pattern, text, re.DOTALL)
+         if match:
+             extracted = match.group(1).strip()
+             print("Successfully extracted code block:", extracted)
+             return extracted
+     # If no code block is found, check if the entire text is HTML
+     if text.strip().startswith('<!DOCTYPE html>') or text.strip().startswith('<html'):
+         print("Text appears to be raw HTML, using as is")
+         return text.strip()
+     print("No code block found in text:", text)
+     return text.strip()
+
+ def history_render(history: History):
+     return gr.update(open=True), history
+
+ def clear_history():
+     return []
+
+ def send_to_sandbox(code):
+     # Add a wrapper to inject necessary permissions and ensure full HTML
+     wrapped_code = f"""
+     <!DOCTYPE html>
+     <html>
+     <head>
+         <meta charset=\"UTF-8\">
+         <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">
+         <script>
+             // Safe localStorage polyfill
+             const safeStorage = {{
+                 _data: {{}},
+                 getItem: function(key) {{ return this._data[key] || null; }},
+                 setItem: function(key, value) {{ this._data[key] = value; }},
+                 removeItem: function(key) {{ delete this._data[key]; }},
+                 clear: function() {{ this._data = {{}}; }}
+             }};
+             Object.defineProperty(window, 'localStorage', {{
+                 value: safeStorage,
+                 writable: false
+             }});
+             window.onerror = function(message, source, lineno, colno, error) {{
+                 console.error('Error:', message);
+             }};
+         </script>
+     </head>
+     <body>
+         {code}
+     </body>
+     </html>
+     """
+     encoded_html = base64.b64encode(wrapped_code.encode('utf-8')).decode('utf-8')
+     data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
+     iframe = f'<iframe src="{data_uri}" width="100%" height="920px" sandbox="allow-scripts allow-same-origin allow-forms allow-popups allow-modals allow-presentation" allow="display-capture"></iframe>'
+     print("Generated iframe:", iframe)
+     return iframe
+
+ def demo_card_click(e: gr.EventData):
+     try:
+         # Try to get the index from the event data
+         if hasattr(e, '_data') and e._data and 'component' in e._data:
+             index = e._data['component'].get('index', 0)
+         else:
+             # Fallback to first item if we can't get the index
+             index = 0
+         return DEMO_LIST[index]['description']
+     except (KeyError, IndexError, AttributeError):
+         # Return the first demo description as fallback
+         return DEMO_LIST[0]['description']
+
+ # Main application
+ with gr.Blocks(css_paths="app.css") as demo:
+     history = gr.State([])
+     setting = gr.State({
+         "system": SystemPrompt,
+     })
+
+     with ms.Application() as app:
+         with antd.ConfigProvider():
+             with antd.Row(gutter=[32, 12]) as layout:
+                 with antd.Col(span=24, md=8):
+                     with antd.Flex(vertical=True, gap="middle", wrap=True):
+                         header = gr.HTML("""
+                             <div class="left_header">
+                                 <img src="//img.alicdn.com/imgextra/i2/O1CN01KDhOma1DUo8oa7OIU_!!6000000000220-1-tps-240-240.gif" width="200px" />
+                                 <h1>HF Coder</h1>
+                             </div>
+                         """)
+                         input = antd.InputTextarea(
+                             size="large", allow_clear=True, placeholder="Please enter what kind of application you want")
+                         btn = antd.Button("send", type="primary", size="large")
+                         clear_btn = antd.Button("clear history", type="default", size="large")
+
+                         antd.Divider("examples")
+                         with antd.Flex(gap="small", wrap=True):
+                             with ms.Each(DEMO_LIST):
+                                 with antd.Card(hoverable=True, as_item="card") as demoCard:
+                                     antd.CardMeta()
+                             demoCard.click(demo_card_click, outputs=[input])
+
+                         antd.Divider("setting")
+
+                         with antd.Flex(gap="small", wrap=True):
+                             settingPromptBtn = antd.Button(
+                                 "⚙️ set system Prompt", type="default")
+                             codeBtn = antd.Button("🧑‍💻 view code", type="default")
+                             historyBtn = antd.Button("📜 history", type="default")
+
+                     with antd.Modal(open=False, title="set system Prompt", width="800px") as system_prompt_modal:
+                         systemPromptInput = antd.InputTextarea(
+                             SystemPrompt, auto_size=True)
+
+                     settingPromptBtn.click(lambda: gr.update(
+                         open=True), inputs=[], outputs=[system_prompt_modal])
+                     system_prompt_modal.ok(lambda input: ({"system": input}, gr.update(
+                         open=False)), inputs=[systemPromptInput], outputs=[setting, system_prompt_modal])
+                     system_prompt_modal.cancel(lambda: gr.update(
+                         open=False), outputs=[system_prompt_modal])
+
+                     with antd.Drawer(open=False, title="code", placement="left", width="750px") as code_drawer:
+                         code_output = legacy.Markdown()
+
+                     codeBtn.click(lambda: gr.update(open=True),
+                                   inputs=[], outputs=[code_drawer])
+                     code_drawer.close(lambda: gr.update(
+                         open=False), inputs=[], outputs=[code_drawer])
+
+                     with antd.Drawer(open=False, title="history", placement="left", width="900px") as history_drawer:
+                         history_output = legacy.Chatbot(show_label=False, flushing=False, height=960, elem_classes="history_chatbot")
+
+                     historyBtn.click(history_render, inputs=[history], outputs=[history_drawer, history_output])
+                     history_drawer.close(lambda: gr.update(
+                         open=False), inputs=[], outputs=[history_drawer])
+
+                 with antd.Col(span=24, md=16):
+                     with ms.Div(elem_classes="right_panel"):
+                         gr.HTML('<div class="render_header"><span class="header_btn"></span><span class="header_btn"></span><span class="header_btn"></span></div>')
+                         # Move sandbox outside of tabs for always-on visibility
+                         sandbox = gr.HTML(elem_classes="html_content")
+                         with antd.Tabs(active_key="empty", render_tab_bar="() => null") as state_tab:
+                             with antd.Tabs.Item(key="empty"):
+                                 empty = antd.Empty(description="empty input", elem_classes="right_content")
+                             with antd.Tabs.Item(key="loading"):
+                                 loading = antd.Spin(True, tip="coding...", size="large", elem_classes="right_content")
+
+     def generation_code(query: Optional[str], _setting: Dict[str, str], _history: Optional[History]):
+         if query is None:
+             query = ''
+         if _history is None:
+             _history = []
+         messages = history_to_messages(_history, _setting['system'])
+         messages.append({'role': 'user', 'content': query})
+
+         try:
+             completion = client.chat.completions.create(
+                 model="deepseek-ai/DeepSeek-V3-0324",
+                 messages=messages,
+                 stream=True
+             )
+
+             content = ""
+             for chunk in completion:
+                 if chunk.choices[0].delta.content:
+                     content += chunk.choices[0].delta.content
+                     yield {
+                         code_output: content,
+                         state_tab: gr.update(active_key="loading"),
+                         code_drawer: gr.update(open=True),
+                     }
+
+             # Final response
+             _history = messages_to_history(messages + [{
+                 'role': 'assistant',
+                 'content': content
+             }])
+
+             yield {
+                 code_output: content,
+                 history: _history,
+                 sandbox: send_to_sandbox(remove_code_block(content)),
+                 state_tab: gr.update(active_key="render"),
+                 code_drawer: gr.update(open=False),
+             }
+
+         except Exception as e:
+             error_message = f"Error: {str(e)}"
+             yield {
+                 code_output: error_message,
+                 state_tab: gr.update(active_key="empty"),
+                 code_drawer: gr.update(open=True),
+             }
+
+     btn.click(generation_code,
+               inputs=[input, setting, history],
+               outputs=[code_output, history, sandbox, state_tab, code_drawer])
+
+     clear_btn.click(clear_history, inputs=[], outputs=[history])

  if __name__ == "__main__":
-     demo.queue(api_open=False).launch(show_api=False)
+     demo.queue(default_concurrency_limit=20).launch(ssr_mode=False)
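
The preview path in the new app.py never writes the generated page to disk: `send_to_sandbox` wraps the model output in a full HTML document, base64-encodes it into a `data:` URI, and loads that URI in a sandboxed `<iframe>`. A minimal standalone sketch of the technique (`html_page` is a placeholder value, and the sandbox flags shown are a reduced set of those used above):

```python
import base64

# Placeholder for the HTML produced by the model.
html_page = "<!DOCTYPE html><html><body><h1>Hello</h1></body></html>"

# Encode the whole document into a data: URI so the iframe needs no server
# round-trip or temporary file.
encoded = base64.b64encode(html_page.encode("utf-8")).decode("utf-8")
data_uri = f"data:text/html;charset=utf-8;base64,{encoded}"

# The sandbox attribute whitelists what the embedded page may do; app.py
# grants a broader set (scripts, forms, popups, modals, presentation).
iframe = (
    f'<iframe src="{data_uri}" width="100%" height="920px" '
    f'sandbox="allow-scripts allow-forms"></iframe>'
)
print(iframe)
```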
app_allenai.py DELETED
@@ -1,67 +0,0 @@
- import gradio as gr
- from gradio_client import Client
-
- MODELS = {"OLMo-2-1124-13B-Instruct": "akhaliq/olmo-anychat", "Llama-3.1-Tulu-3-8B": "akhaliq/allen-test"}
-
-
- def create_chat_fn(client):
-     def chat(message, history):
-         response = client.predict(
-             message=message,
-             system_prompt="You are a helpful AI assistant.",
-             temperature=0.7,
-             max_new_tokens=1024,
-             top_k=40,
-             repetition_penalty=1.1,
-             top_p=0.95,
-             api_name="/chat",
-         )
-         return response
-
-     return chat
-
-
- def set_client_for_session(model_name, request: gr.Request):
-     headers = {}
-     if request and hasattr(request, "request") and hasattr(request.request, "headers"):
-         x_ip_token = request.request.headers.get("x-ip-token")
-         if x_ip_token:
-             headers["X-IP-Token"] = x_ip_token
-
-     return Client(MODELS[model_name], headers=headers)
-
-
- def safe_chat_fn(message, history, client):
-     if client is None:
-         return "Error: Client not initialized. Please refresh the page."
-     return create_chat_fn(client)(message, history)
-
-
- with gr.Blocks() as demo:
-     client = gr.State()
-
-     model_dropdown = gr.Dropdown(
-         choices=list(MODELS.keys()), value="OLMo-2-1124-13B-Instruct", label="Select Model", interactive=True
-     )
-
-     chat_interface = gr.ChatInterface(fn=safe_chat_fn, additional_inputs=[client])
-
-     # Update client when model changes
-     def update_model(model_name, request):
-         return set_client_for_session(model_name, request)
-
-     model_dropdown.change(
-         fn=update_model,
-         inputs=[model_dropdown],
-         outputs=[client],
-     )
-
-     # Initialize client on page load
-     demo.load(
-         fn=set_client_for_session,
-         inputs=gr.State("OLMo-2-1124-13B-Instruct"),
-         outputs=client,
-     )
-
- if __name__ == "__main__":
-     demo.launch()
app_cerebras.py DELETED
@@ -1,19 +0,0 @@
- import os
-
- import cerebras_gradio
-
- from utils import get_app
-
- demo = get_app(
-     models=[
-         "llama3.1-8b",
-         "llama3.1-70b",
-         "llama3.1-405b",
-     ],
-     default_model="llama3.1-70b",
-     src=cerebras_gradio.registry,
-     accept_token=not os.getenv("CEREBRAS_API_KEY"),
- )
-
- if __name__ == "__main__":
-     demo.launch()
app_claude.py DELETED
@@ -1,21 +0,0 @@
- import os
-
- import anthropic_gradio
-
- from utils import get_app
-
- demo = get_app(
-     models=[
-         "claude-3-5-sonnet-20241022",
-         "claude-3-5-haiku-20241022",
-         "claude-3-opus-20240229",
-         "claude-3-sonnet-20240229",
-         "claude-3-haiku-20240307",
-     ],
-     default_model="claude-3-5-sonnet-20241022",
-     src=anthropic_gradio.registry,
-     accept_token=not os.getenv("ANTHROPIC_API_KEY"),
- )
-
- if __name__ == "__main__":
-     demo.launch()
app_cohere.py DELETED
@@ -1,21 +0,0 @@
- import os
-
- import cohere_gradio
-
- from utils import get_app
-
- demo = get_app(
-     models=[
-         "command-r",
-         "command-r-08-2024",
-         "command-r-plus",
-         "command-r-plus-08-2024",
-         "command-r7b-12-2024",
-     ],
-     default_model="command-r7b-12-2024",
-     src=cohere_gradio.registry,
-     accept_token=not os.getenv("COHERE_API_KEY"),
- )
-
- if __name__ == "__main__":
-     demo.launch()
app_compare.py DELETED
@@ -1,210 +0,0 @@
- import os
-
- import google.generativeai as genai
- import gradio as gr
- import openai
- from anthropic import Anthropic
- from openai import OpenAI  # Add explicit OpenAI import
-
-
- def get_all_models():
-     """Get all available models from the registries."""
-     return [
-         "SambaNova: Meta-Llama-3.2-1B-Instruct",
-         "SambaNova: Meta-Llama-3.2-3B-Instruct",
-         "SambaNova: Llama-3.2-11B-Vision-Instruct",
-         "SambaNova: Llama-3.2-90B-Vision-Instruct",
-         "SambaNova: Meta-Llama-3.1-8B-Instruct",
-         "SambaNova: Meta-Llama-3.1-70B-Instruct",
-         "SambaNova: Meta-Llama-3.1-405B-Instruct",
-         "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
-         "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
-         "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
-         "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
-         "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
-         "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
-         "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
-         "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
-         "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
-     ]
-
-
- def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
-     """Generate a prompt for models to discuss and build upon previous
-     responses.
-     """
-     prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
-
- Previous responses from other AI models:
- {chr(10).join(f"- {response}" for response in previous_responses)}
-
- Please provide your perspective while:
- 1. Acknowledging key insights from previous responses
- 2. Adding any missing important points
- 3. Respectfully noting if you disagree with anything and explaining why
- 4. Building towards a complete answer
-
- Keep your response focused and concise (max 3-4 paragraphs)."""
-     return prompt
-
-
- def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
-     """Generate a prompt for final consensus building."""
-     return f"""Review this multi-AI discussion about: "{original_question}"
-
- Discussion history:
- {chr(10).join(discussion_history)}
-
- As a final synthesizer, please:
- 1. Identify the key points where all models agreed
- 2. Explain how any disagreements were resolved
- 3. Present a clear, unified answer that represents our collective best understanding
- 4. Note any remaining uncertainties or caveats
-
- Keep the final consensus concise but complete."""
-
-
- def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
-     import openai
-
-     client = openai.OpenAI(api_key=api_key)
-     response = client.chat.completions.create(model=model, messages=messages)
-     return response.choices[0].message.content
-
-
- def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
-     """Chat with Anthropic's Claude model."""
-     client = Anthropic(api_key=api_key)
-     response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
-     return response.content[0].text
-
-
- def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
-     """Chat with Gemini Pro model."""
-     genai.configure(api_key=api_key)
-     model = genai.GenerativeModel("gemini-pro")
-
-     # Convert messages to Gemini format
-     gemini_messages = []
-     for msg in messages:
-         role = "user" if msg["role"] == "user" else "model"
-         gemini_messages.append({"role": role, "parts": [msg["content"]]})
-
-     response = model.generate_content([m["parts"][0] for m in gemini_messages])
-     return response.text
-
-
- def chat_with_sambanova(
-     messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
- ) -> str:
-     """Chat with SambaNova's models using their OpenAI-compatible API."""
-     client = openai.OpenAI(
-         api_key=api_key,
-         base_url="https://api.sambanova.ai/v1",
-     )
-
-     response = client.chat.completions.create(
-         model=model_name,
-         messages=messages,
-         temperature=0.1,
-         top_p=0.1,  # Use the specific model name passed in
-     )
-     return response.choices[0].message.content
-
-
- def chat_with_hyperbolic(
-     messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
- ) -> str:
-     """Chat with Hyperbolic's models using their OpenAI-compatible API."""
-     client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
-
-     # Add system message to the start of the messages list
-     full_messages = [
-         {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
-         *messages,
-     ]
-
-     response = client.chat.completions.create(
-         model=model_name,  # Use the specific model name passed in
-         messages=full_messages,
-         temperature=0.7,
-         max_tokens=1024,
-     )
-     return response.choices[0].message.content
-
-
- def multi_model_consensus(
-     question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
- ) -> list[tuple[str, str]]:
-     if not selected_models:
-         raise gr.Error("Please select at least one model to chat with.")
-
-     chat_history = []
-     progress(0, desc="Getting responses from all models...")
-
-     # Get responses from all models in parallel
-     for i, model in enumerate(selected_models):
-         provider, model_name = model.split(": ", 1)
-         progress((i + 1) / len(selected_models), desc=f"Getting response from {model}...")
-
-         try:
-             if provider == "Anthropic":
-                 api_key = os.getenv("ANTHROPIC_API_KEY")
-                 response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
-             elif provider == "SambaNova":
-                 api_key = os.getenv("SAMBANOVA_API_KEY")
-                 response = chat_with_sambanova(
-                     messages=[
-                         {"role": "system", "content": "You are a helpful assistant"},
-                         {"role": "user", "content": question},
-                     ],
-                     api_key=api_key,
-                     model_name=model_name,
-                 )
-             elif provider == "Hyperbolic":
-                 api_key = os.getenv("HYPERBOLIC_API_KEY")
-                 response = chat_with_hyperbolic(
-                     messages=[{"role": "user", "content": question}],
-                     api_key=api_key,
-                     model_name=model_name,
-                 )
-             else:  # Gemini
-                 api_key = os.getenv("GEMINI_API_KEY")
-                 response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)
-
-             chat_history.append((model, response))
-         except Exception as e:
-             chat_history.append((model, f"Error: {e!s}"))
-
-     progress(1.0, desc="Done!")
-     return chat_history
-
-
- with gr.Blocks() as demo:
-     gr.Markdown("# Model Response Comparison")
-     gr.Markdown("""Select multiple models to compare their responses""")
-
-     with gr.Row():
-         with gr.Column():
-             model_selector = gr.Dropdown(
-                 choices=get_all_models(),
-                 multiselect=True,
-                 label="Select Models",
-                 info="Choose models to compare",
-                 value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
-             )
-
-     chatbot = gr.Chatbot(height=600, label="Model Responses")
-     msg = gr.Textbox(label="Prompt", placeholder="Ask a question to compare model responses...")
-
-     def respond(message, selected_models):
-         chat_history = multi_model_consensus(message, selected_models, rounds=1)
-         return chat_history
-
-     msg.submit(respond, [msg, model_selector], [chatbot])
-
- for fn in demo.fns.values():
-     fn.api_name = False
-
- if __name__ == "__main__":
-     demo.launch()
app_crew.py DELETED
@@ -1,8 +0,0 @@
- import ai_gradio
- import gradio as gr
-
- demo = gr.load(
-     name="crewai:gpt-4-turbo",
-     crew_type="article",  # or 'support'
-     src=ai_gradio.registry,
- )
app_deepseek.py DELETED
@@ -1,23 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the hyperbolic models but keep their full names for loading
- DEEPSEEK_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("deepseek:")]
-
- # Create display names without the prefix
- DEEPSEEK_MODELS_DISPLAY = [k.replace("deepseek:", "") for k in DEEPSEEK_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=DEEPSEEK_MODELS_FULL,  # Use the full names with prefix
-     default_model=DEEPSEEK_MODELS_FULL[-1],
-     dropdown_label="Select DeepSeek Model",
-     choices=DEEPSEEK_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
-     coder=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
app_experimental.py DELETED
@@ -1,300 +0,0 @@
- import os
- import random
-
- import google.generativeai as genai
- import gradio as gr
- import openai
- from anthropic import Anthropic
- from openai import OpenAI  # Add explicit OpenAI import
-
-
- def get_all_models():
-     """Get all available models from the registries."""
-     return [
-         "SambaNova: Meta-Llama-3.2-1B-Instruct",
-         "SambaNova: Meta-Llama-3.2-3B-Instruct",
-         "SambaNova: Llama-3.2-11B-Vision-Instruct",
-         "SambaNova: Llama-3.2-90B-Vision-Instruct",
-         "SambaNova: Meta-Llama-3.1-8B-Instruct",
-         "SambaNova: Meta-Llama-3.1-70B-Instruct",
-         "SambaNova: Meta-Llama-3.1-405B-Instruct",
-         "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
-         "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
-         "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
-         "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
-         "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
-         "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
-         "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
-         "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
-         "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
-     ]
-
-
- def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
-     """Generate a prompt for models to discuss and build upon previous
-     responses.
-     """
-     prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
-
- Previous responses from other AI models:
- {chr(10).join(f"- {response}" for response in previous_responses)}
-
- Please provide your perspective while:
- 1. Acknowledging key insights from previous responses
- 2. Adding any missing important points
- 3. Respectfully noting if you disagree with anything and explaining why
- 4. Building towards a complete answer
-
- Keep your response focused and concise (max 3-4 paragraphs)."""
-     return prompt
-
-
- def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
-     """Generate a prompt for final consensus building."""
-     return f"""Review this multi-AI discussion about: "{original_question}"
-
- Discussion history:
- {chr(10).join(discussion_history)}
-
- As a final synthesizer, please:
- 1. Identify the key points where all models agreed
- 2. Explain how any disagreements were resolved
- 3. Present a clear, unified answer that represents our collective best understanding
- 4. Note any remaining uncertainties or caveats
-
- Keep the final consensus concise but complete."""
-
-
- def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
-     import openai
-
-     client = openai.OpenAI(api_key=api_key)
-     response = client.chat.completions.create(model=model, messages=messages)
-     return response.choices[0].message.content
-
-
- def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
-     """Chat with Anthropic's Claude model."""
-     client = Anthropic(api_key=api_key)
-     response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
-     return response.content[0].text
-
-
- def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
-     """Chat with Gemini Pro model."""
-     genai.configure(api_key=api_key)
-     model = genai.GenerativeModel("gemini-pro")
-
-     # Convert messages to Gemini format
-     gemini_messages = []
-     for msg in messages:
-         role = "user" if msg["role"] == "user" else "model"
-         gemini_messages.append({"role": role, "parts": [msg["content"]]})
-
-     response = model.generate_content([m["parts"][0] for m in gemini_messages])
-     return response.text
-
-
- def chat_with_sambanova(
-     messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
- ) -> str:
-     """Chat with SambaNova's models using their OpenAI-compatible API."""
-     client = openai.OpenAI(
-         api_key=api_key,
-         base_url="https://api.sambanova.ai/v1",
-     )
-
-     response = client.chat.completions.create(
-         model=model_name,
-         messages=messages,
-         temperature=0.1,
-         top_p=0.1,  # Use the specific model name passed in
-     )
-     return response.choices[0].message.content
-
-
- def chat_with_hyperbolic(
-     messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
- ) -> str:
-     """Chat with Hyperbolic's models using their OpenAI-compatible API."""
-     client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
-
-     # Add system message to the start of the messages list
-     full_messages = [
-         {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
-         *messages,
-     ]
-
-     response = client.chat.completions.create(
-         model=model_name,  # Use the specific model name passed in
-         messages=full_messages,
-         temperature=0.7,
-         max_tokens=1024,
-     )
-     return response.choices[0].message.content
-
-
- def multi_model_consensus(
-     question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
- ) -> list[tuple[str, str]]:
-     if not selected_models:
-         raise gr.Error("Please select at least one model to chat with.")
-
-     chat_history = []
-     discussion_history = []
-
-     # Initial responses
-     progress(0, desc="Getting initial responses...")
-     initial_responses = []
-     for i, model in enumerate(selected_models):
-         provider, model_name = model.split(": ", 1)
-
-         try:
-             if provider == "Anthropic":
-                 api_key = os.getenv("ANTHROPIC_API_KEY")
-                 response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
-             elif provider == "SambaNova":
-                 api_key = os.getenv("SAMBANOVA_API_KEY")
-                 response = chat_with_sambanova(
-                     messages=[
-                         {"role": "system", "content": "You are a helpful assistant"},
-                         {"role": "user", "content": question},
-                     ],
-                     api_key=api_key,
-                 )
-             elif provider == "Hyperbolic":  # Add Hyperbolic case
-                 api_key = os.getenv("HYPERBOLIC_API_KEY")
-                 response = chat_with_hyperbolic(messages=[{"role": "user", "content": question}], api_key=api_key)
-             else:  # Gemini
-                 api_key = os.getenv("GEMINI_API_KEY")
-                 response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)
-
-             initial_responses.append(f"{model}: {response}")
-             discussion_history.append(f"Initial response from {model}:\n{response}")
-             chat_history.append((f"Initial response from {model}", response))
-         except Exception as e:
-             chat_history.append((f"Error from {model}", str(e)))
-
-     # Discussion rounds
-     for round_num in range(rounds):
-         progress((round_num + 1) / (rounds + 2), desc=f"Discussion round {round_num + 1}...")
-         round_responses = []
-
-         random.shuffle(selected_models)  # Randomize order each round
-         for model in selected_models:
-             provider, model_name = model.split(": ", 1)
-
-             try:
-                 discussion_prompt = generate_discussion_prompt(question, discussion_history)
-                 if provider == "Anthropic":
-                     api_key = os.getenv("ANTHROPIC_API_KEY")
-                     response = chat_with_anthropic(
-                         messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
-                     )
-                 elif provider == "SambaNova":
-                     api_key = os.getenv("SAMBANOVA_API_KEY")
-                     response = chat_with_sambanova(
-                         messages=[
-                             {"role": "system", "content": "You are a helpful assistant"},
-                             {"role": "user", "content": discussion_prompt},
-                         ],
-                         api_key=api_key,
-                     )
-                 elif provider == "Hyperbolic":  # Add Hyperbolic case
-                     api_key = os.getenv("HYPERBOLIC_API_KEY")
-                     response = chat_with_hyperbolic(
-                         messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
-                     )
-                 else:  # Gemini
-                     api_key = os.getenv("GEMINI_API_KEY")
-                     response = chat_with_gemini(
-                         messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
-                     )
-
-                 round_responses.append(f"{model}: {response}")
-                 discussion_history.append(f"Round {round_num + 1} - {model}:\n{response}")
-                 chat_history.append((f"Round {round_num + 1} - {model}", response))
-             except Exception as e:
-                 chat_history.append((f"Error from {model} in round {round_num + 1}", str(e)))
-
-     # Final consensus
-     progress(0.9, desc="Building final consensus...")
-     model = selected_models[0]
-     provider, model_name = model.split(": ", 1)
-
-     try:
-         consensus_prompt = generate_consensus_prompt(question, discussion_history)
-         if provider == "Anthropic":
-             api_key = os.getenv("ANTHROPIC_API_KEY")
-             final_consensus = chat_with_anthropic(
-                 messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
-             )
-         elif provider == "SambaNova":
-             api_key = os.getenv("SAMBANOVA_API_KEY")
-             final_consensus = chat_with_sambanova(
-                 messages=[
-                     {"role": "system", "content": "You are a helpful assistant"},
-                     {"role": "user", "content": consensus_prompt},
-                 ],
-                 api_key=api_key,
-             )
-         elif provider == "Hyperbolic":  # Add Hyperbolic case
-             api_key = os.getenv("HYPERBOLIC_API_KEY")
-             final_consensus = chat_with_hyperbolic(
-                 messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
-             )
-         else:  # Gemini
-             api_key = os.getenv("GEMINI_API_KEY")
-             final_consensus = chat_with_gemini(
-                 messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
-             )
-     except Exception as e:
-         final_consensus = f"Error getting consensus from {model}: {e!s}"
-
-     chat_history.append(("Final Consensus", final_consensus))
-
-     progress(1.0, desc="Done!")
-     return chat_history
-
-
- with gr.Blocks() as demo:
-     gr.Markdown("# Experimental Multi-Model Consensus Chat")
-     gr.Markdown(
-         """Select multiple models to collaborate on answering your question.
-         The models will discuss with each other and attempt to reach a consensus.
-         Maximum 3 models can be selected at once."""
-     )
-
-     with gr.Row():
-         with gr.Column():
-             model_selector = gr.Dropdown(
-                 choices=get_all_models(),
-                 multiselect=True,
-                 label="Select Models (max 3)",
-                 info="Choose up to 3 models to participate in the discussion",
-                 value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
-                 max_choices=3,
-             )
-             rounds_slider = gr.Slider(
-                 minimum=1,
-                 maximum=2,
-                 value=1,
-                 step=1,
-                 label="Discussion Rounds",
-                 info="Number of rounds of discussion between models",
-             )
-
-     chatbot = gr.Chatbot(height=600, label="Multi-Model Discussion")
-     msg = gr.Textbox(label="Your Question", placeholder="Ask a question for the models to discuss...")
-
-     def respond(message, selected_models, rounds):
-         chat_history = multi_model_consensus(message, selected_models, rounds)
-         return chat_history
-
-     msg.submit(respond, [msg, model_selector, rounds_slider], [chatbot], api_name="consensus_chat")
-
- for fn in demo.fns.values():
-     fn.api_name = False
-
- if __name__ == "__main__":
-     demo.launch()
app_fal.py DELETED
@@ -1,16 +0,0 @@
- import fal_gradio
-
- from utils import get_app
-
- demo = get_app(
-     models=[
-         "fal-ai/ltx-video",
-         "fal-ai/ltx-video/image-to-video",
-         "fal-ai/luma-photon",
-     ],
-     default_model="fal-ai/luma-photon",
-     src=fal_gradio.registry,
- )
-
- if __name__ == "__main__":
-     demo.launch()
app_fireworks.py DELETED
@@ -1,19 +0,0 @@
- import os
-
- import fireworks_gradio
-
- from utils import get_app
-
- demo = get_app(
-     models=[
-         "f1-preview",
-         "f1-mini-preview",
-         "llama-v3p3-70b-instruct",
-     ],
-     default_model="llama-v3p3-70b-instruct",
-     src=fireworks_gradio.registry,
-     accept_token=not os.getenv("FIREWORKS_API_KEY"),
- )
-
- if __name__ == "__main__":
-     demo.launch()
app_gemini.py DELETED
@@ -1,22 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the Gemini models but keep their full names for loading
- GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
-
- # Create display names without the prefix
- GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=GEMINI_MODELS_FULL,  # Use the full names with prefix
-     default_model=GEMINI_MODELS_FULL[-1],
-     dropdown_label="Select Gemini Model",
-     choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
-     src=ai_gradio.registry,
-     fill_height=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
app_gemini_camera.py DELETED
@@ -1,23 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the Gemini models but keep their full names for loading
- GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
-
- # Create display names without the prefix
- GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=GEMINI_MODELS_FULL,  # Use the full names with prefix
-     default_model=GEMINI_MODELS_FULL[-2],
-     dropdown_label="Select Gemini Model",
-     choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
-     src=ai_gradio.registry,
-     camera=True,
-     fill_height=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
app_gemini_coder.py DELETED
@@ -1,23 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the Gemini models but keep their full names for loading
- GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
-
- # Create display names without the prefix
- GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=GEMINI_MODELS_FULL,  # Use the full names with prefix
-     default_model=GEMINI_MODELS_FULL[0],
-     dropdown_label="Select Gemini Model",
-     choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
-     src=ai_gradio.registry,
-     fill_height=True,
-     coder=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
app_gemini_voice.py DELETED
@@ -1,23 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the Gemini models but keep their full names for loading
- GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
-
- # Create display names without the prefix
- GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=GEMINI_MODELS_FULL,  # Use the full names with prefix
-     default_model=GEMINI_MODELS_FULL[-2],
-     dropdown_label="Select Gemini Model",
-     choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
-     src=ai_gradio.registry,
-     enable_voice=True,
-     fill_height=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
app_groq.py DELETED
@@ -1,21 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the Groq models from the registry
- GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
-
- # Create display names without the prefix
- GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
-
- demo = get_app(
-     models=GROQ_MODELS_FULL,
-     default_model=GROQ_MODELS_FULL[-2],
-     src=ai_gradio.registry,
-     dropdown_label="Select Groq Model",
-     choices=GROQ_MODELS_DISPLAY,
-     fill_height=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
app_groq_coder.py DELETED
@@ -1,23 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the Groq models but keep their full names for loading
- GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
-
- # Create display names without the prefix
- GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=GROQ_MODELS_FULL,  # Use the full names with prefix
-     default_model=GROQ_MODELS_FULL[-1],
-     dropdown_label="Select Groq Model",
-     choices=GROQ_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
-     coder=True,
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
app_hf.py DELETED
@@ -1,17 +0,0 @@
- from utils import get_app
-
- demo = get_app(
-     models=[
-         "microsoft/Phi-3.5-mini-instruct",
-         "HuggingFaceTB/SmolLM2-1.7B-Instruct",
-         "google/gemma-2-2b-it",
-         "openai-community/gpt2",
-         "microsoft/phi-2",
-         "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
-     ],
-     default_model="HuggingFaceTB/SmolLM2-1.7B-Instruct",
-     src="models",
- )
-
- if __name__ == "__main__":
-     demo.launch()
 
app_huggingface.py DELETED
@@ -1,22 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the Hugging Face models but keep their full names for loading
- HUGGINGFACE_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("huggingface:")]
-
- # Create display names without the prefix
- HUGGINGFACE_MODELS_DISPLAY = [k.replace("huggingface:", "") for k in HUGGINGFACE_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=HUGGINGFACE_MODELS_FULL,  # Use the full names with prefix
-     default_model=HUGGINGFACE_MODELS_FULL[0],
-     dropdown_label="Select Huggingface Model",
-     choices=HUGGINGFACE_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
-     coder=True,
-     provider="fireworks-ai",
-     bill_to="huggingface",
- )
 
app_hyperbolic.py DELETED
@@ -1,19 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the hyperbolic models but keep their full names for loading
- HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
-
- # Create display names without the prefix
- HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=HYPERBOLIC_MODELS_FULL,  # Use the full names with prefix
-     default_model=HYPERBOLIC_MODELS_FULL[-2],
-     dropdown_label="Select Hyperbolic Model",
-     choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
- )
 
app_hyperbolic_coder.py DELETED
@@ -1,20 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the hyperbolic models but keep their full names for loading
- HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
-
- # Create display names without the prefix
- HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=HYPERBOLIC_MODELS_FULL,  # Use the full names with prefix
-     default_model=HYPERBOLIC_MODELS_FULL[-2],
-     dropdown_label="Select Hyperbolic Model",
-     choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
-     coder=True,
- )
 
app_langchain.py DELETED
@@ -1,23 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the LangChain models but keep their full names for loading
- LANGCHAIN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("langchain:")]
-
- # Create display names without the prefix
- LANGCHAIN_MODELS_DISPLAY = [k.replace("langchain:", "") for k in LANGCHAIN_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=LANGCHAIN_MODELS_FULL,  # Use the full names with prefix
-     default_model=LANGCHAIN_MODELS_FULL[0],
-     dropdown_label="Select Langchain Model",
-     choices=LANGCHAIN_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
-
 
app_lumaai.py DELETED
@@ -1,7 +0,0 @@
- import gradio as gr
- import lumaai_gradio
-
- demo = gr.load(
-     name="dream-machine",
-     src=lumaai_gradio.registry,
- )
 
app_marco_o1.py DELETED
@@ -1,12 +0,0 @@
- import gradio as gr
- import spaces
- import transformers_gradio
-
- demo = gr.load(name="AIDC-AI/Marco-o1", src=transformers_gradio.registry)
- demo.fn = spaces.GPU()(demo.fn)
-
- for fn in demo.fns.values():
-     fn.api_name = False
-
- if __name__ == "__main__":
-     demo.launch()
 
app_meta.py DELETED
@@ -1,6 +0,0 @@
- import gradio as gr
-
- demo = gr.load("models/meta-llama/Llama-3.3-70B-Instruct")
-
- if __name__ == "__main__":
-     demo.launch()
 
app_mindsearch.py DELETED
@@ -1,12 +0,0 @@
- import gradio as gr
-
- # Load the Gradio space
- demo = gr.load(name="internlm/MindSearch", src="spaces")
-
- # Disable API access for all functions
- if hasattr(demo, "fns"):
-     for fn in demo.fns.values():
-         fn.api_name = False
-
- if __name__ == "__main__":
-     demo.launch()
 
app_minimax.py DELETED
@@ -1,22 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the MiniMax models but keep their full names for loading
- MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
-
- # Create display names without the prefix
- MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
-     default_model=MINIMAX_MODELS_FULL[0],
-     dropdown_label="Select Minimax Model",
-     choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
 
app_minimax_coder.py DELETED
@@ -1,23 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the MiniMax models but keep their full names for loading
- MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
-
- # Create display names without the prefix
- MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
-     default_model=MINIMAX_MODELS_FULL[0],
-     dropdown_label="Select Minimax Model",
-     choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
-     coder=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
 
app_mistral.py DELETED
@@ -1,23 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the mistral models but keep their full names for loading
- MISTRAL_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("mistral:")]
-
- # Create display names without the prefix
- MISTRAL_MODELS_DISPLAY = [k.replace("mistral:", "") for k in MISTRAL_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=MISTRAL_MODELS_FULL,  # Use the full names with prefix
-     default_model=MISTRAL_MODELS_FULL[5],
-     dropdown_label="Select Mistral Model",
-     choices=MISTRAL_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
-     coder=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
 
app_moondream.py DELETED
@@ -1,13 +0,0 @@
- import gradio as gr
-
- # Load the Gradio space
- demo = gr.load(name="akhaliq/moondream", src="spaces")
-
-
- # Disable API access for all functions
- if hasattr(demo, "fns"):
-     for fn in demo.fns.values():
-         fn.api_name = False
-
- if __name__ == "__main__":
-     demo.launch()
 
app_nvidia.py DELETED
@@ -1,22 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the nvidia models but keep their full names for loading
- NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]
-
- # Create display names without the prefix
- NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=NVIDIA_MODELS_FULL,  # Use the full names with prefix
-     default_model=NVIDIA_MODELS_FULL[0],
-     dropdown_label="Select Nvidia Model",
-     choices=NVIDIA_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
 
app_nvidia_coder.py DELETED
@@ -1,23 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the nvidia models but keep their full names for loading
- NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]
-
- # Create display names without the prefix
- NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=NVIDIA_MODELS_FULL,  # Use the full names with prefix
-     default_model=NVIDIA_MODELS_FULL[-1],
-     dropdown_label="Select Nvidia Model",
-     choices=NVIDIA_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
-     coder=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
 
app_omini.py DELETED
@@ -1,10 +0,0 @@
- import gradio as gr
-
- # Load the Gradio space
- demo = gr.load(name="Yuanshi/OminiControl", src="spaces")
-
-
- # Disable API access for all functions
- if hasattr(demo, "fns"):
-     for fn in demo.fns.values():
-         fn.api_name = False
 
app_openai.py DELETED
@@ -1,21 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the OpenAI models but keep their full names for loading
- OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
-
- # Create display names without the prefix
- OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=OPENAI_MODELS_FULL,  # Use the full names with prefix
-     default_model=OPENAI_MODELS_FULL[-1],
-     dropdown_label="Select OpenAI Model",
-     choices=OPENAI_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
 
app_openai_coder.py DELETED
@@ -1,22 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the OpenAI models but keep their full names for loading
- OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
-
- # Create display names without the prefix
- OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=OPENAI_MODELS_FULL,  # Use the full names with prefix
-     default_model=OPENAI_MODELS_FULL[-1],
-     dropdown_label="Select OpenAI Model",
-     choices=OPENAI_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
-     coder=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
 
app_openai_voice.py DELETED
@@ -1,23 +0,0 @@
- import os
-
- import openai_gradio
-
- from utils import get_app
-
- demo = get_app(
-     models=[
-         "gpt-4o-realtime-preview",
-         "gpt-4o-realtime-preview-2024-12-17",
-         "gpt-4o-realtime-preview-2024-10-01",
-         "gpt-4o-mini-realtime-preview",
-         "gpt-4o-mini-realtime-preview-2024-12-17",
-     ],
-     default_model="gpt-4o-mini-realtime-preview-2024-12-17",
-     src=openai_gradio.registry,
-     accept_token=not os.getenv("OPENAI_API_KEY"),
-     twilio_sid=os.getenv("TWILIO_SID_OPENAI"),
-     twilio_token=os.getenv("TWILIO_AUTH_OPENAI"),
- )
-
- if __name__ == "__main__":
-     demo.launch()
 
app_openrouter.py DELETED
@@ -1,22 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the OpenRouter models but keep their full names for loading
- OPENROUTER_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openrouter:")]
-
- # Create display names without the prefix
- OPENROUTER_MODELS_DISPLAY = [k.replace("openrouter:", "") for k in OPENROUTER_MODELS_FULL]
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=OPENROUTER_MODELS_FULL,  # Use the full names with prefix
-     default_model=OPENROUTER_MODELS_FULL[-1],
-     dropdown_label="Select OpenRouter Model",
-     choices=OPENROUTER_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
-     coder=True,
- )
-
- if __name__ == "__main__":
-     demo.launch()
 
app_paligemma.py DELETED
@@ -1,78 +0,0 @@
- import gradio as gr
- from gradio_client import Client, handle_file
-
- MODELS = {"Paligemma-10B": "akhaliq/paligemma2-10b-ft-docci-448"}
-
-
- def create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
-     def chat(message, history):
-         text = message.get("text", "")
-         files = message.get("files", [])
-         processed_files = [handle_file(f) for f in files]
-
-         response = client.predict(
-             message={"text": text, "files": processed_files},
-             system_prompt=system_prompt,
-             temperature=temperature,
-             max_new_tokens=max_tokens,
-             top_k=top_k,
-             repetition_penalty=rep_penalty,
-             top_p=top_p,
-             api_name="/chat",
-         )
-         return response
-
-     return chat
-
-
- def set_client_for_session(model_name, request: gr.Request):
-     headers = {}
-     if request and hasattr(request, "headers"):
-         x_ip_token = request.headers.get("x-ip-token")
-         if x_ip_token:
-             headers["X-IP-Token"] = x_ip_token
-
-     return Client(MODELS[model_name], headers=headers)
-
-
- def safe_chat_fn(message, history, client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
-     if client is None:
-         return "Error: Client not initialized. Please refresh the page."
-     try:
-         return create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p)(
-             message, history
-         )
-     except Exception as e:
-         print(f"Error during chat: {e!s}")
-         return f"Error during chat: {e!s}"
-
-
- with gr.Blocks() as demo:
-     client = gr.State()
-
-     with gr.Accordion("Advanced Settings", open=False):
-         system_prompt = gr.Textbox(value="You are a helpful AI assistant.", label="System Prompt")
-         with gr.Row():
-             temperature = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, label="Temperature")
-             top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, label="Top P")
-         with gr.Row():
-             top_k = gr.Slider(minimum=1, maximum=100, value=40, step=1, label="Top K")
-             rep_penalty = gr.Slider(minimum=1.0, maximum=2.0, value=1.1, label="Repetition Penalty")
-         max_tokens = gr.Slider(minimum=64, maximum=4096, value=1024, step=64, label="Max Tokens")
-
-     chat_interface = gr.ChatInterface(
-         fn=safe_chat_fn,
-         additional_inputs=[client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p],
-         multimodal=True,
-     )
-
-     # Initialize client on page load with default model
-     demo.load(fn=set_client_for_session, inputs=[gr.State("Paligemma-10B")], outputs=[client])  # Using default model
-
-     # Move the API access check here, after demo is defined
-     if hasattr(demo, "fns"):
-         for fn in demo.fns.values():
-             fn.api_name = False
-
- if __name__ == "__main__":
-     demo.launch()
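Aside: stripped of the per-session UI plumbing, the pattern deleted above reduces to a single `gradio_client` call. This standalone sketch reuses the Space name and the `/chat` endpoint arguments exactly as they appear in app_paligemma.py above; `"photo.jpg"` is a placeholder for any local image path.

```python
from gradio_client import Client, handle_file

# Space name and endpoint signature taken from app_paligemma.py;
# "photo.jpg" is a hypothetical local file, not part of the original diff.
client = Client("akhaliq/paligemma2-10b-ft-docci-448")
result = client.predict(
    message={"text": "Describe this image.", "files": [handle_file("photo.jpg")]},
    system_prompt="You are a helpful AI assistant.",
    temperature=0.7,
    max_new_tokens=1024,
    top_k=40,
    repetition_penalty=1.1,
    top_p=0.95,
    api_name="/chat",
)
print(result)
```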
 
app_perplexity.py DELETED
@@ -1,23 +0,0 @@
- import os
-
- import perplexity_gradio
-
- from utils import get_app
-
- demo = get_app(
-     models=[
-         "llama-3.1-sonar-large-128k-online",
-         "llama-3.1-sonar-small-128k-online",
-         "llama-3.1-sonar-huge-128k-online",
-         "llama-3.1-sonar-small-128k-chat",
-         "llama-3.1-sonar-large-128k-chat",
-         "llama-3.1-8b-instruct",
-         "llama-3.1-70b-instruct",
-     ],
-     default_model="llama-3.1-sonar-huge-128k-online",
-     src=perplexity_gradio.registry,
-     accept_token=not os.getenv("PERPLEXITY_API_KEY"),
- )
-
- if __name__ == "__main__":
-     demo.launch()
 
app_playai.py DELETED
@@ -1,10 +0,0 @@
- import gradio as gr
- import playai_gradio
-
- demo = gr.load(
-     name="PlayDialog",
-     src=playai_gradio.registry,
- )
-
- for fn in demo.fns.values():
-     fn.api_name = False
 
app_qwen.py DELETED
@@ -1,19 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the qwen models but keep their full names for loading
- QWEN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("qwen:")]
-
- # Create display names without the prefix
- QWEN_MODELS_DISPLAY = [k.replace("qwen:", "") for k in QWEN_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=QWEN_MODELS_FULL,  # Use the full names with prefix
-     default_model=QWEN_MODELS_FULL[-1],
-     dropdown_label="Select Qwen Model",
-     choices=QWEN_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
- )
 
app_qwen_coder.py DELETED
@@ -1,20 +0,0 @@
- import ai_gradio
-
- from utils_ai_gradio import get_app
-
- # Get the qwen models but keep their full names for loading
- QWEN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("qwen:")]
-
- # Create display names without the prefix
- QWEN_MODELS_DISPLAY = [k.replace("qwen:", "") for k in QWEN_MODELS_FULL]
-
-
- # Create and launch the interface using get_app utility
- demo = get_app(
-     models=QWEN_MODELS_FULL,  # Use the full names with prefix
-     default_model=QWEN_MODELS_FULL[-1],
-     dropdown_label="Select Qwen Model",
-     choices=QWEN_MODELS_DISPLAY,  # Display names without prefix
-     fill_height=True,
-     coder=True,
- )