sidbhasin committed on
Commit 3542f7a
1 Parent(s): f41050c

Update app.py

Files changed (1)
  1. app.py +205 -159
app.py CHANGED
@@ -1,161 +1,207 @@
- import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer
- import torch
- import gc
  import os
-
- def cleanup_memory():
-     if torch.cuda.is_available():
-         torch.cuda.empty_cache()
-         torch.cuda.synchronize()
-     gc.collect()
-
- def load_model():
-     cleanup_memory()
-     model_name = "Qwen/Qwen2.5-Coder-14b" # Using 14B model as a balance between performance and resources
-     try:
-         tokenizer = AutoTokenizer.from_pretrained(
-             model_name,
-             trust_remote_code=True,
-             cache_dir="/tmp/.cache/huggingface"
-         )
-         model = AutoModelForCausalLM.from_pretrained(
-             model_name,
-             device_map="auto",
-             trust_remote_code=True,
-             torch_dtype=torch.float16,
-             cache_dir="/tmp/.cache/huggingface"
-         )
-         return model, tokenizer
-     except Exception as e:
-         print(f"Error loading model: {str(e)}")
-         return None, None
-
- print("Loading model...")
- model, tokenizer = load_model()
- print("Model loaded successfully!")
-
- def generate_artifact(prompt):
-     try:
-         cleanup_memory()
-
-         # Crafting a prompt specifically for artifacts
-         full_prompt = f"""Create a complete, runnable artifact for the following requirement:
- {prompt}
-
- Requirements:
- Generate a complete, self-contained code
- Include all necessary imports
- Add proper documentation and comments
- Ensure the code is runnable
- Include any required HTML/CSS if it's a web component
-
- Response should be a complete artifact that can be run directly.
- """
-
-         # Tokenize input
-         encoded_input = tokenizer(
-             full_prompt,
-             return_tensors="pt",
-             truncation=True,
-             max_length=512,
-             padding=True
-         ).to(model.device)
-
-         # Generate
-         outputs = model.generate(
-             input_ids=encoded_input['input_ids'],
-             max_new_tokens=2048, # Increased for complete artifacts
-             temperature=0.7,
-             top_p=0.95,
-             repetition_penalty=1.1,
-             do_sample=True,
-             pad_token_id=tokenizer.eos_token_id
-         )
-
-         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-         # Extract code with improved parsing for artifacts
-         code_start = response.find("```")
-         if code_start != -1:
-             code_end = response.find("```", code_start + 3)
-             code = response[code_start + 3:code_end].strip()
-             # Remove language identifier if present
-             if code.startswith(("python", "html", "javascript", "css")):
-                 code = code[code.find("\n"):].strip()
-         else:
-             code = response.strip()
-
-         cleanup_memory()
-         return code
-
-     except Exception as e:
-         cleanup_memory()
-         return f"Error generating artifact: {str(e)}"
-
- # Create the Gradio interface
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
-     gr.Markdown(
-         """
-         # 🎨 AI Tool Builder by Syncmerce - Artifacts Generator
-         Create complete, runnable artifacts with a simple description. Generate websites, visualizations, and more!
-         """
-     )
-
-     with gr.Row():
-         with gr.Column(scale=1):
-             prompt_input = gr.Textbox(
-                 label="Artifact Description",
-                 placeholder="Describe the artifact you want to create (e.g., 'Create a 3D rotating globe visualization using Three.js')",
-                 lines=4
-             )
-             generate_btn = gr.Button("🔮 Generate Artifact", variant="primary")
-
-         with gr.Column(scale=1):
-             code_output = gr.Code(
-                 label="Generated Artifact Code",
-                 language="python",
-                 lines=30,
-                 show_label=True
-             )
-
-     # Add curated examples for artifacts[2][3]
-     gr.Examples(
-         examples=[
-             ["Create a 3D rotating globe using Three.js with texture mapping and smooth animation"],
-             ["Generate a real-time data visualization dashboard using D3.js with multiple charts"],
-             ["Build a simple game using HTML5 Canvas with basic physics and user interaction"],
-             ["Create an interactive particle system with mouse tracking using p5.js"],
-         ],
-         inputs=prompt_input
-     )
-
-     # Event handler
-     generate_btn.click(
-         fn=generate_artifact,
-         inputs=prompt_input,
-         outputs=code_output
-     )
-
-     gr.Markdown(
-         """
-         ### Tips for Better Results:
-         - Be specific about the visual elements you want
-         - Mention any specific libraries or frameworks
-         - Describe interactions and animations
-         - Specify any data requirements
-
-         ### Supported Types of Artifacts:
-         - Interactive Web Applications
-         - Data Visualizations
-         - 3D Graphics and Animations
-         - Games and Interactive Experiences
-         - Creative Coding Projects
-         """
-     )
-
- # Launch the app
- demo.launch(
-     server_name="0.0.0.0",
-     server_port=7860
- )
+ import re
+ from http import HTTPStatus
+ from typing import Dict, List, Optional, Tuple
+ import base64
+
+
+ import dashscope
+ import gradio as gr
+ from dashscope import Generation
+ from dashscope.api_entities.dashscope_response import Role
+
+ import modelscope_studio.components.base as ms
+ import modelscope_studio.components.legacy as legacy
+ import modelscope_studio.components.antd as antd
+ from config import DEMO_LIST, SystemPrompt
+
+ YOUR_API_TOKEN = os.getenv('YOUR_API_TOKEN')
+ dashscope.api_key = YOUR_API_TOKEN
+
+ History = List[Tuple[str, str]]
+ Messages = List[Dict[str, str]]
+
+ def history_to_messages(history: History, system: str) -> Messages:
+     messages = [{'role': Role.SYSTEM, 'content': system}]
+     for h in history:
+         messages.append({'role': Role.USER, 'content': h[0]})
+         messages.append({'role': Role.ASSISTANT, 'content': h[1]})
+     return messages
+
+
+ def messages_to_history(messages: Messages) -> Tuple[str, History]:
+     assert messages[0]['role'] == Role.SYSTEM
+     history = []
+     for q, r in zip(messages[1::2], messages[2::2]):
+         history.append([q['content'], r['content']])
+     return history
+
+
+ def remove_code_block(text):
+     pattern = r'```html\n(.+?)\n```'
+     match = re.search(pattern, text, re.DOTALL)
+     if match:
+         return match.group(1).strip()
+     else:
+         return text.strip()
+
+ def history_render(history: History):
+     return gr.update(open=True), history
+
+ def clear_history():
+     return []
+
+ def send_to_sandbox(code):
+     encoded_html = base64.b64encode(code.encode('utf-8')).decode('utf-8')
+     data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
+     return f"<iframe src=\"{data_uri}\" width=\"100%\" height=\"920px\"></iframe>"
+     # return {
+     #     '/src/App.jsx': {
+     #         'code': code,
+     #         'fpath': '/src/App.jsx',
+     #     },
+     #     # Keys are file paths and must be given as absolute paths
+     #     '/src/index.js': {
+     #         'code':
+     #         'import React from "react"; import ReactDOM from "react-dom"; import App from "./App"; const rootElement = document.getElementById("root"); ReactDOM.render(<App />, rootElement);',
+     #         'fpath': '/src/index.js',
+     #     },
+     #     '/package.json': {
+     #         'code': '{"name":"demo", "main": "./src/index.js", "dependencies":{ "react": "18.3.1", "react-dom": "18.3.1", "antd": "5.21.6", "styled-components": "6.1.13" }}',
+     #         'fpath': '/package.json',
+     #     },
+     # }
+
+ def demo_card_click(e: gr.EventData):
+     index = e._data['component']['index']
+     return DEMO_LIST[index]['description']
+
+ with gr.Blocks(css_paths="app.css") as demo:
+     history = gr.State([])
+     setting = gr.State({
+         "system": SystemPrompt,
+     })
+
+     with ms.Application() as app:
+         with antd.ConfigProvider():
+             with antd.Row(gutter=[32, 12]) as layout:
+                 with antd.Col(span=24, md=8):
+                     with antd.Flex(vertical=True, gap="middle", wrap=True):
+                         header = gr.HTML("""
+                             <div class="left_header">
+                                 <img src="//img.alicdn.com/imgextra/i2/O1CN01KDhOma1DUo8oa7OIU_!!6000000000220-1-tps-240-240.gif" width="200px" />
+                                 <h1>Qwen2.5-Coder</h1>
+                             </div>
+                         """)
+                         input = antd.InputTextarea(
+                             size="large", allow_clear=True, placeholder="Please enter what kind of application you want")
+                         # input = gr.TextArea(placeholder="Please enter what kind of application you want", show_label=False, container=False)
+                         btn = antd.Button("send", type="primary", size="large")
+                         clear_btn = antd.Button("clear history", type="default", size="large")
+
+                         antd.Divider("examples")
+                         with antd.Flex(gap="small", wrap=True):
+                             with ms.Each(DEMO_LIST):
+                                 with antd.Card(hoverable=True, as_item="card") as demoCard:
+                                     antd.CardMeta()
+                                 demoCard.click(demo_card_click, outputs=[input])
+
+                         antd.Divider("setting")
+
+                         with antd.Flex(gap="small", wrap=True):
+                             settingPromptBtn = antd.Button(
+                                 "⚙️ set system Prompt", type="default")
+                             codeBtn = antd.Button("🧑‍💻 view code", type="default")
+                             historyBtn = antd.Button("📜 history", type="default")
+
+                     with antd.Modal(open=False, title="set system Prompt", width="800px") as system_prompt_modal:
+                         systemPromptInput = antd.InputTextarea(
+                             SystemPrompt, auto_size=True)
+
+                     settingPromptBtn.click(lambda: gr.update(
+                         open=True), inputs=[], outputs=[system_prompt_modal])
+                     system_prompt_modal.ok(lambda input: ({"system": input}, gr.update(
+                         open=False)), inputs=[systemPromptInput], outputs=[setting, system_prompt_modal])
+                     system_prompt_modal.cancel(lambda: gr.update(
+                         open=False), outputs=[system_prompt_modal])
+
+                     with antd.Drawer(open=False, title="code", placement="left", width="750px") as code_drawer:
+                         code_output = legacy.Markdown()
+
+                     codeBtn.click(lambda: gr.update(open=True),
+                                   inputs=[], outputs=[code_drawer])
+                     code_drawer.close(lambda: gr.update(
+                         open=False), inputs=[], outputs=[code_drawer])
+
+                     with antd.Drawer(open=False, title="history", placement="left", width="900px") as history_drawer:
+                         history_output = legacy.Chatbot(show_label=False, flushing=False, height=960, elem_classes="history_chatbot")
+
+                     historyBtn.click(history_render, inputs=[history], outputs=[history_drawer, history_output])
+                     history_drawer.close(lambda: gr.update(
+                         open=False), inputs=[], outputs=[history_drawer])
+
+                 with antd.Col(span=24, md=16):
+                     with ms.Div(elem_classes="right_panel"):
+                         gr.HTML('<div class="render_header"><span class="header_btn"></span><span class="header_btn"></span><span class="header_btn"></span></div>')
+                         with antd.Tabs(active_key="empty", render_tab_bar="() => null") as state_tab:
+                             with antd.Tabs.Item(key="empty"):
+                                 empty = antd.Empty(description="empty input", elem_classes="right_content")
+                             with antd.Tabs.Item(key="loading"):
+                                 loading = antd.Spin(True, tip="coding...", size="large", elem_classes="right_content")
+                             with antd.Tabs.Item(key="render"):
+                                 sandbox = gr.HTML(elem_classes="html_content")
+                                 # sandbox = pro.FrontendCodeSandbox(elem_style={
+                                 #     'height': '920px',
+                                 #     'width': '100%'
+                                 # })
+
+             def generation_code(query: Optional[str], _setting: Dict[str, str], _history: Optional[History]):
+                 if query is None:
+                     query = ''
+                 if _history is None:
+                     _history = []
+                 messages = history_to_messages(_history, _setting['system'])
+                 messages.append({'role': Role.USER, 'content': query})
+
+                 gen = Generation.call(model="qwen2.5-coder-32b-instruct",
+                                       messages=messages,
+                                       result_format='message',
+                                       stream=True)
+                 for response in gen:
+                     if response.status_code == HTTPStatus.OK:
+                         role = response.output.choices[0].message.role
+                         content = response.output.choices[0].message.content
+                         if response.output.choices[0].finish_reason == 'stop':
+                             _history = messages_to_history(messages + [{
+                                 'role': role,
+                                 'content': content
+                             }])
+                             print('history')
+                             print(_history)
+                             yield {
+                                 code_output: content,
+                                 history: _history,
+                                 sandbox: send_to_sandbox(remove_code_block(content)),
+                                 state_tab: gr.update(active_key="render"),
+                                 code_drawer: gr.update(open=False),
+                             }
+                         else:
+                             yield {
+                                 code_output: content,
+                                 state_tab: gr.update(active_key="loading"),
+                                 code_drawer: gr.update(open=True),
+                             }
+                     else:
+                         raise ValueError(
+                             'Request id: %s, Status code: %s, error code: %s, error message: %s'
+                             % (response.request_id, response.status_code, response.code,
+                                response.message))
+
+             btn.click(generation_code,
+                       inputs=[input, setting, history],
+                       outputs=[code_output, history, sandbox, state_tab, code_drawer])
+
+             clear_btn.click(clear_history, inputs=[], outputs=[history])

+ if __name__ == "__main__":
+     demo.queue(default_concurrency_limit=20).launch(ssr_mode=False)