sidbhasin committed on
Commit
f1d75a2
·
verified ·
1 Parent(s): f75777e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +128 -34
app.py CHANGED
@@ -1,60 +1,154 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
 
3
 
4
- # Initialize the Qwen client
5
- client = InferenceClient("Qwen/Qwen2.5-Coder")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
  def generate_code(prompt):
8
- messages = [
9
- {"role": "system", "content": "You are an expert Python developer who creates clean, efficient code."},
10
- {"role": "user", "content": f"Create a Python tool for the following requirement: {prompt}"}
11
- ]
12
-
13
- response = client.text_generation(
14
- messages,
15
- max_new_tokens=1024,
16
- temperature=0.7,
17
- top_p=0.9,
18
- repetition_penalty=1.1
19
- )
20
-
21
- return response["generated_text"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
  # Create the Gradio interface
24
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple")) as demo:
25
- gr.Markdown("# 🛠️ AI Tool Builder by Syncmerce")
26
- gr.Markdown("Enter your tool requirements and get production-ready Python code!")
 
 
 
 
 
27
 
28
  with gr.Row():
29
- with gr.Column():
30
  prompt_input = gr.Textbox(
31
  label="Tool Requirements",
32
- placeholder="Describe the tool you want to build...",
33
  lines=4
34
  )
35
- generate_btn = gr.Button("Generate Tool", variant="primary")
 
 
36
 
37
- with gr.Column():
38
  code_output = gr.Code(
39
  label="Generated Code",
40
  language="python",
41
- lines=20
 
42
  )
43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  generate_btn.click(
45
  fn=generate_code,
46
  inputs=prompt_input,
47
- outputs=code_output
 
48
  )
49
 
50
- gr.Markdown("### Examples:")
51
- gr.Examples(
52
- [
53
- ["Create a PDF text extractor tool that can process multiple files"],
54
- ["Build a web scraper that extracts product prices from e-commerce sites"],
55
- ["Make a tool for batch image resizing with a progress bar"]
56
- ],
57
- inputs=prompt_input
 
 
 
 
 
 
 
 
 
58
  )
59
 
60
- demo.launch()
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
+ import gc
5
 
6
# Initialize model and tokenizer globally
def load_model():
    """Load the Qwen2.5-Coder instruct model and its tokenizer.

    Returns:
        tuple: ``(model, tokenizer)`` on success, or ``(None, None)`` when
        loading fails (missing weights, insufficient memory, network error).
    """
    checkpoint = "Qwen/Qwen2.5-Coder-32B-Instruct"
    try:
        tok = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
        lm = AutoModelForCausalLM.from_pretrained(
            checkpoint,
            device_map="auto",          # spread layers across available devices
            trust_remote_code=True,
            torch_dtype=torch.float16,  # half precision to cut memory footprint
        )
    except Exception as e:
        # Best-effort load: report and let callers handle the (None, None) pair.
        print(f"Error loading model: {str(e)}")
        return None, None
    return lm, tok

model, tokenizer = load_model()
23
 
24
  def generate_code(prompt):
25
+ try:
26
+ # Clear CUDA cache
27
+ if torch.cuda.is_available():
28
+ torch.cuda.empty_cache()
29
+ gc.collect()
30
+
31
+ # Prepare the prompt
32
+ messages = [
33
+ {"role": "system", "content": "You are an expert Python developer. Generate clean, efficient, and well-commented code based on the user's requirements."},
34
+ {"role": "user", "content": f"Create a Python tool for the following requirement: {prompt}"}
35
+ ]
36
+
37
+ # Format the messages using the chat template
38
+ text = tokenizer.apply_chat_template(
39
+ messages,
40
+ tokenize=False,
41
+ add_generation_prompt=True
42
+ )
43
+
44
+ # Generate the response
45
+ model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
46
+ generated_ids = model.generate(
47
+ **model_inputs,
48
+ max_new_tokens=1024,
49
+ temperature=0.7,
50
+ top_p=0.9,
51
+ repetition_penalty=1.1,
52
+ do_sample=True
53
+ )
54
+
55
+ # Decode the response
56
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
57
+
58
+ # Extract only the code part from the response
59
+ code_start = response.find("```python")
60
+ code_end = response.find("```", code_start + 8)
61
+
62
+ if code_start != -1 and code_end != -1:
63
+ code = response[code_start + 8:code_end].strip()
64
+ else:
65
+ code = response
66
+
67
+ return code
68
+
69
+ except Exception as e:
70
+ raise gr.Error(f"Code generation failed: {str(e)}")
71
+ finally:
72
+ # Clear memory
73
+ if torch.cuda.is_available():
74
+ torch.cuda.empty_cache()
75
+ gc.collect()
76
 
77
  # Create the Gradio interface
78
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple")) as demo:
79
+ gr.Markdown(
80
+ """
81
+ # 🛠️ AI Tool Builder by Syncmerce
82
+
83
+ Generate production-ready Python code for your tools using AI. Simply describe what you want to build!
84
+ """
85
+ )
86
 
87
  with gr.Row():
88
+ with gr.Column(scale=1):
89
  prompt_input = gr.Textbox(
90
  label="Tool Requirements",
91
+ placeholder="Describe the tool you want to build... Be specific about features and functionality.",
92
  lines=4
93
  )
94
+ with gr.Row():
95
+ generate_btn = gr.Button("🔨 Generate Tool", variant="primary")
96
+ clear_btn = gr.Button("🗑️ Clear")
97
 
98
+ with gr.Column(scale=1):
99
  code_output = gr.Code(
100
  label="Generated Code",
101
  language="python",
102
+ lines=25,
103
+ show_label=True
104
  )
105
 
106
+ # Add examples
107
+ gr.Examples(
108
+ examples=[
109
+ ["Create a PDF text extractor that can process multiple files and save the extracted text to a txt file"],
110
+ ["Build a web scraper that extracts product prices from Amazon and saves results to CSV"],
111
+ ["Create an image processing tool that can resize multiple images and add watermarks"],
112
+ ["Build a YouTube video downloader with progress bar and quality selection"],
113
+ ],
114
+ inputs=prompt_input,
115
+ outputs=code_output,
116
+ fn=generate_code,
117
+ cache_examples=True,
118
+ )
119
+
120
+ # Add event handlers
121
  generate_btn.click(
122
  fn=generate_code,
123
  inputs=prompt_input,
124
+ outputs=code_output,
125
+ api_name="generate"
126
  )
127
 
128
+ clear_btn.click(
129
+ fn=lambda: (None, None),
130
+ inputs=None,
131
+ outputs=[prompt_input, code_output],
132
+ api_name="clear"
133
+ )
134
+
135
+ # Add footer
136
+ gr.Markdown(
137
+ """
138
+ ### Tips for better results:
139
+ - Be specific about the features you want
140
+ - Mention input/output formats
141
+ - Specify any special requirements or libraries
142
+
143
+ *Note: Generated code may need minor adjustments based on your specific use case.*
144
+ """
145
  )
146
 
147
+ # Launch the app
148
+ demo.launch(
149
+ share=True,
150
+ enable_queue=True,
151
+ show_error=True,
152
+ server_name="0.0.0.0",
153
+ server_port=7860
154
+ )