acecalisto3 committed
Commit 647ecd3 · verified · 1 Parent(s): c27429b

Update app.py

Files changed (1): app.py (+221 -67)
app.py CHANGED
@@ -8,17 +8,17 @@ import logging
 
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
-from huggingface_hub import InferenceClient, cached_download
+from huggingface_hub import InferenceClient, cached_download, Repository, HfApi
+from IPython.display import display, HTML
 
 # --- Configuration ---
-VERBOSE = True
-MAX_HISTORY = 5
-MAX_TOKENS = 2048
-TEMPERATURE = 0.7
-TOP_P = 0.8
-REPETITION_PENALTY = 1.5
-MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"
-API_KEY = "YOUR_API_KEY"
+VERBOSE = True
+MAX_HISTORY = 5
+MAX_TOKENS = 2048
+TEMPERATURE = 0.7
+TOP_P = 0.8
+REPETITION_PENALTY = 1.5
+DEFAULT_PROJECT_PATH = "./my-hf-project"  # Default project directory
 
 # --- Logging Setup ---
 logging.basicConfig(
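
Note on the import hunk above: `cached_download` was deprecated and has since been removed from recent `huggingface_hub` releases, so both the old and the new import line can fail on a current install. A minimal sketch of the modern replacement, assuming a recent `huggingface_hub` (the repo and filename here are placeholders):

```python
# Sketch: hf_hub_download is the current replacement for cached_download.
# It fetches one file from a Hub repo and returns the local cached path.
from huggingface_hub import hf_hub_download

config_path = hf_hub_download(repo_id="gpt2", filename="config.json")
print(config_path)
```
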
@@ -27,15 +27,6 @@ logging.basicConfig(
     format="%(asctime)s - %(levelname)s - %(message)s",
 )
 
-# --- Agents ---
-agents = [
-    "WEB_DEV",
-    "AI_SYSTEM_PROMPT",
-    "PYTHON_CODE_DEV",
-    "DATA_SCIENCE",
-    "UI_UX_DESIGN",
-]
-
 # --- Prompts ---
 PREFIX = """
 {date_time_str}
@@ -51,6 +42,11 @@ LOG_RESPONSE = """
 RESPONSE: {resp}
 """
 
+# --- Global Variables ---
+current_model = None  # Store the currently loaded model
+repo = None  # Store the Hugging Face Repository object
+model_descriptions = {}  # Store model descriptions
+
 # --- Functions ---
 def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
     prompt = ""
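
A note on the hunk above: these globals are process-wide, so in a deployed Gradio app every visitor shares the same `current_model`, `repo`, and `model_descriptions`. If per-user state were wanted instead, `gr.State` is the session-scoped mechanism Gradio provides. A sketch of that general pattern, not a drop-in change to the commit (names are illustrative):

```python
import gradio as gr

# Sketch: gr.State stores one value per browser session, unlike a module
# global, which is shared by every user of the running app.
with gr.Blocks() as demo:
    model_state = gr.State(None)  # hypothetical per-session model slot
    status = gr.Textbox(label="Status")

    def load_for_session(state):
        # A real handler would build the pipeline here; whatever is
        # returned for model_state is stored for this session only.
        return "loaded-model-placeholder", "Model loaded for this session."

    gr.Button("Load").click(load_for_session, inputs=model_state, outputs=[model_state, status])

demo.launch()
```
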
@@ -59,36 +55,31 @@ def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turn
     prompt += f"Human: {message}\nAssistant:"
     return prompt
 
-def generate(
+def generate_response(
     prompt: str,
     history: List[Tuple[str, str]],
-    agent_name: str = agents[0],
+    agent_name: str = "Generic Agent",
     sys_prompt: str = "",
     temperature: float = TEMPERATURE,
     max_new_tokens: int = MAX_TOKENS,
     top_p: float = TOP_P,
     repetition_penalty: float = REPETITION_PENALTY,
 ) -> str:
-    # Load model and tokenizer
-    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
-    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
-
-    # Create a text generation pipeline
-    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
-
-    # Prepare the full prompt
+    global current_model
+    if current_model is None:
+        return "Error: Please load a model first."
+
     date_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     full_prompt = PREFIX.format(
         date_time_str=date_time_str,
         purpose=sys_prompt,
         agent_name=agent_name
     ) + format_prompt(prompt, history)
-
+
     if VERBOSE:
         logging.info(LOG_PROMPT.format(content=full_prompt))
-
-    # Generate response
-    response = generator(
+
+    response = current_model(
         full_prompt,
         max_new_tokens=max_new_tokens,
         temperature=temperature,
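
Two reading aids for the hunk above: it ends mid-call because the hunk boundary falls inside `response = current_model(...)` (the unshown old line between the hunks is presumably `top_p=top_p,`), and the next hunk then splits the result on `"Assistant:"`. That split is needed because a transformers text-generation pipeline returns the prompt together with the completion by default. A hedged alternative, assuming the standard pipeline API, is to ask for the completion only:

```python
# Sketch: return_full_text=False makes the text-generation pipeline return
# only the newly generated tokens, so no split on "Assistant:" is needed.
response = current_model(
    full_prompt,
    max_new_tokens=max_new_tokens,
    temperature=temperature,
    top_p=top_p,
    repetition_penalty=repetition_penalty,
    do_sample=True,
    return_full_text=False,
)[0]["generated_text"]
assistant_response = response.strip()
```
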
@@ -96,46 +87,209 @@ def generate(
         repetition_penalty=repetition_penalty,
         do_sample=True
     )[0]['generated_text']
-
-    # Extract the assistant's response
+
     assistant_response = response.split("Assistant:")[-1].strip()
-
+
     if VERBOSE:
         logging.info(LOG_RESPONSE.format(resp=assistant_response))
-
+
     return assistant_response
 
+def load_hf_model(model_name: str):
+    """Loads a language model and fetches its description."""
+    global current_model, model_descriptions
+    try:
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        current_model = pipeline(
+            "text-generation",
+            model=model_name,
+            tokenizer=tokenizer,
+            model_kwargs={"load_in_8bit": True}
+        )
+
+        # Fetch and store the model description
+        api = HfApi()
+        model_info = api.model_info(model_name)
+        model_descriptions[model_name] = model_info.pipeline_tag
+        return f"Successfully loaded model: {model_name}"
+    except Exception as e:
+        return f"Error loading model: {str(e)}"
+
+def execute_command(command: str, project_path: str = None) -> str:
+    """Executes a shell command and returns the output."""
+    try:
+        if project_path:
+            process = subprocess.Popen(command, shell=True, cwd=project_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        else:
+            process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        output, error = process.communicate()
+        if error:
+            return f"Error: {error.decode('utf-8')}"
+        return output.decode("utf-8")
+    except Exception as e:
+        return f"Error executing command: {str(e)}"
+
+def create_hf_project(project_name: str, project_path: str = DEFAULT_PROJECT_PATH):
+    """Creates a new Hugging Face project."""
+    global repo
+    try:
+        if os.path.exists(project_path):
+            return f"Error: Directory '{project_path}' already exists!"
+        # Create the repository
+        repo = Repository(local_dir=project_path, clone_from=None)
+        repo.git_init()
+
+        # Add basic files (optional, you can customize this)
+        with open(os.path.join(project_path, "README.md"), "w") as f:
+            f.write(f"# {project_name}\n\nA new Hugging Face project.")
+
+        # Stage all changes
+        repo.git_add(pattern="*")
+        repo.git_commit(commit_message="Initial commit")
+
+        return f"Hugging Face project '{project_name}' created successfully at '{project_path}'"
+    except Exception as e:
+        return f"Error creating Hugging Face project: {str(e)}"
+
+def list_project_files(project_path: str = DEFAULT_PROJECT_PATH) -> str:
+    """Lists files in the project directory."""
+    try:
+        files = os.listdir(project_path)
+        if not files:
+            return "Project directory is empty."
+        return "\n".join(files)
+    except Exception as e:
+        return f"Error listing project files: {str(e)}"
+
+def read_file_content(file_path: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
+    """Reads and returns the content of a file in the project."""
+    try:
+        full_path = os.path.join(project_path, file_path)
+        with open(full_path, "r") as f:
+            content = f.read()
+        return content
+    except Exception as e:
+        return f"Error reading file: {str(e)}"
+
+def write_to_file(file_path: str, content: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
+    """Writes content to a file in the project."""
+    try:
+        full_path = os.path.join(project_path, file_path)
+        with open(full_path, "w") as f:
+            f.write(content)
+        return f"Successfully wrote to '{file_path}'"
+    except Exception as e:
+        return f"Error writing to file: {str(e)}"
+
+def preview_project(project_path: str = DEFAULT_PROJECT_PATH):
+    """Provides a preview of the project, if applicable."""
+    # Assuming a simple HTML preview for now
+    try:
+        index_html_path = os.path.join(project_path, "index.html")
+        if os.path.exists(index_html_path):
+            with open(index_html_path, "r") as f:
+                html_content = f.read()
+            display(HTML(html_content))
+            return "Previewing 'index.html'"
+        else:
+            return "No 'index.html' found for preview."
+    except Exception as e:
+        return f"Error previewing project: {str(e)}"
+
 def main():
     with gr.Blocks() as demo:
-        gr.Markdown("## FragMixt: The No-Code Development Powerhouse")
-        gr.Markdown("### Your AI-Powered Development Companion")
-
-        # Chat Interface
-        chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
-
-        # Input Components
-        message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
-        purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
-        agent_name = gr.Dropdown(label="Agents", choices=[s for s in agents], value=agents[0], interactive=True)
-        sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
-        temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
-        max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum numbers of new tokens")
-        top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
-        repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
-
-        # Button to submit the message
-        submit_button = gr.Button(value="Send")
-
-        # Project Explorer Tab
-        with gr.Tab("Project Explorer"):
-            project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
-            explore_button = gr.Button(value="Explore")
-            project_output = gr.Textbox(label="File Tree", lines=20)
-
-        # Chat App Logic Tab
-        with gr.Tab("Chat App"):
+        gr.Markdown("## FragMixt: Your Hugging Face No-Code App Builder")
+
+        # --- Model Selection ---
+        with gr.Tab("Model"):
+            # --- Model Dropdown with Categories ---
+            model_categories = gr.Dropdown(
+                choices=["Text Generation", "Text Summarization", "Code Generation", "Translation", "Question Answering"],
+                label="Model Category",
+                value="Text Generation"
+            )
+            model_name = gr.Dropdown(
+                choices=[],  # Initially empty, will be populated based on category
+                label="Hugging Face Model Name",
+            )
+            load_button = gr.Button("Load Model")
+            load_output = gr.Textbox(label="Output")
+            model_description = gr.Markdown(label="Model Description")
+
+            # --- Function to populate model names based on category ---
+            def update_model_dropdown(category):
+                models = []
+                api = HfApi()
+                for model in api.list_models():
+                    if model.pipeline_tag == category:
+                        models.append(model.modelId)
+                return gr.Dropdown.update(choices=models)
+
+            # --- Event handler for category dropdown ---
+            model_categories.change(
+                fn=update_model_dropdown,
+                inputs=model_categories,
+                outputs=model_name,
+            )
+
+            # --- Event handler to display model description ---
+            def display_model_description(model_name):
+                global model_descriptions
+                if model_name in model_descriptions:
+                    return model_descriptions[model_name]
+                else:
+                    return "Model description not available."
+
+            model_name.change(
+                fn=display_model_description,
+                inputs=model_name,
+                outputs=model_description,
+            )
+
+            load_button.click(load_hf_model, inputs=model_name, outputs=load_output)
+
+        # --- Chat Interface ---
+        with gr.Tab("Chat"):
+            chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True)
+            message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
+            purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
+            agent_name = gr.Dropdown(label="Agents", choices=["Generic Agent"], value="Generic Agent", interactive=True)
+            sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
+            temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
+            max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1048 * 10, step=64, interactive=True, info="The maximum numbers of new tokens")
+            top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
+            repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
+            submit_button = gr.Button(value="Send")
             history = gr.State([])
-            examples = [
-                ["What is the purpose of this AI agent?", "I am designed to assist with no-code development tasks."],
-                ["Can you help me generate a Python function to calculate the factorial of a number?", "Sure! Here is a Python function to calculate the factorial of a number:"],
-                ["Generate a simple HTML page with a heading and a paragraph.", "
+
+            def run_chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
+                response = generate_response(message, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
+                history.append((message, response))
+                return history, history
+
+            submit_button.click(run_chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
+
+        # --- Project Management ---
+        with gr.Tab("Project"):
+            project_name = gr.Textbox(label="Project Name", placeholder="MyHuggingFaceApp")
+            create_project_button = gr.Button("Create Hugging Face Project")
+            project_output = gr.Textbox(label="Output", lines=5)
+            file_content = gr.Code(label="File Content", language="python", lines=20)
+            file_path = gr.Textbox(label="File Path (relative to project)", placeholder="src/main.py")
+            read_button = gr.Button("Read File")
+            write_button = gr.Button("Write to File")
+            command_input = gr.Textbox(label="Terminal Command", placeholder="pip install -r requirements.txt")
+            command_output = gr.Textbox(label="Command Output", lines=5)
+            run_command_button = gr.Button("Run Command")
+            preview_button = gr.Button("Preview Project")
+
+            create_project_button.click(create_hf_project, inputs=[project_name], outputs=project_output)
+            read_button.click(read_file_content, inputs=file_path, outputs=file_content)
+            write_button.click(write_to_file, inputs=[file_path, file_content], outputs=project_output)
+            run_command_button.click(execute_command, inputs=command_input, outputs=command_output)
+            preview_button.click(preview_project, outputs=project_output)
+
+    demo.launch()
+
+if __name__ == "__main__":
+    main()
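
A few caveats on the new code, offered as hedged suggestions rather than as fixes to the commit. `update_model_dropdown` compares `model.pipeline_tag` against the human-readable category label (e.g. "Text Generation"), while Hub pipeline tags are lower-case slugs (e.g. "text-generation"), so the comparison will likely never match; it also iterates every model on the Hub and uses the Gradio 3 `gr.Dropdown.update` classmethod, which Gradio 4 removed. A sketch under those assumptions (the label-to-tag mapping is an assumption, and "Code Generation" has no dedicated pipeline tag, so it is mapped to "text-generation"):

```python
import gradio as gr
from huggingface_hub import HfApi

# Assumed mapping from the UI labels used above to Hub pipeline tags.
CATEGORY_TO_TAG = {
    "Text Generation": "text-generation",
    "Text Summarization": "summarization",
    "Code Generation": "text-generation",
    "Translation": "translation",
    "Question Answering": "question-answering",
}

def update_model_dropdown(category: str):
    api = HfApi()
    # Filter server-side and cap the result instead of listing the whole Hub.
    models = [m.modelId for m in api.list_models(filter=CATEGORY_TO_TAG[category], limit=50)]
    # Gradio 4 style: return a component carrying the new properties
    # (on Gradio 3 this would be gr.Dropdown.update(choices=models)).
    return gr.Dropdown(choices=models)
```

Three further runtime notes: `model_kwargs={"load_in_8bit": True}` additionally requires the `bitsandbytes` package and typically a GPU; `execute_command` runs arbitrary user input with `shell=True`, which is a command-injection risk in a public Space; and `preview_project` calls IPython's `display(HTML(...))`, which renders in a notebook but not in the Gradio UI, so returning the HTML string to a `gr.HTML` component would be the Gradio-native route. `Repository` is likewise deprecated in current `huggingface_hub` in favor of `HfApi`-based workflows.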