acecalisto3 committed · verified
Commit 3a881df · 1 Parent(s): 3dfa6ba

Update app.py

Files changed (1):
  1. app.py  +40 -260
app.py CHANGED

@@ -7,9 +7,8 @@ from datetime import datetime
 import logging
 
 import gradio as gr
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 from huggingface_hub import InferenceClient, cached_download
-from safe_search import safe_search
-from i_search import google, i_search as i_s
 
 # --- Configuration ---
 VERBOSE = True  # Enable verbose logging
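
Note on the import hunk above: `cached_download` does not appear to be used anywhere in the code shown in this diff, and it has since been deprecated and later removed from `huggingface_hub`. If this import breaks on a newer install, a likely follow-up (assuming no other call sites rely on it) is:

```python
# hf_hub_download is the current replacement for the removed cached_download helper
from huggingface_hub import InferenceClient, hf_hub_download
```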
@@ -23,8 +22,8 @@ API_KEY = "YOUR_API_KEY" # Replace with your actual Hugging Face API key
 
 # --- Logging Setup ---
 logging.basicConfig(
-    filename="app.log",  # Name of the log file
-    level=logging.INFO,  # Set the logging level (INFO, DEBUG, etc.)
+    filename="app.log",
+    level=logging.INFO,
     format="%(asctime)s - %(levelname)s - %(message)s",
 )
 
@@ -41,7 +40,7 @@ agents = [
 PREFIX = """
 {date_time_str}
 Purpose: {purpose}
-Safe Search: {safe_search}
+Agent: {agent_name}
 """
 
 LOG_PROMPT = """
@@ -52,70 +51,14 @@ LOG_RESPONSE = """
 RESPONSE: {resp}
 """
 
-COMPRESS_HISTORY_PROMPT = """
-You are a helpful AI assistant. Your task is to compress the following history into a summary that is no longer than 512 tokens.
-History:
-{history}
-"""
-
-ACTION_PROMPT = """
-You are a helpful AI assistant. You are working on the task: {task}
-Your current history is:
-{history}
-What is your next thought?
-thought:
-What is your next action?
-action:
-"""
-
-TASK_PROMPT = """
-You are a helpful AI assistant. Your current history is:
-{history}
-What is the next task?
-task:
-"""
-
-UNDERSTAND_TEST_RESULTS_PROMPT = """
-You are a helpful AI assistant. The test results are:
-{test_results}
-What do you want to know about the test results?
-thought:
-"""
-
 # --- Functions ---
 def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
-    """Formats the prompt for the LLM, including the message and relevant history."""
-    prompt = " "
-    # Keep only the last 'max_history_turns' turns
+    prompt = ""
     for user_prompt, bot_response in history[-max_history_turns:]:
-        prompt += f"[INST] {user_prompt} [/ "
-        prompt += f" {bot_response}"
-    prompt += f"[INST] {message} [/ "
+        prompt += f"Human: {user_prompt}\nAssistant: {bot_response}\n"
+    prompt += f"Human: {message}\nAssistant:"
     return prompt
 
-def run_llm(
-    prompt_template: str,
-    stop_tokens: List[str],
-    purpose: str,
-    **prompt_kwargs: Dict
-) -> str:
-    """Runs the LLM with the given prompt and parameters."""
-    seed = random.randint(1, 1111111111111111)
-    logging.info(f"Seed: {seed}")  # Log the seed
-
-    content = PREFIX.format(
-        date_time_str=date_time_str,
-        purpose=purpose,
-        safe_search=safe_search,
-    ) + prompt_template.format(**prompt_kwargs)
-    if VERBOSE:
-        logging.info(LOG_PROMPT.format(content))  # Log the prompt
-
-    resp = client.text_generation(content, max_new_tokens=MAX_TOKENS, stop_sequences=stop_tokens, temperature=TEMPERATURE, top_p=TOP_P, repetition_penalty=REPETITION_PENALTY)
-    if VERBOSE:
-        logging.info(LOG_RESPONSE.format(resp))  # Log the response
-    return resp
-
 def generate(
     prompt: str,
     history: List[Tuple[str, str]],
@@ -126,206 +69,43 @@ def generate(
     top_p: float = TOP_P,
     repetition_penalty: float = REPETITION_PENALTY,
 ) -> str:
-    """Generates text using the LLM."""
-    content = PREFIX.format(
+    # Load model and tokenizer
+    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+
+    # Create a text generation pipeline
+    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+    # Prepare the full prompt
+    date_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    full_prompt = PREFIX.format(
         date_time_str=date_time_str,
-        purpose=purpose,
-        safe_search=safe_search,
-    ) + prompt_template.format(**prompt_kwargs)
-    if VERBOSE:
-        logging.info(LOG_PROMPT.format(content))  # Log the prompt
-
-    stream = client.text_generation(content, stream=True, details=True, return_full_text=False, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty, max_new_tokens=max_new_tokens)
-    resp = ""
-    for response in stream:
-        resp += response.token.text
-
+        purpose=sys_prompt,
+        agent_name=agent_name
+    ) + format_prompt(prompt, history)
+
     if VERBOSE:
-        logging.info(LOG_RESPONSE.format(resp))  # Log the response
-    return resp
-
-def compress_history(purpose: str, task: str, history: List[Tuple[str, str]], directory: str) -> str:
-    """Compresses the history into a shorter summary."""
-    resp = run_llm(
-        COMPRESS_HISTORY_PROMPT,
-        stop_tokens=["observation:", "task:", "action:", "thought:"],
-        purpose=purpose,
-        task=task,
-        history="\n".join(f"[INST] {user_prompt} [/] {bot_response}" for user_prompt, bot_response in history),
-    )
-    history = "observation: {}\n".format(resp)
-    return history
-
-def call_search(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
-    """Performs a search based on the action input."""
-    logging.info(f"CALLING SEARCH: {action_input}")
-    try:
-        if "http" in action_input:
-            if "<" in action_input:
-                action_input = action_input.strip("<")
-            if ">" in action_input:
-                action_input = action_input.strip(">")
-
-            response = i_s(action_input)
-            logging.info(f"Search Result: {response}")
-            history.append(("observation: search result is: {}".format(response), ""))
-        else:
-            history.append(("observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n", ""))
-    except Exception as e:
-        history.append(("observation: {}\n".format(e), ""))
-    return "MAIN", None, history, task
-
-def call_main(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
-    """Handles the main agent interaction loop."""
-    logging.info(f"CALLING MAIN: {action_input}")
-    resp = run_llm(
-        ACTION_PROMPT,
-        stop_tokens=["observation:", "task:", "action:", "thought:"],
-        purpose=purpose,
-        task=task,
-        history="\n".join(f"[INST] {user_prompt} [/] {bot_response}" for user_prompt, bot_response in history),
-    )
-    lines = resp.strip().strip("\n").split("\n")
-    for line in lines:
-        if line == "":
-            continue
-        if line.startswith("thought: "):
-            history.append((line, ""))
-            logging.info(f"Thought: {line}")
-        elif line.startswith("action: "):
-            action_name, action_input = parse_action(line)
-            logging.info(f"Action: {action_name} - {action_input}")
-            history.append((line, ""))
-            if "COMPLETE" in action_name or "COMPLETE" in action_input:
-                task = "END"
-                return action_name, action_input, history, task
-            else:
-                return action_name, action_input, history, task
-        else:
-            history.append((line, ""))
-            logging.info(f"Other Output: {line}")
-    return "MAIN", None, history, task
-
-def call_set_task(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
-    """Sets a new task for the agent."""
-    logging.info(f"CALLING SET_TASK: {action_input}")
-    task = run_llm(
-        TASK_PROMPT,
-        stop_tokens=[],
-        purpose=purpose,
-        task=task,
-        history="\n".join(f"[INST] {user_prompt} [/] {bot_response}" for user_prompt, bot_response in history),
-    ).strip("\n")
-    history.append(("observation: task has been updated to: {}".format(task), ""))
-    return "MAIN", None, history, task
-
-def end_fn(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
-    """Ends the agent interaction."""
-    logging.info(f"CALLING END_FN: {action_input}")
-    task = "END"
-    return "COMPLETE", "COMPLETE", history, task
-
-NAME_TO_FUNC: Dict[str, callable] = {
-    "MAIN": call_main,
-    "UPDATE-TASK": call_set_task,
-    "SEARCH": call_search,
-    "COMPLETE": end_fn,
-}
-
-def run_action(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_name: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
-    """Executes the specified action."""
-    logging.info(f"RUNNING ACTION: {action_name} - {action_input}")
-    try:
-        if "RESPONSE" in action_name or "COMPLETE" in action_name:
-            action_name = "COMPLETE"
-            task = "END"
-            return action_name, "COMPLETE", history, task
+        logging.info(LOG_PROMPT.format(content=full_prompt))
 
-        # compress the history when it is long
-        if len(history) > MAX_HISTORY:
-            logging.info("COMPRESSING HISTORY")
-            history = compress_history(purpose, task, history, directory)
-        if not action_name in NAME_TO_FUNC:
-            action_name = "MAIN"
-        if action_name == "" or action_name is None:
-            action_name = "MAIN"
-        assert action_name in NAME_TO_FUNC
+    # Generate response
+    response = generator(
+        full_prompt,
+        max_new_tokens=max_new_tokens,
+        temperature=temperature,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True
+    )[0]['generated_text']
 
-        logging.info(f"RUN: {action_name} - {action_input}")
-        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
-    except Exception as e:
-        history.append(("observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n", ""))
-        logging.error(f"Error in run_action: {e}")
-        return "MAIN", None, history, task
-
-def run(purpose: str, history: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
-    """Main agent interaction loop."""
-    task = None
-    directory = "./"
-    if history:
-        history = str(history).strip("[]")
-    if not history:
-        history = []
+    # Extract the assistant's response
+    assistant_response = response.split("Assistant:")[-1].strip()
 
-    action_name = "UPDATE-TASK" if task is None else "MAIN"
-    action_input = None
-    while True:
-        logging.info(f"---")
-        logging.info(f"Purpose: {purpose}")
-        logging.info(f"Task: {task}")
-        logging.info(f"---")
-        logging.info(f"History: {history}")
-        logging.info(f"---")
-
-        action_name, action_input, history, task = run_action(
-            purpose,
-            task,
-            history,
-            directory,
-            action_name,
-            action_input,
-        )
-        yield (history)
-        if task == "END":
-            return (history)
-
-################################################
-
-def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 5) -> str:
-    """Formats the prompt for the LLM, including the message and relevant history."""
-    prompt = " "
-    # Keep only the last 'max_history_turns' turns
-    for user_prompt, bot_response in history[-max_history_turns:]:
-        prompt += f"[INST] {user_prompt} [/ "
-        prompt += f" {bot_response}"
-    prompt += f"[INST] {message} [/ "
-    return prompt
-
-def parse_action(line: str) -> Tuple[str, str]:
-    """Parses the action line to get the action name and input."""
-    parts = line.split(":", 1)
-    if len(parts) == 2:
-        action_name = parts[0].replace("action", "").strip()
-        action_input = parts[1].strip()
-    else:
-        action_name = parts[0].replace("action", "").strip()
-        action_input = ""
-    return action_name, action_input
+    if VERBOSE:
+        logging.info(LOG_RESPONSE.format(resp=assistant_response))
+
+    return assistant_response
 
 def main():
-    """Main function to run the Gradio interface."""
-    global client
-    # Initialize the LLM client with your API key
-    try:
-        client = InferenceClient(
-            MODEL_NAME,
-            token=API_KEY  # Replace with your actual API key
-        )
-    except Exception as e:
-        logging.error(f"Error initializing LLM client: {e}")
-        print("Error initializing LLM client. Please check your API key.")
-        return
-
     with gr.Blocks() as demo:
         gr.Markdown("## FragMixt: The No-Code Development Powerhouse")
         gr.Markdown("### Your AI-Powered Development Companion")
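
One performance note on the new `generate` above: `AutoModelForCausalLM.from_pretrained`, `AutoTokenizer.from_pretrained`, and the `pipeline` call all run on every invocation, so each chat turn reloads the full model from disk. A minimal sketch of a fix, assuming the module-level `MODEL_NAME` constant from the configuration section, is to construct the pipeline once and reuse it:

```python
from functools import lru_cache

from transformers import pipeline


@lru_cache(maxsize=1)
def get_generator(model_name: str = MODEL_NAME):
    # Built once on first call; every later call returns the cached pipeline.
    # pipeline() loads the model and tokenizer itself, so the separate
    # AutoModelForCausalLM / AutoTokenizer loads become unnecessary.
    return pipeline("text-generation", model=model_name)


# Inside generate(), the three construction lines would then collapse to:
#     generator = get_generator()
```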
@@ -365,8 +145,7 @@ def main():
 
 def chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
     """Handles the chat interaction."""
-    prompt = format_prompt(message, history)
-    response = generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
+    response = generate(message, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
    history.append((message, response))
     return history, history
 
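With the simplified handler above, the raw message now goes straight into `generate`, which applies `format_prompt` itself. A rough usage sketch (the argument values are hypothetical, and `chat` is the closure defined inside `main`):

```python
history: List[Tuple[str, str]] = []
history, _ = chat(
    purpose="Build a landing page",  # hypothetical purpose string
    message="Scaffold the HTML for me",
    agent_name="WEB_DEV",            # hypothetical entry from the agents list
    sys_prompt="You are a helpful development agent.",
    temperature=0.7,
    max_new_tokens=256,
    top_p=0.95,
    repetition_penalty=1.2,
    history=history,
)
# history now ends with (message, generated_response)
```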
@@ -386,4 +165,5 @@ def main():
     demo.launch()
 
 if __name__ == "__main__":
-    main()
+    main()
+    ```
 
 
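
Two closing observations. First, the last `+` line above adds a stray triple-backtick line to the end of app.py, which would raise a SyntaxError as soon as the module is imported; it looks like leftover Markdown fencing from a paste and should probably be dropped in a follow-up commit. Second, the new `format_prompt` replaces the old `[INST]`-style tags with a plain Human/Assistant transcript; with the default `max_history_turns=2`, a call behaves like this sketch:

```python
history = [("Hi", "Hello!"), ("What is 2+2?", "4")]
print(format_prompt("And 3+3?", history))
# Human: Hi
# Assistant: Hello!
# Human: What is 2+2?
# Assistant: 4
# Human: And 3+3?
# Assistant:
```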