acecalisto3 committed on
Commit
2fd95c6
·
verified ·
1 Parent(s): 2ceb2a4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -14
app.py CHANGED
@@ -151,22 +151,34 @@ def add_code_to_workspace(project_name, code, file_name):
151
  st.session_state.workspace_projects[project_name]['files'].append(file_name)
152
  return f"Code added to '{file_name}' in project '{project_name}'."
153
 
154
- def chat_interface(input_text):
155
- # Placeholder for chat interface logic
156
- return f"Chatbot response: {input_text}"
 
157
 
158
- st.title("AI Agent Creator")
 
 
 
 
 
 
159
 
160
- sidebar = st.sidebar
161
- sidebar.title("Navigation")
162
- app_mode = sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
163
 
164
- if app_mode == "AI Agent Creator":
165
- st.header("Create an AI Agent from Text")
 
 
 
166
 
167
- subheader = st.subheader
168
- agent_name = subheader("Enter agent name:")
169
- text_input = subheader("Enter skills (one per line):")
170
 
171
- if st.button("Create Agent"):
172
- agent_prompt = create_agent_from_text(agent_name, text_input)
 
 
 
 
 
151
  st.session_state.workspace_projects[project_name]['files'].append(file_name)
152
  return f"Code added to '{file_name}' in project '{project_name}'."
153
 
154
+ def chat_interface_with_agent(input_text, agent_name):
155
+ agent_prompt = load_agent_prompt(agent_name)
156
+ if agent_prompt is None:
157
+ return f"Agent {agent_name} not found."
158
 
159
+ model_name = "gpt2"
160
+ try:
161
+ model = AutoModelForCausalLM.from_pretrained(model_name)
162
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
163
+ generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
164
+ except EnvironmentError as e:
165
+ return f"Error loading model: {e}"
166
 
167
+ # Combine the agent prompt with user input
168
+ combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
 
169
 
170
+ # Truncate input text to avoid exceeding the model's maximum length
171
+ max_input_length = 900
172
+ input_ids = tokenizer.encode(combined_input, return_tensors="pt")
173
+ if input_ids.shape[1] > max_input_length:
174
+ input_ids = input_ids[:, :max_input_length]
175
 
176
+ # Adjust max_new_tokens if needed
177
+ max_new_tokens = 50 # Reduce if necessary
 
178
 
179
+ # Generate chatbot response
180
+ outputs = model.generate(
181
+ input_ids, max_new_tokens=max_new_tokens, num_return_sequences=1, do_sample=True
182
+ )
183
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
184
+ return response