Update app.py
app.py CHANGED
@@ -151,22 +151,34 @@ def add_code_to_workspace(project_name, code, file_name):
     st.session_state.workspace_projects[project_name]['files'].append(file_name)
     return f"Code added to '{file_name}' in project '{project_name}'."
 
-def
-
-
+def chat_interface_with_agent(input_text, agent_name):
+    agent_prompt = load_agent_prompt(agent_name)
+    if agent_prompt is None:
+        return f"Agent {agent_name} not found."
 
-
+    model_name = "gpt2"
+    try:
+        model = AutoModelForCausalLM.from_pretrained(model_name)
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+    except EnvironmentError as e:
+        return f"Error loading model: {e}"
 
-
-
-app_mode = sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
+    # Combine the agent prompt with user input
+    combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
 
-
-
+    # Truncate input text to avoid exceeding the model's maximum length
+    max_input_length = 900
+    input_ids = tokenizer.encode(combined_input, return_tensors="pt")
+    if input_ids.shape[1] > max_input_length:
+        input_ids = input_ids[:, :max_input_length]
 
-
-
-text_input = subheader("Enter skills (one per line):")
+    # Adjust max_new_tokens if needed
+    max_new_tokens = 50  # Reduce if necessary
 
-
-
+    # Generate chatbot response
+    outputs = model.generate(
+        input_ids, max_new_tokens=max_new_tokens, num_return_sequences=1, do_sample=True
+    )
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response
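The new `chat_interface_with_agent` depends on a `load_agent_prompt` helper that is not defined anywhere in this hunk. For context, here is a minimal sketch of what such a helper might look like, assuming each agent's prompt is stored as a plain-text file under an `agents/` directory (the directory name, file extension, and layout are all assumptions, not shown in this diff):

```python
import os

def load_agent_prompt(agent_name):
    """Return the stored prompt for `agent_name`, or None if no such agent exists."""
    # Assumed layout: one prompt file per agent, e.g. agents/<agent_name>.txt
    prompt_path = os.path.join("agents", f"{agent_name}.txt")
    if not os.path.isfile(prompt_path):
        # The caller treats None as "agent not found"
        return None
    with open(prompt_path, "r", encoding="utf-8") as f:
        return f.read().strip()
```

Two small observations on the added code: the `generator` pipeline built in the `try` block is never used, since the response comes from `model.generate` directly, so that line could be dropped; and truncating the encoded input to 900 tokens leaves room for the 50 generated tokens within GPT-2's 1024-token context window. The function also assumes `AutoModelForCausalLM`, `AutoTokenizer`, and `pipeline` are imported from `transformers` at the top of app.py.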