Salvatore Rossitto committed · Commit e8f1b7a · Parent(s): f1d36b6
fixes to prompt message

Files changed:
- RBotReloaded.py +54 -28
- agent_llama_ui.py +2 -4
- requirements.txt +2 -2
- start_agent.bat +1 -1
RBotReloaded.py
CHANGED
@@ -33,14 +33,40 @@ from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline #
 from typing import Any, Dict, List
 import torch
 from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
+import inspect
 
 # Config
 EMBD_CHUNK_SIZE = 512
 AI_NAME = "Agent Llama"
 USER_NAME = "Buddy"
+MODELS_DIR = "models"
 
+def validate_and_fix_params(tool_name, params_list):
+    try:
+        # Create a list to store the validated and fixed parameters
+        validated_params = []
+        n_param = 0
+        while n_param < len(params_list) and len(validated_params) < 1:
+            val = str(params_list[n_param]).replace("(", "").replace(")", "").replace("\"", "").replace("#", "").strip()
+            n_param = n_param + 1
+            if len(val) > 2:
+                validated_params.append(val)
+
+        if (n_param < len(params_list) and tool_name == "ImageGenerator" and len(validated_params) < 3):
+            while n_param < len(params_list):
+                val = int(str(params_list[n_param]).replace("(", "").replace(")", "").replace("\"", "").replace("#", "").strip())
+                n_param = n_param + 1
+                if val > 0:
+                    validated_params.append(val)
+
+        return validated_params
+    except Exception as e:
+        # Handle any exceptions that may occur during parameter validation
+        print(f"Error parsing params: {str(e)}")
+        return []
+
 # Helper to load LM
-def create_llm(model_id="
+def create_llm(model_id=f"{MODELS_DIR}/mistral-7b-instruct-v0.1.Q4_K_M.gguf", load_4bit=False, load_8bit=False, ctx_len = 8192, temperature=0.5, top_p=0.95):
     if (model_id.startswith("http")):
         print(f"Creating TextGen LLM base_url:{model_id}")
         return TextGen(model_url=model_id, callbacks=[StreamingStdOutCallbackHandler()])
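A minimal sketch (not part of the commit) of how the new validate_and_fix_params helper behaves on hypothetical LLM-emitted argument lists, i.e. what action_input.split(",") produces downstream:

# Text tools: only the first non-trivial string argument survives,
# because the first loop stops once one validated param is collected.
validate_and_fix_params("SearchAndReply", ['"recent cryptocurrency news"', ' 5'])
# -> ["recent cryptocurrency news"]

# ImageGenerator: remaining params are coerced to positive ints
# (width/height), up to three validated params in total.
validate_and_fix_params("ImageGenerator", ['"cute puppies"', ' 512', ' 512'])
# -> ["cute puppies", 512, 512]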
@@ -228,34 +254,36 @@ class RBotAgent:
         formatted_history = get_buffer_string(chat_history, human_prefix="USER")
 
         prompt = f"""
-EXAMPLE 1:
+### EXAMPLE 1:
 USER: Find me a recipe for chocolate chip cookies.
 AI: SearchAndReply("chocolate chip cookies recipe", 5)
 
-EXAMPLE 2:
+### EXAMPLE 2:
 USER: Show me pictures of cute puppies.
 AI: ImageGenerator("cute puppies", 512, 512)
-
+
+### EXAMPLE 3:
 USER: Explain the concept of blockchain.
 AI: KnowledgeBaseQuery("Explain blockchain")
 
-EXAMPLE 4:
+### EXAMPLE 4:
 USER: Find me recent news about cryptocurrency.
 AI: SearchAndReply("recent cryptocurrency news")
 
-EXAMPLE 5:
+### EXAMPLE 5:
 USER: Can you calculate the factorial of 5?
 AI: Calculator("factorial(5)")
 
-###
-SYS:Today is {str(datetime.now().date())},
-You are {AI_NAME} a smart and helpful AI assistant with access to external tools and knowledge.
-Please reply to the user with a truth and useful response, if you do not know the answer or you are not sure or you need more recent informations, delegate the task replying with ActionName(action_input) with the most appropriate of the available actions (you call them like functions)
+### CURRENT CONTEXT:\n
+SYS: Today is {str(datetime.now().date())},
+SYS: You are {AI_NAME} a smart and helpful AI assistant with access to external tools and knowledge.
+SYS: Please reply to the user with a truth and useful response, if you do not know the answer or you are not sure or you need more recent informations, delegate the task replying with ActionName(action_input) with the most appropriate of the available actions (you call them like functions).
 
-###
+### CURRENT CONVERSATION:
+{formatted_history}
+SYS: Follow the most current user message, you can reply directly or invoke a valid action from the following available:
 {self.tools_prompt()}
 
-{formatted_history}
 USER: {input}
 AI:
 """
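The {formatted_history} slot is filled by LangChain's get_buffer_string, which RBotReloaded.py already calls above; a small sketch (hypothetical messages) of what lands between the CURRENT CONVERSATION header and the closing SYS line:

from langchain.schema import AIMessage, HumanMessage, get_buffer_string

chat_history = [HumanMessage(content="hi"), AIMessage(content="Hello Buddy!")]
print(get_buffer_string(chat_history, human_prefix="USER"))
# USER: hi
# AI: Hello Buddy!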
@@ -283,34 +311,32 @@ AI:
             if match[0].strip().lower() in tool_names:
                 action_name = match[0].strip().lower()
                 action_input = match[1].strip().replace("query_params", "").strip().replace("()","")
-                break
+                if (action_name and action_input): break
 
         # Try unformatted
         if not action_name or not action_input:
             lines = output.split("\n")
-
-
             for line in lines:
                 for tool in tool_names:
                     if f"{tool}:" in line.lower() or f"{tool}(" in line.lower():
                         action_name = tool
                         action_input = line[line.lower().find(tool)+len(tool):].strip().replace("query_params", "").strip().replace("()","")
                         print(f"Matched unformatted action request. {action_name}:{action_input} from line: {line}")
-                        break
+                        if (action_name and action_input): break
 
         # Call tool if found
         if action_name and action_input:
             for tool in self.tools:
                 if tool.name.lower() in action_name:
-                    print(f"Calling action:{tool.name} with input:{action_input}")
-                    observations.append(f"Calling action:{tool.name} with input:{action_input}")
 
                     params_list = action_input.split(",")
                     try:
-
-
-
-
+                        print(f"Fixing input for tool {tool.name}, Original params list: {str(params_list)}")
+                        params_list = validate_and_fix_params(tool.name, params_list)
+                        print(f"Fixed params list: {str(params_list)}")
+                        print(f"Calling action:{tool.name} with input:{str(params_list)}")
+                        observations.append(f"Calling action:{tool.name} with input:{str(params_list)}")
+                        res = tool.func(*params_list)
                     except Exception as ex:
                         res = f"{action_name} execution error: {str(ex)}"
 
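A hedged walk-through (not from the commit) of the fallback matcher plus the new cleanup step, on a hypothetical reply the strict pattern missed:

line = 'AI: SearchAndReply("latest BTC price", 3)'
tool = "searchandreply"

raw = line[line.lower().find(tool) + len(tool):].strip().replace("query_params", "").strip().replace("()", "")
# raw == '("latest BTC price", 3)' -- parentheses and quotes still attached,
# which is exactly what validate_and_fix_params now strips before dispatch:
params = validate_and_fix_params("SearchAndReply", raw.split(","))
# params == ["latest BTC price"], and the call becomes res = tool.func(*params)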
@@ -361,7 +387,7 @@ class SmartAgent:
         self.chat_history.clear()
 
     # Create image
-    def createImage(self, prompt, width=512, height=512, denoise_strength=0.75, guidance_scale=7.5, model_id = 'dreamshaper_8.safetensors'):
+    def createImage(self, prompt : str, width : int=512, height : int=512, denoise_strength : float=0.75, guidance_scale: float=7.5, model_id : str= 'dreamshaper_8.safetensors'):
        try:
            init_image = None
            if (os.path.exists("./image_gen_guide.jpg")):
@@ -372,11 +398,11 @@ class SmartAgent:
             if self.text2image_gen_pipe is None:
                 if torch.cuda.is_available():
                     print(f"Loading Stable model {model_id} into GPU")
-                    self.text2image_gen_pipe = StableDiffusionPipeline.from_single_file("
+                    self.text2image_gen_pipe = StableDiffusionPipeline.from_single_file(f"{MODELS_DIR}/" + model_id, torch_dtype=torch.float16, verbose=True, use_safetensors=True)
                     self.text2image_gen_pipe = self.text2image_gen_pipe.to("cuda")
                 else:
                     print(f"Loading Stable model {model_id} into CPU")
-                    self.text2image_gen_pipe = StableDiffusionPipeline.from_single_file("
+                    self.text2image_gen_pipe = StableDiffusionPipeline.from_single_file(f"{MODELS_DIR}/" + model_id, torch_dtype=torch.float32, verbose=True, use_safetensors=True)
                     self.text2image_gen_pipe = self.text2image_gen_pipe.to("cpu")
             print("generating image from promt...")
             images = self.text2image_gen_pipe(prompt, width=width, height=height).images
@@ -384,11 +410,11 @@ class SmartAgent:
             if self.image2image_gen_pipe is None:
                 if torch.cuda.is_available():
                     print(f"Loading Stable model {model_id} into GPU")
-                    self.image2image_gen_pipe = StableDiffusionImg2ImgPipeline.from_single_file("
+                    self.image2image_gen_pipe = StableDiffusionImg2ImgPipeline.from_single_file(f"{MODELS_DIR}/" + model_id, torch_dtype=torch.float16, verbose=True, use_safetensors=True)
                     self.image2image_gen_pipe = self.image2image_gen_pipe.to("cuda")
                 else:
                     print(f"Loading Stable model {model_id} into CPU")
-                    self.image2image_gen_pipe = StableDiffusionImg2ImgPipeline.from_single_file("
+                    self.image2image_gen_pipe = StableDiffusionImg2ImgPipeline.from_single_file(f"{MODELS_DIR}/" + model_id, torch_dtype=torch.float32, verbose=True, use_safetensors=True)
                     self.image2image_gen_pipe = self.image2image_gen_pipe.to("cpu")
             print("generating image from promt+image...")
             init_image = init_image.convert("RGB")
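The four from_single_file call sites above follow one pattern; a condensed sketch (not the commit's code; same MODELS_DIR layout assumed, verbose flag omitted) of the device/dtype branch this commit fills in:

import torch
from diffusers import StableDiffusionPipeline

MODELS_DIR = "models"

def load_text2img(model_id="dreamshaper_8.safetensors"):
    # fp16 on GPU, fp32 on CPU, mirroring both branches above
    cuda = torch.cuda.is_available()
    pipe = StableDiffusionPipeline.from_single_file(
        f"{MODELS_DIR}/{model_id}",
        torch_dtype=torch.float16 if cuda else torch.float32,
        use_safetensors=True,
    )
    return pipe.to("cuda" if cuda else "cpu")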
@@ -404,7 +430,7 @@ class SmartAgent:
                 paths.append(file_path)
             return f"Generated images from prompt \"{prompt}\" saved to files: {', '.join(paths)}"
         except Exception as e:
-            print(f"error in
+            print(f"error in createImage: {e}")
             return "Unable to generate file"
 
     def load_and_split_documents(self, url, max_depth=2):
agent_llama_ui.py
CHANGED
@@ -7,7 +7,7 @@ import requests
 import glob
 import json
 import shutil
-from RBotReloaded import SmartAgent
+from RBotReloaded import SmartAgent, MODELS_DIR
 import time
 from PIL import Image
 from langchain.schema import AIMessage, HumanMessage
@@ -15,15 +15,13 @@ from langchain.schema import AIMessage, HumanMessage
 load_dotenv()
 
 
-default_model = ""
+default_model = "mistral-7b-instruct-v0.1.Q4_K_M.gguf"
 default_context = 8192
 default_load_type = "Auto"
 default_iterations = 2
 default_temperature = 0.5
 default_topp = 0.95
 
-MODELS_DIR = "models"
-
 @st.cache_resource
 def agent(model, temperature, top_p, context_length, load_8bit, load_4bit, max_iterations):
     ag = SmartAgent(f"{MODELS_DIR}/{model}" if os.path.exists(f"{MODELS_DIR}/{model}") else model, temp=temperature, top_p=top_p, load_in_4bit=load_4bit, load_in_8bit=load_8bit, ctx_len=context_length, max_iterations=max_iterations) if model else None
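With MODELS_DIR now imported from RBotReloaded, the cached factory resolves local model files before falling back to the raw id; a short sketch (hypothetical standalone version) of that resolution:

import os

MODELS_DIR = "models"
model = "mistral-7b-instruct-v0.1.Q4_K_M.gguf"  # the new default_model

# Prefer a file under models/; otherwise pass the id through unchanged
# (e.g. an http URL, which create_llm routes to a TextGen backend).
path = f"{MODELS_DIR}/{model}" if os.path.exists(f"{MODELS_DIR}/{model}") else model

Note that st.cache_resource keys the cache on the argument tuple, so changing any of the UI settings builds a fresh SmartAgent.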
requirements.txt
CHANGED
@@ -6,6 +6,8 @@ bs4
 diffusers
 transformers
 faiss-cpu
+lxml
+ctransformers
 fastapi
 git-python
 google-search-results
@@ -41,6 +43,4 @@ torchaudio
 #torchaudio==2.0.1+cu117
 torchvision
 python-dotenv
-lxml
-ctransformers
 omegaconf
start_agent.bat
CHANGED
@@ -13,7 +13,7 @@ rem Activate the virtual environment
call %ENV_NAME%\Scripts\activate

rem Install the required packages from requirements.txt
-python -m pip install -r requirements.txt
+python -m pip install --user -r requirements.txt

rem Run your Streamlit application
python -m streamlit run agent_llama_ui.py