Better response to errors
app.py CHANGED
@@ -89,10 +89,14 @@ class ConversationBot:
         self.tools.append(Tool(name=func.name, description=func.description, func=func))
 
     def run_text(self, text, state):
-        self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
-        res = self.agent({"input": text})
-        res['output'] = res['output'].replace("\\", "/")
-        response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
+        try:
+            self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
+            res = self.agent({"input": text})
+            res['output'] = res['output'].replace("\\", "/")
+            response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
+        except Exception as e:
+            print(e)
+            response = f"Oops, an error occurred while generating the response.\n\nTry asking your question in another way."
 
         state = state + [(text, response)]
 
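Note: the hunk above wraps the whole agent invocation in try/except, so a failure (API error, tool exception, etc.) is logged and the chat receives a fallback reply instead of the Space crashing. A minimal standalone sketch of the same pattern; run_text_safely and broken_agent are illustrative names, not part of app.py:

# Sketch of the error-handling pattern introduced in run_text above.
# `agent_call` stands in for self.agent({"input": text}).
def run_text_safely(agent_call, text, state):
    try:
        res = agent_call({"input": text})
        response = res["output"].replace("\\", "/")
    except Exception as e:          # any failure falls back to a canned reply
        print(e)
        response = ("Oops, an error occurred while generating the response.\n\n"
                    "Try asking your question in another way.")
    state = state + [(text, response)]   # chat history keeps growing either way
    return state, state

# Usage: a callable that raises still yields a well-formed (state, state) pair.
def broken_agent(_):
    raise RuntimeError("model unavailable")

print(run_text_safely(broken_agent, "hello", []))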
@@ -113,13 +117,16 @@ class ConversationBot:
         img = img.convert('RGB')
         img.save(image_filename, "PNG")
         print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
+
         description = self.models['ImageCaptioning'].inference(image_filename)
         Human_prompt = f'\nHuman: provide a figure named {image_filename}. The description is: {description}. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n'
         AI_prompt = "Received. "
+
         self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
         state = state + [(f"*{image_filename}*", AI_prompt)]
         print(f"\nProcessed run_image, Input image: {image_filename}\nCurrent state: {state}\n"
               f"Current Memory: {self.agent.memory.buffer}")
+
         return state, state, f'{txt} {image_filename} '
 
     def init_agent(self, openai_api_key):
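The new run_text also trims the agent memory with cut_dialogue_history(..., keep_last_n_words=500) before each call. That helper is defined elsewhere in app.py and is not part of this diff; the sketch below shows one way such a word-budget trim can work (hypothetical, not the actual implementation):

# Hypothetical history-trimming helper; the real cut_dialogue_history may differ.
def trim_history(history: str, keep_last_n_words: int = 500) -> str:
    if not history:
        return history
    kept, budget = [], keep_last_n_words
    # Walk backwards, keeping whole lines until the word budget runs out.
    for line in reversed(history.split('\n')):
        words = len(line.split(' '))
        if budget - words < 0:
            break
        budget -= words
        kept.append(line)
    return '\n'.join(reversed(kept))

# e.g. agent.memory.buffer = trim_history(agent.memory.buffer, keep_last_n_words=500)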