Commit b602b89 (initial commit, 0 parents), committed by telum and RamAnanth1

Duplicate from RamAnanth1/visual-chatGPT

Co-authored-by: Ram Ananth <[email protected]>
Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +503 -0
  4. requirements.txt +31 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
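
Each pattern above routes matching files through Git LFS instead of storing them directly in Git history. Another binary type would be tracked with an analogous line, e.g. (hypothetical entry) *.gguf filter=lfs diff=lfs merge=lfs -text.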
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Visual ChatGPT
+ emoji: 🚀
+ colorFrom: red
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 3.20.1
+ app_file: app.py
+ pinned: true
+ duplicated_from: RamAnanth1/visual-chatGPT
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,503 @@
+ import sys
+ import os
+ sys.path.append(os.path.dirname(os.path.realpath(__file__)))
+ os.makedirs('image', exist_ok=True)  # os.mkdir would crash on restart if the directory already exists
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation
+ import torch
+ from diffusers import StableDiffusionPipeline
+ from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
+ from langchain.agents.initialize import initialize_agent
+ from langchain.agents.tools import Tool
+ from langchain.chains.conversation.memory import ConversationBufferMemory
+ from langchain.llms.openai import OpenAI
+ import re
+ import uuid
+ from diffusers import StableDiffusionInpaintPipeline
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
+ from diffusers import UniPCMultistepScheduler
+ from PIL import Image
+ import numpy as np
+ from omegaconf import OmegaConf
+ from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
+ import cv2
+ import einops
+ from pytorch_lightning import seed_everything
+ import random
+ from controlnet_aux import OpenposeDetector
+
+ # image2pose and pose2image below call pose_model, which the file as committed never
+ # defines; instantiating it here (assumed checkpoint, matching the ControlNet models used below).
+ pose_model = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
+
+ VISUAL_CHATGPT_PREFIX = """Visual ChatGPT is designed to assist with a wide range of text and visual related tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. Visual ChatGPT is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
+
+ Visual ChatGPT is able to process and understand large amounts of text and images. As a language model, Visual ChatGPT cannot directly read images, but it has a list of tools to finish different visual tasks. Each image will have a file name formed as "image/xxx.png", and Visual ChatGPT can invoke different tools to indirectly understand pictures. When talking about images, Visual ChatGPT is very strict about the file name and will never fabricate nonexistent files. When using tools to generate new image files, Visual ChatGPT also knows that the image may not be the same as the user's demand, and will use other visual question answering tools or description tools to observe the real image. Visual ChatGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the image content and image file name. It will remember to provide the file name from the last tool observation, if a new image is generated.
+
+ Human may provide new figures to Visual ChatGPT with a description. The description helps Visual ChatGPT to understand this image, but Visual ChatGPT should use tools to finish the following tasks, rather than imagining directly from the description.
+
+ Overall, Visual ChatGPT is a powerful visual dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics.
+
+
+ TOOLS:
+ ------
+
+ Visual ChatGPT has access to the following tools:"""
+
+ VISUAL_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
+
+ ```
+ Thought: Do I need to use a tool? Yes
+ Action: the action to take, should be one of [{tool_names}]
+ Action Input: the input to the action
+ Observation: the result of the action
+ ```
+
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
+
+ ```
+ Thought: Do I need to use a tool? No
+ {ai_prefix}: [your response here]
+ ```
+ """
+
+ VISUAL_CHATGPT_SUFFIX = """You are very strict about filename correctness and will never fake a file name that does not exist.
+ You will remember to provide the image file name faithfully if it's provided in the last tool observation.
+
+ Begin!
+
+ Previous conversation history:
+ {chat_history}
+
+ New input: {input}
+ Since Visual ChatGPT is a text language model, Visual ChatGPT must use tools to observe images rather than imagining them.
+ The thoughts and observations are only visible to Visual ChatGPT; Visual ChatGPT should remember to repeat important information in the final response for the Human.
+ Thought: Do I need to use a tool? {agent_scratchpad}"""
+
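+ # Illustration only (not part of the prompts above): one hypothetical tool-using
+ # turn following that format could look like:
+ #   Thought: Do I need to use a tool? Yes
+ #   Action: Get Photo Description
+ #   Action Input: image/1a2b.png
+ #   Observation: a dog sitting on a sofa
+ #   Thought: Do I need to use a tool? No
+ #   AI: The image shows a dog sitting on a sofa.
+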
+ def cut_dialogue_history(history_memory, keep_last_n_words=500):
+     tokens = history_memory.split()
+     n_tokens = len(tokens)
+     print(f"history_memory: {history_memory}, n_tokens: {n_tokens}")
+     if n_tokens < keep_last_n_words:
+         return history_memory
+     else:
+         # Drop whole leading paragraphs until the remainder fits the word budget
+         paragraphs = history_memory.split('\n')
+         last_n_tokens = n_tokens
+         while last_n_tokens >= keep_last_n_words:
+             last_n_tokens = last_n_tokens - len(paragraphs[0].split(' '))
+             paragraphs = paragraphs[1:]
+         return '\n' + '\n'.join(paragraphs)
+
+ def get_new_image_name(org_img_name, func_name="update"):
+     head_tail = os.path.split(org_img_name)
+     head = head_tail[0]
+     tail = head_tail[1]
+     name_split = tail.split('.')[0].split('_')
+     this_new_uuid = str(uuid.uuid4())[0:4]
+     if len(name_split) == 1:
+         most_org_file_name = name_split[0]
+         recent_prev_file_name = name_split[0]
+         new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name)
+     else:
+         assert len(name_split) == 4
+         most_org_file_name = name_split[3]
+         recent_prev_file_name = name_split[0]
+         new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name)
+     return os.path.join(head, new_file_name)
+
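+ # For example (hypothetical names), get_new_image_name('image/1a2b.png', func_name='pix2pix')
+ # returns something like 'image/3c4d_pix2pix_1a2b_1a2b.png': a fresh 4-char uuid, the tool
+ # name, the most recent predecessor stem, and the original file stem, so edit chains stay traceable.
+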
+ def create_model(config_path, device):
+     # NOTE: this helper is unused in this app and depends on instantiate_from_config
+     # (e.g. from ldm.util), which is not imported here.
+     config = OmegaConf.load(config_path)
+     OmegaConf.update(config, "model.params.cond_stage_config.params.device", device)
+     model = instantiate_from_config(config.model).cpu()
+     print(f'Loaded model config from [{config_path}]')
+     return model
+
+ class MaskFormer:
+     def __init__(self, device):
+         self.device = device
+         self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
+         self.model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined").to(device)
+
+     def inference(self, image_path, text):
+         threshold = 0.5
+         min_area = 0.02
+         padding = 20
+         original_image = Image.open(image_path)
+         image = original_image.resize((512, 512))
+         inputs = self.processor(text=text, images=image, padding="max_length", return_tensors="pt").to(self.device)
+         with torch.no_grad():
+             outputs = self.model(**inputs)
+         mask = torch.sigmoid(outputs[0]).squeeze().cpu().numpy() > threshold
+         area_ratio = len(np.argwhere(mask)) / (mask.shape[0] * mask.shape[1])
+         if area_ratio < min_area:
+             return None
+         # Dilate the thresholded mask by `padding` pixels around every positive cell
+         true_indices = np.argwhere(mask)
+         mask_array = np.zeros_like(mask, dtype=bool)
+         for idx in true_indices:
+             padded_slice = tuple(slice(max(0, i - padding), i + padding + 1) for i in idx)
+             mask_array[padded_slice] = True
+         visual_mask = (mask_array * 255).astype(np.uint8)
+         image_mask = Image.fromarray(visual_mask)
+         return image_mask.resize(image.size)
+
+ class ImageEditing:
+     def __init__(self, device):
+         print("Initializing StableDiffusionInpaint to %s" % device)
+         self.device = device
+         self.mask_former = MaskFormer(device=self.device)
+         self.inpainting = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting").to(device)
+
+     def remove_part_of_image(self, input):
+         image_path, to_be_removed_txt = input.split(",")
+         print(f'remove_part_of_image: to_be_removed {to_be_removed_txt}')
+         return self.replace_part_of_image(f"{image_path},{to_be_removed_txt},background")
+
+     def replace_part_of_image(self, input):
+         image_path, to_be_replaced_txt, replace_with_txt = input.split(",")
+         print(f'replace_part_of_image: replace_with_txt {replace_with_txt}')
+         original_image = Image.open(image_path)
+         mask_image = self.mask_former.inference(image_path, to_be_replaced_txt)
+         updated_image = self.inpainting(prompt=replace_with_txt, image=original_image, mask_image=mask_image).images[0]
+         updated_image_path = get_new_image_name(image_path, func_name="replace-something")
+         updated_image.save(updated_image_path)
+         return updated_image_path
+
+ class Pix2Pix:
+     def __init__(self, device):
+         print("Initializing Pix2Pix to %s" % device)
+         self.device = device
+         self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None).to(device)
+         self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
+
+     def inference(self, inputs):
+         """Change style of image."""
+         print("===>Starting Pix2Pix Inference")
+         image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
+         original_image = Image.open(image_path)
+         image = self.pipe(instruct_text, image=original_image, num_inference_steps=40, image_guidance_scale=1.2).images[0]
+         updated_image_path = get_new_image_name(image_path, func_name="pix2pix")
+         image.save(updated_image_path)
+         return updated_image_path
+
+ class T2I:
+     def __init__(self, device):
+         print("Initializing T2I to %s" % device)
+         self.device = device
+         self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
+         self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
+         self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
+         self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device)
+         self.pipe.to(device)
+
+     def inference(self, text):
+         image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
+         refined_text = self.text_refine_gpt2_pipe(text)[0]["generated_text"]
+         print(f'{text} refined to {refined_text}')
+         image = self.pipe(refined_text).images[0]
+         image.save(image_filename)
+         print(f"Processed T2I.run, text: {text}, image_filename: {image_filename}")
+         return image_filename
+
+ class ImageCaptioning:
+     def __init__(self, device):
+         print("Initializing ImageCaptioning to %s" % device)
+         self.device = device
+         self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+         self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device)
+
+     def inference(self, image_path):
+         inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device)
+         out = self.model.generate(**inputs)
+         captions = self.processor.decode(out[0], skip_special_tokens=True)
+         return captions
+
+ class image2canny:
+     def __init__(self):
+         print("Direct detect canny.")
+         self.low_thresh = 100
+         self.high_thresh = 200
+
+     def inference(self, inputs):
+         print("===>Starting image2canny Inference")
+         image = Image.open(inputs)
+         image = np.array(image)
+         image = cv2.Canny(image, self.low_thresh, self.high_thresh)
+         image = image[:, :, None]
+         image = np.concatenate([image, image, image], axis=2)  # replicate the single edge channel to RGB
+         canny_image = Image.fromarray(image)
+         updated_image_path = get_new_image_name(inputs, func_name="edge")
+         canny_image.save(updated_image_path)
+         return updated_image_path
+
+ class canny2image:
+     def __init__(self, device):
+         print("Initialize the canny2image model.")
+         self.low_threshold = 100
+         self.high_threshold = 200
+
+         # Models
+         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
+         self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+             "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
+         )
+         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
+
+         # This loads the individual model components onto the GPU on demand, so we
+         # don't need to call pipe.to("cuda") explicitly.
+         self.pipe.enable_model_cpu_offload()
+
+         self.pipe.enable_xformers_memory_efficient_attention()
+
+         # Fixed generator seed for reproducible outputs
+         self.generator = torch.manual_seed(0)
+
+     def get_canny_filter(self, image):
+         if not isinstance(image, np.ndarray):
+             image = np.array(image)
+         image = cv2.Canny(image, self.low_threshold, self.high_threshold)
+         image = image[:, :, None]
+         image = np.concatenate([image, image, image], axis=2)
+         canny_image = Image.fromarray(image)
+         return canny_image
+
+     def inference(self, inputs):
+         print("===>Starting canny2image Inference")
+         image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
+         image = Image.open(image_path)
+         image = np.array(image)
+         prompt = instruct_text
+         canny_image = self.get_canny_filter(image)
+         output = self.pipe(prompt, canny_image, generator=self.generator, num_images_per_prompt=1, num_inference_steps=20)
+
+         updated_image_path = get_new_image_name(image_path, func_name="canny2image")
+         real_image = output.images[0]  # take the first (and only) generated image
+         real_image.save(updated_image_path)
+         return updated_image_path
+
+ class image2pose:
+     def __init__(self):
+         print("Direct detect pose.")
+
+     def inference(self, inputs):
+         print("===>Starting image2pose Inference")
+         image = Image.open(inputs)
+         image = np.array(image)
+         pose_image = pose_model(image)
+
+         updated_image_path = get_new_image_name(inputs, func_name="pose")
+         pose_image.save(updated_image_path)
+         return updated_image_path
+
+
+ class pose2image:
+     def __init__(self, device):
+         print("Initialize the pose2image model.")
+
+         # Models
+         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16)
+         self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+             "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
+         )
+         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
+
+         # This loads the individual model components onto the GPU on demand, so we
+         # don't need to call pipe.to("cuda") explicitly.
+         self.pipe.enable_model_cpu_offload()
+
+         self.pipe.enable_xformers_memory_efficient_attention()
+
+         # Fixed generator seed for reproducible outputs
+         self.generator = torch.manual_seed(0)
+
+     def get_pose(self, image):
+         return pose_model(image)
+
+     def inference(self, inputs):
+         print("===>Starting pose2image Inference")
+         image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
+         image = Image.open(image_path)
+         image = np.array(image)
+         prompt = instruct_text
+         pose_image = self.get_pose(image)
+         output = self.pipe(prompt, pose_image, generator=self.generator, num_images_per_prompt=1, num_inference_steps=20)
+
+         updated_image_path = get_new_image_name(image_path, func_name="pose2image")
+         real_image = output.images[0]  # take the first (and only) generated image
+         real_image.save(updated_image_path)
+         return updated_image_path
+
+
+ class BLIPVQA:
+     def __init__(self, device):
+         print("Initializing BLIP VQA to %s" % device)
+         self.device = device
+         self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
+         self.model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(self.device)
+
+     def get_answer_from_question_and_image(self, inputs):
+         image_path, question = inputs.split(",")
+         raw_image = Image.open(image_path).convert('RGB')
+         print(f'BLIPVQA: question: {question}')
+         inputs = self.processor(raw_image, question, return_tensors="pt").to(self.device)
+         out = self.model.generate(**inputs)
+         answer = self.processor.decode(out[0], skip_special_tokens=True)
+         return answer
+
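+ # Note: every tool receives its arguments as one string produced by the agent;
+ # multi-argument tools expect comma-joined values, e.g. (illustrative values)
+ # "image/1a2b.png,what color is the cat" for the VQA tool above.
+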
+ class ConversationBot:
+     def __init__(self):
+         print("Initializing VisualChatGPT")
+         self.edit = ImageEditing(device="cuda:0")
+         self.i2t = ImageCaptioning(device="cuda:0")
+         self.t2i = T2I(device="cuda:0")
+         self.image2canny = image2canny()
+         self.canny2image = canny2image(device="cuda:0")
+         self.image2pose = image2pose()
+         self.pose2image = pose2image(device="cuda:0")
+         self.BLIPVQA = BLIPVQA(device="cuda:0")
+         self.pix2pix = Pix2Pix(device="cuda:0")
+         self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
+         self.tools = [
+             Tool(name="Get Photo Description", func=self.i2t.inference,
+                  description="useful when you want to know what is inside the photo. receives image_path as input. "
+                              "The input to this tool should be a string, representing the image_path. "),
+             Tool(name="Generate Image From User Input Text", func=self.t2i.inference,
+                  description="useful when you want to generate an image from a user input text and save it to a file. like: generate an image of an object or something, or generate an image that includes some objects. "
+                              "The input to this tool should be a string, representing the text used to generate the image. "),
+             Tool(name="Remove Something From The Photo", func=self.edit.remove_part_of_image,
+                  description="useful when you want to remove an object or something from the photo from its description or location. "
+                              "The input to this tool should be a comma separated string of two, representing the image_path and the object to be removed. "),
+             Tool(name="Replace Something From The Photo", func=self.edit.replace_part_of_image,
+                  description="useful when you want to replace an object from the object description or location with another object from its description. "
+                              "The input to this tool should be a comma separated string of three, representing the image_path, the object to be replaced, and the object to replace it with. "),
+             Tool(name="Instruct Image Using Text", func=self.pix2pix.inference,
+                  description="useful when you want the style of the image to be like the text. like: make it look like a painting. or make it like a robot. "
+                              "The input to this tool should be a comma separated string of two, representing the image_path and the text. "),
+             Tool(name="Answer Question About The Image", func=self.BLIPVQA.get_answer_from_question_and_image,
+                  description="useful when you need an answer for a question based on an image. like: what is the background color of the last image, how many cats in this figure, what is in this figure. "
+                              "The input to this tool should be a comma separated string of two, representing the image_path and the question. "),
+             Tool(name="Edge Detection On Image", func=self.image2canny.inference,
+                  description="useful when you want to detect the edges of the image. like: detect the edges of this image, or canny detection on image, or perform edge detection on this image, or detect the canny image of this image. "
+                              "The input to this tool should be a string, representing the image_path. "),
+             Tool(name="Generate Image Condition On Canny Image", func=self.canny2image.inference,
+                  description="useful when you want to generate a new real image from both the user description and a canny image. like: generate a real image of an object or something from this canny image, or generate a new real image of an object or something from this edge image. "
+                              "The input to this tool should be a comma separated string of two, representing the image_path and the user description. "),
+             # The tools below are disabled in this Space (their models are never initialized here):
+             # Tool(name="Line Detection On Image", func=self.image2line.inference,
+             #      description="useful when you want to detect the straight lines of the image. like: detect the straight lines of this image, or straight line detection on image, or perform straight line detection on this image, or detect the straight line image of this image. "
+             #                  "The input to this tool should be a string, representing the image_path"),
+             # Tool(name="Generate Image Condition On Line Image", func=self.line2image.inference,
+             #      description="useful when you want to generate a new real image from both the user description and a straight line image. like: generate a real image of an object or something from this straight line image, or generate a new real image of an object or something from these straight lines. "
+             #                  "The input to this tool should be a comma separated string of two, representing the image_path and the user description. "),
+             # Tool(name="Hed Detection On Image", func=self.image2hed.inference,
+             #      description="useful when you want to detect the soft hed boundary of the image. like: detect the soft hed boundary of this image, or hed boundary detection on image, or perform hed boundary detection on this image, or detect the soft hed boundary image of this image. "
+             #                  "The input to this tool should be a string, representing the image_path"),
+             # Tool(name="Generate Image Condition On Soft Hed Boundary Image", func=self.hed2image.inference,
+             #      description="useful when you want to generate a new real image from both the user description and a soft hed boundary image. like: generate a real image of an object or something from this soft hed boundary image, or generate a new real image of an object or something from this hed boundary. "
+             #                  "The input to this tool should be a comma separated string of two, representing the image_path and the user description"),
+             # Tool(name="Segmentation On Image", func=self.image2seg.inference,
+             #      description="useful when you want to detect segmentations of the image. like: segment this image, or generate segmentations on this image, or perform segmentation on this image. "
+             #                  "The input to this tool should be a string, representing the image_path"),
+             # Tool(name="Generate Image Condition On Segmentations", func=self.seg2image.inference,
+             #      description="useful when you want to generate a new real image from both the user description and segmentations. like: generate a real image of an object or something from this segmentation image, or generate a new real image of an object or something from these segmentations. "
+             #                  "The input to this tool should be a comma separated string of two, representing the image_path and the user description"),
+             # Tool(name="Predict Depth On Image", func=self.image2depth.inference,
+             #      description="useful when you want to detect the depth of the image. like: generate the depth from this image, or detect the depth map on this image, or predict the depth for this image. "
+             #                  "The input to this tool should be a string, representing the image_path"),
+             # Tool(name="Generate Image Condition On Depth", func=self.depth2image.inference,
+             #      description="useful when you want to generate a new real image from both the user description and a depth image. like: generate a real image of an object or something from this depth image, or generate a new real image of an object or something from the depth map. "
+             #                  "The input to this tool should be a comma separated string of two, representing the image_path and the user description"),
+             # Tool(name="Predict Normal Map On Image", func=self.image2normal.inference,
+             #      description="useful when you want to detect the normal map of the image. like: generate normal map from this image, or predict normal map of this image. "
+             #                  "The input to this tool should be a string, representing the image_path"),
+             # Tool(name="Generate Image Condition On Normal Map", func=self.normal2image.inference,
+             #      description="useful when you want to generate a new real image from both the user description and a normal map. like: generate a real image of an object or something from this normal map, or generate a new real image of an object or something from the normal map. "
+             #                  "The input to this tool should be a comma separated string of two, representing the image_path and the user description"),
+             # Tool(name="Sketch Detection On Image", func=self.image2scribble.inference,
+             #      description="useful when you want to generate a scribble of the image. like: generate a scribble of this image, or generate a sketch from this image, detect the sketch from this image. "
+             #                  "The input to this tool should be a string, representing the image_path"),
+             # Tool(name="Generate Image Condition On Sketch Image", func=self.scribble2image.inference,
+             #      description="useful when you want to generate a new real image from both the user description and a scribble image or a sketch image. "
+             #                  "The input to this tool should be a comma separated string of two, representing the image_path and the user description"),
+             Tool(name="Pose Detection On Image", func=self.image2pose.inference,
+                  description="useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. "
+                              "The input to this tool should be a string, representing the image_path. "),
+             Tool(name="Generate Image Condition On Pose Image", func=self.pose2image.inference,
+                  description="useful when you want to generate a new real image from both the user description and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. "
+                              "The input to this tool should be a comma separated string of two, representing the image_path and the user description. ")
+         ]
+
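+     # The LangChain agent is created lazily, only after the user submits an OpenAI
+     # API key through the UI; it wires the tools above into a conversational
+     # ReAct-style agent using the custom prefix/format/suffix prompts defined earlier.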
+     def init_langchain(self, api_key):
+         self.llm = OpenAI(temperature=0, openai_api_key=api_key)
+         self.agent = initialize_agent(
+             self.tools,
+             self.llm,
+             agent="conversational-react-description",
+             verbose=True,
+             memory=self.memory,
+             return_intermediate_steps=True,
+             agent_kwargs={'prefix': VISUAL_CHATGPT_PREFIX, 'format_instructions': VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX})
+         return gr.update(visible=True)
+
+     def run_text(self, text, state):
+         print("===============Running run_text =============")
+         print("Inputs:", text, state)
+         print("======>Previous memory:\n %s" % self.agent.memory)
+         self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
+         res = self.agent({"input": text})
+         print("======>Current memory:\n %s" % self.agent.memory)
+         # Rewrite any image paths in the reply as inline markdown images for the chatbot
+         response = re.sub(r'(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
+         state = state + [(text, response)]
+         print("Outputs:", state)
+         return state, state
+
+     def run_image(self, image, state, txt):
+         print("===============Running run_image =============")
+         print("Inputs:", image, state)
+         print("======>Previous memory:\n %s" % self.agent.memory)
+         image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
+         print("======>Auto Resize Image...")
+         img = Image.open(image.name)
+         width, height = img.size
+         ratio = min(512 / width, 512 / height)
+         width_new, height_new = (round(width * ratio), round(height * ratio))
+         img = img.resize((width_new, height_new))
+         img = img.convert('RGB')
+         img.save(image_filename, "PNG")
+         print(f"Resized image from {width}x{height} to {width_new}x{height_new}")
+         description = self.i2t.inference(image_filename)
+         Human_prompt = "\nHuman: provide a figure named {}. The description is: {}. This information helps you to understand this image, but you should use tools to finish the following tasks, " \
+                        "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(image_filename, description)
+         AI_prompt = "Received. "
+         self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
+         print("======>Current memory:\n %s" % self.agent.memory)
+         state = state + [(f"![](/file={image_filename})*{image_filename}*", AI_prompt)]
+         print("Outputs:", state)
+         return state, state, txt + ' ' + image_filename + ' '
+
+
+ bot = ConversationBot()
+ with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
+     gr.Markdown("# Visual ChatGPT <p> Visual ChatGPT combines visual foundation models and ChatGPT to send, receive and process images during chatting</p><p>This demo currently only supports text, image captioning, image generation, visual question answering, edge detection on images and generating images conditioned on canny and pose images using the diffusers implementation of ControlNet</p>")
+     gr.Markdown("<p> For example usages please refer to the <a href='https://raw.githubusercontent.com/microsoft/visual-chatgpt/main/assets/demo.gif'>official GIF</a></p>")
+     gr.Markdown('<p> For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/RamAnanth1/visual-chatGPT?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>')
+     openai_api_key_input = gr.Textbox(type="password", label="Enter your OpenAI API key here")
+     chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT")
+     state = gr.State([])
+
+     with gr.Row(visible=False) as input_row:
+         with gr.Column(scale=0.7):
+             txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
+         with gr.Column(scale=0.15, min_width=0):
+             clear = gr.Button("Clear")
+         with gr.Column(scale=0.15, min_width=0):
+             btn = gr.UploadButton("Upload", file_types=["image"])
+
+     openai_api_key_input.submit(bot.init_langchain, openai_api_key_input, [input_row])
+     txt.submit(bot.run_text, [txt, state], [chatbot, state])
+     txt.submit(lambda: "", None, txt)
+     btn.upload(bot.run_image, [btn, state, txt], [chatbot, state, txt])
+     clear.click(bot.memory.clear)
+     clear.click(lambda: [], None, chatbot)
+     clear.click(lambda: [], None, state)
+ demo.launch()
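
Note: running this Space requires a CUDA GPU (every tool is initialized on cuda:0) and an OpenAI API key entered in the UI; locally, one would presumably install the pinned dependencies below with pip install -r requirements.txt and start the app with python app.py.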
requirements.txt ADDED
@@ -0,0 +1,31 @@
+ torch
+ torchvision
+ numpy==1.23.1
+ transformers==4.26.1
+ albumentations==1.3.0
+ opencv-contrib-python==4.3.0.36
+ imageio==2.9.0
+ imageio-ffmpeg==0.4.2
+ pytorch-lightning==1.5.0
+ omegaconf==2.1.1
+ test-tube>=0.7.5
+ streamlit==1.12.1
+ einops==0.3.0
+ webdataset==0.2.5
+ kornia==0.6
+ open_clip_torch==2.0.2
+ invisible-watermark>=0.1.5
+ streamlit-drawable-canvas==0.8.0
+ torchmetrics==0.6.0
+ timm==0.6.12
+ addict==2.4.0
+ yapf==0.32.0
+ prettytable==3.6.0
+ safetensors==0.2.7
+ basicsr==1.4.2
+ langchain==0.0.101
+ diffusers
+ openai
+ accelerate
+ controlnet_aux
+ xformers==0.0.16