praeclarumjj3 committed on
Commit
831e281
1 Parent(s): 99eeeb7

Update chat.py

Browse files
Files changed (1) hide show
  1. chat.py +354 -194
chat.py CHANGED
@@ -1,206 +1,366 @@
1
- """
2
- A model worker executes the model.
3
- """
4
  import argparse
 
5
  import json
6
- import torch
7
-
8
- from vcoder_llava.utils import server_error_msg
9
- from vcoder_llava.model.builder import load_pretrained_model
10
- from vcoder_llava.mm_utils import process_images, load_image_from_base64, tokenizer_seg_token, tokenizer_depth_seg_token, tokenizer_image_token, KeywordsStoppingCriteria
11
- from vcoder_llava.constants import (
12
- IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN,
13
- SEG_TOKEN_INDEX, DEFAULT_SEG_TOKEN,
14
- DEPTH_TOKEN_INDEX, DEFAULT_DEPTH_TOKEN,
15
- )
16
- from transformers import TextIteratorStreamer
17
- from threading import Thread
18
-
19
- class Chat:
20
- def __init__(self, model_path, model_base, model_name,
21
- load_8bit, load_4bit, device, logger):
22
- if model_path.endswith("/"):
23
- model_path = model_path[:-1]
24
- if model_name is None:
25
- model_paths = model_path.split("/")
26
- if model_paths[-1].startswith('checkpoint-'):
27
- self.model_name = model_paths[-2] + "_" + model_paths[-1]
28
- else:
29
- self.model_name = model_paths[-1]
30
- else:
31
- self.model_name = model_name
32
-
33
- self.device = device
34
- logger.info(f"Loading the model {self.model_name} ...")
35
- self.tokenizer, self.model, self.image_processor, self.seg_image_processor, self.depth_image_processor, self.context_len = load_pretrained_model(
36
- model_path, model_base, self.model_name, load_8bit, load_4bit, device=self.device)
37
- self.is_multimodal = 'llava' in self.model_name.lower()
38
- self.is_seg = "vcoder" in self.model_name.lower()
39
- self.is_depth = "ds" in self.model_name.lower()
40
-
41
- @torch.inference_mode()
42
- def generate_stream(self, params):
43
- tokenizer, model, image_processor, seg_image_processor, depth_image_processor = self.tokenizer, self.model, self.image_processor, self.seg_image_processor, self.depth_image_processor
44
-
45
- prompt = params["prompt"]
46
- ori_prompt = prompt
47
- images = params.get("images", None)
48
- segs = params.get("segs", None)
49
- depths = params.get("depths", None)
50
- num_image_tokens = 0
51
- num_seg_tokens = 0
52
- num_depth_tokens = 0
53
- if images is not None and len(images) > 0 and self.is_multimodal:
54
- if len(images) > 0:
55
- if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
56
- raise ValueError("Number of images does not match number of <image> tokens in prompt")
57
-
58
- images = [load_image_from_base64(image) for image in images]
59
- images = process_images(images, image_processor, model.config)
60
-
61
- if type(images) is list:
62
- images = [image.to(self.model.device, dtype=torch.float16) for image in images]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  else:
64
- images = images.to(self.model.device, dtype=torch.float16)
65
-
66
- replace_token = DEFAULT_IMAGE_TOKEN
67
- prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
68
- num_image_tokens = prompt.count(replace_token) * model.get_vision_tower().num_patches
69
-
70
- if segs is not None and len(segs) > 0 and self.is_seg:
71
- if len(segs) != prompt.count(DEFAULT_SEG_TOKEN):
72
- raise ValueError("Number of segs does not match number of <seg> tokens in prompt")
73
-
74
- segs = [load_image_from_base64(seg) for seg in segs]
75
- segs = process_images(segs, seg_image_processor, model.config)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
 
77
- if type(segs) is list:
78
- segs = [seg.to(self.model.device, dtype=torch.float16) for seg in segs]
79
- else:
80
- segs = segs.to(self.model.device, dtype=torch.float16)
81
-
82
- replace_seg_token = DEFAULT_SEG_TOKEN
83
- prompt = prompt.replace(DEFAULT_SEG_TOKEN, replace_seg_token)
84
- num_seg_tokens = prompt.count(replace_seg_token) * model.get_vision_tower().num_patches
85
-
86
- if depths is not None and len(depths) > 0 and self.is_depth:
87
- if len(depths) != prompt.count(DEFAULT_DEPTH_TOKEN):
88
- raise ValueError("Number of depths does not match number of <depth> tokens in prompt")
89
-
90
- depths = [load_image_from_base64(depth) for depth in depths]
91
- depths = process_images(depths, depth_image_processor, model.config)
92
-
93
- if type(depths) is list:
94
- depths = [depth.to(self.model.device, dtype=torch.float16) for depth in depths]
95
- else:
96
- depths = depths.to(self.model.device, dtype=torch.float16)
97
-
98
- replace_depth_token = DEFAULT_DEPTH_TOKEN
99
- prompt = prompt.replace(DEFAULT_DEPTH_TOKEN, replace_depth_token)
100
- num_depth_tokens = prompt.count(replace_depth_token) * model.get_vision_tower().num_patches
101
- else:
102
- depths = None
103
- else:
104
- segs = None
105
- depths = None
106
- else:
107
- images = None
108
- segs = None
109
- depths = None
110
- image_args = {"images": images, "segs": segs, "depths": depths}
111
- else:
112
- images = None
113
- segs = None
114
- depths = None
115
- image_args = {}
116
-
117
- temperature = float(params.get("temperature", 1.0))
118
- top_p = float(params.get("top_p", 1.0))
119
- max_context_length = getattr(model.config, 'max_position_embeddings', 2048)
120
- max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
121
- stop_str = params.get("stop", None)
122
- do_sample = True if temperature > 0.001 else False
123
-
124
- if self.is_seg:
125
- if self.is_depth:
126
- input_ids = tokenizer_depth_seg_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, DEPTH_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
127
- else:
128
- input_ids = tokenizer_seg_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
129
- else:
130
- input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
131
- keywords = [stop_str]
132
- stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
133
- streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15)
134
-
135
- max_new_tokens = min(max_new_tokens, max_context_length - input_ids.shape[-1] - num_image_tokens - num_seg_tokens - num_depth_tokens)
136
-
137
- if max_new_tokens < 1:
138
- yield json.dumps({"text": ori_prompt + "Exceeds max token length. Please start a new conversation, thanks.", "error_code": 0}).encode() + b"\0"
139
- return
140
-
141
- generated_text = model.generate(
142
- inputs=input_ids,
143
- do_sample=do_sample,
144
- temperature=temperature,
145
- top_p=top_p,
146
- max_new_tokens=max_new_tokens,
147
- streamer=streamer,
148
- stopping_criteria=[stopping_criteria],
149
- use_cache=True,
150
- **image_args
151
- )
152
- # thread.start()
153
-
154
- generated_text = ori_prompt
155
- for new_text in streamer:
156
- generated_text += new_text
157
- if generated_text.endswith(stop_str):
158
- generated_text = generated_text[:-len(stop_str)]
159
- yield json.dumps({"text": generated_text, "error_code": 0}).encode()
160
-
161
- def generate_stream_gate(self, params):
162
- try:
163
- for x in self.generate_stream(params):
164
- yield x
165
- except ValueError as e:
166
- print("Caught ValueError:", e)
167
- ret = {
168
- "text": server_error_msg,
169
- "error_code": 1,
170
- }
171
- yield json.dumps(ret).encode()
172
- except torch.cuda.CudaError as e:
173
- print("Caught torch.cuda.CudaError:", e)
174
- ret = {
175
- "text": server_error_msg,
176
- "error_code": 1,
177
- }
178
- yield json.dumps(ret).encode()
179
- except Exception as e:
180
- print("Caught Unknown Error", e)
181
- ret = {
182
- "text": server_error_msg,
183
- "error_code": 1,
184
- }
185
- yield json.dumps(ret).encode()
186
 
187
 
188
  if __name__ == "__main__":
189
  parser = argparse.ArgumentParser()
190
- parser.add_argument("--host", type=str, default="localhost")
191
- parser.add_argument("--port", type=int, default=21002)
192
- parser.add_argument("--worker-address", type=str,
193
- default="http://localhost:21002")
194
- parser.add_argument("--controller-address", type=str,
195
- default="http://localhost:21001")
196
- parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
197
  parser.add_argument("--model-base", type=str, default=None)
198
  parser.add_argument("--model-name", type=str)
199
- parser.add_argument("--device", type=str, default="cuda")
200
- parser.add_argument("--multi-modal", action="store_true", help="Multimodal mode is automatically detected with model name, please make sure `llava` is included in the model path.")
201
- parser.add_argument("--limit-model-concurrency", type=int, default=5)
202
- parser.add_argument("--stream-interval", type=int, default=1)
203
- parser.add_argument("--no-register", action="store_true")
204
  parser.add_argument("--load-8bit", action="store_true")
205
  parser.add_argument("--load-4bit", action="store_true")
206
- args = parser.parse_args()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import argparse
2
+ import datetime
3
  import json
4
+ import os
5
+ import time
6
+
7
+ import gradio as gr
8
+ import hashlib
9
+
10
+ from vcoder_llava.vcoder_conversation import (default_conversation, conv_templates,
11
+ SeparatorStyle)
12
+ from vcoder_llava.constants import LOGDIR
13
+ from vcoder_llava.utils import (build_logger, server_error_msg,
14
+ violates_moderation, moderation_msg)
15
+ from chat import Chat
16
+
17
+
18
+ logger = build_logger("gradio_app", "gradio_web_server.log")
19
+
20
+ headers = {"User-Agent": "VCoder Client"}
21
+
22
+ no_change_btn = gr.Button()
23
+ enable_btn = gr.Button(interactive=True)
24
+ disable_btn = gr.Button(interactive=False)
25
+
26
+ priority = {
27
+ "vicuna-13b": "aaaaaaa",
28
+ "koala-13b": "aaaaaab",
29
+ }
30
+
31
+
32
+ def get_conv_log_filename():
33
+ t = datetime.datetime.now()
34
+ name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json")
35
+ return name
36
+
37
+
38
+ get_window_url_params = """
39
+ function() {
40
+ const params = new URLSearchParams(window.location.search);
41
+ url_params = Object.fromEntries(params);
42
+ console.log(url_params);
43
+ return url_params;
44
+ }
45
+ """
46
+
47
+
48
+ def load_demo_refresh_model_list(request: gr.Request):
49
+ logger.info(f"load_demo. ip: {request.client.host}")
50
+ state = default_conversation.copy()
51
+ return state
52
+
53
+
54
+ def vote_last_response(state, vote_type, model_selector, request: gr.Request):
55
+ with open(get_conv_log_filename(), "a") as fout:
56
+ data = {
57
+ "tstamp": round(time.time(), 4),
58
+ "type": vote_type,
59
+ "model": model_selector,
60
+ "state": state.dict(),
61
+ }
62
+ fout.write(json.dumps(data) + "\n")
63
+
64
+
65
+ def upvote_last_response(state, model_selector, request: gr.Request):
66
+ vote_last_response(state, "upvote", model_selector, request)
67
+ return ("",) + (disable_btn,) * 3
68
+
69
+
70
+ def downvote_last_response(state, model_selector, request: gr.Request):
71
+ vote_last_response(state, "downvote", model_selector, request)
72
+ return ("",) + (disable_btn,) * 3
73
+
74
+
75
+ def flag_last_response(state, model_selector, request: gr.Request):
76
+ vote_last_response(state, "flag", model_selector, request)
77
+ return ("",) + (disable_btn,) * 3
78
+
79
+ def regenerate(state, image_process_mode, seg_process_mode, depth_process_mode):
80
+ state.messages[-1][-1] = None
81
+ prev_human_msg = state.messages[-2]
82
+ if type(prev_human_msg[1]) in (tuple, list):
83
+ prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode, prev_human_msg[1][3], seg_process_mode, prev_human_msg[1][5], depth_process_mode)
84
+ state.skip_next = False
85
+ return (state, state.to_gradio_chatbot(), "", None, None, None, None) + (disable_btn,) * 5
86
+
87
+
88
+ def clear_history(request: gr.Request):
89
+ state = default_conversation.copy()
90
+ return (state, state.to_gradio_chatbot(), "", None, None, None, None) + (disable_btn,) * 5
91
+
92
+
93
+ def add_text(state, text, image, image_process_mode, seg, seg_process_mode, depth, depth_process_mode, request: gr.Request):
94
+ logger.info(f"add_text. len: {len(text)}")
95
+ if len(text) <= 0 and image is None:
96
+ state.skip_next = True
97
+ return (state, state.to_gradio_chatbot(), "", None, None, None, None) + (no_change_btn,) * 5
98
+ if args.moderate:
99
+ flagged = violates_moderation(text)
100
+ if flagged:
101
+ state.skip_next = True
102
+ return (state, state.to_gradio_chatbot(), moderation_msg, None, None, None, None) + (
103
+ no_change_btn,) * 5
104
+
105
+ text = text[:1200] # Hard cut-off
106
+ if image is not None:
107
+ text = text[:864] # Hard cut-off for images
108
+ if '<image>' not in text:
109
+ text = '<image>\n' + text
110
+ if seg is not None:
111
+ if '<seg>' not in text:
112
+ text = '<seg>\n' + text
113
+ if depth is not None:
114
+ if '<depth>' not in text:
115
+ text = '<depth>\n' + text
116
+
117
+ text = (text, image, image_process_mode, seg, seg_process_mode, depth, depth_process_mode)
118
+ if len(state.get_images(return_pil=True)) > 0:
119
+ state = default_conversation.copy()
120
+ state.append_message(state.roles[0], text)
121
+ state.append_message(state.roles[1], None)
122
+ state.skip_next = False
123
+ return (state, state.to_gradio_chatbot(), "", None, None, None) + (disable_btn,) * 5
124
+
125
+
126
+ def http_bot(state, model_selector, temperature, top_p, max_new_tokens, request: gr.Request):
127
+ start_tstamp = time.time()
128
+ model_name = model_selector
129
+
130
+ if state.skip_next:
131
+ # This generate call is skipped due to invalid inputs
132
+ yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
133
+ return
134
+
135
+ if len(state.messages) == state.offset + 2:
136
+ # First round of conversation
137
+ if "llava" in model_name.lower():
138
+ template_name = "llava_v1"
139
+ new_state = conv_templates[template_name].copy()
140
+ new_state.append_message(new_state.roles[0], state.messages[-2][1])
141
+ new_state.append_message(new_state.roles[1], None)
142
+ state = new_state
143
+
144
+ # Construct prompt
145
+ prompt = state.get_prompt()
146
+
147
+ # Make requests
148
+ pload = {
149
+ "model": model_name,
150
+ "prompt": prompt,
151
+ "temperature": float(temperature),
152
+ "top_p": float(top_p),
153
+ "max_new_tokens": min(int(max_new_tokens), 1536),
154
+ "stop": state.sep if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT] else state.sep2,
155
+ "images": f'List of {len(state.get_images())}',
156
+ "segs": f'List of {len(state.get_segs())}',
157
+ "depths": f'List of {len(state.get_depths())}',
158
+ }
159
+ logger.info(f"==== request ====\n{pload}")
160
+
161
+ pload['images'] = state.get_images()
162
+ pload['segs'] = state.get_segs()
163
+ pload['depths'] = state.get_depths()
164
+
165
+ state.messages[-1][-1] = "▌"
166
+ yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
167
+
168
+
169
+ try:
170
+ # Stream output
171
+ response = chat.generate_stream_gate(pload)
172
+ for chunk in response:
173
+ if chunk:
174
+ data = json.loads(chunk.decode())
175
+ if data["error_code"] == 0:
176
+ output = data["text"][len(prompt):].strip()
177
+ state.messages[-1][-1] = output + "▌"
178
+ yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
179
  else:
180
+ output = data["text"] + f" (error_code: {data['error_code']})"
181
+ state.messages[-1][-1] = output
182
+ yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
183
+ return
184
+ time.sleep(0.03)
185
+ except Exception:
186
+ gr.Warning(server_error_msg)
187
+ state.messages[-1][-1] = server_error_msg
188
+ yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
189
+ return
190
+
191
+ state.messages[-1][-1] = state.messages[-1][-1][:-1]
192
+ yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
193
+ logger.info(f"{output}")
194
+
195
+
196
+ title = "<h1 style='margin-bottom: -10px; text-align: center'>VCoder: Versatile Vision Encoders for Multimodal Large Language Models</h1>"
197
+ # style='
198
+ description = "<p style='font-size: 16px; margin: 5px; font-weight: w300; text-align: center'> <a href='https://praeclarumjj3.github.io/' style='text-decoration:none' target='_blank'>Jitesh Jain, </a> <a href='https://jwyang.github.io/' style='text-decoration:none' target='_blank'>Jianwei Yang, <a href='https://www.humphreyshi.com/home' style='text-decoration:none' target='_blank'>Humphrey Shi</a></p>" \
199
+ + "<p style='font-size: 16px; margin: 5px; font-weight: w600; text-align: center'> <a href='https://praeclarumjj3.github.io/vcoder/' target='_blank'>Project Page</a> | <a href='https://praeclarumjj3.github.io/vcoder/' target='_blank'>Video</a> | <a href='https://arxiv.org/abs/2211.06220' target='_blank'>ArXiv Paper</a> | <a href='https://github.com/SHI-Labs/VCoder' target='_blank'>Github Repo</a></p>" \
200
+ + "<p style='text-align: center; font-size: 16px; margin: 5px; font-weight: w300;'> [Note: You can obtain segmentation maps for your image using the <a href='https://huggingface.co/spaces/shi-labs/OneFormer' style='text-decoration:none' target='_blank'>OneFormer Demo</a> and the depth map from <a href='https://github.com/facebookresearch/dinov2/blob/main/notebooks/depth_estimation.ipynb' style='text-decoration:none' target='_blank'>DINOv2</a>. Please click on Regenerate button if you are unsatisfied with the generated response. You may find screenshots of our demo trials <a href='https://github.com/SHI-Labs/VCoder/blob/main/images/' style='text-decoration:none' target='_blank'>here</a>.]</p>"
201
+
202
+ tos_markdown = ("""
203
+ ### Terms of use
204
+ By using this service, users are required to agree to the following terms:
205
+ The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes.
206
+ """)
207
+
208
+
209
+ learn_more_markdown = ("""
210
+ ### License
211
+ The service is a research preview intended for non-commercial use only, subject to the [License](https://huggingface.co/lmsys/vicuna-7b-v1.5) of Vicuna-v1.5, [License](https://github.com/haotian-liu/LLaVA/blob/main/LICENSE) of LLaVA, [Terms of Use](https://cocodataset.org/#termsofuse) of the COCO dataset, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
212
+ """)
213
+
214
+ block_css = """
215
+
216
+ #buttons button {
217
+ min-width: min(120px,100%);
218
+ }
219
+
220
+ """
221
+
222
+ def build_demo(embed_mode):
223
+
224
+ textbox = gr.Textbox(show_label=False, placeholder="Enter text and press ENTER", container=False)
225
+ with gr.Blocks(title="LLaVA", theme=gr.themes.Default(), css=block_css) as demo:
226
+ state = gr.State()
227
+
228
+ if not embed_mode:
229
+ gr.Markdown(title)
230
+ gr.Markdown(description)
231
+
232
+ with gr.Row():
233
+ with gr.Column(scale=3):
234
+ with gr.Row(elem_id="model_selector_row"):
235
+ model_selector = gr.Dropdown(
236
+ choices=[model + "-4bit" for model in models],
237
+ value=models[0]+"-4bit" if len(models) > 0 else "",
238
+ interactive=True,
239
+ show_label=False,
240
+ container=False)
241
+
242
+ # with gr.Row():
243
+ imagebox = gr.Image(type="pil", label="Image Input")
244
+ image_process_mode = gr.Radio(
245
+ ["Crop", "Resize", "Pad", "Default"],
246
+ value="Default",
247
+ label="Preprocess for non-square image", visible=False)
248
+
249
+ segbox = gr.Image(type="pil", label="Seg Map")
250
+ seg_process_mode = gr.Radio(
251
+ ["Crop", "Resize", "Pad", "Default"],
252
+ value="Default",
253
+ label="Preprocess for non-square Seg Map", visible=False)
254
 
255
+ depthbox = gr.Image(type="pil", label="Depth Map")
256
+ depth_process_mode = gr.Radio(
257
+ ["Crop", "Resize", "Pad", "Default"],
258
+ value="Default",
259
+ label="Preprocess for non-square Depth Map", visible=False)
260
+
261
+ with gr.Accordion("Parameters", open=False) as parameter_row:
262
+ temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.8, step=0.1, interactive=True, label="Temperature",)
263
+ top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.9, step=0.1, interactive=True, label="Top P",)
264
+ max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)
265
+
266
+ with gr.Column(scale=8):
267
+ chatbot = gr.Chatbot(elem_id="chatbot", label="VCoder Chatbot", height=550)
268
+ with gr.Row():
269
+ with gr.Column(scale=8):
270
+ textbox.render()
271
+ with gr.Column(scale=1, min_width=50):
272
+ submit_btn = gr.Button(value="Send", variant="primary")
273
+ with gr.Row(elem_id="buttons") as button_row:
274
+ upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
275
+ downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
276
+ flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
277
+ #stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
278
+ regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
279
+ clear_btn = gr.Button(value="🗑️ Clear", interactive=False)
280
+
281
+ cur_dir = os.path.dirname(os.path.abspath(__file__))
282
+ gr.Examples(examples=[
283
+ [f"{cur_dir}/examples/people.jpg", f"{cur_dir}/examples/people_pan.png", None, "What objects can be seen in the image?", "0.9", "1.0"],
284
+ [f"{cur_dir}/examples/corgi.jpg", f"{cur_dir}/examples/corgi_pan.png", None, "What objects can be seen in the image?", "0.6", "0.7"],
285
+ [f"{cur_dir}/examples/suits.jpg", f"{cur_dir}/examples/suits_pan.png", f"{cur_dir}/examples/suits_depth.jpeg", "Can you describe the depth order of the objects in this image, from closest to farthest?", "0.5", "0.5"],
286
+ [f"{cur_dir}/examples/depth.jpeg", f"{cur_dir}/examples/depth_pan.png", f"{cur_dir}/examples/depth_depth.png", "Can you describe the depth order of the objects in this image, from closest to farthest?", "0.5", "0.5"],
287
+ [f"{cur_dir}/examples/friends.jpg", f"{cur_dir}/examples/friends_pan.png", None, "What is happening in the image?", "0.8", "0.9"],
288
+ [f"{cur_dir}/examples/suits.jpg", f"{cur_dir}/examples/suits_pan.png", None, "What objects can be seen in the image?", "0.5", "0.5"],
289
+ ], inputs=[imagebox, segbox, depthbox, textbox, temperature, top_p])
290
+
291
+ if not embed_mode:
292
+ gr.Markdown(tos_markdown)
293
+ gr.Markdown(learn_more_markdown)
294
+
295
+ # Register listeners
296
+ btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
297
+ upvote_btn.click(upvote_last_response,
298
+ [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
299
+ downvote_btn.click(downvote_last_response,
300
+ [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
301
+ flag_btn.click(flag_last_response,
302
+ [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
303
+ regenerate_btn.click(regenerate, [state, image_process_mode, seg_process_mode, depth_process_mode],
304
+ [state, chatbot, textbox, imagebox, segbox, depthbox] + btn_list).then(
305
+ http_bot, [state, model_selector, temperature, top_p, max_output_tokens],
306
+ [state, chatbot] + btn_list)
307
+ clear_btn.click(clear_history, None, [state, chatbot, textbox, imagebox, segbox, depthbox] + btn_list)
308
+
309
+ textbox.submit(add_text, [state, textbox, imagebox, image_process_mode, segbox, seg_process_mode, depthbox, depth_process_mode], [state, chatbot, textbox, imagebox, segbox, depthbox] + btn_list
310
+ ).then(http_bot, [state, model_selector, temperature, top_p, max_output_tokens],
311
+ [state, chatbot] + btn_list)
312
+ submit_btn.click(add_text, [state, textbox, imagebox, image_process_mode, segbox, seg_process_mode, depthbox, depth_process_mode], [state, chatbot, textbox, imagebox, segbox, depthbox] + btn_list
313
+ ).then(http_bot, [state, model_selector, temperature, top_p, max_output_tokens],
314
+ [state, chatbot] + btn_list)
315
+
316
+ demo.load(load_demo_refresh_model_list, None, [state])
317
+
318
+ return demo
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
319
 
320
 
321
  if __name__ == "__main__":
322
  parser = argparse.ArgumentParser()
323
+ parser.add_argument("--model-path", type=str, default="shi-labs/vcoder_ds_llava-v1.5-13b")
 
 
 
 
 
 
324
  parser.add_argument("--model-base", type=str, default=None)
325
  parser.add_argument("--model-name", type=str)
 
 
 
 
 
326
  parser.add_argument("--load-8bit", action="store_true")
327
  parser.add_argument("--load-4bit", action="store_true")
328
+ parser.add_argument("--device", type=str, default="cuda")
329
+ parser.add_argument("--share", action="store_true")
330
+ parser.add_argument("--moderate", action="store_true")
331
+ parser.add_argument("--embed", action="store_true")
332
+ parser.add_argument("--concurrency-count", type=int, default=10)
333
+ parser.add_argument("--host", type=str, default="0.0.0.0")
334
+ parser.add_argument("--port", type=int)
335
+ args = parser.parse_args()
336
+ logger.info(f"args: {args}")
337
+
338
+ if args.model_name is None:
339
+ model_paths = args.model_path.split("/")
340
+ if model_paths[-1].startswith('checkpoint-'):
341
+ model_name = model_paths[-2] + "_" + model_paths[-1]
342
+ else:
343
+ model_name = model_paths[-1]
344
+ else:
345
+ model_name = args.model_name
346
+
347
+ models = [model_name]
348
+ args.load_4bit = True
349
+
350
+ chat = Chat(
351
+ args.model_path,
352
+ args.model_base,
353
+ args.model_name,
354
+ args.load_8bit,
355
+ args.load_4bit,
356
+ args.device,
357
+ logger
358
+ )
359
+
360
+ logger.info(args)
361
+ demo = build_demo(args.embed)
362
+ demo.queue().launch(
363
+ server_name=args.host,
364
+ server_port=args.port,
365
+ share=args.share
366
+ )