dcahn12 committed
Commit a2ad303 · Parent: d902cee

Edit server file

Files changed (3)
  1. gradio_utils.py +0 -18
  2. gradio_web_server copy.py +0 -227
  3. gradio_web_server.py +0 -42
gradio_utils.py CHANGED
@@ -11,9 +11,6 @@ from llava.model.builder import load_pretrained_model
 from llava.utils import disable_torch_init
 import shutil
 
-# <a href="https://github.com/SNUMPR/vlm-rlaif.git" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
-#     <img src="https://z1.ax1x.com/2023/11/07/pil4sqH.png" alt="VLM-RLAIF" style="max-width: 120px; height: auto;">
-# </a>
 
 cur_dir = os.path.dirname(os.path.abspath(__file__))
 title_markdown = ("""
@@ -34,7 +31,6 @@ title_markdown = ("""
 </div>
 </div>
 """)
-# <a href='https://github.com/PKU-YuanGroup/Video-LLaVA/stargazers'><img src='https://img.shields.io/github/stars/PKU-YuanGroup/Video-LLaVA.svg?style=social'></a>  # add next to the arXiv button?
 
 block_css = """
 #buttons button {
@@ -58,15 +54,9 @@ The service is a research preview intended for non-commercial use only, subject
 
 class Chat:
     def __init__(self, model_path, conv_mode, model_base=None, load_8bit=False, load_4bit=False, device='cuda', cache_dir=None):
-        # model_base = '/dataset/yura/vlm-rlaif/pretrained/final_models/Video_LLaVA_SFT'
-        # model_base = '/dataset/yura/vlm-rlaif/pretrained/llava-v1.5-7b-lora_w_lora_16_sftv2_short1632_and_then_long_rank32_alpha32_lr1e4_allmodels/SFT_merged'
-        # model_path = '/dataset/yura/vlm-rlaif/pretrained/LLaVA_Video-RL-Fact-RLHF-7b_SFTv2_RM_13b_v1_40k-v1.5-336-lora-padding/checkpoint-180/adapter_model/lora_policy'
 
         disable_torch_init()
         model_name = get_model_name_from_path(model_path)
-        # self.tokenizer, self.model, image_processor, context_len = load_pretrained_model(model_path, model_base, model_name,
-        #                                                                                  load_8bit, load_4bit,
-        #                                                                                  device=device, cache_dir=cache_dir)
         is_rlhf_checkpoint = 'rlhf' in model_path.lower()
         print("MODEL_PATH", model_path)
         print("RLHF Checkpoint: ", is_rlhf_checkpoint)
@@ -79,16 +69,11 @@ class Chat:
             shutil.copy(os.path.join(model_base, "config.json"), os.path.join(model_path, "config.json"))  # Copy SFT model's config -> to RLHF folder
             print("Listed", os.listdir(model_path))
             print("Copying done")
-            # return(model_name)
-            # return
-        # self.tokenizer, self.model, image_processor, context_len = load_pretrained_model(model_path, model_base, model_name, load_8bit, load_4bit, device=device)
         self.tokenizer, self.model, image_processor, context_len = load_pretrained_model(model_path, model_base, model_name, False, False, device=device)
 
 
 
         self.image_processor = image_processor
-        # self.image_processor = processor['image']
-        # self.video_processor = processor['video']
         self.conv_mode = conv_mode
         self.conv = conv_templates[conv_mode].copy()
         self.device = self.model.device
@@ -114,9 +99,6 @@ class Chat:
         latest_state = self._get_latest_prompt(state)
         prompt = latest_state.get_prompt()
 
-        # print('\n\n\n')
-        # print(prompt)
-
         input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
 
         temperature = 0.2
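Condensed, the loading path this commit keeps in Chat.__init__ is sketched below for orientation. It reuses the helpers the file already imports (disable_torch_init, get_model_name_from_path, load_pretrained_model); the load_policy wrapper name is hypothetical, not part of the repository.

import os
import shutil

from llava.mm_utils import get_model_name_from_path
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init


def load_policy(model_path, model_base=None, device='cuda'):
    # Hypothetical wrapper condensing the Chat.__init__ shown above.
    disable_torch_init()
    model_name = get_model_name_from_path(model_path)

    # An RLHF (LoRA policy) checkpoint is first given the SFT base model's
    # config.json, copied from model_base into the checkpoint folder.
    if 'rlhf' in model_path.lower():
        shutil.copy(os.path.join(model_base, "config.json"),
                    os.path.join(model_path, "config.json"))

    # 8-bit/4-bit quantized loading is hard-disabled (False, False),
    # matching the call kept by this commit.
    return load_pretrained_model(model_path, model_base, model_name,
                                 False, False, device=device)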
gradio_web_server copy.py DELETED
@@ -1,227 +0,0 @@
-import shutil
-import subprocess
-
-import torch
-import gradio as gr
-from fastapi import FastAPI
-import os
-os.environ['CUDA_VISIBLE_DEVICES'] = '0'
-from PIL import Image
-import tempfile
-from decord import VideoReader, cpu
-from transformers import TextStreamer
-import argparse
-
-import sys
-sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "Evaluation"))
-from llava.constants import DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
-from llava.conversation import conv_templates, SeparatorStyle, Conversation
-from llava.mm_utils import process_images
-
-from Evaluation.infer_utils import load_video_into_frames
-from serve.utils import load_image, image_ext, video_ext
-from serve.gradio_utils import Chat, tos_markdown, learn_more_markdown, title_markdown, block_css
-
-
-def save_image_to_local(image):
-    filename = os.path.join('temp', next(tempfile._get_candidate_names()) + '.jpg')
-    image = Image.open(image)
-    image.save(filename)
-    # print(filename)
-    return filename
-
-
-def save_video_to_local(video_path):
-    filename = os.path.join('temp', next(tempfile._get_candidate_names()) + '.mp4')
-    shutil.copyfile(video_path, filename)
-    return filename
-
-
-def generate(image1, video, textbox_in, first_run, state, state_, images_tensor, num_frames=50):
-    # ======= manually clear the conversation
-    # state = conv_templates[conv_mode].copy()
-    # state_ = conv_templates[conv_mode].copy()
-    # # =======
-    flag = 1
-    if not textbox_in:
-        if len(state_.messages) > 0:
-            textbox_in = state_.messages[-1][1]
-            state_.messages.pop(-1)
-            flag = 0
-        else:
-            return "Please enter instruction"
-    print("Video", video)  # passed in correctly
-    print("Images_tensor", images_tensor)  # None
-    print("Textbox_IN", textbox_in)  # passed in correctly
-    print("State", state)  # None
-    print("State_", state_)  # None
-    # print(len(state_.messages))
-
-    video = video if video else "none"
-
-    if type(state) is not Conversation:
-        state = conv_templates[conv_mode].copy()
-        state_ = conv_templates[conv_mode].copy()
-        images_tensor = []
-
-    first_run = False if len(state.messages) > 0 else True
-
-    text_en_in = textbox_in.replace("picture", "image")
-
-    image_processor = handler.image_processor
-    assert os.path.exists(video)
-    if os.path.splitext(video)[-1].lower() in video_ext:  # video extension
-        video_decode_backend = 'opencv'
-    elif os.path.splitext(os.listdir(video)[0]).lower() in image_ext:  # frames folder
-        video_decode_backend = 'frames'
-    else:
-        raise ValueError(f'Support video of {video_ext} and frames of {image_ext}, but found {os.path.splitext(video)[-1].lower()}')
-
-    frames = load_video_into_frames(video, video_decode_backend=video_decode_backend, num_frames=num_frames)
-    tensor = process_images(frames, image_processor, argparse.Namespace(image_aspect_ratio='pad'))
-    # tensor = video_processor(video, return_tensors='pt')['pixel_values'][0]
-    # print(tensor.shape)
-    tensor = tensor.to(handler.model.device, dtype=dtype)
-    # images_tensor.append(tensor)
-    images_tensor = tensor
-
-    if handler.model.config.mm_use_im_start_end:
-        text_en_in = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + text_en_in
-    else:
-        text_en_in = DEFAULT_IMAGE_TOKEN + '\n' + text_en_in
-    text_en_out, state_ = handler.generate(images_tensor, text_en_in, first_run=first_run, state=state_)
-    state_.messages[-1] = (state_.roles[1], text_en_out)
-
-    text_en_out = text_en_out.split('#')[0]
-    textbox_out = text_en_out
-
-    show_images = ""
-    if os.path.exists(video):
-        filename = save_video_to_local(video)
-        show_images += f'<video controls playsinline width="500" style="display: inline-block;" src="./file={filename}"></video>'
-    if flag:
-        state.append_message(state.roles[0], textbox_in + "\n" + show_images)
-    state.append_message(state.roles[1], textbox_out)
-
-    return (state, state_, state.to_gradio_chatbot(), False, gr.update(value=None, interactive=True), images_tensor, gr.update(value=image1 if os.path.exists(video) else None, interactive=True), gr.update(value=video if os.path.exists(video) else None, interactive=True))
-
-
-def regenerate(state, state_):
-    state.messages.pop(-1)
-    state_.messages.pop(-1)
-    if len(state.messages) > 0:
-        return state, state_, state.to_gradio_chatbot(), False
-    return (state, state_, state.to_gradio_chatbot(), True)
-
-
-def clear_history(state, state_):
-    state = conv_templates[conv_mode].copy()
-    state_ = conv_templates[conv_mode].copy()
-    return (gr.update(value=None, interactive=True),
-            gr.update(value=None, interactive=True), \
-            gr.update(value=None, interactive=True), \
-            True, state, state_, state.to_gradio_chatbot(), [])
-
-
-# ==== CHANGE HERE ====
-# conv_mode = "llava_v1"
-# model_path = 'LanguageBind/Video-LLaVA-7B'
-# FIXME!!!
-
-conv_mode = "llava_v0"
-model_path = 'SNUMPR/vlm_rlaif_video_llava_7b'
-# model_path = '/dataset/yura/vlm-rlaif/pretrained/final_models/Video_LLaVA_VLM_RLAIF_merged'
-cache_dir = './cache_dir'
-device = 'cuda'
-# device = 'cpu'
-load_8bit = True
-load_4bit = False
-dtype = torch.float16
-# =============
-
-handler = Chat(model_path, conv_mode=conv_mode, load_8bit=load_8bit, load_4bit=load_8bit, device=device, cache_dir=cache_dir)
-# handler.model.to(dtype=dtype)
-if not os.path.exists("temp"):
-    os.makedirs("temp")
-
-app = FastAPI()
-
-
-textbox = gr.Textbox(
-    show_label=False, placeholder="Enter text and press ENTER", container=False
-)
-with gr.Blocks(title='VLM-RLAIF', theme=gr.themes.Default(), css=block_css) as demo:
-    gr.Markdown(title_markdown)
-    state = gr.State()
-    state_ = gr.State()
-    first_run = gr.State()
-    images_tensor = gr.State()
-
-    image1 = gr.Image(label="Input Image", type="filepath")
-    with gr.Row():
-        with gr.Column(scale=3):
-            video = gr.Video(label="Input Video")
-
-            cur_dir = os.path.dirname(os.path.abspath(__file__))
-            gr.Examples(
-                examples=[
-                    [
-                        f"{cur_dir}/examples/sample_demo_1.mp4",
-                        "Why is this video funny?",
-                    ],
-                    [
-                        f"{cur_dir}/examples/sample_demo_3.mp4",
-                        "Can you identify any safety hazards in this video?"
-                    ],
-                    [
-                        f"{cur_dir}/examples/sample_demo_9.mp4",
-                        "Describe the video.",
-                    ],
-                    [
-                        f"{cur_dir}/examples/sample_demo_22.mp4",
-                        "Describe the activity in the video.",
-                    ],
-                ],
-                inputs=[video, textbox],
-            )
-
-        with gr.Column(scale=7):
-            chatbot = gr.Chatbot(label="VLM_RLAIF", bubble_full_width=True).style(height=750)
-            with gr.Row():
-                with gr.Column(scale=8):
-                    textbox.render()
-                with gr.Column(scale=1, min_width=50):
-                    submit_btn = gr.Button(
-                        value="Send", variant="primary", interactive=True
-                    )
-            with gr.Row(elem_id="buttons") as button_row:
-                upvote_btn = gr.Button(value="👍 Upvote", interactive=True)
-                downvote_btn = gr.Button(value="👎 Downvote", interactive=True)
-                flag_btn = gr.Button(value="⚠️ Flag", interactive=True)
-                # stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
-                regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=True)
-                # clear_btn = gr.Button(value="🗑️ Clear history", interactive=True)
-
-    gr.Markdown(tos_markdown)
-    gr.Markdown(learn_more_markdown)
-
-    submit_btn.click(generate, [image1, video, textbox, first_run, state, state_, images_tensor],
-                     [state, state_, chatbot, first_run, textbox, images_tensor, image1, video])
-    # submit_btn.click(generate, [video, textbox, first_run, state, state_, images_tensor],
-    #                  [state, state_, chatbot, first_run, textbox, images_tensor, video])
-
-    regenerate_btn.click(regenerate, [state, state_], [state, state_, chatbot, first_run]).then(
-        generate, [image1, video, textbox, first_run, state, state_, images_tensor], [state, state_, chatbot, first_run, textbox, images_tensor, image1, video])
-    #     generate, [video, textbox, first_run, state, state_, images_tensor], [state, state_, chatbot, first_run, textbox, images_tensor, video])
-
-    # clear_btn.click(clear_history, [state, state_],
-    #                 [image1, video, textbox, first_run, state, state_, chatbot, images_tensor])
-    #                 [video, textbox, first_run, state, state_, chatbot, images_tensor])
-
-# app = gr.mount_gradio_app(app, demo, path="/")
-# demo.launch(share=True)
-demo.launch()
-
-# uvicorn videollava.serve.gradio_web_server:app
-# python -m videollava.serve.gradio_web_server
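Both server files leave the FastAPI mounting path commented out in favor of a plain demo.launch(). For reference, a minimal sketch of that alternative using Gradio's mount_gradio_app; the UI body and the uvicorn module path below are placeholders, not the repository's actual layout.

import gradio as gr
from fastapi import FastAPI

app = FastAPI()

with gr.Blocks(title='VLM-RLAIF') as demo:
    gr.Markdown("VLM-RLAIF demo")  # stand-in UI for the sketch

# Mount the Blocks UI onto the FastAPI app at the root path.
app = gr.mount_gradio_app(app, demo, path="/")

# Then serve with, e.g.: uvicorn serve.gradio_web_server:app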
gradio_web_server.py CHANGED
@@ -26,7 +26,6 @@ def save_image_to_local(image):
     filename = os.path.join('temp', next(tempfile._get_candidate_names()) + '.jpg')
     image = Image.open(image)
     image.save(filename)
-    # print(filename)
     return filename
 
 
@@ -37,10 +36,6 @@ def save_video_to_local(video_path):
 
 
 def generate(video, textbox_in, first_run, state, state_, images_tensor, num_frames=50):
-    # ======= manually clear the conversation
-    # state = conv_templates[conv_mode].copy()
-    # state_ = conv_templates[conv_mode].copy()
-    # # =======
     flag = 1
     if not textbox_in:
         if len(state_.messages) > 0:
@@ -49,18 +44,6 @@ def generate(video, textbox_in, first_run, state, state_, images_tensor, num_frames=50):
             flag = 0
         else:
             return "Please enter instruction"
-    # else:
-    #     if state is not None and state_ is not None:
-    #         # reset conversations
-    #         state.messages = []
-    #         state_.messages = []
-
-    print("Video", video)  # passed in correctly
-    print("Images_tensor", images_tensor)  # None
-    print("Textbox_IN", textbox_in)  # passed in correctly
-    print("State", state)  # None
-    print("State_", state_)  # None
-    # print(len(state_.messages))
 
     video = video if video else "none"
 
@@ -84,10 +67,7 @@ def generate(video, textbox_in, first_run, state, state_, images_tensor, num_frames=50):
 
     frames = load_video_into_frames(video, video_decode_backend=video_decode_backend, num_frames=num_frames)
     tensor = process_images(frames, image_processor, argparse.Namespace(image_aspect_ratio='pad'))
-    # tensor = video_processor(video, return_tensors='pt')['pixel_values'][0]
-    # print(tensor.shape)
     tensor = tensor.to(handler.model.device, dtype=dtype)
-    # images_tensor.append(tensor)
     images_tensor = tensor
 
     if handler.model.config.mm_use_im_start_end:
@@ -130,23 +110,16 @@ def clear_history(state, state_):
 
 
 # ==== CHANGE HERE ====
-# conv_mode = "llava_v1"
-# model_path = 'LanguageBind/Video-LLaVA-7B'
-# FIXME!!!
-
 conv_mode = "llava_v0"
 model_path = 'SNUMPR/vlm_rlaif_video_llava_7b'
-# model_path = '/dataset/yura/vlm-rlaif/pretrained/final_models/Video_LLaVA_VLM_RLAIF_merged'
 cache_dir = './cache_dir'
 device = 'cuda'
-# device = 'cpu'
 load_8bit = True
 load_4bit = False
 dtype = torch.float16
 # =============
 
 handler = Chat(model_path, conv_mode=conv_mode, load_8bit=load_8bit, load_4bit=load_8bit, device=device, cache_dir=cache_dir)
-# handler.model.to(dtype=dtype)
 if not os.path.exists("temp"):
     os.makedirs("temp")
 
@@ -163,7 +136,6 @@ with gr.Blocks(title='VLM-RLAIF', theme=gr.themes.Default(), css=block_css) as demo:
     first_run = gr.State()
     images_tensor = gr.State()
 
-    # image1 = gr.Image(label="Input Image", type="filepath")
     with gr.Row():
         with gr.Column(scale=3):
             video = gr.Video(label="Input Video")
@@ -204,28 +176,14 @@ with gr.Blocks(title='VLM-RLAIF', theme=gr.themes.Default(), css=block_css) as demo:
                 upvote_btn = gr.Button(value="👍 Upvote", interactive=True)
                 downvote_btn = gr.Button(value="👎 Downvote", interactive=True)
                 flag_btn = gr.Button(value="⚠️ Flag", interactive=True)
-                # stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
                 regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=True)
-                # clear_btn = gr.Button(value="🗑️ Clear history", interactive=True)
 
     gr.Markdown(tos_markdown)
     gr.Markdown(learn_more_markdown)
 
     submit_btn.click(generate, [video, textbox, first_run, state, state_, images_tensor],
                      [state, state_, chatbot, first_run, textbox, images_tensor, video])
-    # submit_btn.click(generate, [video, textbox, first_run, state, state_, images_tensor],
-    #                  [state, state_, chatbot, first_run, textbox, images_tensor, video])
 
     regenerate_btn.click(regenerate, [state, state_], [state, state_, chatbot, first_run]).then(
         generate, [video, textbox, first_run, state, state_, images_tensor], [state, state_, chatbot, first_run, textbox, images_tensor, video])
-    #     generate, [video, textbox, first_run, state, state_, images_tensor], [state, state_, chatbot, first_run, textbox, images_tensor, video])
-
-    # clear_btn.click(clear_history, [state, state_],
-    #                 [image1, video, textbox, first_run, state, state_, chatbot, images_tensor])
-    #                 [video, textbox, first_run, state, state_, chatbot, images_tensor])
-
-# app = gr.mount_gradio_app(app, demo, path="/")
 demo.launch()
-
-# uvicorn videollava.serve.gradio_web_server:app
-# python -m videollava.serve.gradio_web_server
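The preprocessing that generate() keeps reduces to the sketch below: decode the input into frames, pad-process them with the model's image processor, and move the batch onto the model's device. The video_to_tensor name is hypothetical, and the directory check stands in for the file's extension dispatch on video_ext/image_ext. Note also that the handler construction above passes load_8bit to both quantization flags (load_4bit=load_8bit); it reads like a typo, though it is harmless since Chat hard-codes False, False when calling load_pretrained_model.

import argparse
import os

import torch
from llava.mm_utils import process_images
from Evaluation.infer_utils import load_video_into_frames


def video_to_tensor(video, image_processor, device, num_frames=50, dtype=torch.float16):
    # Hypothetical condensation of generate()'s frame preprocessing.
    # A directory of frames uses the 'frames' backend; a video file uses
    # 'opencv' (the original dispatches on video_ext/image_ext instead).
    backend = 'frames' if os.path.isdir(video) else 'opencv'
    frames = load_video_into_frames(video, video_decode_backend=backend,
                                    num_frames=num_frames)
    # Pad each frame to a square aspect ratio and stack into one batch tensor.
    tensor = process_images(frames, image_processor,
                            argparse.Namespace(image_aspect_ratio='pad'))
    return tensor.to(device, dtype=dtype)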
  demo.launch()