csuhan committed on
Commit e7d575b · 1 Parent(s): cb3735c

Upload folder using huggingface_hub

Files changed (1)
  1. app.py +10 -1
app.py CHANGED
@@ -83,6 +83,10 @@ def load_rgbx(image_path, x_image_path):
     image = torch.stack([image, x_image], dim=0)
     return image
 
+
+class Ready: pass
+
+
 def model_worker(
     rank: int, args: argparse.Namespace, barrier: mp.Barrier,
     request_queue: mp.Queue, response_queue: Optional[mp.Queue] = None,
@@ -135,6 +139,8 @@ def model_worker(
     barrier.wait()
 
     while True:
+        if response_queue is not None:
+            response_queue.put(Ready())
         img_path, audio_path, video_path, point_path, fmri_path, depth_path, depth_rgb_path, normal_path, normal_rgb_path, chatbot, max_gen_len, temperature, top_p, modality = request_queue.get()
         if 'image' in modality and img_path is not None:
             image = Image.open(img_path).convert('RGB')
@@ -217,6 +223,10 @@ def gradio_worker(
         return "", chatbot + [[msg, None]]
 
     def stream_model_output(img_path, audio_path, video_path, point_path, fmri_path, depth_path, depth_rgb_path, normal_path, normal_rgb_path, chatbot, max_gen_len, gen_t, top_p, modality):
+        while True:
+            content_piece = response_queue.get()
+            if isinstance(content_piece, Ready):
+                break
         for queue in request_queues:
             queue.put((img_path, audio_path, video_path, point_path, fmri_path, depth_path, depth_rgb_path, normal_path, normal_rgb_path, chatbot, max_gen_len, gen_t, top_p, modality))
         while True:
@@ -368,7 +378,6 @@ def gradio_worker(
             minimum=0, maximum=1, value=0.75, interactive=True,
            label="Top-p",
        )
-       gr.Markdown("Note: We are fixing a bug in multi-user session control.")
 
    img_tab.select(partial(change_modality, 'image'), [], [modality])
    video_tab.select(partial(change_modality, 'video'), [], [modality])
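
What the change does: each model_worker now puts a Ready marker on the shared response queue right before it blocks on request_queue.get(), and stream_model_output in the Gradio front end drains the response queue until it sees that marker before enqueuing a new request. Any output left over from a previous, possibly interrupted generation is discarded first, which appears to be the multi-user session issue the removed note referred to. Below is a minimal, self-contained sketch of the same handshake, assuming a single worker; fake_worker, submit_request, and the toy three-piece "stream" are illustrative stand-ins, not code from app.py.

import multiprocessing as mp


class Ready:
    """Sentinel meaning: the worker is idle and blocked on request_queue.get()."""
    pass


def fake_worker(request_queue: mp.Queue, response_queue: mp.Queue) -> None:
    while True:
        # Signal readiness *before* blocking on the next request,
        # mirroring the model_worker change in this commit.
        response_queue.put(Ready())
        prompt = request_queue.get()
        if prompt is None:  # shutdown signal, only for this sketch
            break
        # Stream a few pieces back, standing in for generated text.
        for piece in (prompt.upper(), "!", "?"):
            response_queue.put(piece)


def submit_request(prompt: str, request_queue: mp.Queue, response_queue: mp.Queue):
    # Drain stale output until the Ready marker, mirroring stream_model_output:
    # whatever an earlier, abandoned request left behind is discarded here.
    while True:
        piece = response_queue.get()
        if isinstance(piece, Ready):
            break
    request_queue.put(prompt)
    # Collect this request's stream (exactly three pieces in this toy worker).
    return [response_queue.get() for _ in range(3)]


if __name__ == "__main__":
    req_q, resp_q = mp.Queue(), mp.Queue()
    worker = mp.Process(target=fake_worker, args=(req_q, resp_q), daemon=True)
    worker.start()
    print(submit_request("hello", req_q, resp_q))  # ['HELLO', '!', '?']
    print(submit_request("again", req_q, resp_q))  # ['AGAIN', '!', '?']
    req_q.put(None)  # let the worker exit cleanly
    worker.join()

Posting Ready before blocking, rather than after finishing a request, means the front end can always resynchronize by reading up to the newest marker, no matter how much of the previous stream was actually consumed.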