Fly-ShuAI committed · verified
Commit c213fc7 · 1 Parent(s): 29b5108

Update app.py

Files changed (1)
  1. app.py +328 -0
app.py CHANGED
@@ -0,0 +1,328 @@
+ import spaces #*
+ import gradio as gr
+ import tempfile
+ import os
+ os.environ['TOKENIZERS_PARALLELISM'] = 'false'
+ 
+ from transformers import AutoModelForImageSegmentation
+ import torch
+ from torchvision import transforms
+ import decord
+ from PIL import Image
+ import numpy as np
+ from diffsynth import ModelManager, WanVideoPipeline, save_video
+ 
+ 
+ num_frames, width, height = 49, 832, 480
+ # gpu_id = 3
+ # device = f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu'
+ device = 'cuda' if torch.cuda.is_available() else 'cpu' #*
+ # pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu124
+ 
+ from modelscope import snapshot_download
+ model_dir = snapshot_download(  # https://www.modelscope.cn/models/AI-ModelScope/RMBG-2.0
+     model_id='AI-ModelScope/RMBG-2.0',
+     local_dir='ckpt/RMBG-2.0',
+     ignore_file_pattern=['onnx*'],
+ )
+ 
+ from huggingface_hub import snapshot_download, hf_hub_download
+ snapshot_download(  # download the whole repo; briaai/RMBG-2.0 itself would require a token
+     repo_id="alibaba-pai/Wan2.1-Fun-1.3B-Control",
+     local_dir="ckpt/Wan2.1-Fun-1.3B-Control",
+     local_dir_use_symlinks=False,
+     resume_download=True,
+     repo_type="model"
+ )
+ 
+ hf_hub_download(
+     repo_id="Kunbyte/Lumen",
+     filename="Lumen-T2V-1.3B-V1.0.ckpt",
+     local_dir="ckpt/",
+     local_dir_use_symlinks=False,
+     resume_download=True,
+ )
+ 
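+ # Note (inferred from the local_dir arguments above): after the downloads, the checkpoints should sit at
+ #     ckpt/RMBG-2.0/                   background-removal model (ModelScope mirror of briaai/RMBG-2.0)
+ #     ckpt/Wan2.1-Fun-1.3B-Control/    Wan2.1-Fun base model (DiT, VAE, T5 and CLIP weights)
+ #     ckpt/Lumen-T2V-1.3B-V1.0.ckpt    Lumen relighting checkpoint
+ 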
+ rmbg_model = AutoModelForImageSegmentation.from_pretrained('ckpt/RMBG-2.0', trust_remote_code=True)  # ckpt/RMBG-2.0
+ torch.set_float32_matmul_precision(['high', 'highest'][0])
+ rmbg_model.to(device)
+ rmbg_model.eval()
+ 
+ model_manager = ModelManager(device="cpu")  # 1.3B: device="cpu" uses ~6G VRAM, device=device uses ~16G VRAM; about 1-2 min per video
+ # wan_dit_path = 'train_res/wan1.3b_zh/full_wc0.5_f1gt0.5_real1_2_zh_en_l_s/lightning_logs/version_0/checkpoints/step-step=30000.ckpt'  # local training-run checkpoint
+ wan_dit_path = 'ckpt/Lumen-T2V-1.3B-V1.0.ckpt'  # use the Lumen checkpoint downloaded above
+ 
+ if 'wan14b' in wan_dit_path.lower():  # 14B: uses about 36G VRAM, about 10 min per video
+     model_manager.load_models(
+         [
+             wan_dit_path if wan_dit_path else 'ckpt/Wan2.1-Fun-14B-Control/diffusion_pytorch_model.safetensors',
+             'ckpt/Wan2.1-Fun-1.3B-Control/Wan2.1_VAE.pth',
+             'ckpt/Wan2.1-Fun-1.3B-Control/models_t5_umt5-xxl-enc-bf16.pth',
+             'ckpt/Wan2.1-Fun-1.3B-Control/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth',
+         ],
+         torch_dtype=torch.bfloat16,  # float8_e4m3fn for fp8 quantization; bfloat16 otherwise
+     )
+ else:
+     model_manager.load_models(
+         [
+             wan_dit_path if wan_dit_path else 'ckpt/Wan2.1-Fun-1.3B-Control/diffusion_pytorch_model.safetensors',
+             'ckpt/Wan2.1-Fun-1.3B-Control/Wan2.1_VAE.pth',
+             'ckpt/Wan2.1-Fun-1.3B-Control/models_t5_umt5-xxl-enc-bf16.pth',
+             'ckpt/Wan2.1-Fun-1.3B-Control/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth',
+         ],
+         torch_dtype=torch.bfloat16,
+     )
+ wan_pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device=device)
+ wan_pipe.enable_vram_management(num_persistent_param_in_dit=None)
+ 
+ 
+ gr_info_duration = 2  # gradio popup information duration
+ 
+ # @spaces.GPU
+ def rmbg_mask(video_path, mask_path=None, progress=gr.Progress()):
+     """Extract foreground from video, return foreground video path"""
+     if not video_path:
+         gr.Warning("Please upload a video first!", duration=gr_info_duration)
+         return None
+ 
+     try:
+         progress(0, desc="Preparing foreground extraction...")
+ 
+         if mask_path and os.path.exists(mask_path):
+             gr.Info("Using uploaded mask video for foreground extraction.", duration=gr_info_duration)
+ 
+             video_frames = decord.VideoReader(uri=video_path, width=width, height=height)
+             video_frames = video_frames.get_batch(range(num_frames)).asnumpy().astype(np.uint8)
+ 
+             mask_frames = decord.VideoReader(uri=mask_path, width=width, height=height)
+             mask_frames = mask_frames.get_batch(range(num_frames)).asnumpy().astype(np.uint8)
+ 
+             fg_frames = np.where(mask_frames >= 127, video_frames, 0)
+             fg_frames = [Image.fromarray(frame) for frame in fg_frames]
+ 
+         else:
+             image_size = (width, height)
+             transform_image = transforms.Compose([
+                 transforms.Resize(image_size),
+                 transforms.ToTensor(),
+                 transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+             ])
+ 
+             video_reader = decord.VideoReader(uri=video_path, width=width, height=height)
+             video_frames = video_reader.get_batch(range(num_frames)).asnumpy()
+             fg_frames = []
+ 
+             # Use progress bar in the loop
+             for i in range(num_frames):
+                 # Update progress bar based on processed frames
+                 progress((i + 1) / num_frames, desc=f"Processing frame {i+1}/{num_frames}...")
+ 
+                 image = Image.fromarray(video_frames[i])
+                 input_images = transform_image(image).unsqueeze(0).to(device)
+                 with torch.no_grad():
+                     preds = rmbg_model(input_images)[-1].sigmoid().cpu()
+                 pred = preds[0].squeeze()
+                 pred_pil = transforms.ToPILImage()(pred)
+                 mask = pred_pil.resize(image.size)  # PIL.Image mode=L
+                 # Extract foreground from image based on mask
+                 fg_image = Image.composite(image, Image.new('RGB', image.size), mask)  # white areas of mask take image1, black areas take image2
+                 fg_frames.append(fg_image)
+ 
+         progress(1.0, desc="Saving video...")
+         with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_file:
+             fg_video_path = temp_file.name
+         save_video(fg_frames, fg_video_path, fps=16, quality=7)
+ 
+         progress(1.0, desc="Foreground extraction completed!")
+         # gr.Info("Foreground extraction successful!")
+         # gr.Video.update(value=fg_video_path, visible=True)
+ 
+         return fg_video_path
+     except Exception as e:
+         error_msg = f"Foreground extraction error: {str(e)}"
+         gr.Error(error_msg)
+         return None
+ 
+ # @spaces.GPU
+ def video_relighting(fg_video_path, prompt, seed=-1, num_inference_steps=50, video_quality=7,
+                      progress=gr.Progress()):
+     """Relight the foreground video based on the text prompt"""
+     if not fg_video_path or not os.path.exists(fg_video_path):
+         gr.Warning("Please extract the foreground first!", duration=gr_info_duration)
+         return None
+     if not prompt:
+         gr.Warning("Please provide a text prompt for relighting!", duration=gr_info_duration)
+         return None
+ 
+     try:
+         fg_video = decord.VideoReader(uri=fg_video_path, width=width, height=height)
+         fg_video = fg_video.get_batch(range(num_frames)).asnumpy().astype('uint8')
+ 
+         progress(0.1, desc="Relighting video...")
+         relit_video = wan_pipe(
+             prompt=prompt,
+             # negative_prompt='Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards',
+             negative_prompt='色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走',  # Chinese version of the English negative prompt commented above
+             num_inference_steps=num_inference_steps,
+             control_video=fg_video,
+             height=height, width=width, num_frames=num_frames,
+             seed=seed, tiled=True,
+         )
+         with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_file:
+             relit_video_path = temp_file.name
+         save_video(relit_video, relit_video_path, fps=16, quality=video_quality)
+         progress(1.0, desc="Relighting processing completed!")
+         gr.Info(f"Relighting successful! Used seed={seed}, steps={num_inference_steps}", duration=gr_info_duration)
+ 
+         return relit_video_path
+     except Exception as e:
+         error_msg = f"Relighting processing error: {str(e)}"
+         gr.Error(error_msg)
+         return None
+ 
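+ # Illustrative sketch (not executed; file names are hypothetical): the two functions above can also
+ # be called directly, without the UI:
+ #     fg_path = rmbg_mask('input.mp4')                # or rmbg_mask('input.mp4', 'mask.mp4') with a MatAnyone alpha video
+ #     out_path = video_relighting(fg_path, 'a man walking on a beach at sunset', seed=42,
+ #                                 num_inference_steps=50, video_quality=7)
+ #     print(out_path)                                 # path of the relit .mp4 in the temp directory
+ 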
+ # Run with: gradio app_lumen.py  or  python app_lumen.py
+ # Examples
+ bg_prompt_path = 'my_data/zh_en_short_prompts.txt'
+ with open(bg_prompt_path, 'r') as f:
+     bg_prompts = f.readlines()
+ bg_prompts = [bg.strip() for bg in bg_prompts if bg.strip()]  # drop empty lines
+ bg_prompts_zh = bg_prompts[: len(bg_prompts)//2]
+ bg_prompts_en = bg_prompts[len(bg_prompts)//2 :]
+ 
+ video_names = [191947, 922930, 1217498, 1302135, 1371894,
+                1428515, 1628805, 1873403, 2259812, 2445920,
+                2639840, 2779867, 2974076]  # 13 videos
+ video_names = video_names * 2
+ video_dir = 'test/pachong_test/video/single_13'
+ relight_dir = 'test/pachong_test/video/single_13_2-res-v1.0-gradio_demo'
+ 
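+ # Note (inferred from the split above): my_data/zh_en_short_prompts.txt is expected to contain the
+ # Chinese example prompts in its first half and their English counterparts in the second half, one per line.
+ 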
+ header = """
+ # <center>💡Lumen: Consistent Video Relighting and Harmonious Background Replacement with Video Generative Models </center>
+ 
+ <div style="display: flex; justify-content: center; gap: 5px;">
+ <a href="https://lumen-relight.github.io"><img src="https://img.shields.io/badge/Project%20Page-Lumen-blue" alt="Project"></a>
+ <a href="https://arxiv.org/abs/2508.12945"><img src="https://img.shields.io/badge/arXiv-Paper-red" alt="arXiv"></a>
+ <a href="https://github.com/Kunbyte-AI/Lumen"><img src="https://img.shields.io/badge/GitHub-Code-black" alt="GitHub"></a>
+ <a href="https://huggingface.co/Kunbyte/Lumen"><img src="https://img.shields.io/badge/🤗%20Hugging%20Face-Model-yellow" alt="HuggingFace"></a>
+ <a href="https://huggingface.co/spaces/Kunbyte/Lumen"><img src="https://img.shields.io/badge/🤗%20Hugging%20Face-Space-yellow" alt="HuggingFace"></a>
+ </div>
+ 
+ 💡 **Lumen** is a video relighting model that can relight the foreground and replace the background of a video based on the input text.
+ 
+ The **usage steps** are as follows:
+ 1. **Upload Video** (the first 49 frames are used and resized to 832*480).
+ 2. **Extract Foreground**. We use [RMBG2.0](https://github.com/ai-anchorite/BRIA-RMBG-2.0) to extract the foreground, but it may produce unstable results. If so, we recommend using [MatAnyone](https://huggingface.co/spaces/PeiqingYang/MatAnyone) to get the **black-and-white mask video** (Alpha Output), upload it, and then click the **S2** button.
+ 3. **Input Caption**. Select or input the caption you want the video to follow. We recommend using an LLM (e.g. [Deepseek](https://chat.deepseek.com/), [Qwen](https://www.tongyi.com/)) to expand the caption with a simple instruction (e.g. "Use your imagination to expand the video description below, including the background and how the ambient light affects the foreground"), since longer prompts tend to give better results.
+ 4. **Relight Video**.
+ """
+ 
+ # Create Gradio interface
+ with gr.Blocks(title="Lumen: Video Relighting Model").queue() as demo:
+     gr.Markdown(header, elem_id="header")
+ 
+     # Row 1: video area, using a nested layout to achieve a 0.4:0.2:0.4 ratio
+     with gr.Row():
+         # Left area: uploaded video and foreground video
+         with gr.Column(scale=3):
+             with gr.Row():
+                 video_input = gr.Video(label="S1. Upload Original Video")  # , scale=0.5
+                 fg_video = gr.Video(label="Foreground Video or Upload Mask Video")
+ 
+         # Right area: relit video
+         with gr.Column(scale=2):
+             relit_video = gr.Video(label="S4. Relighted Video")
+ 
+     # Row 2: two buttons on left and right
+     with gr.Row():
+         extract_btn = gr.Button("S2. Extract Foreground", variant="secondary", size="md")
+         relight_btn = gr.Button("S4. Relight Video (~2 min)", variant="secondary", size="md")
+ 
+     # Row 3: text input box and advanced parameters
+     with gr.Row():
+         # with gr.Column(scale=3):
+         combined_text = gr.Textbox(label="S3. Text Prompt", lines=2,
+                                    placeholder="Click options below to add captions or fill it with your imagination..."
+                                    )
+ 
+     # Row 4: more settings, collapsed by default
+     with gr.Accordion("More Settings", open=False):
+         with gr.Row():
+             seed = gr.Number(value=-1, minimum=-1, label="Seed", precision=0, info="Set to -1 for a random seed (seed >= -1)")
+             steps = gr.Number(value=50, minimum=1, label="Inference Steps", precision=0, info="More steps = better result but slower (step > 0)")
+             video_quality = gr.Number(value=7, minimum=1, maximum=10, label="Video Quality", precision=0, info="The picture quality of the output video (1-10)")
+ 
+     # Row 5: combine the Chinese and English prompts into tab options
+     with gr.Row():
+         with gr.Column():
+             with gr.Tabs():
+                 with gr.Tab("中文描述"):  # "Chinese captions" tab
+                     zh_prompts = gr.Dataset(
+                         components=[gr.Textbox(visible=False)],
+                         samples=[[text] for text in bg_prompts_zh],
+                         label="点击选择视频描述, 多选将叠加",  # "Click to select a video caption; multiple selections are appended"
+                         samples_per_page=len(bg_prompts_zh),
+                     )
+                 with gr.Tab("English Prompts"):
+                     en_prompts = gr.Dataset(
+                         components=[gr.Textbox(visible=False)],
+                         samples=[[text] for text in bg_prompts_en],
+                         label="Click to select the video caption",
+                         samples_per_page=len(bg_prompts_en),
+                     )
+ 
+         with gr.Column():
+             gr.Markdown("### Video Relighting Examples of Lumen(1.3B)")
+             # Prepare the example data
+             example_inputs = []
+             for i in range(len(video_names)):
+                 # demo_ori_path, text, demo_res_path
+                 demo_ori_path = os.path.join(video_dir, f"{video_names[i]}.mp4")
+                 text = bg_prompts[i]
+                 demo_res_path = os.path.join(relight_dir, f"{i+1:03d}.mp4")
+                 example_inputs.append([demo_ori_path, text, demo_res_path])
+ 
+             # Use the gr.Examples component to display the videos directly
+             gr.Examples(
+                 examples=example_inputs,
+                 inputs=[video_input, combined_text, relit_video],
+                 # cache_examples=True,
+                 label="Move your mouse over a video to preview it (may take a few seconds). Click to select an example video and caption. (seed=-1, steps=50, quality=7)",
+                 examples_per_page=len(video_names)//2,
+             )
+ 
+     # Set foreground extraction button event - directly call rmbg_mask
+     extract_btn.click(
+         rmbg_mask,
+         inputs=[video_input, fg_video],
+         outputs=[fg_video],
+     )
+ 
+     # Set relighting button event - directly call video_relighting with new parameters
+     relight_btn.click(
+         video_relighting,
+         inputs=[fg_video, combined_text, seed, steps, video_quality],
+         outputs=[relit_video]
+     )
+ 
+     # Add selection event for Dataset component
+     def select_option(evt: gr.SelectData, current_text):
+         selected_text = evt.value[0]  # Get selected text value
+         if not current_text:
+             return selected_text
+         return f"{current_text}, {selected_text}"
+ 
+     # Bind Dataset selection event
+     zh_prompts.select(
+         select_option,
+         inputs=[combined_text],
+         outputs=[combined_text]
+     )
+     en_prompts.select(
+         select_option,
+         inputs=[combined_text],
+         outputs=[combined_text]
+     )
+ 
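+ # Illustrative behaviour of select_option (not executed): if the textbox already holds "a man walking",
+ # clicking the sample "on a beach at sunset" updates it to "a man walking, on a beach at sunset".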
+ 
+ # Launch application
+ if __name__ == "__main__":
+     demo.launch()  # max_threads could be set here to tune request concurrency