jadechoghari committed on
Commit cac2b0c
1 Parent(s): 9ec3715

Create pipeline.py

Files changed (1)
  1. pipeline.py +83 -0
pipeline.py ADDED
@@ -0,0 +1,83 @@
+ from diffusers import DiffusionPipeline
+ from invert import Inverter
+ from generate import Generator
+ from utils import init_model, seed_everything, get_frame_ids
+
+ class VidToMePipeline(DiffusionPipeline):
+     def __init__(self, device="cuda", sd_version="2.1", float_precision="fp16", height=512, width=512):
+         super().__init__()
+         # initialize the core pipeline components
+         pipe, scheduler, model_key = init_model(device, sd_version, None, "none", float_precision)
+         self.pipe = pipe
+         self.scheduler = scheduler
+         self.model_key = model_key
+         # DiffusionPipeline already exposes a read-only `device` property,
+         # so the target device is stored under a private name instead
+         self._device = device
+         self.sd_version = sd_version
+         self.float_precision = float_precision
+         self.height = height
+         self.width = width
+
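+     # Note: __call__ below chains the two VidToMe stages: an inversion pass
+     # that saves latents for the source video, then a generation pass that
+     # renders the edited video from those latents. Both stages read the same
+     # config dict built by _build_config.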
+     def __call__(self, video_path=None, video_prompt=None, edit_prompt=None,
+                  control_type="none", n_timesteps=50, guidance_scale=7.5,
+                  negative_prompt="ugly, blurry, low res", frame_range=None,
+                  use_lora=False, seed=123, local_merge_ratio=0.9, global_merge_ratio=0.8):
+
+         # build the run config dynamically from the user inputs
+         config = self._build_config(video_path, video_prompt, edit_prompt, control_type,
+                                     n_timesteps, guidance_scale, negative_prompt,
+                                     frame_range, use_lora, seed, local_merge_ratio, global_merge_ratio)
+
+         # seed everything for reproducibility - change the seed as needed
+         seed_everything(config['seed'])
+
+         # inversion stage: invert the input video and save its latents
+         print("Start inversion!")
+         inversion = Inverter(self.pipe, self.scheduler, config)
+         inversion(config['input_path'], config['inversion']['save_path'])
+
+         # generation stage: render the edited video from the saved latents
+         print("Start generation!")
+         generator = Generator(self.pipe, self.scheduler, config)
+         frame_ids = get_frame_ids(config['generation']['frame_range'], None)
+         generator(config['input_path'], config['generation']['latents_path'],
+                   config['generation']['output_path'], frame_ids=frame_ids)
+         print(f"Output generated at: {config['generation']['output_path']}")
+
+     def _build_config(self, video_path, video_prompt, edit_prompt, control_type,
+                       n_timesteps, guidance_scale, negative_prompt, frame_range,
+                       use_lora, seed, local_merge_ratio, global_merge_ratio):
+         # construct the config dictionary consumed by the Inverter and Generator
+         config = {
+             'sd_version': self.sd_version,
+             'input_path': video_path,
+             'work_dir': "outputs/",
+             'height': self.height,
+             'width': self.width,
+             'inversion': {
+                 'prompt': video_prompt or "Default video prompt.",
+                 'save_path': "outputs/latents",
+                 'steps': 50,
+                 'save_intermediate': False
+             },
+             'generation': {
+                 'control': control_type,
+                 'guidance_scale': guidance_scale,
+                 'n_timesteps': n_timesteps,
+                 'negative_prompt': negative_prompt,
+                 'prompt': edit_prompt or "Default edit prompt.",
+                 'latents_path': "outputs/latents",
+                 'output_path': "outputs/final",
+                 'frame_range': frame_range or [0, 32],
+                 'use_lora': use_lora,
+                 'local_merge_ratio': local_merge_ratio,
+                 'global_merge_ratio': global_merge_ratio
+             },
+             'seed': seed,
+             'device': self._device,
+             'float_precision': self.float_precision
+         }
+         return config
+
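+ # Output layout: with the defaults above, inverted latents land in
+ # outputs/latents and the edited result in outputs/final; both paths are
+ # set in _build_config and can be adjusted there.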
+ # Sample usage (guarded so the pipeline is not built on import)
+ if __name__ == "__main__":
+     pipeline = VidToMePipeline(device="cuda", sd_version="2.1", float_precision="fp16")
+     pipeline(video_path="path/to/video.mp4", video_prompt="A beautiful scene of a sunset",
+              edit_prompt="Make the sunset look more vibrant", control_type="depth", n_timesteps=50)