jjz5463 committed
Commit 97af337 · 1 Parent(s): 6d94bcb
Files changed (1):
  baseline_utils.py  +32 -29
baseline_utils.py CHANGED
@@ -123,38 +123,40 @@ def scenes_caption(scenes, api_key):

     return "\n\n".join(captions)

-@spaces.GPU
-def generate_video(scene_list, writer_description, opt, fps=24): # Lower fps
-
-    # Function to generate a single video
-    def generate_single_video(gpu_id, prompt, writer_description, fps, i):
-        # Assign the specific GPU for this process
-        device = f"cuda:{gpu_id}"
-
-        # Initialize the pipeline for this GPU
-        pipe = CogVideoXPipeline.from_pretrained(
-            "THUDM/CogVideoX-5b",
-            torch_dtype=torch.bfloat16,
-            cache_dir="./CogVideoX-5b"
-        )
-        pipe.to(device)  # Move the model to the assigned GPU
-        pipe.enable_model_cpu_offload()
-        pipe.vae.enable_tiling()
-
-        # Generate the video
-        video = pipe(
-            prompt=prompt + f'\nThe main character is described as: {writer_description}.',
-            num_videos_per_prompt=1,
-            num_inference_steps=50,
-            num_frames=fps,
-            guidance_scale=6,
-            generator=torch.Generator(device=device).manual_seed(42),
-        ).frames[0]
-
-        # Save the video
-        video_path = export_to_video(video, output_video_path=f'videos/video{i}.mp4')
-        return video_path
+# Define the single video generation function in the global scope
+def generate_single_video(gpu_id, prompt, writer_description, fps, i):
+    # Assign the specific GPU for this process
+    device = f"cuda:{gpu_id}"
+
+    # Initialize the pipeline for this GPU
+    pipe = CogVideoXPipeline.from_pretrained(
+        "THUDM/CogVideoX-5b",
+        torch_dtype=torch.bfloat16,
+        cache_dir="./CogVideoX-5b"
+    )
+    pipe.to(device)  # Move the model to the assigned GPU
+    pipe.enable_model_cpu_offload()
+    pipe.vae.enable_tiling()
+
+    # Generate the video
+    video = pipe(
+        prompt=prompt + f'\nThe main character is described as: {writer_description}.',
+        num_videos_per_prompt=1,
+        num_inference_steps=50,
+        num_frames=fps,
+        guidance_scale=6,
+        generator=torch.Generator(device=device).manual_seed(42),
+    ).frames[0]
+
+    # Save the video
+    video_path = export_to_video(video, output_video_path=f'videos/video{i}.mp4')
+    return video_path
+
+
+@spaces.GPU
+def generate_video(scene_list, writer_description, opt, fps=24): # Lower fps
+    # Set TOKENIZERS_PARALLELISM to avoid tokenizer warnings
+    os.environ["TOKENIZERS_PARALLELISM"] = "false"

     # Ensure the output directory exists
     os.makedirs("videos", exist_ok=True)

@@ -199,6 +201,7 @@ def concatenate_videos_music(video_paths, output_path, audio_path):
     # Write the concatenated video to a file
     final_clip.write_videofile(output_path, codec="libx264", audio_codec="aac")

+@spaces.GPU
 def summarizer_for_audio(input_text):
     if torch.cuda.is_available():
         device = "cuda"