jbilcke-hf HF staff commited on
Commit
b5d7f4c
1 Parent(s): aee088c

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +22 -7
handler.py CHANGED
@@ -5,6 +5,7 @@ from PIL import Image
5
  import base64
6
  import io
7
  import tempfile
 
8
  import numpy as np
9
  from moviepy.editor import ImageSequenceClip
10
  import os
@@ -14,9 +15,15 @@ import logging
14
  logging.basicConfig(level=logging.INFO)
15
  logger = logging.getLogger(__name__)
16
 
17
- ENABLE_CPU_OFFLOAD = False
18
  EXPERIMENTAL_STUFF = False
19
 
 
 
 
 
 
 
20
  class EndpointHandler:
21
  # Default configuration
22
  DEFAULT_FPS = 24
@@ -187,15 +194,21 @@ class EndpointHandler:
187
  # Get other parameters with defaults
188
  guidance_scale = data.get("guidance_scale", 7.5)
189
  num_inference_steps = data.get("num_inference_steps", self.DEFAULT_NUM_STEPS)
 
190
  seed = data.get("seed", -1)
191
- seed = None if seed == -1 else int(seed)
192
-
193
  logger.info(f"Generating video with prompt: '{prompt}'")
194
- logger.info(f"Parameters: size={width}x{height}, num_frames={num_frames}, fps={fps}")
195
- logger.info(f"Additional params: guidance_scale={guidance_scale}, num_inference_steps={num_inference_steps}")
196
 
197
  try:
198
  with torch.no_grad():
 
 
 
 
 
199
  generation_kwargs = {
200
  "prompt": prompt,
201
  "height": height,
@@ -203,7 +216,8 @@ class EndpointHandler:
203
  "num_frames": num_frames,
204
  "guidance_scale": guidance_scale,
205
  "num_inference_steps": num_inference_steps,
206
- "output_type": "pt"
 
207
  }
208
 
209
  # Check if image is provided for image-to-video generation
@@ -240,7 +254,8 @@ class EndpointHandler:
240
  "num_frames": num_frames,
241
  "fps": fps,
242
  "duration": num_frames / fps,
243
- "num_inference_steps": num_inference_steps
 
244
  }
245
  }
246
 
 
5
  import base64
6
  import io
7
  import tempfile
8
+ import random
9
  import numpy as np
10
  from moviepy.editor import ImageSequenceClip
11
  import os
 
15
  logging.basicConfig(level=logging.INFO)
16
  logger = logging.getLogger(__name__)
17
 
18
+ ENABLE_CPU_OFFLOAD = True
19
  EXPERIMENTAL_STUFF = False
20
 
21
+ random.seed(0)
22
+ np.random.seed(0)
23
+ generator = torch.manual_seed(0)
24
+ # you can notice we don't use device=cuda, for more info see:
25
+ # https://huggingface.co/docs/diffusers/v0.16.0/en/using-diffusers/reproducibility#gpu
26
+
27
  class EndpointHandler:
28
  # Default configuration
29
  DEFAULT_FPS = 24
 
194
  # Get other parameters with defaults
195
  guidance_scale = data.get("guidance_scale", 7.5)
196
  num_inference_steps = data.get("num_inference_steps", self.DEFAULT_NUM_STEPS)
197
+
198
  seed = data.get("seed", -1)
199
+ seed = random.randint(0, 2**32 - 1) if seed == -1 else int(seed)
200
+
201
  logger.info(f"Generating video with prompt: '{prompt}'")
202
+ logger.info(f"Video params: size={width}x{height}, num_frames={num_frames}, fps={fps}")
203
+ logger.info(f"Generation params: seed={seed}, guidance_scale={guidance_scale}, num_inference_steps={num_inference_steps}")
204
 
205
  try:
206
  with torch.no_grad():
207
+
208
+ random.seed(seed)
209
+ np.random.seed(seed)
210
+ generator.manual_seed(seed)  # FIX: was `args.seed` — no `args` exists in this handler; the local `seed` computed above (random.randint fallback or int(data["seed"])) is what the sibling random.seed(seed)/np.random.seed(seed) lines use
211
+
212
  generation_kwargs = {
213
  "prompt": prompt,
214
  "height": height,
 
216
  "num_frames": num_frames,
217
  "guidance_scale": guidance_scale,
218
  "num_inference_steps": num_inference_steps,
219
+ "output_type": "pt",
220
+ "generator": generator
221
  }
222
 
223
  # Check if image is provided for image-to-video generation
 
254
  "num_frames": num_frames,
255
  "fps": fps,
256
  "duration": num_frames / fps,
257
+ "num_inference_steps": num_inference_steps,
258
+ "seed": seed
259
  }
260
  }
261