smoothieAI committed on
Commit
f668249
·
verified ·
1 Parent(s): 1a81101

Update pipeline.py

Browse files
Files changed (1) hide show
  1. pipeline.py +23 -10
pipeline.py CHANGED
@@ -1492,19 +1492,26 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
1492
  added_cond_kwargs=added_cond_kwargs,
1493
  ).sample
1494
 
1495
- # sum the noise predictions for the unconditional and text conditioned noise
1496
- start_guidance_time = time.time()
1497
-
1498
  if do_classifier_free_guidance:
1499
- # Split tensor along its first dimension
1500
  torch.cuda.synchronize() # Synchronize GPU before starting timing
1501
- time_chunk = time.time()
 
 
 
 
1502
 
1503
  noise_pred_uncond, noise_pred_text = torch.chunk(noise_pred, 2, dim=0)
1504
 
1505
- torch.cuda.synchronize() # Synchronize GPU after operation
1506
- print("chunk time", time.time() - time_chunk)
1507
-
 
 
 
 
 
1508
  for i, context_index in enumerate(current_context_indexes):
1509
  # Perform batch addition
1510
  noise_pred_uncond_sum[..., context_index, :, :] += noise_pred_uncond[:, :, i, :, :]
@@ -1513,8 +1520,14 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
1513
  # Batch increment for latent_counter
1514
  latent_counter[context_index] += 1
1515
 
1516
-
1517
- print("guidance time", time.time() - start_guidance_time)
 
 
 
 
 
 
1518
 
1519
  # set the step index to the current batch
1520
  self.scheduler._step_index = i
 
1492
  added_cond_kwargs=added_cond_kwargs,
1493
  ).sample
1494
 
1495
+
 
 
1496
  if do_classifier_free_guidance:
1497
+ # Start timing for overall guidance process
1498
  torch.cuda.synchronize() # Synchronize GPU before starting timing
1499
+ start_guidance_time = time.time()
1500
+
1501
+ # Timing for chunk operation
1502
+ torch.cuda.synchronize() # Synchronize GPU before chunking
1503
+ time_chunk_start = time.time()
1504
 
1505
  noise_pred_uncond, noise_pred_text = torch.chunk(noise_pred, 2, dim=0)
1506
 
1507
+ torch.cuda.synchronize() # Synchronize GPU after chunking
1508
+ time_chunk_end = time.time()
1509
+ print("Chunk time: {:.6f} seconds".format(time_chunk_end - time_chunk_start))
1510
+
1511
+ # Timing for batch addition and latent counter increment
1512
+ torch.cuda.synchronize() # Synchronize GPU before batch addition
1513
+ time_batch_addition_start = time.time()
1514
+
1515
  for i, context_index in enumerate(current_context_indexes):
1516
  # Perform batch addition
1517
  noise_pred_uncond_sum[..., context_index, :, :] += noise_pred_uncond[:, :, i, :, :]
 
1520
  # Batch increment for latent_counter
1521
  latent_counter[context_index] += 1
1522
 
1523
+ torch.cuda.synchronize() # Synchronize GPU after batch addition
1524
+ time_batch_addition_end = time.time()
1525
+ print("Batch addition and counter increment time: {:.6f} seconds".format(time_batch_addition_end - time_batch_addition_start))
1526
+
1527
+ # End timing for overall guidance process
1528
+ torch.cuda.synchronize() # Synchronize GPU after overall guidance process
1529
+ end_guidance_time = time.time()
1530
+ print("Total guidance time: {:.6f} seconds".format(end_guidance_time - start_guidance_time))
1531
 
1532
  # set the step index to the current batch
1533
  self.scheduler._step_index = i