eustlb HF staff committed on
Commit
e3d1391
·
1 Parent(s): f4a680a
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -5,6 +5,7 @@ from transformers.pipelines.audio_utils import ffmpeg_read
5
  import torch
6
  import gradio as gr
7
  import time
 
8
  import numpy as np
9
 
10
  BATCH_SIZE = 16
@@ -98,7 +99,6 @@ def transcribe(inputs):
98
  return result
99
 
100
  pipe._forward = _forward_time
101
- print(inputs)
102
  text = pipe(inputs, batch_size=BATCH_SIZE)["text"]
103
 
104
  yield distil_text, distil_runtime, text, runtime
@@ -109,8 +109,8 @@ inputs = np.random.randn(30 * pipe.feature_extractor.sampling_rate)
109
  inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}
110
 
111
  for _ in range(N_WARMUP):
112
- _ = pipe_forward(inputs.copy(), batch_size=BATCH_SIZE)["text"]
113
- _ = distil_pipe_forward(inputs.copy(), batch_size=BATCH_SIZE)["text"]
114
  print(_)
115
  print("Models warmed up!")
116
 
 
5
  import torch
6
  import gradio as gr
7
  import time
8
+ import copy
9
  import numpy as np
10
 
11
  BATCH_SIZE = 16
 
99
  return result
100
 
101
  pipe._forward = _forward_time
 
102
  text = pipe(inputs, batch_size=BATCH_SIZE)["text"]
103
 
104
  yield distil_text, distil_runtime, text, runtime
 
109
  inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}
110
 
111
  for _ in range(N_WARMUP):
112
+ _ = pipe_forward(copy.deepcopy(inputs), batch_size=BATCH_SIZE)["text"]
113
+ _ = distil_pipe_forward(copy.deepcopy(inputs), batch_size=BATCH_SIZE)["text"]
114
  print(_)
115
  print("Models warmed up!")
116