gpt-omni committed
Commit 724204b · 1 Parent(s): 41c7b36
Files changed (1)
  1. app.py +2 -3
app.py CHANGED
@@ -11,7 +11,6 @@ import os
 import lightning as L
 import torch
 import time
-import spaces
 from snac import SNAC
 from litgpt import Tokenizer
 from litgpt.utils import (
@@ -39,7 +38,7 @@ from litgpt.generate.base import sample

 device = "cuda" if torch.cuda.is_available() else "cpu"
 ckpt_dir = "./checkpoint"
-streaming_output = False
+streaming_output = True


 OUT_CHUNK = 4096
@@ -304,7 +303,7 @@ def run_AT_batch_stream(


 for chunk in run_AT_batch_stream('./data/samples/output1.wav'):
-    pass
+    audio_data = np.frombuffer(chunk, dtype=np.int16)


 def process_audio(audio):
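
Net effect of the commit: the `spaces` import is dropped, `streaming_output` is switched on, and the warm-up loop now decodes each streamed chunk instead of discarding it with `pass`. Below is a minimal sketch of that consumption pattern, assuming `run_AT_batch_stream` (defined elsewhere in app.py) yields raw 16-bit PCM byte chunks; `collect_audio` is a hypothetical helper name used only for illustration.

import numpy as np

def collect_audio(wav_path):
    # Hypothetical helper: gather the byte chunks streamed by
    # run_AT_batch_stream (assumed to yield raw 16-bit PCM bytes)
    # into a single sample array.
    pieces = []
    for chunk in run_AT_batch_stream(wav_path):
        # Same decoding step the commit adds: bytes -> int16 samples.
        pieces.append(np.frombuffer(chunk, dtype=np.int16))
    return np.concatenate(pieces) if pieces else np.empty(0, dtype=np.int16)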