AshwinSankar committed
Commit: 5150d64
Parent(s): 6134bec

mod chunk 10->15
Files changed:
- .gitignore (+0 -0)
- app.py (+3 -3)
.gitignore (ADDED)
File without changes
app.py (CHANGED)
@@ -14,7 +14,7 @@ from parler_tts import ParlerTTSForConditionalGeneration
 from pydub import AudioSegment
 from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed
 
-device = "cuda"
+device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
 torch_dtype = torch.bfloat16 if device != "cpu" else torch.float32
 
 repo_id = "ai4bharat/indic-parler-tts-pretrained"
@@ -200,7 +200,7 @@ frame_rate = model.audio_encoder.config.frame_rate
 def generate_base(text, description, play_steps_in_s=2.0):
     # Initialize variables
     play_steps = int(frame_rate * play_steps_in_s)
-    chunk_size = 10 # Process 10 words at a time
+    chunk_size = 15 # Process 10 words at a time
 
     # Tokenize the full text and description
     inputs = description_tokenizer(description, return_tensors="pt").to(device)
@@ -272,7 +272,7 @@ def generate_base(text, description, play_steps_in_s=2.0):
 def generate_jenny(text, description, play_steps_in_s=2.0):
     # Initialize variables
     play_steps = int(frame_rate * play_steps_in_s)
-    chunk_size = 10 # Process 10 words at a time
+    chunk_size = 15 # Process 10 words at a time
 
     # Tokenize the full text and description
     inputs = description_tokenizer(description, return_tensors="pt").to(device)
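
For context on the first hunk: the new device line adds a runtime fallback from CUDA to Apple's MPS backend to CPU. A minimal standalone sketch of how that selection interacts with the unchanged dtype line, assuming only that PyTorch is installed:

import torch

# Prefer CUDA, then Apple Silicon (MPS), then CPU, mirroring the new line 17 of app.py.
device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

# Unchanged line 18: bfloat16 on accelerators, float32 on CPU.
torch_dtype = torch.bfloat16 if device != "cpu" else torch.float32

print(f"running on {device} with dtype {torch_dtype}")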
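
The chunk_size bump from 10 to 15 suggests that generate_base and generate_jenny split the input text into fixed-size groups of words before synthesis. That splitting code is not part of this diff, so the helper below is only a hypothetical illustration of the effect of the change (split_into_word_chunks is not a function from app.py):

def split_into_word_chunks(text, chunk_size=15):
    """Yield successive groups of at most chunk_size words from text."""
    words = text.split()
    for start in range(0, len(words), chunk_size):
        yield " ".join(words[start:start + chunk_size])

sentence = " ".join(f"w{i}" for i in range(30))  # a 30-word dummy sentence
print(len(list(split_into_word_chunks(sentence, chunk_size=10))))  # 3 chunks before this commit
print(len(list(split_into_word_chunks(sentence, chunk_size=15))))  # 2 chunks after this commit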