Update app.py
Browse files
app.py
CHANGED
@@ -18,17 +18,12 @@ YT_LENGTH_LIMIT_S = 3600 # limit to 1 hour YouTube files
|
|
18 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
19 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
20 |
model_id = "Kushtrim/whisper-large-v3-turbo-shqip-115h"
|
21 |
-
model = AutoModelForSpeechSeq2Seq.from_pretrained(
|
22 |
-
|
23 |
-
processor = AutoProcessor.from_pretrained(model_id, token=True)
|
24 |
pipe = pipeline("automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor,
|
25 |
-
chunk_length_s=
|
26 |
token=os.environ["HF"])
|
27 |
|
28 |
-
# pipe = pipeline("automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor,
|
29 |
-
# max_new_tokens=128, chunk_length_s=15, batch_size=16, torch_dtype=torch_dtype, device=device,
|
30 |
-
# token=os.environ["HF"])
|
31 |
-
|
32 |
@spaces.GPU
|
33 |
def transcribe(inputs, task):
|
34 |
if inputs is None:
|
|
|
# ---- Model / pipeline setup (module-level, runs once at app start) ----

# Prefer GPU with half-precision weights; fall back to CPU with float32.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Whisper large-v3-turbo checkpoint fine-tuned on Albanian speech (115 h).
model_id = "Kushtrim/whisper-large-v3-turbo-shqip-115h"

# Hugging Face access token. Using .get() instead of os.environ["HF"] means a
# missing env var degrades to anonymous (public) access rather than crashing
# the whole app with KeyError at import time.
hf_token = os.environ.get("HF")

# Thread the same token through model and processor loading too (the previous
# code only passed it to pipeline(); a gated repo would fail earlier, here).
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id,
    torch_dtype=torch_dtype,
    use_safetensors=True,
    token=hf_token,
).to(device)
processor = AutoProcessor.from_pretrained(model_id, token=hf_token)

# Chunked long-form ASR pipeline: 15 s windows, batched decoding, up to
# 256 new tokens per chunk.
pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    max_new_tokens=256,
    chunk_length_s=15,
    batch_size=16,
    torch_dtype=torch_dtype,
    device=device,
    token=hf_token,
)
26 |
|
|
|
|
|
|
|
|
|
27 |
@spaces.GPU
|
28 |
def transcribe(inputs, task):
|
29 |
if inputs is None:
|