cdactvm committed on
Commit a1377ca · verified · 1 Parent(s): f3490eb

Update app.py

Files changed (1)
  1. app.py  +3 -3
app.py CHANGED
@@ -1,12 +1,12 @@
 import torch
 import gradio as gr
 import torchaudio
-from transformers import AutoModel, AutoProcessor
+from transformers import Wav2Vec2ForCTC, AutoProcessor
 from quanto import qint8, quantize, freeze
 
 # Load and quantize the model
 model_name = "cdactvm/w2v-bert-punjabi"
-model = AutoModel.from_pretrained(model_name)
+model = Wav2Vec2ForCTC.from_pretrained(model_name)  # Ensure it's a CTC model
 processor = AutoProcessor.from_pretrained(model_name)
 
 # Quantization
@@ -26,7 +26,7 @@ def transcribe(audio):
 
     # Run inference
     with torch.no_grad():
-        logits = model(**inputs).logits
+        logits = model(**inputs).logits  # Ensure model has 'logits'
 
     # Decode transcription
     predicted_ids = torch.argmax(logits, dim=-1)
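
For context, the commit keeps the quanto imports and the "# Quantization" comment, but the quantization calls and the rest of transcribe() fall outside the two hunks shown above. Below is a minimal sketch of how those elided parts typically look with the quanto and transformers APIs; the resampling to 16 kHz, the audio loading, and the batch_decode step are assumptions for illustration, not lines taken from this commit.

# Sketch only: everything outside the diff hunks is assumed, not from app.py itself.
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, AutoProcessor
from quanto import qint8, quantize, freeze

model_name = "cdactvm/w2v-bert-punjabi"
model = Wav2Vec2ForCTC.from_pretrained(model_name)
processor = AutoProcessor.from_pretrained(model_name)

# quanto's standard two-step flow: quantize weights to int8, then freeze them
quantize(model, weights=qint8)
freeze(model)
model.eval()

def transcribe(audio_path):
    # Load audio and resample to 16 kHz (assumed model sampling rate)
    waveform, sr = torchaudio.load(audio_path)
    if sr != 16_000:
        waveform = torchaudio.functional.resample(waveform, sr, 16_000)

    # Feature extraction; the exact input key depends on the processor class
    # that AutoProcessor resolves to for this checkpoint
    inputs = processor(waveform.squeeze().numpy(), sampling_rate=16_000, return_tensors="pt")

    # Run inference
    with torch.no_grad():
        logits = model(**inputs).logits

    # Greedy CTC decode
    predicted_ids = torch.argmax(logits, dim=-1)
    return processor.batch_decode(predicted_ids)[0]

The quantize/freeze pair mirrors the imports already present in the diff; only the weights=qint8 argument and the surrounding transcribe() body are guesses about the unshown code.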