maher13 committed on
Commit
ea0a23f
·
1 Parent(s): 9ccbaed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -22,22 +22,22 @@ def inference(audio_file, audio_file2):
22
  predicted_ids = torch.argmax(logits, dim=-1)
23
  predicted_ids[predicted_ids == -100] = processor.tokenizer.pad_token_id
24
  transcription1 = processor.tokenizer.batch_decode(predicted_ids)[0]
25
- else:
26
  transcription1 = "N/A"
27
 
28
- if audio_file2:
29
- input_values = processor(map_to_array(audio_file2.name), return_tensors="pt", padding="longest").input_values # Batch size 1
30
- logits = model(input_values).logits
31
 
32
- with torch.no_grad():
33
- predicted_ids = torch.argmax(logits, dim=-1)
34
- predicted_ids[predicted_ids == -100] = processor.tokenizer.pad_token_id
35
- transcription2 = processor.tokenizer.batch_decode(predicted_ids)[0]
36
- else :
37
- transcription2 = "N/A"
38
 
39
 
40
- return transcription1, transcription2
41
 
42
 
43
  gradio_ui = gr.Interface(
 
22
  predicted_ids = torch.argmax(logits, dim=-1)
23
  predicted_ids[predicted_ids == -100] = processor.tokenizer.pad_token_id
24
  transcription1 = processor.tokenizer.batch_decode(predicted_ids)[0]
25
+ else:
26
  transcription1 = "N/A"
27
 
28
+ if audio_file2:
29
+ input_values = processor(map_to_array(audio_file2.name), return_tensors="pt", padding="longest").input_values # Batch size 1
30
+ logits = model(input_values).logits
31
 
32
+ with torch.no_grad():
33
+ predicted_ids = torch.argmax(logits, dim=-1)
34
+ predicted_ids[predicted_ids == -100] = processor.tokenizer.pad_token_id
35
+ transcription2 = processor.tokenizer.batch_decode(predicted_ids)[0]
36
+ else :
37
+ transcription2 = "N/A"
38
 
39
 
40
+ return transcription1, transcription2
41
 
42
 
43
  gradio_ui = gr.Interface(