Update app.py
app.py
CHANGED
@@ -12,7 +12,7 @@ app = FastAPI()
 
 import torch
 
-model_id = "
+model_id = "WajeehAzeemX/whisper-smal-ar-testing-kale-5000"
 model = WhisperForConditionalGeneration.from_pretrained(
     model_id
 )
@@ -50,38 +50,3 @@ async def transcribe_audio(request: Request):
         raise HTTPException(status_code=500, detail=str(e))
 
 
-model_id_2 = "openai/whisper-medium"
-model_2 = WhisperForConditionalGeneration.from_pretrained(
-    model_id_2
-)
-processor_2 = WhisperProcessor.from_pretrained(model_id_2, attn_implementation="sdpa")
-model_2.config.forced_decoder_ids = None
-forced_decoder_ids_2 = processor_2.get_decoder_prompt_ids(language="Arabic", task="transcribe")
-model_2.generation_config.cache_implementation = "static"
-pipe = pipeline(
-    "automatic-speech-recognition",
-    model=model_2,
-    tokenizer=processor_2.tokenizer,
-    feature_extractor=processor_2.feature_extractor,
-)
-@app.post("/transcribe-large/")
-async def transcribe_audio_2(request: Request):
-    try:
-        # Read binary data from the request
-        audio_data = await request.body()
-        # Convert binary data to a file-like object
-        audio_file = io.BytesIO(audio_data)
-        # Load the audio file using librosa
-        audio_array, sampling_rate = librosa.load(audio_file, sr=16000)
-        # Process the audio array
-        input_features = processor_2(audio_array, sampling_rate=sampling_rate, return_tensors="pt").input_features
-        # Generate token ids
-        predicted_ids = model_2.generate(input_features, forced_decoder_ids=forced_decoder_ids_2)
-        # Decode token ids to text
-        transcription = processor_2.batch_decode(predicted_ids, skip_special_tokens=True)
-        # Print the transcription
-        print(transcription[0])  # Display the transcription
-        return {"transcription": transcription[0]}
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))
-
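For reference, a minimal client sketch for the endpoint this commit keeps: the surviving handler reads the raw request body as audio bytes and returns a JSON transcription. The route path "/transcribe/" and the local port are assumptions; the diff only shows the handler name transcribe_audio and the removed "/transcribe-large/" route.

# Hypothetical client for the remaining endpoint. The path "/transcribe/"
# and port 7860 are assumptions; neither appears explicitly in the diff.
import requests

with open("sample.wav", "rb") as f:
    audio_bytes = f.read()  # raw bytes, matching `await request.body()` on the server

resp = requests.post("http://localhost:7860/transcribe/", data=audio_bytes)
resp.raise_for_status()  # the handler maps failures to HTTP 500
print(resp.json()["transcription"])

Sending the file as a raw body (rather than multipart form data) matches how the handler consumes the request: it wraps the bytes in io.BytesIO and decodes them with librosa at 16 kHz.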