Update app.py
app.py
CHANGED
@@ -32,6 +32,7 @@ whisper_api = InferenceClient("openai/whisper-small", token=huggingface_token)
 
 print(f"ACCOUNT_ID: {ACCOUNT_ID}")
 print(f"CLOUDFLARE_AUTH_TOKEN: {API_TOKEN[:5]}..." if API_TOKEN else "Not set")
+print(dir(whisper_api))
 
 MODELS = [
     "mistralai/Mistral-7B-Instruct-v0.3",
@@ -621,11 +622,16 @@ def transcribe(audio_file):
     with open(audio_file, "rb") as f:
         audio_data = f.read()
 
-    # …
-    …
+    # Create a file-like object from the audio data
+    audio_file = io.BytesIO(audio_data)
+    audio_file.name = "audio.wav"  # The name is important for the API to recognize the file type
 
-    # …
+    # Use the automatic_speech_recognition method
+    response = whisper_api.automatic_speech_recognition(audio=audio_file)
+
+    # The response should be a dictionary with a 'text' key
     return response["text"] if isinstance(response, dict) and "text" in response else str(response)
+Make sure your InferenceClient is initialized correctly:
 
 def vote(data: gr.LikeData):
     if data.liked:
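
For reference, a minimal standalone sketch of the transcription call this hunk introduces, assuming huggingface_hub's InferenceClient. The HF_TOKEN placeholder and the attribute fallback are illustrative, not the Space's exact code; automatic_speech_recognition also accepts raw bytes or a file path directly, so the BytesIO wrapper in the diff is optional.

from huggingface_hub import InferenceClient

whisper_api = InferenceClient("openai/whisper-small", token="HF_TOKEN")  # placeholder token

def transcribe(audio_file: str) -> str:
    # Read the recorded audio from disk.
    with open(audio_file, "rb") as f:
        audio_data = f.read()

    # Send the raw bytes to the hosted Whisper model.
    response = whisper_api.automatic_speech_recognition(audio=audio_data)

    # Older huggingface_hub versions return a dict with a "text" key;
    # newer ones return an output object with a .text attribute.
    if isinstance(response, dict) and "text" in response:
        return response["text"]
    return getattr(response, "text", str(response))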
|