antfraia committed
Commit 35c0394 · 1 Parent(s): 46bbfbc

Update app.py

Files changed (1)
  1. app.py +6 -27
app.py CHANGED
@@ -1,37 +1,16 @@
 import gradio as gr
-import requests
 
-# Updated API Endpoint
-API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v2"
-HEADERS = {"Authorization": "Bearer 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"}
+# Load the model without launching the interface
+loaded_model = gr.Interface.load("models/openai/whisper-large-v2", allow_launch=False)
 
-def transcribe_audio(audio_path: str) -> str:
-    # Read audio file
-    with open(audio_path, "rb") as f:
-        audio_data = f.read()
-
-    # Make API request to OpenAI Whisper v2 API
-    try:
-        response = requests.post(API_URL, headers=HEADERS, data=audio_data)
-        response.raise_for_status()  # Raises an HTTPError if the HTTP request returned an unsuccessful status code
-    except requests.RequestException as error:
-        print(f"API request failed: {error}")
-        return "Failed to transcribe. Please try again."
-
-    result = response.json()
-
-    # Print the full JSON response for troubleshooting
-    print(result)
-
-    # Extract the transcribed text from the response
-    # TODO: Replace 'your-key-here' with the actual key used by the API's response
-    transcribed_text = result.get("your-key-here", "Failed to retrieve transcription.")
-
-    return transcribed_text
+def transcribe_audio(audio_file):
-    # Use the loaded model to transcribe the audio
+    return loaded_model(audio_file)
 
 audio_input = gr.inputs.Audio(type="filepath")
 text_output = gr.outputs.Textbox()
 
+# Setup the custom Gradio interface with your configurations
 iface = gr.Interface(
     fn=transcribe_audio,
     inputs=audio_input,
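
Note on the new version: gr.Interface.load has since been deprecated in favour of gr.load, allow_launch is not a documented argument of it, and the gr.inputs / gr.outputs namespaces were removed in Gradio 4. Below is a minimal sketch of an equivalent app on a recent Gradio and huggingface_hub stack; the use of InferenceClient and the HF_TOKEN environment variable are assumptions for illustration, not part of this commit.

import os

import gradio as gr
from huggingface_hub import InferenceClient

# Read the token from the environment (e.g. a Space secret) instead of
# hard-coding it; HF_TOKEN is an assumed variable name, not from this commit.
client = InferenceClient(model="openai/whisper-large-v2", token=os.environ.get("HF_TOKEN"))

def transcribe_audio(audio_file):
    if audio_file is None:
        return "No audio provided."
    # On recent huggingface_hub releases this returns an object whose .text
    # attribute holds the transcription.
    result = client.automatic_speech_recognition(audio_file)
    return result.text

# gr.Audio / gr.Textbox replace the removed gr.inputs / gr.outputs components.
iface = gr.Interface(
    fn=transcribe_audio,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.Textbox(),
)

if __name__ == "__main__":
    iface.launch()

Routing the request through the hosted inference endpoint this way keeps the Space free of a hard-coded organization token like the one in the removed code.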