avfranco committed on
Commit
5195d28
1 Parent(s): 413af2b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -29
app.py CHANGED
@@ -18,12 +18,39 @@ pipe = pipeline(
18
  device=device,
19
  )
20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  with gr.Blocks() as transcriberUI:
22
  gr.Markdown(
23
  """
24
  # Ola!
25
- Clicar no botao abaixo para selecionar o Audio a ser transcrito!
26
- Ambiente Demo disponivel 24x7. Running on ZeroGPU with openai/whisper-large-v3
27
  """
28
  )
29
  inp = gr.File(label="Arquivo de Audio", show_label=True, type="filepath", file_count="single", file_types=["mp3"])
@@ -32,33 +59,6 @@ with gr.Blocks() as transcriberUI:
32
  response_output = gr.Textbox(label="Response", visible=True)
33
  submit_question = gr.Button("Submit question", visible=True)
34
 
35
- @spaces.GPU
36
- def respond_to_question(transcript, question):
37
- # Optionally, use OpenAI API to generate a response to the user's question
38
- # based on the transcript
39
- response = ""
40
- # Replace this with your OpenAI API key
41
- openai.api_key = os.environ["OPENAI_API_KEY"]
42
- response = openai.Completion.create(
43
- engine="gpt-4o-mini",
44
- prompt=f"Transcript: {transcript}\n\nUser: {question}\n\nAI:",
45
- temperature=0.3,
46
- max_tokens=60,
47
- top_p=1,
48
- frequency_penalty=0,
49
- presence_penalty=0
50
- ).choices[0].text
51
- return response
52
-
53
- @spaces.GPU
54
- def audio_transcribe(inputs):
55
- if inputs is None:
56
- raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
57
-
58
- text = pipe(inputs, batch_size=BATCH_SIZE, return_timestamps=True)["text"]
59
-
60
- return text
61
-
62
  def ask_question_callback(transcription,question):
63
  if ask_question:
64
  response = respond_to_question(transcription, question)
 
18
  device=device,
19
  )
20
 
21
+ @spaces.GPU
22
+ def respond_to_question(transcript, question):
23
+ # Optionally, use OpenAI API to generate a response to the user's question
24
+ # based on the transcript
25
+ response = ""
26
+ # Replace this with your OpenAI API key
27
+ openai.api_key = os.environ["OPENAI_API_KEY"]
28
+ response = openai.Completion.create(
29
+ engine="gpt-4o-mini",
30
+ prompt=f"Transcript: {transcript}\n\nUser: {question}\n\nAI:",
31
+ temperature=0.3,
32
+ max_tokens=60,
33
+ top_p=1,
34
+ frequency_penalty=0,
35
+ presence_penalty=0
36
+ ).choices[0].text
37
+ return response
38
+
39
+ @spaces.GPU
40
+ def audio_transcribe(inputs):
41
+ if inputs is None:
42
+ raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
43
+
44
+ text = pipe(inputs, batch_size=BATCH_SIZE, return_timestamps=True)["text"]
45
+
46
+ return text
47
+
48
  with gr.Blocks() as transcriberUI:
49
  gr.Markdown(
50
  """
51
  # Ola!
52
+ Clique no botao abaixo para selecionar o Audio que deseja conversar!
53
+ Ambiente disponivel 24x7. Running on ZeroGPU with openai/whisper-large-v3
54
  """
55
  )
56
  inp = gr.File(label="Arquivo de Audio", show_label=True, type="filepath", file_count="single", file_types=["mp3"])
 
59
  response_output = gr.Textbox(label="Response", visible=True)
60
  submit_question = gr.Button("Submit question", visible=True)
61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  def ask_question_callback(transcription,question):
63
  if ask_question:
64
  response = respond_to_question(transcription, question)