import os
import openai
import spaces
import torch
import gradio as gr
from transformers import pipeline

MODEL_NAME = "openai/whisper-large-v3"
BATCH_SIZE = 8
FILE_LIMIT_MB = 1000

device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

@spaces.GPU
def audio_transcribe(inputs, task="transcribe"):
    # task defaults to "transcribe" because the upload event below only
    # passes the file path; pass task="translate" for English translation.
    if inputs is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
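
    # A minimal guard using the otherwise-unused FILE_LIMIT_MB constant
    # (a sketch; assumes `inputs` is a local file path, which holds for the
    # gr.File(type="filepath") component below).
    file_size_mb = os.path.getsize(inputs) / (1024 * 1024)
    if file_size_mb > FILE_LIMIT_MB:
        raise gr.Error(f"File is too large ({file_size_mb:.1f} MB; the limit is {FILE_LIMIT_MB} MB).")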

    text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
    # Component visibility cannot be mutated from inside a handler
    # (`ask_question.visible = True` has no effect); return a gr.update()
    # mapped to the ask_question output instead.
    return text, gr.update(visible=True)

def respond_to_question(transcript, question):
    # Answer the user's question about the transcript via the OpenAI API.
    # No GPU work happens here, so the @spaces.GPU decorator is unnecessary.
    openai.api_key = os.environ["OPENAI_API_KEY"]
    response = openai.Completion.create(
        engine="text-davinci-002",
        prompt=f"Transcript: {transcript}\n\nUser: {question}\n\nAI:",
        temperature=0.7,
        max_tokens=60,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    ).choices[0].text
    return response
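
# Note: the Completion call above targets the legacy (pre-1.0) openai
# package. With openai>=1.0 a roughly equivalent sketch (the model name here
# is an assumption, not from the original) would be:
#
#   from openai import OpenAI
#   client = OpenAI()  # reads OPENAI_API_KEY from the environment
#   chat = client.chat.completions.create(
#       model="gpt-3.5-turbo",
#       messages=[{"role": "user",
#                  "content": f"Transcript: {transcript}\n\nUser: {question}\n\nAI:"}],
#       temperature=0.7,
#       max_tokens=60,
#   )
#   response = chat.choices[0].message.content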
    
def ask_question_callback(transcript, question):
    # Blocks event handlers receive component values as arguments and report
    # results through their outputs; reading or assigning `.value` on
    # components at runtime has no effect, and visibility is toggled with
    # gr.update().
    if question:
        response = respond_to_question(transcript, question)
        return gr.update(value=response, visible=True)
    return gr.update(value="No question asked", visible=True)


with gr.Blocks() as transcriberUI:
    gr.Markdown(
    """
    # Hello!
    Click the button below to select the audio file to be transcribed.
    Demo environment available 24x7, running on ZeroGPU with openai/whisper-large-v3.
    """)
    inp = gr.File(label="Audio file", show_label=True, type="filepath", file_count="single", file_types=[".mp3"])
    transcribe = gr.Textbox(label="Transcription", show_label=True, show_copy_button=True)
    ask_question = gr.Textbox(label="Ask a question", visible=False)
    response_output = gr.Textbox(label="Response", visible=False)
    submit_question = gr.Button("Submit question")

    # audio_transcribe returns (text, update), so the hidden question box is
    # revealed once a transcript is ready.
    inp.upload(audio_transcribe, inputs=inp, outputs=[transcribe, ask_question])
    submit_question.click(ask_question_callback, inputs=[transcribe, ask_question], outputs=[response_output])


transcriberUI.queue().launch()
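
# To run outside the Space (a sketch; the dependency list is inferred from
# the imports, and mp3 decoding additionally needs ffmpeg on the PATH):
#   pip install gradio spaces torch transformers "openai<1.0"
#   python app.py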