import os
import spaces
import torch
import gradio as gr
from transformers import pipeline

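# Configuration: Whisper checkpoint, transcription batch size, and an upload size cap
# (FILE_LIMIT_MB is declared here but not enforced elsewhere in this script).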
MODEL_NAME = "openai/whisper-large-v3"
BATCH_SIZE = 8
FILE_LIMIT_MB = 1000

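# Run on the first CUDA device when one is available, otherwise fall back to CPU.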
device = 0 if torch.cuda.is_available() else "cpu"

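# Whisper ASR pipeline with 30-second chunking for long-form audio.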
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

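# Send the transcript plus the user's question to Meta-Llama-3.1-70B-Instruct via the
# Hugging Face Inference API (requires the HUGGINGFACEHUB_API_TOKEN environment variable).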
def respond_to_question_llama(transcript, question):
    from huggingface_hub import InferenceClient

    client = InferenceClient(
        "meta-llama/Meta-Llama-3.1-70B-Instruct",
        token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
    )

    response = client.chat_completion(
        messages=[{"role": "user", "content": f"Transcript: {transcript}\n\nUser: {question}"}],
        max_tokens=4096,
    ).choices[0].message.content

    return response

@spaces.GPU
def audio_transcribe(inputs):
    # Transcribe the uploaded audio and reveal the question/answer widgets.
    if inputs is None:
        raise gr.Error("No audio file submitted! Please upload an audio file before submitting your request.")

    text = pipe(inputs, batch_size=BATCH_SIZE, return_timestamps=True)["text"]
    # Outputs: transcription text, then visibility updates for ask_question, submit_question and response_output.
    return [text, gr.Textbox(visible=True), gr.Button(visible=True), gr.Textbox(visible=True)]

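# Hide the question, response, and submit widgets again when the form is cleared.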
def hidden_ask_question():
    return [gr.Textbox(visible=False), gr.Textbox(visible=False), gr.Button(visible=False)]

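# Gradio UI: upload audio, transcribe it with Whisper, then optionally ask Llama questions about the transcript.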
with gr.Blocks() as transcriberUI:
    gr.Markdown(
        """
        # Hello!
        Click the button below to select the audio file you want to chat with!
        Available 24x7. Running on ZeroGPU with openai/whisper-large-v3
        """
    )

    inp = gr.Audio(sources="upload", type="filepath", label="Audio file")
    transcribe = gr.Textbox(label="Transcription", show_label=True, show_copy_button=True)
    ask_question = gr.Textbox(label="Ask a question", visible=False)
    response_output = gr.Textbox(label="Response", visible=False)
    submit_question = gr.Button("Submit question", visible=False)
    submit_button = gr.Button("Transcribe to Chat", variant='primary', size='sm')
    clear_button = gr.ClearButton([transcribe, response_output, inp, ask_question])

    def ask_question_callback(transcription, question):
        # Only query the LLM when the user actually typed a question.
        if question:
            response = respond_to_question_llama(transcription, question)
        else:
            response = "No question asked"

        return response

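    # Event wiring: transcribe on click, answer the question with Llama, and hide the extra widgets on clear.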
    #inp.upload(audio_transcribe, inputs=inp, outputs=[transcribe,ask_question,submit_question, response_output])
    submit_button.click(audio_transcribe, inputs=inp, outputs=[transcribe, ask_question, submit_question, response_output])
    submit_question.click(ask_question_callback, outputs=[response_output], inputs=[transcribe, ask_question])
    clear_button.click(hidden_ask_question, outputs=[ask_question, response_output, submit_question])


transcriberUI.queue().launch()