File size: 1,132 Bytes
2ed7223
 
2ba8923
c621812
039f770
011a958
dc03737
2ba8923
dc03737
2ba8923
 
 
 
 
 
 
 
 
 
 
 
 
c621812
2ba8923
dc03737
2ba8923
2ed7223
ab07d9e
2ba8923
2ed7223
 
 
2ba8923
 
 
c621812
2ba8923
2ed7223
 
c621812
011a958
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import transformers
import gradio as gr
import librosa
import torch
import spaces

# Lazily-initialized pipeline cache: loading 'sarvamai/shuka_v1' is a
# multi-gigabyte model download/load, so it must happen at most once per
# process, not once per microphone callback (the Interface below uses
# live=True, so this function fires repeatedly).
_PIPELINE = None


def _get_pipeline():
    """Return the shared shuka_v1 pipeline, creating it on first use."""
    global _PIPELINE
    if _PIPELINE is None:
        _PIPELINE = transformers.pipeline(
            model='sarvamai/shuka_v1',
            trust_remote_code=True,
            device=0,  # first CUDA device (provided by the @spaces.GPU context)
            torch_dtype=torch.bfloat16,
        )
    return _PIPELINE


@spaces.GPU(duration=120)
def transcribe_and_respond(audio_file):
    """Run the shuka_v1 speech model on a recorded audio file.

    Args:
        audio_file: Filesystem path to the recorded audio clip
            (Gradio passes a filepath because the input uses type="filepath").

    Returns:
        The raw pipeline output on success, or an "Error: ..." string on
        failure (returned rather than raised so the Gradio UI shows it).
    """
    try:
        pipe = _get_pipeline()

        # shuka_v1 expects 16 kHz audio; librosa resamples on load.
        audio, sr = librosa.load(audio_file, sr=16000)

        # Chat-style turns; the empty user turn is filled by the audio input.
        turns = [
            {'role': 'system', 'content': 'Respond naturally and informatively.'},
            {'role': 'user', 'content': ''}
        ]

        output = pipe({'audio': audio, 'turns': turns, 'sampling_rate': sr}, max_new_tokens=512)

        return output

    except Exception as e:
        # Broad catch is deliberate: surface any failure as text in the UI
        # instead of crashing the live Gradio loop.
        return f"Error: {str(e)}"

# Wire the model function into a Gradio UI. live=True means the callback
# fires continuously as audio streams in, so the function above must be
# cheap to invoke repeatedly.
iface = gr.Interface(
    fn=transcribe_and_respond,
    inputs=gr.Audio(sources="microphone", type="filepath"),  # mic input, delivered as a temp-file path
    outputs="text",  # model output rendered as plain text
    title="Live Transcription and Response",
    description="Speak into your microphone, and the model will respond naturally and informatively.",
    live=True  # re-run automatically on new audio instead of waiting for a Submit click
)

# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    iface.launch()