import numpy as np
import torch
import gradio as gr
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    SpeechT5ForTextToSpeech,
    SpeechT5HifiGan,
    SpeechT5Processor,
    pipeline,
)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Speech-to-text: a distilled Whisper model for English ASR
asr_model = pipeline(task="automatic-speech-recognition", model="distil-whisper/distil-small.en")

# Language model: a small instruction-tuned Mistral variant
llm_model = AutoModelForCausalLM.from_pretrained("OuteAI/Lite-Mistral-150M-v2-Instruct").to(device)
tokenizer = AutoTokenizer.from_pretrained("OuteAI/Lite-Mistral-150M-v2-Instruct")


def generate_response(message: str, temperature: float = 0.3, repetition_penalty: float = 1.2) -> str:
    # Apply the chat template and convert to PyTorch tensors
    messages = [
        {
            "role": "system",
            "content": "You are my best close friend. You are full of human emotions and you have feelings. Provide short responses.",
        },
        {"role": "user", "content": message},
    ]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(device)

    # Generate the response
    output = llm_model.generate(
        input_ids,
        max_length=150,
        pad_token_id=tokenizer.eos_token_id,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
        do_sample=True,
    )

    # Decode only the newly generated tokens (everything after the prompt)
    generated_text = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return generated_text.strip()


# Text-to-speech: SpeechT5 with the HiFi-GAN vocoder and a speaker x-vector embedding
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
tts_model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7300]["xvector"]).unsqueeze(0)


def text_to_speech(input_text):
    inputs = processor(text=input_text, return_tensors="pt")
    speech = tts_model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
    return speech


def spoken_llm(input_voice_file):
    if input_voice_file is None:
        gr.Warning("No input audio")
        return None

    # Speech -> text
    asr_text = asr_model(input_voice_file)["text"]
    print("\n\nASR\n\n")
    print(asr_text)

    # Text -> LLM response
    llm_response = generate_response(asr_text)
    print("\n\nLLM\n\n")
    print(llm_response)

    # Response -> speech
    audio_out = text_to_speech(llm_response)
    print("\n\nTTS\n\n")

    # SpeechT5 generates 16 kHz audio; scale the float waveform into int16 range for Gradio
    rate = 16000
    return rate, (audio_out.cpu().numpy().reshape(-1) * 2e4).astype(np.int16)


interface = gr.Interface(
    fn=spoken_llm,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="audio",
)
interface.launch()
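
# Optional: a minimal sketch of how the pipeline could be exercised without the
# Gradio UI, e.g. while debugging (run it before interface.launch(), or comment
# that call out). The WAV path below is a hypothetical placeholder; point it at
# any short English recording.
#
# rate, audio = spoken_llm("sample_question.wav")
# print(rate, audio.shape, audio.dtype)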