# Hugging Face Space (status banner "Spaces: Sleeping" scraped with the page)
import gradio as gr
from gtts import gTTS
from io import BytesIO
import IPython.display as ipd

# Load model
# Load the two hosted code models from the Hugging Face Hub as callable
# inference clients.
# NOTE(review): `gr.Interface.load` is deprecated in modern Gradio releases in
# favor of `gr.load` — confirm against the installed Gradio version.
codellama_model = gr.Interface.load("models/meta-llama/CodeLlama-7b-Python-hf")
deepseek_model = gr.Interface.load("models/deepseek-ai/deepseek-coder-1.3b-instruct")
def process_text_codellama(input_text):
    """Return the CodeLlama model's text completion for *input_text*."""
    return codellama_model.predict(input_text)
def process_speech_codellama(audio):
    """Run the CodeLlama model on *audio* and speak the response via gTTS.

    The model's text reply is synthesized to MP3 in memory and returned as
    an IPython audio widget set to autoplay.

    NOTE(review): a Gradio "audio" output component expects a filepath or a
    (sample_rate, numpy_array) tuple, not an ``IPython.display.Audio``
    object — verify this renders in the target environment.
    """
    response = codellama_model.predict(audio)
    # Synthesize the reply to an in-memory MP3 buffer (no temp file).
    tts = gTTS(text=response, lang='en')
    fp = BytesIO()
    tts.write_to_fp(fp)
    fp.seek(0)
    return ipd.Audio(fp.read(), autoplay=True)
def process_text_deepseek(input_text):
    """Return the DeepSeek-Coder model's text completion for *input_text*."""
    return deepseek_model.predict(input_text)
def process_speech_deepseek(audio):
    """Run the DeepSeek-Coder model on *audio* and speak the response via gTTS.

    The model's text reply is synthesized to MP3 in memory and returned as
    an IPython audio widget set to autoplay.

    NOTE(review): a Gradio "audio" output component expects a filepath or a
    (sample_rate, numpy_array) tuple, not an ``IPython.display.Audio``
    object — verify this renders in the target environment.
    """
    response = deepseek_model.predict(audio)
    # Synthesize the reply to an in-memory MP3 buffer (no temp file).
    tts = gTTS(text=response, lang='en')
    fp = BytesIO()
    tts.write_to_fp(fp)
    fp.seek(0)
    return ipd.Audio(fp.read(), autoplay=True)
def main(input_text, use_deepseek=False):
    """Dispatch the prompt to the selected model and return (text, audio).

    Gradio calls ``fn`` with one positional argument per input component,
    so the text box and the checkbox arrive as two separate parameters.
    The original single-parameter version both mismatched that calling
    convention and indexed the prompt string itself (``input_text[1]`` is
    the second *character*, not the checkbox).

    :param input_text: prompt text from the "text" input component.
    :param use_deepseek: checkbox value — True routes to DeepSeek-Coder,
        False (default) routes to CodeLlama.
    :return: tuple of (model text reply, spoken reply audio).
    """
    if use_deepseek:
        return process_text_deepseek(input_text), process_speech_deepseek(input_text)
    return process_text_codellama(input_text), process_speech_codellama(input_text)
# Wire up the UI: a prompt text box plus a model-selector checkbox in,
# the model's text reply and a spoken rendition out. `live=True` re-runs
# the function as the inputs change.
gr.Interface(
    fn=main,
    # The checkbox selects which model answers (DeepSeek when checked),
    # not voice input — the original inline comment was misleading.
    inputs=["text", "checkbox"],
    outputs=["text", "audio"],
    live=True,
).launch()