# Gradio demo: text and text-to-speech chat with CodeLlama and DeepSeek Coder.
import tempfile
from io import BytesIO

import gradio as gr
import IPython.display as ipd
from gtts import gTTS
# Load the two remote code-generation models from the Hugging Face Hub.
# NOTE(review): `gr.Interface.load` is deprecated in newer Gradio releases in
# favor of `gr.load` — confirm against the pinned gradio version before changing.
codellama_model = gr.Interface.load("models/meta-llama/CodeLlama-7b-Python-hf")
deepseek_model = gr.Interface.load("models/deepseek-ai/deepseek-coder-1.3b-instruct")
def process_text_codellama(input_text):
    """Send a text prompt to the CodeLlama model and return its text reply."""
    reply = codellama_model.predict(input_text)
    return reply
def process_speech_codellama(audio):
    """Query the CodeLlama model and return its reply as a spoken MP3 file.

    The prompt (``audio``) is forwarded verbatim to the model; the text reply
    is synthesized to speech with gTTS.

    Returns:
        str: Path to a temporary MP3 file containing the spoken reply.
    """
    response = codellama_model.predict(audio)
    tts = gTTS(text=response, lang='en')
    # BUG FIX: Gradio's "audio" output cannot render an IPython.display.Audio
    # object (that only works inside notebooks). It accepts a filepath, so
    # write the MP3 to a temp file and return its name.
    tmp = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
    try:
        tts.write_to_fp(tmp)
    finally:
        tmp.close()
    return tmp.name
def process_text_deepseek(input_text):
    """Send a text prompt to the DeepSeek Coder model and return its text reply."""
    prompt = input_text
    return deepseek_model.predict(prompt)
def process_speech_deepseek(audio):
    """Query the DeepSeek Coder model and return its reply as a spoken MP3 file.

    The prompt (``audio``) is forwarded verbatim to the model; the text reply
    is synthesized to speech with gTTS.

    Returns:
        str: Path to a temporary MP3 file containing the spoken reply.
    """
    response = deepseek_model.predict(audio)
    tts = gTTS(text=response, lang='en')
    # BUG FIX: Gradio's "audio" output cannot render an IPython.display.Audio
    # object (that only works inside notebooks). It accepts a filepath, so
    # write the MP3 to a temp file and return its name.
    tmp = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
    try:
        tts.write_to_fp(tmp)
    finally:
        tmp.close()
    return tmp.name
# BUG FIX: gr.Interface takes a single callable for `fn`; the original passed a
# nested list of functions (which raises at startup) and declared
# outputs=["text", "audio"] for functions that each return one value.
# Expose each handler in its own correctly-typed tab instead.
gr.TabbedInterface(
    [
        gr.Interface(fn=process_text_codellama, inputs="text", outputs="text", live=True),
        gr.Interface(fn=process_speech_codellama, inputs="text", outputs="audio", live=True),
        gr.Interface(fn=process_text_deepseek, inputs="text", outputs="text", live=True),
        gr.Interface(fn=process_speech_deepseek, inputs="text", outputs="audio", live=True),
    ],
    tab_names=[
        "CodeLlama (text)",
        "CodeLlama (speech)",
        "DeepSeek (text)",
        "DeepSeek (speech)",
    ],
).launch()