# chatBot/app.py
import tempfile

import gradio as gr
from gtts import gTTS
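# Assumed runtime dependencies (a requirements.txt for this Space would likely
# list at least): gradio, gTTS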
# Load the two code models from the Hugging Face Hub; gr.load returns an app
# that can also be called like a function on a prompt string.
codellama_model = gr.load("models/meta-llama/CodeLlama-7b-Python-hf")
deepseek_model = gr.load("models/deepseek-ai/deepseek-coder-1.3b-instruct")
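# Note: meta-llama/CodeLlama-7b-Python-hf is a gated model on the Hub, so
# loading it through the hosted inference API may require a Hugging Face
# access token (for example the HF_TOKEN secret on a Space).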
# Convert a text response to speech with gTTS and return the path to an mp3
# file, which Gradio's audio output can play directly.
def text_to_speech(text):
    tts = gTTS(text=text, lang="en")
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as fp:
        tts.write_to_fp(fp)
        return fp.name

# Generate a text response with the CodeLlama model
def process_text_codellama(input_text):
    return codellama_model(input_text)

# Generate a response with the CodeLlama model and return it as spoken audio
def process_speech_codellama(input_text):
    response = codellama_model(input_text)
    return text_to_speech(response)

# Generate a text response with the deepseek-coder model
def process_text_deepseek(input_text):
    return deepseek_model(input_text)

# Generate a response with the deepseek-coder model and return it as spoken audio
def process_speech_deepseek(input_text):
    response = deepseek_model(input_text)
    return text_to_speech(response)
# Single prediction function wired to the interface: pick the model from the
# dropdown and return the text response plus, when requested, a spoken version.
def generate(input_text, speak_response, model_name):
    if model_name == "DeepSeek Coder":
        response = process_text_deepseek(input_text)
        audio = process_speech_deepseek(input_text) if speak_response else None
    else:
        response = process_text_codellama(input_text)
        audio = process_speech_codellama(input_text) if speak_response else None
    return response, audio

# Text input, a checkbox for spoken output, and a dropdown for model selection
inputs = [
    gr.Textbox(label="Type your code", placeholder="Type your code here...", lines=10),
    gr.Checkbox(label="Speak the response", value=False),
    gr.Dropdown(choices=["CodeLlama", "DeepSeek Coder"], value="CodeLlama", label="Model"),
]

# Launch the interface with live=True
gr.Interface(
    fn=generate,
    inputs=inputs,
    outputs=[gr.Textbox(label="Response"), gr.Audio(label="Spoken response")],
    live=True,
).launch()