|
import os |
|
import re |
|
import gradio as gr |
|
import edge_tts |
|
import asyncio |
|
import time |
|
import tempfile |
|
from huggingface_hub import InferenceClient |
|
|
|
class JarvisModels:
    """Wraps Hugging Face inference clients and turns model replies into speech.

    Each ``generate_modelN`` method is an async generator: it streams a text
    completion from its client, synthesizes the full reply with edge-tts, and
    yields the path of the resulting audio file.

    NOTE(review): ``system_instructions1/2/3`` are module-level globals
    expected to be defined elsewhere in this file -- confirm they exist.
    """

    def __init__(self):
        self.client1 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
        # NOTE(review): client2 and client3 target the same model; presumably
        # intentional (one client per UI slot) -- confirm.
        self.client2 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
        self.client3 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")

    async def _speak(self, text):
        """Synthesize *text* with edge-tts and return the temp-file path.

        The file is created with ``delete=False`` so it survives for the
        Gradio audio component to serve; the handle is closed before edge-tts
        writes to the path.
        """
        communicate = edge_tts.Communicate(text)
        # NOTE(review): edge-tts emits MP3 data by default; the ".wav" suffix
        # is kept for compatibility with the existing UI wiring -- confirm.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
            tmp_path = tmp_file.name
        await communicate.save(tmp_path)
        return tmp_path

    async def _generate(self, client, instructions, prompt, answer_tag, **gen_kwargs):
        """Stream a completion and yield the path of its synthesized audio.

        Parameters:
            client: InferenceClient to query.
            instructions: system prompt prepended to the user prompt.
            prompt: the user's input text.
            answer_tag: marker appended after the prompt (e.g. "[JARVIS]").
            **gen_kwargs: per-model generation overrides (max_new_tokens, seed).
        """
        generate_kwargs = dict(
            temperature=0.6,
            top_p=0.95,
            repetition_penalty=1.0,  # fix: API expects a float, not the int 1
            do_sample=True,
            **gen_kwargs,
        )
        formatted_prompt = instructions + prompt + answer_tag
        # fix: return_full_text=False -- only the model's completion should be
        # spoken; the original True would read the prompt/instructions aloud.
        stream = client.text_generation(
            formatted_prompt, **generate_kwargs,
            stream=True, details=True, return_full_text=False)
        output = "".join(response.token.text for response in stream)
        yield await self._speak(output)

    async def generate_model1(self, prompt):
        """Short JARVIS-style reply (256 tokens, fixed seed) as an audio path."""
        async for path in self._generate(
                self.client1, system_instructions1, prompt, "[JARVIS]",
                max_new_tokens=256, seed=42):
            yield path

    async def generate_model2(self, prompt):
        """Medium-length assistant reply (512 tokens) as an audio path."""
        async for path in self._generate(
                self.client2, system_instructions2, prompt, "[ASSISTANT]",
                max_new_tokens=512):
            yield path

    async def generate_model3(self, prompt):
        """Long assistant reply (2048 tokens) as an audio path."""
        async for path in self._generate(
                self.client3, system_instructions3, prompt, "[ASSISTANT]",
                max_new_tokens=2048):
            yield path
|
|
|
class JarvisApp:
    """Gradio front-end wiring the JARVIS models into a simple prompt/audio UI."""

    def __init__(self):
        # Holds the inference clients and TTS pipeline used by the UI.
        self.models = JarvisModels()

    def launch_app(self):
        """Build the Gradio interface and start serving it.

        BUG FIX: the original guarded ``demo.queue().launch()`` with
        ``if __name__ == "__main__":`` *inside* this method, so calling
        ``launch_app()`` from an importing module silently built the UI
        without ever launching it.  The launch now runs unconditionally;
        the script-level main guard already controls when this method is
        invoked.
        """
        # NOTE(review): DESCRIPTION and MORE are module-level globals expected
        # to be defined elsewhere in this file -- confirm they exist.
        with gr.Blocks(css="style.css") as demo:
            gr.Markdown(DESCRIPTION)
            with gr.Row():
                user_input = gr.Textbox(label="Prompt", value="What is Wikipedia")
                input_text = gr.Textbox(label="Input Text", elem_id="important")
                output_audio = gr.Audio(
                    label="JARVIS",
                    type="filepath",
                    interactive=False,
                    autoplay=True,
                    elem_classes="audio",
                )
            with gr.Row():
                translate_btn = gr.Button("Response")
                # Streams model-1's synthesized reply into the audio widget.
                translate_btn.click(
                    fn=self.models.generate_model1,
                    inputs=user_input,
                    outputs=output_audio,
                    api_name="translate",
                )
            gr.Markdown(MORE)

        # Bounded request queue keeps the Space responsive under load.
        demo.queue(max_size=200).launch()
|
|
|
if __name__ == "__main__":
    # Script entry point: construct the app and start the Gradio server.
    jarvis_app = JarvisApp()
    jarvis_app.launch_app()
|
|