|
import gradio as gr |
|
from huggingface_hub import InferenceClient |
|
|
|
|
|
# Hugging Face Inference API client backing the chat; model may change without notice.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
|
|
|
|
|
def generate_text(messages):
    """Stream a chat completion for *messages*, yielding the text accumulated so far.

    Args:
        messages: list of ``{"role": ..., "content": ...}`` dicts sent to the
            chat-completion endpoint.

    Yields:
        str: the generated text after each streamed token. After the stream
        ends, one extra value is yielded with a trailing comma appended when
        the text does not already end in punctuation (presumably to give the
        downstream TTS a natural pause — TODO confirm).
    """
    print("generate_text")
    print(messages)

    generated = ""
    for token in client.chat_completion(messages, max_tokens=100, stream=True):
        # Bug fix: delta.content can be None on the final (stop) chunk;
        # concatenating None to str would raise TypeError.
        content = token.choices[0].delta.content or ""
        generated += content
        yield generated

    # Bug fix: guard against an empty stream before indexing the last char
    # (the original raised IndexError on `""[-1]`).
    if generated and generated[-1] not in [",", ".", "!", "?"]:
        yield generated + ","
|
|
|
def call_generate_text(message, history):
    """Gradio chat handler: append the user's turn and stream the assistant reply.

    Yields ``("", updated_history)`` tuples so the input textbox is cleared
    while the chatbot displays the partially generated answer. On
    RuntimeError the original history is yielded back unchanged.
    """
    print(message)
    print(history)

    conversation = history + [{"role": "user", "content": message}]
    try:
        # A single dict is mutated in place; every yield wraps it in a fresh
        # list so Gradio sees a new history value each update.
        reply = {"role": "assistant", "content": ""}
        for partial_text in generate_text(conversation):
            reply["content"] = partial_text
            yield "", conversation + [reply]
    except RuntimeError as e:
        print(f"An unexpected error occurred: {e}")
        yield "", history
|
|
|
# Injected into the page <head>: loads onnxruntime-web (WebGPU build), then
# configures the Matcha-TTS ES module — the ONNX voice model is served through
# Gradio's /file= route (must be listed in launch(allowed_paths=...)), and the
# input length is capped to reduce GPU hangs (see the inline JS comment).
head = '''
<script src="https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.webgpu.min.js" ></script>
<script type="module">
import { matccha_tts_onnx_env ,matcha_tts_raw_env} from "https://akjava.github.io/Matcha-TTS-Japanese/js-esm/v002-20240924/matcha_tts_onnx_en.js";
matccha_tts_onnx_env.matcha_tts_model_path = "/file=models/ljspeech_sim.onnx"
matcha_tts_raw_env.maxInputLength = 140 //if Device removed reason: DXGI_ERROR_DEVICE_HUNG happend reduce to HALF
</script>
'''
|
|
|
# Assemble the chat UI. The page <head> (built above) loads the browser-side
# Matcha-TTS runtime; the chatbot's JS change-hook speaks every new message.
with gr.Blocks(title="LLM with TTS", head=head) as demo:
    # Fix: user-facing typos corrected ("headone" -> "headphones",
    # "aduio" -> "audio", "talke a long loading time" -> "takes a long time to load").
    gr.Markdown("""
## Warnings

- Don't listen at a large volume or with headphones until you confirm your machine can play audio

- Sometimes the GPU crashes because of maxInputLength; if you crash, let me know with your gpu-info

## Notice

- LLM is unstable: The inference client used in this demo exhibits inconsistent performance. While it can provide responses in milliseconds, it sometimes becomes unresponsive and times out.

- TTS takes a long time to load: Please be patient, the first response may have a delay of up to over 40 seconds while loading.

""")

    gr.Markdown("**Mistral-7B-Instruct-v0.3/LJSpeech** - LLM and TTS models will change without notice.")

    # Client-side hook run on every chatbot change: hand the new messages to
    # the TTS helper, then keep the chatbot container and its inner log
    # scrolled to the newest message.
    js = """
async function(chatbot){
    await window.matcha_tts_update_chatbot(chatbot)
    //auto scroll
    var chatElement = document.getElementById('gr-chatbot');
    chatElement.scrollTop = chatElement.scrollHeight;
    var logElement = chatElement.querySelector('div[role="log"]');
    logElement.scrollTop = logElement.scrollHeight;
}
"""
    chatbot = gr.Chatbot(type="messages", elem_id="gr-chatbot")
    chatbot.change(None, [chatbot], [], js=js)
    msg = gr.Textbox()
    with gr.Row():
        clear = gr.ClearButton([msg, chatbot])
        submit = gr.Button("Submit", variant="primary").click(
            call_generate_text, inputs=[msg, chatbot], outputs=[msg, chatbot]
        )

    # Static footer: related Spaces and credits.
    gr.HTML("""
<br>
<div id="footer">
<b>Spaces</b><br>
<a href="https://huggingface.co/spaces/Akjava/matcha-tts_vctk-onnx" style="font-size: 9px" target="link">Match-TTS VCTK-ONNX</a> |
<a href="https://huggingface.co/spaces/Akjava/matcha-tts-onnx-benchmarks" style="font-size: 9px" target="link">Match-TTS ONNX-Benchmark</a> |
<a href="https://huggingface.co/spaces/Akjava/AIChat-matcha-tts-onnx-en" style="font-size: 9px" target="link">AIChat-Matcha-TTS ONNX English</a> |

<br><br>
<b>Credits</b><br>
<a href="https://github.com/akjava/Matcha-TTS-Japanese" style="font-size: 9px" target="link">Matcha-TTS-Japanese</a> |
<a href = "http://www.udialogue.org/download/cstr-vctk-corpus.html" style="font-size: 9px" target="link">CSTR VCTK Corpus</a> |
<a href = "https://github.com/cmusphinx/cmudict" style="font-size: 9px" target="link">CMUDict</a> |
<a href = "https://huggingface.co/docs/transformers.js/index" style="font-size: 9px" target="link">Transformer.js</a> |
<a href = "https://huggingface.co/cisco-ai/mini-bart-g2p" style="font-size: 9px" target="link">mini-bart-g2p</a> |
<a href = "https://onnxruntime.ai/docs/get-started/with-javascript/web.html" style="font-size: 9px" target="link">ONNXRuntime-Web</a> |
<a href = "https://github.com/akjava/English-To-IPA-Collections" style="font-size: 9px" target="link">English-To-IPA-Collections</a> |
<a href ="https://huggingface.co/papers/2309.03199" style="font-size: 9px" target="link">Matcha-TTS Paper</a>
</div>
""")

    # Enter in the textbox submits the same streaming handler as the button.
    msg.submit(call_generate_text, [msg, chatbot], [msg, chatbot])
|
|
|
import os

# Whitelist the ONNX voice model for Gradio's /file= serving from either the
# HF Space path or the local Windows dev checkout, then start the app.
space_root = "/home/user/app/"
dev_root = "C:\\Users\\owner\\Documents\\pythons\\huggingface\\mistral-7b-v0.3-matcha-tts-en"

model_paths = [
    os.path.join(root, "models", "ljspeech_sim.onnx")
    for root in (space_root, dev_root)
]
demo.launch(allowed_paths=model_paths)
|
|