import os

import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")


def generate_text(messages):
    """Stream a chat completion from the Inference API, yielding the text accumulated so far."""
    print("generate_text")
    print(messages)
    generated = ""
    for token in client.chat_completion(messages, max_tokens=100, stream=True):
        content = token.choices[0].delta.content or ""  # delta.content can be None on some chunks
        generated += content
        yield generated

    # Append a trailing comma if the reply does not already end with punctuation.
    if generated and generated[-1] not in [",", ".", "!", "?"]:
        yield generated + ","


# no-stream version
def call_generate_text(message, history):
    # if len(message) == 0:
    #     messages.append({"role": "system", "content": "you response around 10 words"})
    print(message)
    print(history)

    user_message = [{"role": "user", "content": message}]
    messages = history + user_message
    try:
        assistant_message = {"role": "assistant", "content": ""}
        text_generator = generate_text(messages)

        for text_chunk in text_generator:
            # print(f"chunk={text_chunk}")
            assistant_message["content"] = text_chunk
            updated_history = messages + [assistant_message]
            yield "", updated_history

    except RuntimeError as e:
        print(f"An unexpected error occurred: {e}")
        yield "", history


# Extra HTML injected into the page <head> (empty here); the page is expected to provide
# window.matcha_tts_update_chatbot, which the JS callback below calls.
head = '''
'''

with gr.Blocks(title="LLM with TTS", head=head) as demo:
    gr.Markdown("""
## Warnings
 - Don't listen at a large volume or with headphones until you have confirmed your machine can play audio properly.
 - Sometimes the GPU crashes because of maxInputLength; if it crashes for you, let me know along with your GPU info.

## Notice
 - LLM is unstable: the inference client used in this demo exhibits inconsistent performance. While it can respond in milliseconds, it sometimes becomes unresponsive and times out.
 - TTS takes a long time to load: please be patient; the first response may be delayed by more than 40 seconds while the model loads.
""")
    gr.Markdown("**Mistral-7B-Instruct-v0.3/LJSpeech** - LLM and TTS models will change without notice.")

    # Runs whenever the chatbot value changes: hands the messages to the TTS player and auto-scrolls the log.
    js = """
    async function(chatbot){
        await window.matcha_tts_update_chatbot(chatbot)
        // auto scroll
        var chatElement = document.getElementById('gr-chatbot');
        chatElement.scrollTop = chatElement.scrollHeight;
        var logElement = chatElement.querySelector('div[role="log"]');
        logElement.scrollTop = logElement.scrollHeight;
    }
    """
    chatbot = gr.Chatbot(type="messages", elem_id="gr-chatbot")
    chatbot.change(None, [chatbot], [], js=js)
    msg = gr.Textbox()
    with gr.Row():
        clear = gr.ClearButton([msg, chatbot])
        submit = gr.Button("Submit", variant="primary")
        submit.click(call_generate_text, inputs=[msg, chatbot], outputs=[msg, chatbot])
    gr.HTML("""
    """)
    msg.submit(call_generate_text, [msg, chatbot], [msg, chatbot])

remote_dir = "/home/user/app/"
local_dir = "C:\\Users\\owner\\Documents\\pythons\\huggingface\\mistral-7b-v0.3-matcha-tts-en"  # sorry, this is my development environment
# allowed_paths must point to the model file itself, not its directory
# demo.launch(allowed_paths=[os.path.join(remote_dir, "models", "ljspeech_sim.onnx")])
demo.launch(allowed_paths=[
    os.path.join(remote_dir, "models", "ljspeech_sim.onnx"),
    os.path.join(local_dir, "models", "ljspeech_sim.onnx"),
])