import random

import gradio as gr
from huggingface_hub import InferenceClient

# Remote text-generation endpoint used for every chat turn.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")


def format_prompt(message, history):
    """Build a Mixtral-Instruct prompt string from the new message and prior turns.

    Each past (user, assistant) pair becomes ``[INST] user [/INST] assistant</s>``,
    followed by the new message in a final ``[INST] ... [/INST]`` block.
    """
    prompt = "<s>"
    for user_turn, bot_turn in history:
        prompt += f"[INST] {user_turn} [/INST] {bot_turn}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt


def chat_inf(system_prompt, prompt, history):
    """Stream a chat completion into the Chatbot component.

    Args:
        system_prompt: Optional system instruction, prepended to the user prompt.
        prompt: The user's new message.
        history: List of (user, assistant) tuples from the Chatbot (may be None/empty).

    Yields:
        The updated history list — partial while tokens stream in, complete at the end.
    """
    if not history:
        history = []
    # Random seed per request so repeated identical prompts can differ.
    seed = random.randint(1, 1111111111111111)
    generate_kwargs = dict(
        temperature=0.9,
        max_new_tokens=10480,
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = ""
    for response in stream:
        output += response.token.text
        # Yield prior turns too, so the chatbot doesn't collapse to a single
        # pair while the answer is still streaming.
        yield history + [(prompt, output)]
    history.append((prompt, output))
    yield history


with gr.Blocks() as app:
    with gr.Group():
        chat_b = gr.Chatbot()
        with gr.Row():
            with gr.Column(scale=3):
                inp = gr.Textbox(label="Prompt")
                sys_inp = gr.Textbox(label="System Prompt (optional)")
                btn = gr.Button("Chat")
            with gr.Column(scale=1):
                with gr.Group():
                    stop_btn = gr.Button("Stop")
                    clear_btn = gr.Button("Clear")
        chatblock = gr.Dropdown(
            label="Chatblocks",
            choices=[c for c in range(1, 40)],
            multiselect=True,
        )

    chat_event = btn.click(chat_inf, [sys_inp, inp, chat_b], chat_b)
    # "Stop" aborts an in-flight generation; "Clear" also resets the chatbot.
    stop_btn.click(None, None, None, cancels=[chat_event])
    clear_btn.click(lambda: None, None, chat_b, cancels=[chat_event])
app.launch()