import gradio as gr
from huggingface_hub import InferenceClient

# Client for the model hosted behind the Hugging Face Inference API
client = InferenceClient("Grandediw/lora_model")


def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Stream a reply to the latest message, replaying prior turns first."""
    # Rebuild the conversation as chat messages, starting with the system prompt
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Stream the completion, yielding the growing response so the UI updates live
    response = ""
    for partial in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = partial.choices[0].delta.content or ""  # some chunks carry no text
        response += token
        yield response
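
# A rough sanity check for respond() outside the Gradio UI; this is only a sketch,
# and it assumes the hosted endpoint for "Grandediw/lora_model" is reachable with
# your HF token. The prompt and sampling values below are illustrative only.
#
#   for chunk in respond("Hello!", [], "You are a friendly Chatbot.", 64, 0.7, 0.95):
#       print(chunk)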

with gr.Blocks(title="Enhanced LoRA Chat Interface") as demo:
    gr.Markdown(
        """
        # LoRA Chat Assistant
        Welcome! This is a demo of a LoRA-based chat assistant.
        Start by entering your prompt below.
        """
    )

    with gr.Row():
        with gr.Column():
            system_message = gr.Textbox(
                value="You are a friendly Chatbot.",
                label="Initial Behavior (System Message)",
                lines=3,
                placeholder="Describe how the assistant should behave...",
            )
            max_tokens = gr.Slider(
                minimum=1, maximum=2048, value=512, step=1,
                label="Max new tokens",
            )
            temperature = gr.Slider(
                minimum=0.1, maximum=4.0, value=0.7, step=0.1,
                label="Temperature",
            )
            top_p = gr.Slider(
                minimum=0.1, maximum=1.0, value=0.95, step=0.05,
                label="Top-p (nucleus sampling)",
            )

    # The chat history reaches respond() as a list of (user, assistant) tuples,
    # which is what type="tuples" requests from ChatInterface.
    chat = gr.ChatInterface(
        fn=respond,
        additional_inputs=[system_message, max_tokens, temperature, top_p],
        type="tuples",
    )

if __name__ == "__main__":
    demo.launch()