import torch
import transformers
import gradio as gr

from unsloth import FastLanguageModel

# Maximum context length the model will handle
max_seq_length = 2048
# None lets Unsloth auto-detect the dtype (float16 or bfloat16, depending on GPU)
dtype = None

def load_model():
    # Load the fine-tuned LoRA model in 4-bit quantization to cut GPU memory use
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name="ivwhy/lora_model",
        max_seq_length=max_seq_length,
        dtype=dtype,
        load_in_4bit=True,
    )

    # Switch Unsloth into its optimized inference mode before generating
    FastLanguageModel.for_inference(model)

    # The tokenizer ships without a pad token; reuse EOS so padding works
    tokenizer.pad_token = tokenizer.eos_token

    # Wrap model and tokenizer in a standard text-generation pipeline.
    # device=0 targets the first GPU, -1 falls back to CPU; if transformers
    # complains about moving a quantized model, drop the device argument.
    pipeline = transformers.pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        device=0 if torch.cuda.is_available() else -1,
    )

    return pipeline, tokenizer


# Load once at startup so every chat request reuses the same pipeline
generation_pipeline, tokenizer = load_model()

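# Quick sanity check of the pipeline outside Gradio (illustrative only; the
# prompt string here is an assumption, not part of the original app):
#
#     sample = generation_pipeline("Hello!", max_new_tokens=20)
#     print(sample[0]["generated_text"])
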
def chat_function(message, history, system_prompt, max_new_tokens, temperature):
    # Rebuild the full conversation: system prompt, prior turns from Gradio's
    # history (a list of [user, assistant] pairs), then the new user message.
    messages = [{"role": "system", "content": system_prompt}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Render the conversation with the model's chat template;
    # add_generation_prompt appends the assistant header so the model replies
    prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )

    # Stop on either the generic EOS token or Llama-3's end-of-turn token
    terminators = [
        tokenizer.eos_token_id,
        tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    ]

    outputs = generation_pipeline(
        prompt,
        max_new_tokens=max_new_tokens,
        eos_token_id=terminators,
        do_sample=True,
        temperature=temperature,
        top_p=0.9,
    )

    # The pipeline returns prompt + completion; slice off the prompt so only
    # the new assistant reply is shown in the chat window
    return outputs[0]["generated_text"][len(prompt):]


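# For reference: with a Llama-3-style chat template (which the "<|eot_id|>"
# terminator above suggests this model uses), apply_chat_template renders the
# messages roughly as:
#
#     <|begin_of_text|><|start_header_id|>system<|end_header_id|>
#
#     {system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>
#
#     {message}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
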
# Gradio chat UI; the additional inputs appear in a collapsible panel and are
# passed to chat_function after the message and history
demo = gr.ChatInterface(
    chat_function,
    textbox=gr.Textbox(placeholder="Enter message here", container=False, scale=7),
    chatbot=gr.Chatbot(height=400),
    additional_inputs=[
        gr.Textbox("You are a helpful AI", label="System Prompt"),
        gr.Slider(minimum=1, maximum=4000, value=500, label="Max New Tokens"),
        # Strictly positive minimum: transformers rejects temperature=0
        # when do_sample=True
        gr.Slider(minimum=0.1, maximum=1, value=0.7, label="Temperature"),
    ],
)

if __name__ == "__main__":
    demo.launch()
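
# demo.launch() serves on http://127.0.0.1:7860 by default. server_name,
# server_port, and share are standard Gradio launch options (the values below
# are illustrative, not from the original script):
#
#     demo.launch(server_name="0.0.0.0", server_port=7860, share=True)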