import gradio as gr
import torch  # needed for the CUDA availability check below
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load the base model and tokenizer, then attach the LoRA adapter
def load_model():
    base_model_name = "unsloth/llama-3.2-1b-instruct-bnb-4bit"  # Replace with your base model name
    lora_model_name = "sreyanghosh/lora_model"  # Replace with your LoRA model path

    tokenizer = AutoTokenizer.from_pretrained(base_model_name)
    # The base checkpoint is pre-quantized with bitsandbytes, which requires a
    # CUDA device; on CPU-only hardware, fall back to a plain load.
    model = AutoModelForCausalLM.from_pretrained(
        base_model_name,
        device_map="auto" if torch.cuda.is_available() else None,
    )
    # Apply the LoRA adapter weights on top of the frozen base model
    model = PeftModel.from_pretrained(model, lora_model_name)
    model.eval()
    return tokenizer, model


tokenizer, model = load_model()
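# The model is loaded once at import time, so the first startup of the Space
# includes the checkpoint download; later requests reuse the loaded weights.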
# Define the respond function used by the chat interface
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Rebuild the conversation history as role-tagged messages
    messages = [{"role": "system", "content": system_message}]
    for user_input, bot_response in history:
        if user_input:
            messages.append({"role": "user", "content": user_input})
        if bot_response:
            messages.append({"role": "assistant", "content": bot_response})
    messages.append({"role": "user", "content": message})

    # Format the conversation as plain "role: content" lines for the model
    conversation_text = "\n".join(
        f"{msg['role']}: {msg['content']}" for msg in messages
    )
    # Tokenize and move the tensors to the same device as the model
    inputs = tokenizer(conversation_text, return_tensors="pt", truncation=True).to(
        model.device
    )
    # Generate the model's response; do_sample=True is required for the
    # temperature and top_p settings to have any effect
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_new_tokens=max_tokens,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens, skipping the echoed prompt
    new_tokens = outputs[0][inputs.input_ids.shape[-1]:]
    new_response = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
    yield new_response
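# Note: respond yields the full reply in a single step. For token-by-token
# streaming, one option is transformers.TextIteratorStreamer, with
# model.generate running in a background thread while partial text is yielded.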
# Gradio app configuration
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
if __name__ == "__main__":
    demo.launch()
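# When running outside of Spaces, a temporary public URL can be exposed by
# calling demo.launch(share=True) instead.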