from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import gradio as gr

# Model and tokenizer paths
model_name = "ahmedbasemdev/llama-3.2-3b-ChatBot"

# Configure 4-bit quantization
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,               # Enable 4-bit quantization
    bnb_4bit_use_double_quant=True,  # Use double quantization
    bnb_4bit_quant_type="nf4",       # Use the NF4 quantization type for better accuracy
)
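
# Optional tweak (an assumption, not part of the original config): 4-bit
# layers compute in float32 by default, which can be slow. On hardware with
# bfloat16 support, a compute dtype can be passed instead, e.g.:
#
#   import torch
#   bnb_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_use_double_quant=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype=torch.bfloat16,
#   )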

# Load the model with 4-bit quantization
print("Loading the quantized model...")
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",  # Automatically place the model on the available device (GPU if present, else CPU)
)
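
# Caveat: bitsandbytes 4-bit loading generally requires a CUDA GPU, so the
# call above can fail at runtime on CPU-only hardware. A minimal unquantized
# fallback (a sketch, not the original app's behavior) would be:
#
#   model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")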

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Define the inference function
def single_inference(question):
    messages = [{"role": "user", "content": question}]

    # Tokenize the input using the model's chat template
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)  # Ensure the input runs on the model's device

    terminators = [
        tokenizer.eos_token_id,
        tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    ]
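    # Note: "<|eot_id|>" is Llama 3's end-of-turn token; stopping on it as
    # well as on eos keeps the model from generating past its own turn.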
    # Generate a response
    outputs = model.generate(
        input_ids,
        max_new_tokens=256,
        eos_token_id=terminators,
        do_sample=True,
        temperature=0.2,  # Low temperature for mostly deterministic answers
    )

    # Decode only the newly generated tokens (skip the prompt)
    response = outputs[0][input_ids.shape[-1]:]
    output = tokenizer.decode(response, skip_special_tokens=True)
    return output
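
# Quick sanity check of the inference path (hypothetical prompt), handy
# before wiring up the UI:
#
#   print(single_inference("What can you help me with?"))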

# Gradio interface
print("Setting up Gradio app...")
interface = gr.Interface(
    fn=single_inference,
    inputs="text",
    outputs="text",
    title="Quantized Chatbot",
    description="Ask me anything!",
)

# Launch the Gradio app
interface.launch()
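
# For a temporary public link when running outside Hugging Face Spaces,
# Gradio also supports interface.launch(share=True); on Spaces the plain
# launch() above is all that's needed.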