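"""Gradio chat demo for a 1-bit quantized Hermes-2-Theta-Llama-3-8B model.

Loads the base weights from the Hugging Face Hub, points the config at a
local quantization codebook (codebooks/Hermes-2-Theta-Llama-3-8B_1bit.xmad),
and serves the model through gr.ChatInterface.
"""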
import os

import gradio as gr
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

# Silence tokenizer fork-parallelism warnings and let the CUDA allocator
# grow segments on demand to reduce memory fragmentation.
os.environ["TOKENIZERS_PARALLELISM"] = "0"
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
def load_model_and_tokenizer():
    model_name = "NousResearch/Hermes-2-Theta-Llama-3-8B"

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # The base model ships without a pad token; register one so generation
    # can pad without falling back to the EOS token.
    tokenizer.add_special_tokens({"pad_token": "<PAD>"})

    # Custom attributes read by the quantized model implementation: the
    # on-disk 1-bit codebook and the quantization window length.
    config = AutoConfig.from_pretrained(model_name)
    config.quantizer_path = "codebooks/Hermes-2-Theta-Llama-3-8B_1bit.xmad"
    config.window_length = 32

    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        config=config,
        torch_dtype=torch.float16,
        # "auto" places the model on whatever GPU is available; the original
        # hard-coded "cuda:2", which fails on hosts with fewer than 3 GPUs.
        device_map="auto",
    )

    # Adding <PAD> grew the vocabulary, so resize the embedding matrix
    # before the new token id is used.
    if len(tokenizer) > model.get_input_embeddings().weight.shape[0]:
        print(
            "WARNING: Resizing the embedding matrix to match the tokenizer vocab size."
        )
        model.resize_token_embeddings(len(tokenizer))
    model.config.pad_token_id = tokenizer.pad_token_id

    return model, tokenizer
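# Load once at import time so every Gradio request reuses the same weights.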
model, tokenizer = load_model_and_tokenizer()
def process_dialog(message, history):
    # With gr.ChatInterface's default (tuple) format, history is a list of
    # (user, assistant) pairs; fold them into the dialog so the model sees
    # the whole conversation, not just the latest message.
    dialog = []
    for user_msg, assistant_msg in history:
        dialog.append({"role": "user", "content": user_msg})
        if assistant_msg:
            dialog.append({"role": "assistant", "content": assistant_msg})
    dialog.append({"role": "user", "content": message})

    prompt = tokenizer.apply_chat_template(
        dialog, tokenize=False, add_generation_prompt=True
    )
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_new_tokens=512,
            temperature=0.7,
            do_sample=True,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
        )

    # Keep only the newly generated tokens; the prompt occupies the first
    # input_ids.shape[-1] positions of the output.
    response_ids = output_ids[0][input_ids.shape[-1]:]
    return tokenizer.decode(
        response_ids,
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True,
    )
def chatbot_response(message, history):
    return process_dialog(message, history)
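# Minimal chat UI; gr.ChatInterface manages the message history and passes
# it to chatbot_response on every turn.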
demo = gr.ChatInterface(
    fn=chatbot_response,
    examples=["Hello", "How are you?", "Tell me a joke"],
    title="LLM Chatbot",
    description="A demo chatbot using a 1-bit quantized Hermes-2-Theta-Llama-3-8B model.",
)
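# Queue requests so concurrent users take turns on the single generate()
# call rather than contending for the GPU (assumes default queue settings).
demo.queue()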
if __name__ == "__main__":
    demo.launch()