# HuggingFace Spaces file-page metadata (scraping residue, preserved as comments):
# author: songxxzp — commit: "Use int4" (c502ed1) — raw / history / blame — 960 Bytes
import torch
import gradio as gr
from functools import partial
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Load the ChatGLM-6B tokenizer and the int4-quantized model revision.
# The custom model/tokenizer classes live in the model repo (trust_remote_code).
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
# NOTE(review): .cpu().float() dequantizes the int4 weights to fp32 for CPU
# inference — presumably because the quantization kernels need a GPU; confirm.
# NOTE(review): ChatGLM's own examples load via AutoModel, not
# AutoModelForSeq2SeqLM; trust_remote_code likely maps both to the same
# custom class — verify against the model card.
model = AutoModelForSeq2SeqLM.from_pretrained("THUDM/chatglm-6b", revision="int4", trust_remote_code=True).cpu().float()
def chat(query, history=None):
    """Run one chat turn against ChatGLM-6B and return the updated dialogue.

    Args:
        query: The user's new message.
        history: Conversation state carried by gradio's "state" component.
            ``None`` (gradio's initial state) or ``[]`` means a fresh chat.

    Returns:
        A ``(chatbot_pairs, history)`` tuple: the list of (query, response)
        pairs for the "chatbot" output, and the updated state to carry over.
    """
    # Guard against the shared mutable-default pitfall (original used
    # ``history=[]``) and against gradio's initial ``None`` state.
    if history is None:
        history = []
    # model.chat returns (response, updated_history); the original code bound
    # the whole tuple to ``response`` and never updated ``history``, so the
    # latest exchange never appeared in the chatbot output.
    response, history = model.chat(tokenizer, query, history, max_length=32)
    # ChatGLM's updated history is already a list of (query, response) pairs,
    # which is exactly the format the gr.Chatbot component expects.
    return history, history
# UI metadata for the gradio demo page.
title = "ChatGLM-6B Chatbot"
description = (
    "This is an unofficial chatbot application based on open source model "
    "ChatGLM-6B, running on cpu(hence max_length is limited to 32)."
)
examples = [["Hello?"]]

# Wire the chat function into a simple text-in / chatbot-out interface,
# threading conversation state through gradio's "state" component.
chatbot_interface = gr.Interface(
    fn=chat,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    title=title,
    description=description,
    examples=examples,
)
chatbot_interface.launch()