import subprocess
import sys

# Install required libraries if they are not already present
subprocess.check_call([sys.executable, "-m", "pip", "install", "transformers", "streamlit", "torch"])
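# (Installing at import time keeps the script self-contained for quick
# prototyping; in a deployed Space, pinning these packages in
# requirements.txt is the more usual approach.)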
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load model and tokenizer
MODEL_NAME = "lora_model"  # Replace with your actual model name or path
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
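# Note: the name "lora_model" suggests a LoRA fine-tune. If the directory
# holds only adapter weights rather than merged weights, loading would
# instead go through peft (a sketch, assuming peft is installed):
#
#     from peft import AutoPeftModelForCausalLM
#     model = AutoPeftModelForCausalLM.from_pretrained(
#         MODEL_NAME, torch_dtype=torch.float16, device_map="auto"
#     )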
def generate_response(prompt):
    """Generate a response from the model."""
    # Move inputs to the same device the model was placed on by device_map
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=512)
    # Decode only the newly generated tokens, skipping the echoed prompt
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
# Streamlit UI
st.set_page_config(page_title="Chatbot", page_icon="🤖")
st.title("🤖 AI Chatbot")
# Initialize chat history if it does not exist yet
if "messages" not in st.session_state:
    st.session_state.messages = []
# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# User input
user_input = st.chat_input("Enter a message...")
if user_input:
    # Append user message
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)
    # Generate response
    with st.chat_message("assistant"):
        response = generate_response(user_input)
        st.markdown(response)

    # Append assistant response
    st.session_state.messages.append({"role": "assistant", "content": response})
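# To run locally (assuming this file is saved as app.py):
#     streamlit run app.py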