# day2/app.py — Streamlit chat app for Qwen2-VL-7B-Instruct
# (Hugging Face Spaces page header — "ffgtv3's picture / Update app.py /
#  75c398d verified" — commented out so the file parses as Python.)
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from PIL import Image
import io
import importlib
def check_transformers_version():
    """Return the version string of the installed ``transformers`` package."""
    import transformers as _transformers

    return _transformers.__version__
@st.cache_resource
def load_model():
    """Download (or fetch from Streamlit's resource cache) the chat model.

    Returns:
        (tokenizer, model) on success, or (None, None) on failure; the
        failure is reported in the UI via ``st.error`` instead of raising.
    """
    # NOTE(review): Qwen2-VL is normally loaded with AutoProcessor +
    # Qwen2VLForConditionalGeneration; AutoTokenizer/AutoModelForCausalLM may
    # not expose image inputs even with trust_remote_code — confirm.
    model_name = "Qwen/Qwen2-VL-7B-Instruct"
    try:
        tok = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        mdl = AutoModelForCausalLM.from_pretrained(
            model_name, device_map="auto", trust_remote_code=True
        )
    except Exception as e:
        st.error(f"Ошибка при загрузке модели: {str(e)}")
        return None, None
    return tok, mdl
def generate_response(prompt, image, tokenizer, model):
    """Generate a model reply for *prompt*, optionally grounded on *image*.

    Args:
        prompt: user text; may be None/empty when only an image was sent.
        image: uploaded file-like object (opened with PIL) or None.
        tokenizer, model: the pair returned by ``load_model``; either may be
            None if loading failed.

    Returns:
        The generated text, or a human-readable Russian error message —
        this function never raises (errors are returned as strings so the
        chat UI can display them).
    """
    if tokenizer is None or model is None:
        return "Модель не загружена. Пожалуйста, проверьте ошибки выше."
    try:
        # Bug fix: the caller passes prompt=None when only an image is
        # uploaded; fall back to the same default text the UI shows (L60).
        if not prompt:
            prompt = "Опишите это изображение"
        if image:
            image = Image.open(image).convert('RGB')
            # NOTE(review): passing images= assumes a multimodal
            # processor-like tokenizer (trust_remote_code); a plain
            # AutoTokenizer would reject this keyword — confirm.
            inputs = tokenizer(prompt, images=[image], return_tensors='pt').to(model.device)
        else:
            inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
        with torch.no_grad():
            outputs = model.generate(**inputs, max_new_tokens=100)
        # Bug fix: decode only the newly generated tokens — decoding the
        # whole sequence echoed the user's prompt back into the reply.
        prompt_len = inputs['input_ids'].shape[1]
        response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
        return response
    except Exception as e:
        return f"Ошибка при генерации ответа: {str(e)}"
# --- Page setup (runs on every Streamlit rerun) ---------------------------
st.title("Чат с Qwen VL-7B-Instruct")

# Surface the installed transformers version — useful when the model class
# is missing from an older release.
transformers_version = check_transformers_version()
st.info(f"Версия transformers: {transformers_version}")

# load_model is cached with @st.cache_resource, so the download happens once.
tokenizer, model = load_model()
if tokenizer is None or model is None:
    st.warning("Модель не загружена. Приложение может работать некорректно.")
    st.info("Попробуйте установить последнюю версию transformers: pip install transformers --upgrade")
else:
    st.success("Модель успешно загружена!")
# Chat history lives in session_state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Re-render the entire conversation on every rerun; messages that carried
# an image (see the upload branch below in the file) also re-display it.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        if "image" in message:
            st.image(message["image"])
prompt = st.chat_input("Введите ваше сообщение")
# NOTE(review): st.file_uploader keeps its value across reruns, so a
# previously uploaded image is re-sent with every new prompt until the
# user clears it — confirm whether that is intended.
uploaded_file = st.file_uploader("Загрузите изображение (необязательно)", type=["png", "jpg", "jpeg"])

if prompt or uploaded_file:
    if uploaded_file:
        image = Image.open(uploaded_file)
        # Store the raw upload with the message; a default caption is used
        # when the user sent an image without any text.
        st.session_state.messages.append({"role": "user", "content": prompt or "Опишите это изображение", "image": uploaded_file})
        with st.chat_message("user"):
            if prompt:
                st.markdown(prompt)
            st.image(image)
    else:
        # Text-only message.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
    # Generate and display the assistant reply, then persist it so the
    # history loop above re-renders it on subsequent reruns.
    with st.chat_message("assistant"):
        with st.spinner("Генерация ответа..."):
            response = generate_response(prompt, uploaded_file, tokenizer, model)
            st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})