ffgtv3 committed (verified)
Commit c263cca · Parent(s): fd14c39

Update app.py

Files changed (1): app.py (+33, -41)
app.py CHANGED
@@ -1,51 +1,30 @@
 import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
-import random
+from PIL import Image
+import io
 
 @st.cache_resource
 def load_model():
     model_name = "Qwen/Qwen2-VL-7B-Instruct"
     tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
     return tokenizer, model
 
-def generate_response(prompt, tokenizer, model):
-    inputs = tokenizer.encode(prompt, return_tensors='pt')
+def generate_response(prompt, image, tokenizer, model):
+    if image:
+        image = Image.open(image).convert('RGB')
+        inputs = tokenizer.from_pretrained(prompt, images=[image], return_tensors='pt').to(model.device)
+    else:
+        inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
+
     with torch.no_grad():
-        outputs = model.generate(inputs, max_length=100, num_return_sequences=1,
-                                 temperature=0.9, top_k=50, top_p=0.95)
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return add_mistakes(response)
-
-def add_mistakes(text):
-    words = text.split()
-    for i in range(len(words)):
-        if random.random() < 0.2:  # 20% chance of an error in the word
-            words[i] = misspell_word(words[i])
-    return ' '.join(words)
-
-def misspell_word(word):
-    if len(word) < 3:
-        return word
-    vowels = 'аеёиоуыэюя'
-    consonants = 'бвгджзйклмнпрстфхцчшщ'
+        outputs = model.generate(**inputs, max_new_tokens=100)
 
-    if random.random() < 0.5:
-        # Replace a random vowel
-        for i, char in enumerate(word):
-            if char.lower() in vowels:
-                replacement = random.choice(vowels)
-                return word[:i] + replacement + word[i+1:]
-    else:
-        # Replace a random consonant
-        for i, char in enumerate(word):
-            if char.lower() in consonants:
-                replacement = random.choice(consonants)
-                return word[:i] + replacement + word[i+1:]
-    return word
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response
 
-st.title("AI Чат с простой русской моделью")
+st.title("Чат с Qwen VL-7B-Instruct")
 
 tokenizer, model = load_model()
 
@@ -55,14 +34,27 @@ if "messages" not in st.session_state:
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
-
-if prompt := st.chat_input("Введите ваше сообщение"):
-    st.session_state.messages.append({"role": "user", "content": prompt})
-    with st.chat_message("user"):
-        st.markdown(prompt)
+        if "image" in message:
+            st.image(message["image"])
+
+prompt = st.chat_input("Введите ваше сообщение")
+uploaded_file = st.file_uploader("Загрузите изображение (необязательно)", type=["png", "jpg", "jpeg"])
+
+if prompt or uploaded_file:
+    if uploaded_file:
+        image = Image.open(uploaded_file)
+        st.session_state.messages.append({"role": "user", "content": prompt or "Опишите это изображение", "image": uploaded_file})
+        with st.chat_message("user"):
+            if prompt:
+                st.markdown(prompt)
+            st.image(image)
+    else:
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        with st.chat_message("user"):
+            st.markdown(prompt)
 
     with st.chat_message("assistant"):
-        response = generate_response(prompt, tokenizer, model)
+        response = generate_response(prompt, uploaded_file, tokenizer, model)
         st.markdown(response)
 
     st.session_state.messages.append({"role": "assistant", "content": response})
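A note on the new generate_response: as committed, it builds the multimodal inputs with tokenizer.from_pretrained(prompt, images=[image], return_tensors='pt'). That call cannot work as written: from_pretrained is a loading classmethod, not an encoding call, and a plain AutoTokenizer takes no images argument. Since Qwen/Qwen2-VL-7B-Instruct is a vision-language checkpoint, text-plus-image inputs normally go through its AutoProcessor, and the class documented for generation is Qwen2VLForConditionalGeneration rather than AutoModelForCausalLM. Below is a minimal sketch of that path, following the transformers Qwen2-VL usage pattern; it is not part of commit c263cca, and the exact chat-message format is an assumption to check against the model card.

import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

# Sketch only (not from commit c263cca): load the processor and the
# vision-language generation class instead of AutoTokenizer/AutoModelForCausalLM.
model_name = "Qwen/Qwen2-VL-7B-Instruct"
processor = AutoProcessor.from_pretrained(model_name)
model = Qwen2VLForConditionalGeneration.from_pretrained(model_name, device_map="auto")

def generate_response(prompt, image_file, processor, model):
    content = []
    image = None
    if image_file:
        image = Image.open(image_file).convert("RGB")
        # The image placeholder tells the processor where the pixel
        # features belong in the token stream.
        content.append({"type": "image"})
    content.append({"type": "text", "text": prompt or "Describe this image"})
    messages = [{"role": "user", "content": content}]

    # Render the chat template to a prompt string, then tokenize text and
    # image together with the processor.
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(
        text=[text],
        images=[image] if image is not None else None,
        return_tensors="pt",
    ).to(model.device)

    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=100)

    # Decode only the newly generated tokens, not the echoed prompt.
    new_ids = output_ids[:, inputs["input_ids"].shape[1]:]
    return processor.batch_decode(new_ids, skip_special_tokens=True)[0]

Wiring this in would also mean load_model returns (processor, model) and the call site in the chat section passes the processor in place of the tokenizer.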
 
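A smaller behavioral wrinkle in the new input flow: when the user uploads an image without typing a message, the stored history entry falls back to "Опишите это изображение" ("Describe this image"), but generate_response is still called with prompt=None. A sketch of the tail of app.py that keeps the stored message and the model call in sync; effective_prompt is a hypothetical name, not in the commit:

prompt = st.chat_input("Введите ваше сообщение")
uploaded_file = st.file_uploader("Загрузите изображение (необязательно)", type=["png", "jpg", "jpeg"])

if prompt or uploaded_file:
    # Use one prompt everywhere: what the history shows is what the model is asked.
    effective_prompt = prompt or "Опишите это изображение"  # mirrors the commit's fallback

    user_message = {"role": "user", "content": effective_prompt}
    if uploaded_file:
        user_message["image"] = uploaded_file
    st.session_state.messages.append(user_message)

    with st.chat_message("user"):
        st.markdown(effective_prompt)
        if uploaded_file:
            st.image(Image.open(uploaded_file))

    with st.chat_message("assistant"):
        response = generate_response(effective_prompt, uploaded_file, tokenizer, model)
        st.markdown(response)

    st.session_state.messages.append({"role": "assistant", "content": response})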