ffgtv3 commited on
Commit
e16425f
·
verified ·
1 Parent(s): 3020135

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -26
app.py CHANGED
@@ -1,34 +1,68 @@
1
- import discord
2
- from discord.ext import commands
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import torch
 
5
 
6
- # Настройка бота
7
- intents = discord.Intents.default()
8
- intents.message_content = True
9
- bot = commands.Bot(command_prefix='!', intents=intents)
 
 
10
 
11
- # Загрузка модели и токенизатора
12
- model_name = "Qwen/Qwen2-72B"
13
- tokenizer = AutoTokenizer.from_pretrained(model_name)
14
- model = AutoModelForCausalLM.from_pretrained(model_name)
 
 
 
15
 
16
- @bot.event
17
- async def on_ready():
18
- print(f'{bot.user} has connected to Discord!')
 
 
 
19
 
20
- @bot.command(name='AI')
21
- async def ai_response(ctx, *, question):
22
- # Подготовка входных данных
23
- inputs = tokenizer.encode(question, return_tensors='pt')
24
-
25
- # Генерация ответа
26
- with torch.no_grad():
27
- outputs = model.generate(inputs, max_length=100, num_return_sequences=1)
28
 
29
- # Декодирование и отправка ответа
30
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
31
- await ctx.send(response)
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
- # Запуск бота
34
- bot.run('<REDACTED_DISCORD_BOT_TOKEN>')  # security: a live bot token was committed here; it must be revoked/rotated, not merely removed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
 
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
+ import random
5
 
6
@st.cache_resource
def load_model():
    """Download and cache the Russian GPT-2 tokenizer and model.

    Decorated with ``st.cache_resource`` so the weights are materialised
    once per Streamlit server process rather than on every script rerun.

    Returns:
        tuple: ``(tokenizer, model)`` for the rugpt3small checkpoint.
    """
    checkpoint = "sberbank-ai/rugpt3small_based_on_gpt2"
    return (
        AutoTokenizer.from_pretrained(checkpoint),
        AutoModelForCausalLM.from_pretrained(checkpoint),
    )
12
 
13
def generate_response(prompt, tokenizer, model):
    """Generate a model reply for *prompt* and roughen it with typos.

    Args:
        prompt: User message to feed to the language model.
        tokenizer: Tokenizer matching *model* (encode/decode).
        model: Causal LM supporting ``generate``.

    Returns:
        str: Decoded continuation, post-processed by ``add_mistakes``.
    """
    inputs = tokenizer.encode(prompt, return_tensors='pt')
    with torch.no_grad():
        # Bug fix: without do_sample=True, generate() uses greedy decoding
        # and silently ignores temperature/top_k/top_p.
        outputs = model.generate(
            inputs,
            max_length=100,
            num_return_sequences=1,
            do_sample=True,
            temperature=0.9,
            top_k=50,
            top_p=0.95,
        )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return add_mistakes(response)
20
 
21
def add_mistakes(text):
    """Randomly misspell roughly one word in five.

    Each whitespace-separated word independently has a 20% chance of being
    passed through ``misspell_word``. Splitting and re-joining collapses any
    run of whitespace to a single space, exactly as the original did.

    Args:
        text: Input sentence.

    Returns:
        str: The sentence with occasional misspelled words.
    """
    mangled = [
        misspell_word(word) if random.random() < 0.2 else word
        for word in text.split()
    ]
    return ' '.join(mangled)
27
 
28
def misspell_word(word):
    """Return *word* with one Russian letter swapped for a random same-class letter.

    A letter class (vowel or consonant) is chosen with 50/50 probability,
    then a random occurrence of that class in the word is replaced by a
    random letter from the same class, preserving the letter's case.

    Args:
        word: Token to mutate.

    Returns:
        str: Mutated word; unchanged if shorter than 3 characters or if the
        chosen class has no occurrence in the word.
    """
    if len(word) < 3:
        return word

    vowels = 'аеёиоуыэюя'
    consonants = 'бвгджзйклмнпрстфхцчшщ'

    # 50/50: mutate a vowel or a consonant (same coin flip as before).
    alphabet = vowels if random.random() < 0.5 else consonants

    # Bug fix: the original comment promised a RANDOM matching letter, but the
    # code always mutated the first one found — pick a random position instead.
    positions = [i for i, ch in enumerate(word) if ch.lower() in alphabet]
    if not positions:
        return word

    i = random.choice(positions)
    replacement = random.choice(alphabet)
    # Bug fix: preserve the case of the replaced letter (the original
    # lowercased uppercase letters).
    if word[i].isupper():
        replacement = replacement.upper()
    return word[:i] + replacement + word[i + 1:]
47
+
48
# ---- Streamlit chat UI -------------------------------------------------
st.title("AI Чат с простой русской моделью")

tokenizer, model = load_model()

# Conversation history must survive Streamlit's rerun-per-interaction model.
st.session_state.setdefault("messages", [])

# Replay the conversation recorded so far.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])

user_text = st.chat_input("Введите ваше сообщение")
if user_text:
    # Record and echo the user's turn.
    st.session_state.messages.append({"role": "user", "content": user_text})
    with st.chat_message("user"):
        st.markdown(user_text)

    # Generate and render the assistant's turn.
    with st.chat_message("assistant"):
        reply = generate_response(user_text, tokenizer, model)
        st.markdown(reply)

    st.session_state.messages.append({"role": "assistant", "content": reply})