Update func_ai.py
func_ai.py  CHANGED  (+58 -40)
@@ -1,73 +1,91 @@
+# func_ai.py
+
 import requests
 import torch
-# from googletrans import Translator
 from transformers import pipeline
 from deep_translator import GoogleTranslator
 import time
-import os
+import os
+from datetime import datetime
+
 VECTOR_API_URL = os.getenv('API_URL')

-
+def log_message(message):
+    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+    print(f"[{timestamp}] {message}")

-
-
-
-
-
-
+# Initialize models
+def init_models():
+    log_message("Initializing AI models.")
+    sentiment_model = pipeline(
+        'sentiment-analysis',
+        model='cardiffnlp/twitter-xlm-roberta-base-sentiment',
+        tokenizer='cardiffnlp/twitter-xlm-roberta-base-sentiment',
+        device=0 if torch.cuda.is_available() else -1
+    )
+
+    classifier = pipeline(
+        "zero-shot-classification",
+        model="valhalla/distilbart-mnli-12-6",
+        device=0 if torch.cuda.is_available() else -1
+    )
+    return sentiment_model, classifier

-classifier = pipeline(
-    "zero-shot-classification",
-    model="valhalla/distilbart-mnli-12-6",
-    device=0 if torch.cuda.is_available() else -1
-)
+sentiment_model, classifier = init_models()

 def classify_comment(text):
     if not text:
-
+        log_message("Received empty text for classification.")
         return "non-interrogative"
-
+    log_message(f"Classifying comment: {text}")
     try:
         translated_text = GoogleTranslator(source='auto', target="en").translate(text)
-
+        log_message(f"Translated text: {translated_text}")
     except Exception as e:
-
+        log_message(f"Translation error: {e}")
         return "non-interrogative"
     if not translated_text:
-
+        log_message("Translation returned empty text.")
        return "non-interrogative"

     try:
         result = classifier(translated_text, ["interrogative", "non-interrogative"], clean_up_tokenization_spaces=True)
-
+        log_message(f"Classification result: {result}")
     except Exception as e:
-
+        log_message(f"Classification error: {e}")
         return "non-interrogative"

     top_class = result['labels'][0]
-
+    log_message(f"Top class: {top_class}")
     return top_class

-def retrieve_from_vdb(query):
-    print(f"Sending request to the FastAPI service: {query}")
-    response = requests.post(f"{VECTOR_API_URL}/search/", json={"query": query})
-    if response.status_code == 200:
-        results = response.json().get("results", [])
-        print(f"Received {len(results)} results: {results}")
-        return results
-    else:
-        print(f"Search error: {response.text}")
-        return []
-
 def analyze_sentiment(comments):
-
+    log_message("Starting sentiment analysis.")
     results = []
     for i in range(0, len(comments), 50):
         batch = comments[i:i + 50]
-
-
-
-
+        log_message(f"Analyzing comment batch {i} to {i + len(batch)}: {batch}")
+        try:
+            batch_results = sentiment_model(batch)
+            log_message(f"Batch results: {batch_results}")
+            results.extend(batch_results)
+        except Exception as e:
+            log_message(f"Sentiment analysis error for batch: {e}")
         time.sleep(1)  # Delay to avoid overload
-
-    return results
+    log_message("Sentiment analysis finished.")
+    return results
+
+def retrieve_from_vdb(query):
+    log_message(f"Sending request to the FastAPI service: {query}")
+    try:
+        response = requests.post(f"{VECTOR_API_URL}/search/", json={"query": query})
+        if response.status_code == 200:
+            results = response.json().get("results", [])
+            log_message(f"Received {len(results)} results: {results}")
+            return results
+        else:
+            log_message(f"Search error: {response.text}")
+            return []
+    except Exception as e:
+        log_message(f"Error querying the vector database: {e}")
+        return []
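For a quick end-to-end check of the updated module, a minimal usage sketch could look like the following. It assumes func_ai.py is importable from the working directory and that the API_URL environment variable points at a reachable FastAPI service exposing /search/; the localhost URL and sample comments are purely illustrative, not part of the commit.

import os
os.environ.setdefault('API_URL', 'http://localhost:8000')  # hypothetical local endpoint for testing

# Importing func_ai runs init_models(), which downloads both Hugging Face models on first use.
from func_ai import classify_comment, analyze_sentiment, retrieve_from_vdb

comments = ["When is the next update coming?", "Great video, thanks!"]  # illustrative sample comments

print(classify_comment(comments[0]))   # likely "interrogative"
print(analyze_sentiment(comments))     # list of {'label': ..., 'score': ...} dicts from the sentiment pipeline
print(retrieve_from_vdb("update"))     # parsed "results" from POST {API_URL}/search/, or [] on error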