VOIDER committed on
Commit 57c45ff · verified · 1 Parent(s): 9588d56

Create app.py

Files changed (1): app.py +519 -0
app.py ADDED
@@ -0,0 +1,519 @@
+ import gradio as gr
+ from PIL import Image, PngImagePlugin
+ import io
+ import os
+ import tempfile  # used for the CSV/JSON export paths below
+ import pandas as pd
+ import torch
+ from transformers import pipeline as transformers_pipeline, AutoImageProcessor, AutoModelForImageClassification
+ # from torchvision import transforms  # less relevant for the ONNX pipeline
+ from torchmetrics.functional.multimodal import clip_score
+ from open_clip import create_model_from_pretrained, get_tokenizer
+ import re
+ import matplotlib.pyplot as plt
+ import json
+ from collections import defaultdict
+ import numpy as np
+ import logging  # for ONNX logging
+
+ # --- ONNX Related Imports and Setup ---
+ try:
+     import onnxruntime
+ except ImportError:
+     print("onnxruntime not found. Please ensure it's in requirements.txt")
+     onnxruntime = None
+
+ from huggingface_hub import hf_hub_download
+
+ # imgutils for rgb_encode (if installed)
+ try:
+     from imgutils.data import rgb_encode  # assuming this is the correct import
+ except ImportError:
+     print("imgutils.data.rgb_encode not found. Preprocessing for deepghs might be limited.")
+     def rgb_encode(image, order_='CHW'):  # simple fallback stub when imgutils is missing
+         img_arr = np.array(image)
+         if order_ == 'CHW':
+             img_arr = np.transpose(img_arr, (2, 0, 1))
+         return img_arr.astype(np.float32) / 255.0  # basic normalization unless another is specified
+
+ # --- Model Configuration and Loading ---
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ print(f"Using device: {DEVICE}")
+ ONNX_DEVICE = "CUDAExecutionProvider" if DEVICE == "cuda" and onnxruntime and "CUDAExecutionProvider" in onnxruntime.get_available_providers() else "CPUExecutionProvider"
+ print(f"Using ONNX device: {ONNX_DEVICE}")
+
+
+ # --- Helper for ONNX models (deepghs) ---
+ @torch.no_grad()
+ def _img_preprocess_for_onnx(image: Image.Image, size: tuple = (384, 384), normalize_mean=0.5, normalize_std=0.5):
+     image = image.resize(size, Image.Resampling.BILINEAR)  # updated to the Resampling enum
+     data = rgb_encode(image, order_='CHW')  # (C, H, W), float32, 0-1 range from common imgutils
+
+     # Normalization is ((data / 255.0) - mean) / std if data is in 0-255,
+     # or (data - mean) / std if rgb_encode already returns 0-1.
+     # Assume rgb_encode returns float32 in the [0, 1] range.
+     mean = np.array([normalize_mean] * 3, dtype=np.float32).reshape((3, 1, 1))
+     std = np.array([normalize_std] * 3, dtype=np.float32).reshape((3, 1, 1))
+
+     normalized_data = (data - mean) / std
+     return normalized_data[None, ...].astype(np.float32)  # add batch dimension
+
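+ # Quick shape check (illustrative, not executed): for any RGB input, e.g. 512x512,
+ #   x = _img_preprocess_for_onnx(Image.new("RGB", (512, 512)), size=(384, 384))
+ #   assert x.shape == (1, 3, 384, 384) and x.dtype == np.float32
+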
+ onnx_sessions_cache = {}  # cache for ONNX sessions and metadata
+
+ def get_onnx_session_and_meta(repo_id, model_subfolder):
+     cache_key = f"{repo_id}/{model_subfolder}"
+     if cache_key in onnx_sessions_cache:
+         return onnx_sessions_cache[cache_key]
+
+     if not onnxruntime:
+         raise ImportError("ONNX Runtime is not available.")
+
+     try:
+         model_path = hf_hub_download(repo_id, filename=f"{model_subfolder}/model.onnx")
+         meta_path = hf_hub_download(repo_id, filename=f"{model_subfolder}/meta.json")
+
+         options = onnxruntime.SessionOptions()
+         options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+         if ONNX_DEVICE == "CPUExecutionProvider":
+             options.intra_op_num_threads = os.cpu_count()
+
+         session = onnxruntime.InferenceSession(model_path, options, providers=[ONNX_DEVICE])
+
+         with open(meta_path, 'r') as f:
+             meta = json.load(f)
+
+         labels = meta.get('labels', [])
+         onnx_sessions_cache[cache_key] = (session, labels, meta)
+         return session, labels, meta
+     except Exception as e:
+         print(f"Error loading ONNX model {repo_id}/{model_subfolder}: {e}")
+         onnx_sessions_cache[cache_key] = (None, [], None)  # cache the failure
+         return None, [], None
+
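+ # Usage sketch (assumes the deepghs repo layout of <subfolder>/model.onnx plus meta.json):
+ #   session, labels, meta = get_onnx_session_and_meta(ANIME_AESTHETIC_REPO, ANIME_AESTHETIC_SUBFOLDER)
+ #   if session: print(labels)  # e.g. ["normal", "slight", "moderate", "strong", "extreme"]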
+
+ # 1. ImageReward
+ # NOTE: THUDM/ImageReward is a BLIP-based reward model rather than a standard
+ # image-classification checkpoint, so this AutoModelForImageClassification load
+ # may fail and fall through to the except branch below.
+ try:
+     reward_processor = AutoImageProcessor.from_pretrained("THUDM/ImageReward")
+     reward_model = AutoModelForImageClassification.from_pretrained("THUDM/ImageReward").to(DEVICE)
+     reward_model.eval()
+ except Exception as e:
+     print(f"Error loading THUDM/ImageReward: {e}")
+     reward_processor, reward_model = None, None
+
+ # 2. Anime Aesthetic (deepghs ONNX)
+ # Model: deepghs/anime_aesthetic, subfolder: swinv2pv3_v0_448_ls0.2_x
+ ANIME_AESTHETIC_REPO = "deepghs/anime_aesthetic"
+ ANIME_AESTHETIC_SUBFOLDER = "swinv2pv3_v0_448_ls0.2_x"
+ ANIME_AESTHETIC_IMG_SIZE = (448, 448)
+ # Labels from meta.json: ["normal", "slight", "moderate", "strong", "extreme"]
+ # Weights for the weighted sum:
+ ANIME_AESTHETIC_LABEL_WEIGHTS = {"normal": 0.0, "slight": 1.0, "moderate": 2.0, "strong": 3.0, "extreme": 4.0}
+
+ # 3. MANIQA (Technical Quality) - Transformers pipeline
+ try:
+     # torch.device("cuda").index is None when no index is given, so pass an explicit 0
+     maniqa_pipe = transformers_pipeline("image-classification", model="honklers/maniqa-nr", device=0 if DEVICE == "cuda" else -1)
+ except Exception as e:
+     print(f"Error loading honklers/maniqa-nr: {e}")
+     maniqa_pipe = None
+
+ # 4. CLIP Score (laion/CLIP-ViT-L-14-laion2B-s32B-b82K) - open_clip
+ try:
+     clip_model_name = 'ViT-L-14'
+     clip_pretrained = 'laion2b_s32b_b82k'  # laion2B-s32B-b82K
+     # create_model_from_pretrained returns a (model, preprocess) pair, not a 3-tuple
+     clip_model_instance, clip_preprocess = create_model_from_pretrained(clip_model_name, pretrained=clip_pretrained, device=DEVICE)
+     clip_tokenizer = get_tokenizer(clip_model_name)
+     clip_model_instance.eval()
+ except Exception as e:
+     print(f"Error loading CLIP model {clip_model_name} ({clip_pretrained}): {e}")
+     clip_model_instance, clip_preprocess, clip_tokenizer = None, None, None
+
+ # 5. AI Detectors
+ # Organika/sdxl-detector - Transformers pipeline
+ try:
+     sdxl_detector_pipe = transformers_pipeline("image-classification", model="Organika/sdxl-detector", device=0 if DEVICE == "cuda" else -1)
+ except Exception as e:
+     print(f"Error loading Organika/sdxl-detector: {e}")
+     sdxl_detector_pipe = None
+
+ # deepghs/anime_ai_check - ONNX
+ # Model: deepghs/anime_ai_check, subfolder: caformer_s36_plus_sce
+ ANIME_AI_CHECK_REPO = "deepghs/anime_ai_check"
+ ANIME_AI_CHECK_SUBFOLDER = "caformer_s36_plus_sce"
+ ANIME_AI_CHECK_IMG_SIZE = (384, 384)  # assumption, unless stated otherwise
+
+ # --- Metadata extraction functions (unchanged) ---
+ def extract_sd_parameters(image_pil):
+     if image_pil is None:
+         return "", "N/A", "N/A", "N/A", {}
+
+     parameters_str = image_pil.info.get("parameters", "")
+     if not parameters_str:
+         return "", "N/A", "N/A", "N/A", {}
+
+     prompt = ""
+     negative_prompt = ""
+     model_name = "N/A"
+     model_hash = "N/A"
+     other_params_dict = {}
+
+     neg_prompt_index = parameters_str.find("Negative prompt:")
+     steps_meta_index = parameters_str.find("Steps:")  # look for the start of the parameter block
+
+     if neg_prompt_index != -1:
+         prompt = parameters_str[:neg_prompt_index].strip()
+         # If "Steps:" appears after "Negative prompt:", the negative prompt sits between them
+         if steps_meta_index != -1 and steps_meta_index > neg_prompt_index:
+             negative_prompt = parameters_str[neg_prompt_index + len("Negative prompt:"):steps_meta_index].strip()
+             params_part = parameters_str[steps_meta_index:]
+         else:
+             # "Steps:" is missing or precedes "Negative prompt:"; search the remainder for it
+             search_params_in_rest = parameters_str[neg_prompt_index + len("Negative prompt:"):]
+             actual_steps_index_in_rest = search_params_in_rest.find("Steps:")
+             if actual_steps_index_in_rest != -1:
+                 negative_prompt = search_params_in_rest[:actual_steps_index_in_rest].strip()
+                 params_part = search_params_in_rest[actual_steps_index_in_rest:]
+             else:  # no "Steps:" after "Negative prompt:"
+                 negative_prompt = search_params_in_rest.strip()  # treat the rest as the negative prompt
+                 params_part = ""  # no parameter block
+
+     else:  # "Negative prompt:" not found
+         # If "Steps:" is present, the prompt is everything before it
+         if steps_meta_index != -1:
+             prompt = parameters_str[:steps_meta_index].strip()
+             params_part = parameters_str[steps_meta_index:]
+         else:  # neither "Negative prompt:" nor "Steps:"; the whole text is the prompt
+             prompt = parameters_str.strip()
+             params_part = ""
+
+     if not prompt and not negative_prompt and not params_part:  # everything empty: the text may be parameters only
+         params_part = parameters_str
+
+     if params_part:
+         params_list = [p.strip() for p in params_part.split(",")]
+         temp_other_params = {}
+         for param_val_str in params_list:
+             parts = param_val_str.split(':', 1)
+             if len(parts) == 2:
+                 key, value = parts[0].strip(), parts[1].strip()
+                 temp_other_params[key] = value
+                 if key == "Model": model_name = value
+                 elif key == "Model hash": model_hash = value
+
+         # Copy into other_params_dict everything except "Model" and "Model hash"
+         for k, v in temp_other_params.items():
+             if k not in ["Model", "Model hash"]:
+                 other_params_dict[k] = v
+
+     if model_name == "N/A" and model_hash != "N/A": model_name = f"hash_{model_hash}"
+     if model_name == "N/A" and "Checkpoint" in other_params_dict: model_name = other_params_dict["Checkpoint"]
+
+     return prompt, negative_prompt, model_name, model_hash, other_params_dict
+
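+ # Illustrative A1111-style metadata (hypothetical values) and the parse result:
+ #   "1girl, masterpiece\nNegative prompt: lowres\nSteps: 20, Sampler: Euler a, Model hash: abc123, Model: myModel"
+ #   -> prompt="1girl, masterpiece", negative_prompt="lowres", model_name="myModel",
+ #      model_hash="abc123", other_params={"Steps": "20", "Sampler": "Euler a"}
+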
+ # --- Scoring functions (updated for deepghs) ---
+
+ @torch.no_grad()
+ def get_image_reward(image_pil):
+     if not reward_model or not reward_processor: return "N/A"
+     try:
+         inputs = reward_processor(images=image_pil, return_tensors="pt").to(DEVICE)
+         outputs = reward_model(**inputs)
+         return round(outputs.logits.item(), 4)
+     except Exception as e:
+         print(f"Error in ImageReward: {e}")
+         return "Error"
+
+ def get_anime_aesthetic_score_deepghs(image_pil):
+     session, labels, meta = get_onnx_session_and_meta(ANIME_AESTHETIC_REPO, ANIME_AESTHETIC_SUBFOLDER)
+     if not session or not labels: return "N/A"
+     try:
+         input_data = _img_preprocess_for_onnx(image_pil, size=ANIME_AESTHETIC_IMG_SIZE)
+         input_name = session.get_inputs()[0].name
+         output_name = session.get_outputs()[0].name
+
+         onnx_output, = session.run([output_name], {input_name: input_data})
+
+         scores = onnx_output[0]  # expected: an array of probabilities/logits
+         # Apply softmax in case these are logits (ONNX classifiers usually return logits);
+         # subtracting the max keeps the softmax numerically stable.
+         exp_scores = np.exp(scores - np.max(scores))
+         probabilities = exp_scores / np.sum(exp_scores)
+
+         weighted_score = 0.0
+         for i, label in enumerate(labels):
+             if label in ANIME_AESTHETIC_LABEL_WEIGHTS:
+                 weighted_score += probabilities[i] * ANIME_AESTHETIC_LABEL_WEIGHTS[label]
+         return round(weighted_score, 4)
+     except Exception as e:
+         print(f"Error in Anime Aesthetic (ONNX): {e}")
+         return "Error"
+
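+ # Worked example: with probabilities [0.1, 0.2, 0.3, 0.3, 0.1] over
+ # ["normal", "slight", "moderate", "strong", "extreme"] and weights 0..4,
+ # the score is 0*0.1 + 1*0.2 + 2*0.3 + 3*0.3 + 4*0.1 = 2.1 on the 0-4 scale.
+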
+ @torch.no_grad()
+ def get_maniqa_score(image_pil):
+     if not maniqa_pipe: return "N/A"
+     try:
+         result = maniqa_pipe(image_pil.copy())
+         score = 0.0
+         # Look for the label that corresponds to high quality.
+         # honklers/maniqa-nr may use 'LABEL_0'/'LABEL_1' or 'Good Quality'/'Bad Quality';
+         # check the model card. Here we assume output like
+         # [{'label': 'Bad Quality', 'score': 0.9}, {'label': 'Good Quality', 'score': 0.1}]
+         # and look for 'Good Quality'.
+         for item in result:
+             if item['label'].lower() == 'good quality':  # or whichever label is the positive one
+                 score = item['score']
+                 break
+         if score == 0.0 and result:
+             # 'Good Quality' was not found. Taking the max score over unknown labels
+             # would be risky, so keep 0.0 unless a positive label from the model card is known.
+             pass
+
+         return round(score, 4)
+     except Exception as e:
+         print(f"Error in MANIQA: {e}")
+         return "Error"
+
+ @torch.no_grad()
+ def calculate_clip_score_value(image_pil, prompt_text):  # renamed to avoid clashing with torchmetrics.clip_score
+     if not clip_model_instance or not clip_preprocess or not clip_tokenizer or not prompt_text or prompt_text == "N/A":
+         return "N/A"
+     try:
+         image_input = clip_preprocess(image_pil).unsqueeze(0).to(DEVICE)
+         text_input = clip_tokenizer([str(prompt_text)]).to(DEVICE)
+
+         image_features = clip_model_instance.encode_image(image_input)
+         text_features = clip_model_instance.encode_text(text_input)
+
+         image_features_norm = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
+         text_features_norm = text_features / text_features.norm(p=2, dim=-1, keepdim=True)
+         # Cosine similarity of the L2-normalized embeddings, scaled to 0-100
+         score = (text_features_norm @ image_features_norm.T).squeeze().item() * 100.0
+         return round(score, 2)
+     except Exception as e:
+         print(f"Error in CLIP Score: {e}")
+         return "Error"
+
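+ # Example (illustrative): if the normalized text and image embeddings have cosine
+ # similarity 0.31, the reported CLIPScore is 31.0.
+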
+ @torch.no_grad()
+ def get_sdxl_detection_score(image_pil):
+     if not sdxl_detector_pipe: return "N/A"
+     try:
+         result = sdxl_detector_pipe(image_pil.copy())
+         ai_score = 0.0
+         # Organika/sdxl-detector labels: 'artificial', 'real'
+         for item in result:
+             if item['label'].lower() == 'artificial':
+                 ai_score = item['score']
+                 break
+         return round(ai_score, 4)
+     except Exception as e:
+         print(f"Error in SDXL Detector: {e}")
+         return "Error"
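+
+ # Illustrative output handling: for result = [{'label': 'artificial', 'score': 0.98},
+ # {'label': 'real', 'score': 0.02}], the function returns 0.98.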
+
+ def get_anime_ai_check_score_deepghs(image_pil):
+     session, labels, meta = get_onnx_session_and_meta(ANIME_AI_CHECK_REPO, ANIME_AI_CHECK_SUBFOLDER)
+     if not session or not labels: return "N/A"
+     try:
+         input_data = _img_preprocess_for_onnx(image_pil, size=ANIME_AI_CHECK_IMG_SIZE)
+         input_name = session.get_inputs()[0].name
+         output_name = session.get_outputs()[0].name
+
+         onnx_output, = session.run([output_name], {input_name: input_data})
+
+         scores = onnx_output[0]
+         exp_scores = np.exp(scores - np.max(scores))
+         probabilities = exp_scores / np.sum(exp_scores)
+
+         ai_prob = 0.0
+         for i, label in enumerate(labels):
+             if label.lower() == 'ai':  # look for the 'ai' label
+                 ai_prob = probabilities[i]
+                 break
+         return round(ai_prob, 4)
+     except Exception as e:
+         print(f"Error in Anime AI Check (ONNX): {e}")
+         return "Error"
+
+ # --- Main processing function ---
+ def process_images(files, progress=gr.Progress(track_tqdm=True)):
+     if not files:
+         return pd.DataFrame(), None, None, None, None, "Please upload some images."
+
+     all_results = []
+
+     # progress(0, desc="Starting processing...")  # track_tqdm handles this
+
+     for i, file_obj in enumerate(files):
+         try:
+             # On HF Spaces, file_obj may be a temp-file path or an object with a .name attribute
+             filename = os.path.basename(getattr(file_obj, 'name', str(file_obj)))  # getattr for compatibility
+             # progress((i+1)/len(files), desc=f"Processing {filename}")  # track_tqdm
+
+             img = Image.open(getattr(file_obj, 'name', str(file_obj)))
+             if img.mode != "RGB":
+                 img = img.convert("RGB")
+
+             prompt, neg_prompt, model_n, model_h, other_p = extract_sd_parameters(img)
+
+             # Scores
+             reward = get_image_reward(img.copy())
+             anime_aes_deepghs = get_anime_aesthetic_score_deepghs(img.copy())
+             maniqa = get_maniqa_score(img.copy())
+             clip_val = calculate_clip_score_value(img.copy(), prompt)  # renamed function
+             sdxl_detect = get_sdxl_detection_score(img.copy())
+             anime_ai_chk_deepghs = get_anime_ai_check_score_deepghs(img.copy())
+
+             result_entry = {
+                 "Filename": filename,
+                 "Prompt": prompt if prompt else "N/A",
+                 "Model Name": model_n,
+                 "Model Hash": model_h,
+                 "ImageReward": reward,
+                 "AnimeAesthetic_dg": anime_aes_deepghs,  # dg = deepghs
+                 "MANIQA_TQ": maniqa,
+                 "CLIPScore": clip_val,
+                 "SDXL_Detector_AI_Prob": sdxl_detect,
+                 "AnimeAI_Check_dg_Prob": anime_ai_chk_deepghs,  # dg = deepghs
+             }
+             all_results.append(result_entry)
+
+         except Exception as e:
+             print(f"Failed to process {getattr(file_obj, 'name', str(file_obj))}: {e}")
+             all_results.append({
+                 "Filename": os.path.basename(getattr(file_obj, 'name', str(file_obj))) if file_obj else "Unknown File",
+                 "Prompt": "Error", "Model Name": "Error", "Model Hash": "Error",
+                 "ImageReward": "Error", "AnimeAesthetic_dg": "Error", "MANIQA_TQ": "Error",
+                 "CLIPScore": "Error", "SDXL_Detector_AI_Prob": "Error", "AnimeAI_Check_dg_Prob": "Error"
+             })
+
+     df = pd.DataFrame(all_results)
+
+     plot_model_avg_scores_buffer = None
+     if "Model Name" in df.columns and df["Model Name"].nunique() > 0 and df["Model Name"].count() > 0:
+         numeric_cols = ["ImageReward", "AnimeAesthetic_dg", "MANIQA_TQ", "CLIPScore"]
+         for col in numeric_cols: df[col] = pd.to_numeric(df[col], errors='coerce')
+         try:
+             # Filter out "N/A" models before grouping for the plot
+             df_for_plot = df[df["Model Name"] != "N/A"]
+             if not df_for_plot.empty and df_for_plot["Model Name"].nunique() > 0:
+                 model_avg_scores = df_for_plot.groupby("Model Name")[numeric_cols].mean().dropna(how='all')
+                 if not model_avg_scores.empty:
+                     fig1, ax1 = plt.subplots(figsize=(12, 7))
+                     model_avg_scores.plot(kind="bar", ax=ax1)
+                     ax1.set_title("Average Scores per Model")
+                     ax1.set_ylabel("Average Score")
+                     ax1.tick_params(axis='x', rotation=45, labelsize=8)
+                     plt.tight_layout()
+                     plot_model_avg_scores_buffer = io.BytesIO()
+                     fig1.savefig(plot_model_avg_scores_buffer, format="png")
+                     plot_model_avg_scores_buffer.seek(0)
+                     plt.close(fig1)
+         except Exception as e: print(f"Error generating model average scores plot: {e}")
+
+     plot_prompt_clip_scores_buffer = None
+     if "Prompt" in df.columns and "CLIPScore" in df.columns and df["Prompt"].nunique() > 0:
+         df["CLIPScore"] = pd.to_numeric(df["CLIPScore"], errors='coerce')
+         df_prompt_plot = df[df["Prompt"] != "N/A"].dropna(subset=["CLIPScore"]).copy()  # .copy() avoids SettingWithCopyWarning below
+         if not df_prompt_plot.empty and df_prompt_plot["Prompt"].nunique() > 0:
+             try:
+                 # Truncate long prompts for the chart
+                 df_prompt_plot["Short Prompt"] = df_prompt_plot["Prompt"].apply(lambda x: (x[:30] + '...') if len(x) > 33 else x)
+                 prompt_clip_scores = df_prompt_plot.groupby("Short Prompt")["CLIPScore"].mean().sort_values(ascending=False)
+                 if not prompt_clip_scores.empty and len(prompt_clip_scores) > 1:
+                     fig2, ax2 = plt.subplots(figsize=(12, max(7, min(len(prompt_clip_scores) * 0.5, 15))))  # cap the figure height
+                     prompt_clip_scores.head(20).plot(kind="barh", ax=ax2)
+                     ax2.set_title("Average CLIPScore per Prompt (Top 20 unique prompts)")
+                     ax2.set_xlabel("Average CLIPScore")
+                     plt.tight_layout()
+                     plot_prompt_clip_scores_buffer = io.BytesIO()
+                     fig2.savefig(plot_prompt_clip_scores_buffer, format="png")
+                     plot_prompt_clip_scores_buffer.seek(0)
+                     plt.close(fig2)
+             except Exception as e: print(f"Error generating prompt CLIP scores plot: {e}")
+
+     # gr.File expects a file path rather than raw text, so write the exports to temp files
+     csv_path = None
+     if not df.empty:
+         csv_path = os.path.join(tempfile.gettempdir(), "evaluation_results.csv")
+         df.to_csv(csv_path, index=False)
+
+     json_path = None
+     if not df.empty:
+         json_path = os.path.join(tempfile.gettempdir(), "evaluation_results.json")
+         df.to_json(json_path, orient='records', indent=4)
+
+     return (
+         df,
+         # The buffers hold PNG bytes; decode them to PIL images since the outputs use type="pil"
+         gr.Image(value=Image.open(plot_model_avg_scores_buffer) if plot_model_avg_scores_buffer else None, type="pil", visible=plot_model_avg_scores_buffer is not None),
+         gr.Image(value=Image.open(plot_prompt_clip_scores_buffer) if plot_prompt_clip_scores_buffer else None, type="pil", visible=plot_prompt_clip_scores_buffer is not None),
+         gr.File(value=csv_path, label="Download CSV Results", visible=csv_path is not None),
+         gr.File(value=json_path, label="Download JSON Results", visible=json_path is not None),
+         f"Processed {len(all_results)} images.",
+     )
+
+ # --- Gradio interface ---
+ with gr.Blocks(css="footer {display: none !important}") as demo:
+     gr.Markdown("# AI Image Model Evaluation Tool")
+     gr.Markdown(
+         "Upload PNG images (ideally with Stable Diffusion metadata) to evaluate them using various metrics. "
+         "Results will be displayed in a table and visualized in charts."
+     )
+
+     with gr.Row():
+         image_uploader = gr.Files(
+             label="Upload Images (PNG)",
+             file_count="multiple",
+             file_types=["image"],
+         )
+
+     process_button = gr.Button("Evaluate Images", variant="primary")
+     status_textbox = gr.Textbox(label="Status", interactive=False)
+
+     gr.Markdown("## Evaluation Results Table")
+     results_table = gr.DataFrame(headers=[
+         "Filename", "Prompt", "Model Name", "Model Hash",
+         "ImageReward", "AnimeAesthetic_dg", "MANIQA_TQ", "CLIPScore",
+         "SDXL_Detector_AI_Prob", "AnimeAI_Check_dg_Prob"
+     ], wrap=True, max_rows=10)  # limit how many rows are shown initially
+
+     with gr.Row():
+         download_csv_button = gr.File(label="Download CSV Results", interactive=False)  # visibility is driven by the outputs
+         download_json_button = gr.File(label="Download JSON Results", interactive=False)
+
+     gr.Markdown("## Visualizations")
+     with gr.Row():
+         plot_output_model_avg = gr.Image(label="Average Scores per Model", type="pil", interactive=False)
+         plot_output_prompt_clip = gr.Image(label="Average CLIPScore per Prompt", type="pil", interactive=False)
+
+     process_button.click(
+         fn=process_images,
+         inputs=[image_uploader],
+         outputs=[
+             results_table,
+             plot_output_model_avg,
+             plot_output_prompt_clip,
+             download_csv_button,
+             download_json_button,
+             status_textbox
+         ]
+     )
+
+     gr.Markdown(
+         """
+         **Metric Explanations:**
+         - **ImageReward:** General aesthetic and prompt-alignment score (higher is better). From THUDM.
+         - **AnimeAesthetic_dg:** Aesthetic level for anime style (0-4; higher means a better quality level: normal, slight, moderate, strong, extreme). From deepghs (ONNX).
+         - **MANIQA_TQ:** Technical Quality score (no-reference); higher indicates better quality (less noise/artifacts). Based on MANIQA.
+         - **CLIPScore:** Semantic similarity between the image and its prompt (0-100, higher is better). Uses LAION's CLIP.
+         - **SDXL_Detector_AI_Prob:** Estimated probability that the image is AI-generated (higher means more likely AI). From Organika.
+         - **AnimeAI_Check_dg_Prob:** Estimated probability that an anime-style image is AI-generated (higher means more likely AI). From deepghs (ONNX).
+
+         *Processing can take time, especially for many images or on CPU.*
+         """
+     )
+
+ if __name__ == "__main__":
+     demo.launch(debug=True)