VOIDER committed
Commit ffdea99 · verified · 1 Parent(s): bad19ae

Update app.py

Files changed (1):
  1. app.py +177 -285
app.py CHANGED
@@ -4,16 +4,16 @@ import io
 import os
 import pandas as pd
 import torch
-from transformers import pipeline as transformers_pipeline, AutoImageProcessor, AutoModelForImageClassification
-# from torchvision import transforms  # Less relevant for the ONNX pipeline
+from transformers import pipeline as transformers_pipeline, AutoModelForImageClassification, CLIPImageProcessor  # Changed for ImageReward
+# from torchvision import transforms
 from torchmetrics.functional.multimodal import clip_score
-from open_clip import create_model_from_pretrained, get_tokenizer
+import open_clip  # Changed for open_clip
 import re
 import matplotlib.pyplot as plt
 import json
 from collections import defaultdict
 import numpy as np
-import logging  # For ONNX logging
+import logging
 
 # --- ONNX Related Imports and Setup ---
 try:
@@ -24,16 +24,20 @@ except ImportError:
 
 from huggingface_hub import hf_hub_download
 
-# imgutils for rgb_encode (if it is installed)
 try:
-    from imgutils.data import rgb_encode  # Assuming this is the correct import
+    from imgutils.data import rgb_encode
+    IMGUTILS_AVAILABLE = True
+    print("imgutils.data.rgb_encode found and will be used.")
 except ImportError:
-    print("imgutils.data.rgb_encode not found. Preprocessing for deepghs might be limited.")
-    def rgb_encode(image, order_='CHW'):  # Simple stub if imgutils is missing
-        img_arr = np.array(image)
+    print("imgutils.data.rgb_encode not found. Using a basic fallback for preprocessing deepghs models.")
+    IMGUTILS_AVAILABLE = False
+    def rgb_encode(image: Image.Image, order_='CHW'):  # Simple stub
+        img_arr = np.array(image.convert("RGB"))  # Make sure it is RGB
         if order_ == 'CHW':
             img_arr = np.transpose(img_arr, (2, 0, 1))
-        return img_arr.astype(np.float32) / 255.0  # Basic normalization unless another is specified
+        # This stub returns uint8 0-255, as expected downstream
+        return img_arr.astype(np.uint8)
+
 
 # --- Model Configuration and Loading ---
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
@@ -41,23 +45,20 @@ print(f"Using device: {DEVICE}")
 ONNX_DEVICE = "CUDAExecutionProvider" if DEVICE == "cuda" and onnxruntime and "CUDAExecutionProvider" in onnxruntime.get_available_providers() else "CPUExecutionProvider"
 print(f"Using ONNX device: {ONNX_DEVICE}")
 
-
 # --- Helper for ONNX models (deepghs) ---
 @torch.no_grad()
 def _img_preprocess_for_onnx(image: Image.Image, size: tuple = (384, 384), normalize_mean=0.5, normalize_std=0.5):
-    image = image.resize(size, Image.Resampling.BILINEAR)  # Updated to Resampling
-    data = rgb_encode(image, order_='CHW')  # (C, H, W), float32, 0-1 range from common imgutils
+    image = image.resize(size, Image.Resampling.BILINEAR)
+    data_uint8 = rgb_encode(image, order_='CHW')  # (C, H, W), uint8, 0-255
+    data_float01 = data_uint8.astype(np.float32) / 255.0
 
-    # Normalization: ((data / 255.0) - mean) / std if data is in 0-255
-    # If rgb_encode already returns 0-1, then (data - mean) / std
-    # Assume rgb_encode returns a float32 [0, 1] range
     mean = np.array([normalize_mean] * 3, dtype=np.float32).reshape((3, 1, 1))
     std = np.array([normalize_std] * 3, dtype=np.float32).reshape((3, 1, 1))
 
-    normalized_data = (data - mean) / std
-    return normalized_data[None, ...].astype(np.float32)  # Add batch dimension
+    normalized_data = (data_float01 - mean) / std
+    return normalized_data[None, ...].astype(np.float32)
 
-onnx_sessions_cache = {}  # Cache for ONNX sessions and metadata
+onnx_sessions_cache = {}
 
 def get_onnx_session_and_meta(repo_id, model_subfolder):
     cache_key = f"{repo_id}/{model_subfolder}"
@@ -65,7 +66,11 @@ def get_onnx_session_and_meta(repo_id, model_subfolder):
         return onnx_sessions_cache[cache_key]
 
     if not onnxruntime:
-        raise ImportError("ONNX Runtime is not available.")
+        # raise ImportError("ONNX Runtime is not available.")  # Don't crash, just return None
+        print("ONNX Runtime is not available for get_onnx_session_and_meta")
+        onnx_sessions_cache[cache_key] = (None, [], None)
+        return None, [], None
+
 
     try:
         model_path = hf_hub_download(repo_id, filename=f"{model_subfolder}/model.onnx")
@@ -73,7 +78,7 @@ def get_onnx_session_and_meta(repo_id, model_subfolder):
 
         options = onnxruntime.SessionOptions()
         options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
-        if ONNX_DEVICE == "CPUExecutionProvider":
+        if ONNX_DEVICE == "CPUExecutionProvider" and hasattr(os, 'cpu_count'):  # hasattr for safety
            options.intra_op_num_threads = os.cpu_count()
 
         session = onnxruntime.InferenceSession(model_path, options, providers=[ONNX_DEVICE])
@@ -86,138 +91,124 @@ def get_onnx_session_and_meta(repo_id, model_subfolder):
         return session, labels, meta
     except Exception as e:
         print(f"Error loading ONNX model {repo_id}/{model_subfolder}: {e}")
-        onnx_sessions_cache[cache_key] = (None, [], None)  # Cache the error
+        onnx_sessions_cache[cache_key] = (None, [], None)
         return None, [], None
 
-
 # 1. ImageReward
 try:
-    reward_processor = AutoImageProcessor.from_pretrained("THUDM/ImageReward")
+    # THUDM/ImageReward uses a CLIPImageProcessor
+    reward_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")  # Typical processor for such models
     reward_model = AutoModelForImageClassification.from_pretrained("THUDM/ImageReward").to(DEVICE)
     reward_model.eval()
+    print("THUDM/ImageReward loaded successfully.")
 except Exception as e:
     print(f"Error loading THUDM/ImageReward: {e}")
     reward_processor, reward_model = None, None
 
 # 2. Anime Aesthetic (deepghs ONNX)
-# Model: deepghs/anime_aesthetic, subfolder: swinv2pv3_v0_448_ls0.2_x
 ANIME_AESTHETIC_REPO = "deepghs/anime_aesthetic"
 ANIME_AESTHETIC_SUBFOLDER = "swinv2pv3_v0_448_ls0.2_x"
 ANIME_AESTHETIC_IMG_SIZE = (448, 448)
-# Labels from meta.json: ["normal", "slight", "moderate", "strong", "extreme"]
-# Weights for the weighted sum:
 ANIME_AESTHETIC_LABEL_WEIGHTS = {"normal": 0.0, "slight": 1.0, "moderate": 2.0, "strong": 3.0, "extreme": 4.0}
 
-# 3. MANIQA (Technical Quality) - Transformers pipeline
-try:
-    maniqa_pipe = transformers_pipeline("image-classification", model="honklers/maniqa-nr", device=torch.device(DEVICE).index if DEVICE=="cuda" else -1)
-except Exception as e:
-    print(f"Error loading honklers/maniqa-nr: {e}")
-    maniqa_pipe = None
+# 3. MANIQA (Technical Quality) - TEMPORARILY DISABLED
+maniqa_pipe = None
+print("MANIQA (honklers/maniqa-nr) is temporarily disabled due to loading issues. Will look for alternatives.")
+# try:
+#     maniqa_pipe = transformers_pipeline("image-classification", model="honklers/maniqa-nr", device=torch.device(DEVICE).index if DEVICE=="cuda" else -1)
+# except Exception as e:
+#     print(f"Error loading honklers/maniqa-nr: {e}")
+#     maniqa_pipe = None
 
 # 4. CLIP Score (laion/CLIP-ViT-L-14-laion2B-s32B-b82K) - open_clip
 try:
     clip_model_name = 'ViT-L-14'
-    clip_pretrained = 'laion2b_s32b_b82k'  # laion2B-s32B-b82K
-    clip_model_instance, _, clip_preprocess = create_model_from_pretrained(clip_model_name, pretrained=clip_pretrained, device=DEVICE)
-    clip_tokenizer = get_tokenizer(clip_model_name)
+    # For open_clip, `pretrained` is usually a dataset name or combination
+    # `laion2b_s32b_b82k` is one of the weight sets for ViT-L-14
+    clip_model_instance, clip_preprocess_train, clip_preprocess_val = open_clip.create_model_and_transforms(
+        clip_model_name,
+        pretrained='laion2b_s32b_b82k',  # This is the correct pretrained tag for open_clip
+        device=DEVICE
+    )
+    clip_preprocess = clip_preprocess_val  # Use the inference preprocess
+    clip_tokenizer = open_clip.get_tokenizer(clip_model_name)
     clip_model_instance.eval()
+    print(f"CLIP model {clip_model_name} (laion2b_s32b_b82k) loaded successfully.")
 except Exception as e:
-    print(f"Error loading CLIP model {clip_model_name} ({clip_pretrained}): {e}")
+    print(f"Error loading CLIP model {clip_model_name} (laion2b_s32b_b82k): {e}")
     clip_model_instance, clip_preprocess, clip_tokenizer = None, None, None
 
 # 5. AI Detectors
 # Organika/sdxl-detector - Transformers pipeline
 try:
     sdxl_detector_pipe = transformers_pipeline("image-classification", model="Organika/sdxl-detector", device=torch.device(DEVICE).index if DEVICE=="cuda" else -1)
+    print("Organika/sdxl-detector loaded successfully.")
 except Exception as e:
     print(f"Error loading Organika/sdxl-detector: {e}")
     sdxl_detector_pipe = None
 
 # deepghs/anime_ai_check - ONNX
-# Model: deepghs/anime_ai_check, subfolder: caformer_s36_plus_sce
 ANIME_AI_CHECK_REPO = "deepghs/anime_ai_check"
 ANIME_AI_CHECK_SUBFOLDER = "caformer_s36_plus_sce"
-ANIME_AI_CHECK_IMG_SIZE = (384, 384)  # Assumption, unless stated otherwise
+ANIME_AI_CHECK_IMG_SIZE = (384, 384)
 
 # --- Metadata extraction functions (unchanged) ---
 def extract_sd_parameters(image_pil):
-    if image_pil is None:
-        return "", "N/A", "N/A", "N/A", {}
-
+    if image_pil is None: return "", "N/A", "N/A", "N/A", {}
     parameters_str = image_pil.info.get("parameters", "")
-    if not parameters_str:
-        return "", "N/A", "N/A", "N/A", {}
-
-    prompt = ""
-    negative_prompt = ""
-    model_name = "N/A"
-    model_hash = "N/A"
-    other_params_dict = {}
-
-    neg_prompt_index = parameters_str.find("Negative prompt:")
-    steps_meta_index = parameters_str.find("Steps:")  # Look for the start of the parameter block
-
-    if neg_prompt_index != -1:
-        prompt = parameters_str[:neg_prompt_index].strip()
-        # If "Steps:" is found after "Negative prompt:", the negative prompt sits between them
-        if steps_meta_index != -1 and steps_meta_index > neg_prompt_index:
-            negative_prompt = parameters_str[neg_prompt_index + len("Negative prompt:"):steps_meta_index].strip()
-            params_part = parameters_str[steps_meta_index:]
-        else:  # "Steps:" missing or before "Negative prompt:", so the negative prompt runs to the end of the line or to params_part
-            # If there is no params_part at all, the negative prompt runs to the end of the string
-            end_of_neg_prompt = parameters_str.find("\n", neg_prompt_index)  # Look for the end of the negative-prompt line
-            if end_of_neg_prompt == -1: end_of_neg_prompt = len(parameters_str)
-
-            search_params_in_rest = parameters_str[neg_prompt_index + len("Negative prompt:"):]
-            actual_steps_index_in_rest = search_params_in_rest.find("Steps:")
-            if actual_steps_index_in_rest != -1:
-                negative_prompt = search_params_in_rest[:actual_steps_index_in_rest].strip()
-                params_part = search_params_in_rest[actual_steps_index_in_rest:]
-            else:  # No "Steps:" after "Negative prompt:"
-                negative_prompt = search_params_in_rest.strip()  # Take everything as the negative prompt
-                params_part = ""  # No parameter block
-
-    else:  # "Negative prompt:" not found
-        # If "Steps:" is found, the prompt runs up to it
-        if steps_meta_index != -1:
+    if not parameters_str: return "", "N/A", "N/A", "N/A", {}
+    prompt, negative_prompt, model_name, model_hash, other_params_dict = "", "N/A", "N/A", "N/A", {}
+    try:
+        neg_prompt_index = parameters_str.find("Negative prompt:")
+        steps_meta_index = parameters_str.find("Steps:")
+        if neg_prompt_index != -1:
+            prompt = parameters_str[:neg_prompt_index].strip()
+            params_part_start_index = steps_meta_index if steps_meta_index > neg_prompt_index else -1
+            if params_part_start_index != -1:
+                negative_prompt = parameters_str[neg_prompt_index + len("Negative prompt:"):params_part_start_index].strip()
+                params_part = parameters_str[params_part_start_index:]
+            else:
+                end_of_neg = parameters_str.find("\n", neg_prompt_index + len("Negative prompt:"))
+                if end_of_neg == -1: end_of_neg = len(parameters_str)
+                negative_prompt = parameters_str[neg_prompt_index + len("Negative prompt:"):end_of_neg].strip()
+                params_part = parameters_str[end_of_neg:].strip() if end_of_neg < len(parameters_str) else ""
+        elif steps_meta_index != -1:
             prompt = parameters_str[:steps_meta_index].strip()
             params_part = parameters_str[steps_meta_index:]
-        else:  # Neither "Negative prompt:" nor "Steps:"; the whole text is the prompt
+        else:
             prompt = parameters_str.strip()
             params_part = ""
-
-    if not prompt and not negative_prompt and not params_part:  # If everything is empty, this may be just parameters
-        params_part = parameters_str
-
-    if params_part:
-        params_list = [p.strip() for p in params_part.split(",")]
-        temp_other_params = {}
-        for param_val_str in params_list:
-            parts = param_val_str.split(':', 1)
-            if len(parts) == 2:
-                key, value = parts[0].strip(), parts[1].strip()
-                temp_other_params[key] = value
-                if key == "Model": model_name = value
-                elif key == "Model hash": model_hash = value
+
+        if params_part:
+            params_list = [p.strip() for p in params_part.split(",")]
+            temp_other_params = {}
+            for param_val_str in params_list:
+                parts = param_val_str.split(':', 1)
+                if len(parts) == 2:
+                    key, value = parts[0].strip(), parts[1].strip()
+                    temp_other_params[key] = value
+                    if key == "Model": model_name = value
+                    elif key == "Model hash": model_hash = value
+            for k,v in temp_other_params.items():
+                if k not in ["Model", "Model hash"]: other_params_dict[k] = v
 
-        # Add to other_params_dict only what is not "Model" and not "Model hash"
-        for k,v in temp_other_params.items():
-            if k not in ["Model", "Model hash"]:
-                other_params_dict[k] = v
-
-    if model_name == "N/A" and model_hash != "N/A": model_name = f"hash_{model_hash}"
-    if model_name == "N/A" and "Checkpoint" in other_params_dict: model_name = other_params_dict["Checkpoint"]
+        if model_name == "N/A" and model_hash != "N/A": model_name = f"hash_{model_hash}"
+        # Fallback for model name if only Checkpoint is present (e.g. from ComfyUI)
+        if model_name == "N/A" and "Checkpoint" in other_params_dict: model_name = other_params_dict["Checkpoint"]
+        if model_name == "N/A" and "model" in other_params_dict: model_name = other_params_dict["model"]  # Another common key
 
-    return prompt, negative_prompt, model_name, model_hash, other_params_dict
+    except Exception as e:
+        print(f"Error parsing metadata: {e}")
+    return prompt, negative_prompt, model_name, model_hash, other_params_dict
 
-# --- Scoring functions (updated for deepghs) ---
-
+# --- Scoring functions ---
 @torch.no_grad()
 def get_image_reward(image_pil):
     if not reward_model or not reward_processor: return "N/A"
     try:
-        inputs = reward_processor(images=image_pil, return_tensors="pt").to(DEVICE)
+        # ImageReward expects specific preprocessing, often CLIP-like
+        inputs = reward_processor(images=image_pil, return_tensors="pt", padding=True, truncation=True).to(DEVICE)
         outputs = reward_model(**inputs)
         return round(outputs.logits.item(), 4)
     except Exception as e:
@@ -228,66 +219,48 @@ def get_anime_aesthetic_score_deepghs(image_pil):
     session, labels, meta = get_onnx_session_and_meta(ANIME_AESTHETIC_REPO, ANIME_AESTHETIC_SUBFOLDER)
     if not session or not labels: return "N/A"
     try:
-        input_data = _img_preprocess_for_onnx(image_pil, size=ANIME_AESTHETIC_IMG_SIZE)
+        input_data = _img_preprocess_for_onnx(image_pil.copy(), size=ANIME_AESTHETIC_IMG_SIZE)
         input_name = session.get_inputs()[0].name
         output_name = session.get_outputs()[0].name
-
         onnx_output, = session.run([output_name], {input_name: input_data})
-
-        scores = onnx_output[0]  # Should be an array of probabilities/logits
-        # Apply softmax if these are logits (ONNX classification models usually return logits)
-        exp_scores = np.exp(scores - np.max(scores))  # Subtract the max for softmax stability
+        scores = onnx_output[0]
+        exp_scores = np.exp(scores - np.max(scores))
         probabilities = exp_scores / np.sum(exp_scores)
-
-        weighted_score = 0.0
-        for i, label in enumerate(labels):
-            if label in ANIME_AESTHETIC_LABEL_WEIGHTS:
-                weighted_score += probabilities[i] * ANIME_AESTHETIC_LABEL_WEIGHTS[label]
+        weighted_score = sum(probabilities[i] * ANIME_AESTHETIC_LABEL_WEIGHTS.get(label, 0.0) for i, label in enumerate(labels))
         return round(weighted_score, 4)
     except Exception as e:
         print(f"Error in Anime Aesthetic (ONNX): {e}")
         return "Error"
 
 @torch.no_grad()
-def get_maniqa_score(image_pil):
-    if not maniqa_pipe: return "N/A"
-    try:
-        result = maniqa_pipe(image_pil.copy())
-        score = 0.0
-        # Look for the label that corresponds to high quality
-        # honklers/maniqa-nr may use 'LABEL_0', 'LABEL_1' or 'Good Quality', 'Bad Quality'
-        # Check the model card. Assume a higher score on the first label is good.
-        # In this case, `honklers/maniqa-nr` outputs [{'label': 'Bad Quality', 'score': 0.9}, {'label': 'Good Quality', 'score': 0.1}]
-        # Look for 'Good Quality'
-        for item in result:
-            if item['label'].lower() == 'good quality':  # or another positive label
-                score = item['score']
-                break
-        # If there is no "Good Quality" but something like LABEL_1 (positive)
-        # elif item['label'] == 'LABEL_1':  # Example, if the labels look like that
-        #     score = item['score']
-        #     break
-        if score == 0.0 and result:  # If "Good Quality" was not found but there is a result
-            # We could take the max score when the labels are unclear, but that is risky
-            # Or look for specific labels from the model card
-            pass  # Keep 0.0 if no positive label was found
-
-        return round(score, 4)
-    except Exception as e:
-        print(f"Error in MANIQA: {e}")
-        return "Error"
+def get_maniqa_score(image_pil):  # Temporarily returns N/A
+    # if not maniqa_pipe: return "N/A"
+    # try:
+    #     result = maniqa_pipe(image_pil.copy())
+    #     score = 0.0
+    #     for item in result:
+    #         if item['label'].lower() == 'good quality': score = item['score']; break
+    #     return round(score, 4)
+    # except Exception as e:
+    #     print(f"Error in MANIQA: {e}")
+    #     return "Error"
+    return "N/A (Disabled)"
+
 
 @torch.no_grad()
-def calculate_clip_score_value(image_pil, prompt_text):  # Renamed to avoid clashing with torchmetrics.clip_score
+def calculate_clip_score_value(image_pil, prompt_text):
     if not clip_model_instance or not clip_preprocess or not clip_tokenizer or not prompt_text or prompt_text == "N/A":
         return "N/A"
     try:
         image_input = clip_preprocess(image_pil).unsqueeze(0).to(DEVICE)
-        text_input = clip_tokenizer([str(prompt_text)]).to(DEVICE)
+        # Make sure prompt_text is a string, not None or anything else
+        text_for_tokenizer = str(prompt_text) if prompt_text else ""
+        if not text_for_tokenizer: return "N/A (Empty Prompt)"
+
+        text_input = clip_tokenizer([text_for_tokenizer]).to(DEVICE)
 
         image_features = clip_model_instance.encode_image(image_input)
         text_features = clip_model_instance.encode_text(text_input)
-
         image_features_norm = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
         text_features_norm = text_features / text_features.norm(p=2, dim=-1, keepdim=True)
         score = (text_features_norm @ image_features_norm.T).squeeze().item() * 100.0
@@ -302,11 +275,8 @@ def get_sdxl_detection_score(image_pil):
     try:
         result = sdxl_detector_pipe(image_pil.copy())
         ai_score = 0.0
-        # Organika/sdxl-detector labels: 'artificial', 'real'
         for item in result:
-            if item['label'].lower() == 'artificial':
-                ai_score = item['score']
-                break
+            if item['label'].lower() == 'artificial': ai_score = item['score']; break
         return round(ai_score, 4)
     except Exception as e:
         print(f"Error in SDXL Detector: {e}")
@@ -316,21 +286,16 @@ def get_anime_ai_check_score_deepghs(image_pil):
     session, labels, meta = get_onnx_session_and_meta(ANIME_AI_CHECK_REPO, ANIME_AI_CHECK_SUBFOLDER)
     if not session or not labels: return "N/A"
     try:
-        input_data = _img_preprocess_for_onnx(image_pil, size=ANIME_AI_CHECK_IMG_SIZE)
+        input_data = _img_preprocess_for_onnx(image_pil.copy(), size=ANIME_AI_CHECK_IMG_SIZE)
         input_name = session.get_inputs()[0].name
         output_name = session.get_outputs()[0].name
-
         onnx_output, = session.run([output_name], {input_name: input_data})
-
         scores = onnx_output[0]
         exp_scores = np.exp(scores - np.max(scores))
         probabilities = exp_scores / np.sum(exp_scores)
-
         ai_prob = 0.0
         for i, label in enumerate(labels):
-            if label.lower() == 'ai':  # Look for the 'ai' label
-                ai_prob = probabilities[i]
-                break
+            if label.lower() == 'ai': ai_prob = probabilities[i]; break
         return round(ai_prob, 4)
     except Exception as e:
         print(f"Error in Anime AI Check (ONNX): {e}")
@@ -342,178 +307,105 @@ def process_images(files, progress=gr.Progress(track_tqdm=True)):
         return pd.DataFrame(), None, None, None, None, "Please upload some images."
 
     all_results = []
-
-    # progress(0, desc="Starting processing...")  # track_tqdm handles this
-
     for i, file_obj in enumerate(files):
+        filename = "Unknown File"
         try:
-            # In HF Spaces, file_obj can be a temp-file name or an object with a name attribute
-            filename = os.path.basename(getattr(file_obj, 'name', str(file_obj)))  # getattr for compatibility
-            # progress((i+1)/len(files), desc=f"Processing {filename}")  # track_tqdm
-
+            # file_obj.name can be an absolute path on the server
+            filename = os.path.basename(getattr(file_obj, 'name', f"file_{i}"))
             img = Image.open(getattr(file_obj, 'name', str(file_obj)))
-            if img.mode != "RGB":
-                img = img.convert("RGB")
+            if img.mode != "RGB": img = img.convert("RGB")
 
             prompt, neg_prompt, model_n, model_h, other_p = extract_sd_parameters(img)
 
-            # Scores
-            reward = get_image_reward(img.copy())
-            anime_aes_deepghs = get_anime_aesthetic_score_deepghs(img.copy())
-            maniqa = get_maniqa_score(img.copy())
-            clip_val = calculate_clip_score_value(img.copy(), prompt)  # Function renamed
-            sdxl_detect = get_sdxl_detection_score(img.copy())
-            anime_ai_chk_deepghs = get_anime_ai_check_score_deepghs(img.copy())
-
-            result_entry = {
-                "Filename": filename,
-                "Prompt": prompt if prompt else "N/A",
-                "Model Name": model_n,
-                "Model Hash": model_h,
-                "ImageReward": reward,
-                "AnimeAesthetic_dg": anime_aes_deepghs,  # dg = deepghs
-                "MANIQA_TQ": maniqa,
-                "CLIPScore": clip_val,
-                "SDXL_Detector_AI_Prob": sdxl_detect,
-                "AnimeAI_Check_dg_Prob": anime_ai_chk_deepghs,  # dg = deepghs
-            }
-            all_results.append(result_entry)
+            reward = get_image_reward(img)
+            anime_aes_deepghs = get_anime_aesthetic_score_deepghs(img)
+            maniqa = get_maniqa_score(img)  # Will be N/A (Disabled)
+            clip_val = calculate_clip_score_value(img, prompt)
+            sdxl_detect = get_sdxl_detection_score(img)
+            anime_ai_chk_deepghs = get_anime_ai_check_score_deepghs(img)
+
+            all_results.append({
+                "Filename": filename, "Prompt": prompt if prompt else "N/A", "Model Name": model_n, "Model Hash": model_h,
+                "ImageReward": reward, "AnimeAesthetic_dg": anime_aes_deepghs, "MANIQA_TQ": maniqa,
+                "CLIPScore": clip_val, "SDXL_Detector_AI_Prob": sdxl_detect, "AnimeAI_Check_dg_Prob": anime_ai_chk_deepghs,
+            })
         except Exception as e:
-            print(f"Failed to process {getattr(file_obj, 'name', str(file_obj))}: {e}")
+            print(f"CRITICAL: Failed to process {filename}: {e}")
             all_results.append({
-                "Filename": os.path.basename(getattr(file_obj, 'name', str(file_obj))) if file_obj else "Unknown File",
-                "Prompt": "Error", "Model Name": "Error", "Model Hash": "Error",
+                "Filename": filename, "Prompt": "Error", "Model Name": "Error", "Model Hash": "Error",
                 "ImageReward": "Error", "AnimeAesthetic_dg": "Error", "MANIQA_TQ": "Error",
                 "CLIPScore": "Error", "SDXL_Detector_AI_Prob": "Error", "AnimeAI_Check_dg_Prob": "Error"
             })
 
     df = pd.DataFrame(all_results)
-
-    plot_model_avg_scores_buffer = None
-    if "Model Name" in df.columns and df["Model Name"].nunique() > 0 and df["Model Name"].count() > 0:
+    plot_model_avg_scores_buffer, plot_prompt_clip_scores_buffer = None, None
+    csv_buffer_val, json_buffer_val = "", ""
+
+    if not df.empty:
         numeric_cols = ["ImageReward", "AnimeAesthetic_dg", "MANIQA_TQ", "CLIPScore"]
         for col in numeric_cols: df[col] = pd.to_numeric(df[col], errors='coerce')
-        try:
-            # Filter out "N/A" models before grouping for the chart
-            df_for_plot = df[df["Model Name"] != "N/A"]
-            if not df_for_plot.empty and df_for_plot["Model Name"].nunique() > 0:
-                model_avg_scores = df_for_plot.groupby("Model Name")[numeric_cols].mean().dropna(how='all')
+
+        # Chart 1
+        df_model_plot = df[(df["Model Name"] != "N/A") & (df["Model Name"].notna())]
+        if not df_model_plot.empty and df_model_plot["Model Name"].nunique() > 0:
+            try:
+                model_avg_scores = df_model_plot.groupby("Model Name")[numeric_cols].mean().dropna(how='all')
                 if not model_avg_scores.empty:
-                    fig1, ax1 = plt.subplots(figsize=(12, 7))
-                    model_avg_scores.plot(kind="bar", ax=ax1)
-                    ax1.set_title("Average Scores per Model")
-                    ax1.set_ylabel("Average Score")
-                    ax1.tick_params(axis='x', rotation=45, labelsize=8)
-                    plt.tight_layout()
-                    plot_model_avg_scores_buffer = io.BytesIO()
-                    fig1.savefig(plot_model_avg_scores_buffer, format="png")
-                    plot_model_avg_scores_buffer.seek(0)
-                    plt.close(fig1)
-        except Exception as e: print(f"Error generating model average scores plot: {e}")
-
-    plot_prompt_clip_scores_buffer = None
-    if "Prompt" in df.columns and "CLIPScore" in df.columns and df["Prompt"].nunique() > 0:
-        df["CLIPScore"] = pd.to_numeric(df["CLIPScore"], errors='coerce')
-        df_prompt_plot = df[df["Prompt"] != "N/A"].dropna(subset=["CLIPScore"])
-        if not df_prompt_plot.empty and df_prompt_plot["Prompt"].nunique() > 0:
+                    fig1, ax1 = plt.subplots(figsize=(12, 7)); model_avg_scores.plot(kind="bar", ax=ax1)
+                    ax1.set_title("Average Scores per Model"); ax1.set_ylabel("Average Score")
+                    ax1.tick_params(axis='x', rotation=45, labelsize=8); plt.tight_layout()
+                    plot_model_avg_scores_buffer = io.BytesIO(); fig1.savefig(plot_model_avg_scores_buffer, format="png"); plot_model_avg_scores_buffer.seek(0); plt.close(fig1)
+            except Exception as e: print(f"Error generating model average scores plot: {e}")
+
+        # Chart 2
+        df_prompt_plot = df[(df["Prompt"] != "N/A") & (df["Prompt"].notna()) & (df["CLIPScore"].notna())]
+        if not df_prompt_plot.empty and df_prompt_plot["Prompt"].nunique() > 0:
             try:
-                # Shorten long prompts for the chart
-                df_prompt_plot["Short Prompt"] = df_prompt_plot["Prompt"].apply(lambda x: (x[:30] + '...') if len(x) > 33 else x)
+                df_prompt_plot["Short Prompt"] = df_prompt_plot["Prompt"].apply(lambda x: (str(x)[:30] + '...') if len(str(x)) > 33 else str(x))
                 prompt_clip_scores = df_prompt_plot.groupby("Short Prompt")["CLIPScore"].mean().sort_values(ascending=False)
-                if not prompt_clip_scores.empty and len(prompt_clip_scores) > 1:
-                    fig2, ax2 = plt.subplots(figsize=(12, max(7, min(len(prompt_clip_scores)*0.5, 15))))  # Cap the height
+                if not prompt_clip_scores.empty and len(prompt_clip_scores) >= 1:  # Changed to >=1 for single prompts
+                    fig2, ax2 = plt.subplots(figsize=(12, max(7, min(len(prompt_clip_scores)*0.5, 15))))
                     prompt_clip_scores.head(20).plot(kind="barh", ax=ax2)
-                    ax2.set_title("Average CLIPScore per Prompt (Top 20 unique prompts)")
-                    ax2.set_xlabel("Average CLIPScore")
-                    plt.tight_layout()
-                    plot_prompt_clip_scores_buffer = io.BytesIO()
-                    fig2.savefig(plot_prompt_clip_scores_buffer, format="png")
-                    plot_prompt_clip_scores_buffer.seek(0)
-                    plt.close(fig2)
+                    ax2.set_title("Average CLIPScore per Prompt (Top 20 unique prompts)"); ax2.set_xlabel("Average CLIPScore")
+                    plt.tight_layout(); plot_prompt_clip_scores_buffer = io.BytesIO(); fig2.savefig(plot_prompt_clip_scores_buffer, format="png"); plot_prompt_clip_scores_buffer.seek(0); plt.close(fig2)
             except Exception as e: print(f"Error generating prompt CLIP scores plot: {e}")
-
-    csv_buffer_val = ""
-    if not df.empty:
-        csv_buffer = io.StringIO()
-        df.to_csv(csv_buffer, index=False)
-        csv_buffer_val = csv_buffer.getvalue()
-
-    json_buffer_val = ""
-    if not df.empty:
-        json_buffer = io.StringIO()
-        df.to_json(json_buffer, orient='records', indent=4)
-        json_buffer_val = json_buffer.getvalue()
+
+        csv_b = io.StringIO(); df.to_csv(csv_b, index=False); csv_buffer_val = csv_b.getvalue()
+        json_b = io.StringIO(); df.to_json(json_b, orient='records', indent=4); json_buffer_val = json_b.getvalue()
 
     return (
         df,
         gr.Image(value=plot_model_avg_scores_buffer, type="pil", visible=plot_model_avg_scores_buffer is not None),
         gr.Image(value=plot_prompt_clip_scores_buffer, type="pil", visible=plot_prompt_clip_scores_buffer is not None),
-        gr.File(value=csv_buffer_val if csv_buffer_val else None, label="Download CSV Results", visible=bool(csv_buffer_val), file_name="evaluation_results.csv"),
-        gr.File(value=json_buffer_val if json_buffer_val else None, label="Download JSON Results", visible=bool(json_buffer_val), file_name="evaluation_results.json"),
+        gr.File(value=csv_buffer_val or None, label="Download CSV Results", visible=bool(csv_buffer_val), file_name="evaluation_results.csv"),
+        gr.File(value=json_buffer_val or None, label="Download JSON Results", visible=bool(json_buffer_val), file_name="evaluation_results.json"),
         f"Processed {len(all_results)} images.",
     )
 
 # --- Gradio Interface ---
 with gr.Blocks(css="footer {display: none !important}") as demo:
     gr.Markdown("# AI Image Model Evaluation Tool")
-    gr.Markdown(
-        "Upload PNG images (ideally with Stable Diffusion metadata) to evaluate them using various metrics. "
-        "Results will be displayed in a table and visualized in charts."
-    )
-
-    with gr.Row():
-        image_uploader = gr.Files(
-            label="Upload Images (PNG)",
-            file_count="multiple",
-            file_types=["image"],
-        )
-
+    gr.Markdown("Upload PNG images (ideally with Stable Diffusion metadata) to evaluate them...")
+    with gr.Row(): image_uploader = gr.Files(label="Upload Images (PNG)", file_count="multiple", file_types=["image"])
     process_button = gr.Button("Evaluate Images", variant="primary")
     status_textbox = gr.Textbox(label="Status", interactive=False)
-
     gr.Markdown("## Evaluation Results Table")
-    results_table = gr.DataFrame(headers=[
-        "Filename", "Prompt", "Model Name", "Model Hash",
-        "ImageReward", "AnimeAesthetic_dg", "MANIQA_TQ", "CLIPScore",
-        "SDXL_Detector_AI_Prob", "AnimeAI_Check_dg_Prob"
-    ], wrap=True, max_rows=10)  # Cap the rows shown initially
-
+    results_table = gr.DataFrame(headers=[  # max_rows removed
+        "Filename", "Prompt", "Model Name", "Model Hash", "ImageReward", "AnimeAesthetic_dg",
+        "MANIQA_TQ", "CLIPScore", "SDXL_Detector_AI_Prob", "AnimeAI_Check_dg_Prob"
+    ], wrap=True)
     with gr.Row():
-        download_csv_button = gr.File(label="Download CSV Results", interactive=False)  # visible is driven by the output
+        download_csv_button = gr.File(label="Download CSV Results", interactive=False)
         download_json_button = gr.File(label="Download JSON Results", interactive=False)
-
     gr.Markdown("## Visualizations")
     with gr.Row():
         plot_output_model_avg = gr.Image(label="Average Scores per Model", type="pil", interactive=False)
         plot_output_prompt_clip = gr.Image(label="Average CLIPScore per Prompt", type="pil", interactive=False)
-
-    process_button.click(
-        fn=process_images,
-        inputs=[image_uploader],
-        outputs=[
-            results_table,
-            plot_output_model_avg,
-            plot_output_prompt_clip,
-            download_csv_button,
-            download_json_button,
-            status_textbox
-        ]
-    )
-
-    gr.Markdown(
-        """
-        **Metric Explanations:**
-        - **ImageReward:** General aesthetic and prompt alignment score (higher is better). From THUDM.
-        - **AnimeAesthetic_dg:** Aesthetic level for anime style (0-4, higher is better quality level: normal, slight, moderate, strong, extreme). From deepghs (ONNX).
-        - **MANIQA_TQ:** Technical Quality score (no-reference), higher indicates better quality (less noise/artifacts). Based on MANIQA.
-        - **CLIPScore:** Semantic similarity between the image and its prompt (0-100, higher is better). Uses LAION's CLIP.
-        - **SDXL_Detector_AI_Prob:** Estimated probability that the image is AI-generated (higher means more likely AI). From Organika.
-        - **AnimeAI_Check_dg_Prob:** Estimated probability that an anime-style image is AI-generated (higher means more likely AI). From deepghs (ONNX).
-
-        *Processing can take time, especially for many images or on CPU.*
-        """
-    )
+    process_button.click(fn=process_images, inputs=[image_uploader], outputs=[
+        results_table, plot_output_model_avg, plot_output_prompt_clip,
+        download_csv_button, download_json_button, status_textbox
+    ])
+    gr.Markdown("""**Metric Explanations:** ... (unchanged)""")
 
 if __name__ == "__main__":
     demo.launch(debug=True)
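
Note: for reviewers who want to sanity-check the deepghs scoring path outside the app, here is a minimal standalone sketch of the same flow in this commit (resize → CHW float in [0,1] → mean/std 0.5 normalization → softmax → label-weighted sum). It assumes onnxruntime, huggingface_hub, Pillow and numpy are installed; the "labels" key in meta.json is an assumption inferred from get_onnx_session_and_meta, and sample.png is a placeholder input:

# Minimal sketch of the deepghs ONNX scoring path, mirroring the diff above.
import json
import numpy as np
import onnxruntime
from huggingface_hub import hf_hub_download
from PIL import Image

REPO, SUBFOLDER = "deepghs/anime_aesthetic", "swinv2pv3_v0_448_ls0.2_x"
WEIGHTS = {"normal": 0.0, "slight": 1.0, "moderate": 2.0, "strong": 3.0, "extreme": 4.0}

model_path = hf_hub_download(REPO, filename=f"{SUBFOLDER}/model.onnx")
meta_path = hf_hub_download(REPO, filename=f"{SUBFOLDER}/meta.json")
labels = json.load(open(meta_path))["labels"]  # assumed key, per the app's meta.json usage

session = onnxruntime.InferenceSession(model_path, providers=["CPUExecutionProvider"])

img = Image.open("sample.png").convert("RGB").resize((448, 448), Image.Resampling.BILINEAR)
x = np.transpose(np.array(img, dtype=np.float32) / 255.0, (2, 0, 1))  # HWC -> CHW, [0, 1]
x = ((x - 0.5) / 0.5)[None].astype(np.float32)                        # normalize, add batch dim

(logits,) = session.run(None, {session.get_inputs()[0].name: x})      # single output assumed
probs = np.exp(logits[0] - logits[0].max())                           # stable softmax
probs /= probs.sum()
score = sum(p * WEIGHTS.get(l, 0.0) for p, l in zip(probs, labels))   # weighted sum on the 0-4 scale
print(round(float(score), 4))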
 
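
The CLIP-score path introduced here can likewise be exercised standalone. This sketch mirrors the open_clip calls from the new file (open_clip.create_model_and_transforms with the laion2b_s32b_b82k weights, cosine similarity of L2-normalized image and text embeddings scaled to 0-100); sample.png and the prompt are placeholders:

# Minimal sketch of the CLIP-score computation used in calculate_clip_score_value.
import torch
import open_clip
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
model, _, preprocess = open_clip.create_model_and_transforms(
    "ViT-L-14", pretrained="laion2b_s32b_b82k", device=device
)
tokenizer = open_clip.get_tokenizer("ViT-L-14")
model.eval()

@torch.no_grad()
def clip_score_value(image: Image.Image, prompt: str) -> float:
    image_input = preprocess(image).unsqueeze(0).to(device)
    text_input = tokenizer([prompt]).to(device)
    img_f = model.encode_image(image_input)
    txt_f = model.encode_text(text_input)
    img_f = img_f / img_f.norm(p=2, dim=-1, keepdim=True)
    txt_f = txt_f / txt_f.norm(p=2, dim=-1, keepdim=True)
    # Cosine similarity scaled to 0-100, matching the app's convention
    return (txt_f @ img_f.T).squeeze().item() * 100.0

print(round(clip_score_value(Image.open("sample.png").convert("RGB"), "a cat"), 4))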