import gradio as gr
from PIL import Image, PngImagePlugin  # Ensure Image is imported from PIL
import io
import os
import pandas as pd
import torch
from transformers import pipeline as transformers_pipeline, CLIPImageProcessor
import open_clip
import re
import matplotlib.pyplot as plt
import json
from collections import defaultdict
import numpy as np
import logging
import time
import tempfile
# --- ONNX Related Imports and Setup ---
try:
    import onnxruntime
except ImportError:
    print("WARNING: onnxruntime not found. ONNX models will not be available.")
    onnxruntime = None
from huggingface_hub import hf_hub_download
try:
    from imgutils.data import rgb_encode
    IMGUTILS_AVAILABLE = True
    print("INFO: imgutils.data.rgb_encode found and will be used for deepghs models.")
except ImportError:
    print("WARNING: imgutils.data.rgb_encode not found. Using a basic fallback for preprocessing deepghs models.")
    IMGUTILS_AVAILABLE = False
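    # Fallback mirroring imgutils.data.rgb_encode for the RGB case: return the
    # image as a uint8 array, transposed from HWC to CHW when order_='CHW'.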
    def rgb_encode(image: Image.Image, order_='CHW'):
        img_arr = np.array(image.convert("RGB"))
        if order_ == 'CHW':
            img_arr = np.transpose(img_arr, (2, 0, 1))
        return img_arr.astype(np.uint8)
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
print(f"INFO: PyTorch Device: {DEVICE}")
ONNX_EXECUTION_PROVIDER = "CUDAExecutionProvider" if DEVICE == "cuda" and onnxruntime and "CUDAExecutionProvider" in onnxruntime.get_available_providers() else "CPUExecutionProvider"
if onnxruntime:
    print(f"INFO: ONNX Execution Provider: {ONNX_EXECUTION_PROVIDER}")
else:
    print("INFO: ONNX Runtime not available, ONNX models will be skipped.")
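# Preprocessing for the deepghs ONNX classifiers: resize, scale to [0, 1], then
# normalize each channel with the given mean/std and add a batch axis (NCHW).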
def _img_preprocess_for_onnx(image: Image.Image, size: tuple = (384, 384), normalize_mean=0.5, normalize_std=0.5):
    image = image.resize(size, Image.Resampling.BILINEAR)
    data_uint8 = rgb_encode(image, order_='CHW')
    data_float01 = data_uint8.astype(np.float32) / 255.0
    mean = np.array([normalize_mean] * 3, dtype=np.float32).reshape((3, 1, 1))
    std = np.array([normalize_std] * 3, dtype=np.float32).reshape((3, 1, 1))
    normalized_data = (data_float01 - mean) / std
    return normalized_data[None, ...].astype(np.float32)
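# Sessions are cached per (repo, subfolder) so each ONNX model is downloaded and
# initialized only once; failures are cached as well, to avoid retry loops.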
onnx_sessions_cache = {}
def get_onnx_session_and_meta(repo_id, model_subfolder, current_log_list):
    cache_key = f"{repo_id}/{model_subfolder}"
    if cache_key in onnx_sessions_cache: return onnx_sessions_cache[cache_key]
    if not onnxruntime:
        msg = f"ERROR: ONNX Runtime not available for get_onnx_session_and_meta ({cache_key}). Skipping."
        print(msg); current_log_list.append(msg)
        onnx_sessions_cache[cache_key] = (None, [], None)
        return None, [], None
    try:
        msg = f"INFO: Loading ONNX model {repo_id}/{model_subfolder}..."
        print(msg); current_log_list.append(msg)
        model_path = hf_hub_download(repo_id, filename=f"{model_subfolder}/model.onnx")
        meta_path = hf_hub_download(repo_id, filename=f"{model_subfolder}/meta.json")
        options = onnxruntime.SessionOptions()
        options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        if ONNX_EXECUTION_PROVIDER == "CPUExecutionProvider" and hasattr(os, 'cpu_count'):
            options.intra_op_num_threads = os.cpu_count()
        session = onnxruntime.InferenceSession(model_path, options, providers=[ONNX_EXECUTION_PROVIDER])
        with open(meta_path, 'r') as f: meta = json.load(f)
        labels = meta.get('labels', [])
        msg = f"INFO: ONNX model {cache_key} loaded successfully with provider {ONNX_EXECUTION_PROVIDER}."
        print(msg); current_log_list.append(msg)
        onnx_sessions_cache[cache_key] = (session, labels, meta)
        return session, labels, meta
    except Exception as e:
        msg = f"ERROR: Failed to load ONNX model {cache_key}: {e}"
        print(msg); current_log_list.append(msg)
        onnx_sessions_cache[cache_key] = (None, [], None)
        return None, [], None
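# --- Model initialization: ImageReward and MANIQA are stubbed out, the deepghs
# ONNX models load lazily on first use, CLIP and the SDXL detector load eagerly. ---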
reward_processor, reward_model = None, None
print("INFO: THUDM/ImageReward is temporarily disabled due to loading issues.")
ANIME_AESTHETIC_REPO = "deepghs/anime_aesthetic"; ANIME_AESTHETIC_SUBFOLDER = "swinv2pv3_v0_448_ls0.2_x"
ANIME_AESTHETIC_IMG_SIZE = (448, 448); ANIME_AESTHETIC_LABEL_WEIGHTS = {"normal": 0.0, "slight": 1.0, "moderate": 2.0, "strong": 3.0, "extreme": 4.0}
print("INFO: MANIQA (honklers/maniqa-nr) is currently disabled.")
clip_model_instance, clip_preprocess, clip_tokenizer = None, None, None
clip_model_name = 'ViT-L-14'  # defined outside the try so the except handler can reference it
try:
    print(f"INFO: Loading CLIP model {clip_model_name} (laion2b_s32b_b82k)...")
    clip_model_instance, _, clip_preprocess_val = open_clip.create_model_and_transforms(clip_model_name, pretrained='laion2b_s32b_b82k', device=DEVICE)
    clip_preprocess = clip_preprocess_val; clip_tokenizer = open_clip.get_tokenizer(clip_model_name)
    clip_model_instance.eval(); print(f"INFO: CLIP model {clip_model_name} (laion2b_s32b_b82k) loaded successfully.")
except Exception as e: print(f"ERROR: Failed to load CLIP model {clip_model_name} (laion2b_s32b_b82k): {e}")
sdxl_detector_pipe = None
try:
    print("INFO: Loading Organika/sdxl-detector model...")
    # torch.device("cuda").index is None for a bare "cuda" device; pass an explicit GPU index instead.
    sdxl_detector_pipe = transformers_pipeline("image-classification", model="Organika/sdxl-detector", device=0 if DEVICE == "cuda" else -1)
    print("INFO: Organika/sdxl-detector loaded successfully.")
except Exception as e: print(f"ERROR: Failed to load Organika/sdxl-detector: {e}")
ANIME_AI_CHECK_REPO = "deepghs/anime_ai_check"; ANIME_AI_CHECK_SUBFOLDER = "caformer_s36_plus_sce"
ANIME_AI_CHECK_IMG_SIZE = (384, 384)
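# AUTOMATIC1111-style PNG metadata lives in a "parameters" text chunk shaped like:
#   <prompt>
#   Negative prompt: <negative prompt>
#   Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 123, Model hash: abcd1234, Model: myModel
# The parser below splits on the "Negative prompt:" and "Steps:" markers.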
def extract_sd_parameters(image_pil, filename_for_log, current_log_list):
    if image_pil is None: return "", "N/A", "N/A", "N/A", {}
    parameters_str = image_pil.info.get("parameters", "")
    if not parameters_str:
        current_log_list.append(f"DEBUG [{filename_for_log}]: No metadata found in image.")
        return "", "N/A", "N/A", "N/A", {}
    current_log_list.append(f"DEBUG [{filename_for_log}]: Raw metadata: {parameters_str[:100]}...")
    prompt, negative_prompt, model_name, model_hash, other_params_dict = "", "N/A", "N/A", "N/A", {}
    try:
        neg_prompt_index = parameters_str.find("Negative prompt:")
        steps_meta_index = parameters_str.find("Steps:")
        if neg_prompt_index != -1:
            prompt = parameters_str[:neg_prompt_index].strip()
            params_part_start_index = steps_meta_index if steps_meta_index != -1 and steps_meta_index > neg_prompt_index else -1
            if params_part_start_index != -1:
                negative_prompt = parameters_str[neg_prompt_index + len("Negative prompt:"):params_part_start_index].strip()
                params_part = parameters_str[params_part_start_index:]
            else:
                end_of_neg = parameters_str.find("\n", neg_prompt_index + len("Negative prompt:"))
                if end_of_neg == -1: end_of_neg = len(parameters_str)
                negative_prompt = parameters_str[neg_prompt_index + len("Negative prompt:"):end_of_neg].strip()
                params_part = parameters_str[end_of_neg:].strip() if end_of_neg < len(parameters_str) else ""
        elif steps_meta_index != -1:
            prompt = parameters_str[:steps_meta_index].strip(); params_part = parameters_str[steps_meta_index:]
        else:
            prompt = parameters_str.strip(); params_part = ""
        if params_part:
            params_list = [p.strip() for p in params_part.split(",")]
            temp_other_params = {}
            for param_val_str in params_list:
                parts = param_val_str.split(':', 1)
                if len(parts) == 2:
                    key, value = parts[0].strip(), parts[1].strip()
                    temp_other_params[key] = value
                    if key.lower() == "model": model_name = value
                    elif key.lower() == "model hash": model_hash = value
            for k, v in temp_other_params.items():
                if k.lower() not in ["model", "model hash"]: other_params_dict[k] = v
        if model_name == "N/A" and model_hash != "N/A": model_name = f"hash_{model_hash}"
        if model_name == "N/A" and "Checkpoint" in other_params_dict: model_name = other_params_dict["Checkpoint"]
        if model_name == "N/A" and "model" in other_params_dict: model_name = other_params_dict["model"]
        current_log_list.append(f"DEBUG [{filename_for_log}]: Parsed Prompt: {prompt[:50]}... | Model: {model_name}")
    except Exception as e: current_log_list.append(f"ERROR [{filename_for_log}]: Failed to parse metadata: {e}")
    return prompt, negative_prompt, model_name, model_hash, other_params_dict
def get_image_reward(image_pil, filename_for_log, current_log_list): return "N/A (Disabled)"
def get_anime_aesthetic_score_deepghs(image_pil, filename_for_log, current_log_list):
    session, labels, meta = get_onnx_session_and_meta(ANIME_AESTHETIC_REPO, ANIME_AESTHETIC_SUBFOLDER, current_log_list)
    if not session or not labels: current_log_list.append(f"INFO [{filename_for_log}]: AnimeAesthetic ONNX model not loaded, skipping."); return "N/A"
    t_start = time.time(); current_log_list.append(f"DEBUG [{filename_for_log}]: Starting AnimeAesthetic (ONNX) score...")
    try:
        input_data = _img_preprocess_for_onnx(image_pil.copy(), size=ANIME_AESTHETIC_IMG_SIZE)
        input_name = session.get_inputs()[0].name; output_name = session.get_outputs()[0].name
        onnx_output, = session.run([output_name], {input_name: input_data})
        # Numerically stable softmax over the class logits, then a mean weighted by the label weights.
        scores = onnx_output[0]; exp_scores = np.exp(scores - np.max(scores)); probabilities = exp_scores / np.sum(exp_scores)
        weighted_score = sum(probabilities[i] * ANIME_AESTHETIC_LABEL_WEIGHTS.get(label, 0.0) for i, label in enumerate(labels))
        score = round(weighted_score, 4); t_end = time.time()
        current_log_list.append(f"DEBUG [{filename_for_log}]: AnimeAesthetic (ONNX) score: {score} (took {t_end - t_start:.2f}s)"); return score
    except Exception as e: current_log_list.append(f"ERROR [{filename_for_log}]: AnimeAesthetic (ONNX) scoring failed: {e}"); return "Error"
def get_maniqa_score(image_pil, filename_for_log, current_log_list):
    current_log_list.append(f"INFO [{filename_for_log}]: MANIQA is disabled."); return "N/A (Disabled)"
def calculate_clip_score_value(image_pil, prompt_text, filename_for_log, current_log_list):
    if not clip_model_instance or not clip_preprocess or not clip_tokenizer: current_log_list.append(f"INFO [{filename_for_log}]: CLIP model not loaded, skipping CLIPScore."); return "N/A"
    if not prompt_text or prompt_text == "N/A": current_log_list.append(f"INFO [{filename_for_log}]: Empty prompt, skipping CLIPScore."); return "N/A (Empty Prompt)"
    t_start = time.time(); current_log_list.append(f"DEBUG [{filename_for_log}]: Starting CLIPScore (PyTorch Device: {DEVICE})...")
    try:
        image_input = clip_preprocess(image_pil).unsqueeze(0).to(DEVICE)
        text_for_tokenizer = str(prompt_text); text_input = clip_tokenizer([text_for_tokenizer]).to(DEVICE)
        with torch.no_grad():  # inference only; avoid building autograd graphs
            image_features = clip_model_instance.encode_image(image_input); text_features = clip_model_instance.encode_text(text_input)
        image_features_norm = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
        text_features_norm = text_features / text_features.norm(p=2, dim=-1, keepdim=True)
        score_val = (text_features_norm @ image_features_norm.T).squeeze().item() * 100.0
        score = round(score_val, 2); t_end = time.time()
        current_log_list.append(f"DEBUG [{filename_for_log}]: CLIPScore: {score} (took {t_end - t_start:.2f}s)"); return score
    except Exception as e: current_log_list.append(f"ERROR [{filename_for_log}]: CLIPScore calculation failed: {e}"); return "Error"
def get_sdxl_detection_score(image_pil, filename_for_log, current_log_list):
    if not sdxl_detector_pipe: current_log_list.append(f"INFO [{filename_for_log}]: SDXL_Detector model not loaded, skipping."); return "N/A"
    t_start = time.time(); current_log_list.append(f"DEBUG [{filename_for_log}]: Starting SDXL_Detector score (Device: {sdxl_detector_pipe.device})...")
    try:
        result = sdxl_detector_pipe(image_pil.copy()); ai_score_val = 0.0
        for item in result:
            if item['label'].lower() == 'artificial': ai_score_val = item['score']; break
        score = round(ai_score_val, 4); t_end = time.time()
        current_log_list.append(f"DEBUG [{filename_for_log}]: SDXL_Detector AI Prob: {score} (took {t_end - t_start:.2f}s)"); return score
    except Exception as e: current_log_list.append(f"ERROR [{filename_for_log}]: SDXL_Detector scoring failed: {e}"); return "Error"
def get_anime_ai_check_score_deepghs(image_pil, filename_for_log, current_log_list):
    session, labels, meta = get_onnx_session_and_meta(ANIME_AI_CHECK_REPO, ANIME_AI_CHECK_SUBFOLDER, current_log_list)
    if not session or not labels: current_log_list.append(f"INFO [{filename_for_log}]: AnimeAI_Check ONNX model not loaded, skipping."); return "N/A"
    t_start = time.time(); current_log_list.append(f"DEBUG [{filename_for_log}]: Starting AnimeAI_Check (ONNX) score...")
    try:
        input_data = _img_preprocess_for_onnx(image_pil.copy(), size=ANIME_AI_CHECK_IMG_SIZE)
        input_name = session.get_inputs()[0].name; output_name = session.get_outputs()[0].name
        onnx_output, = session.run([output_name], {input_name: input_data})
        scores = onnx_output[0]; exp_scores = np.exp(scores - np.max(scores)); probabilities = exp_scores / np.sum(exp_scores)
        ai_prob_val = 0.0
        for i, label in enumerate(labels):
            if label.lower() == 'ai': ai_prob_val = probabilities[i]; break
        score = round(ai_prob_val, 4); t_end = time.time()
        current_log_list.append(f"DEBUG [{filename_for_log}]: AnimeAI_Check (ONNX) AI Prob: {score} (took {t_end - t_start:.2f}s)"); return score
    except Exception as e: current_log_list.append(f"ERROR [{filename_for_log}]: AnimeAI_Check (ONNX) scoring failed: {e}"); return "Error"
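# Generator wired to process_button.click: every yield emits the full 7-tuple
# (table, two plots, two download files, status, logs) matching `outputs`, so
# the UI updates incrementally while images are processed.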
def process_images_generator(files, progress=gr.Progress(track_tqdm=True)):
    if not files:
        yield (pd.DataFrame(),
               gr.Image(visible=False), gr.Image(visible=False),
               gr.File(visible=False), gr.File(visible=False),
               "Please upload some images.", "No files to process.")
        return
    all_results = []
    log_accumulator = [f"INFO: Starting processing for {len(files)} images..."]
    yield (pd.DataFrame(all_results),
           gr.Image(visible=False), gr.Image(visible=False),
           gr.File(visible=False), gr.File(visible=False),
           "Processing...", "\n".join(log_accumulator))
    for i, file_obj in enumerate(files):
        filename_for_log = "Unknown File"; current_img_total_time_start = time.time()
        try:
            filename_for_log = os.path.basename(getattr(file_obj, 'name', f"file_{i}_{int(time.time())}"))
            log_accumulator.append(f"--- Processing image {i+1}/{len(files)}: {filename_for_log} ---")
            progress((i + 0.1) / len(files), desc=f"Img {i+1}/{len(files)}: Loading {filename_for_log}")
            yield (pd.DataFrame(all_results),
                   gr.Image(visible=False), gr.Image(visible=False),
                   gr.File(visible=False), gr.File(visible=False),
                   f"Loading image {i+1}/{len(files)}: {filename_for_log}", "\n".join(log_accumulator))
            img = Image.open(getattr(file_obj, 'name', str(file_obj)))
            if img.mode != "RGB": img = img.convert("RGB")
            progress((i + 0.3) / len(files), desc=f"Img {i+1}/{len(files)}: Scoring {filename_for_log}")
            prompt, neg_prompt, model_n, model_h, other_p = extract_sd_parameters(img, filename_for_log, log_accumulator)
            reward = get_image_reward(img, filename_for_log, log_accumulator)
            anime_aes_deepghs = get_anime_aesthetic_score_deepghs(img, filename_for_log, log_accumulator)
            maniqa = get_maniqa_score(img, filename_for_log, log_accumulator)
            clip_val = calculate_clip_score_value(img, prompt, filename_for_log, log_accumulator)
            sdxl_detect = get_sdxl_detection_score(img, filename_for_log, log_accumulator)
            anime_ai_chk_deepghs = get_anime_ai_check_score_deepghs(img, filename_for_log, log_accumulator)
            current_img_total_time_end = time.time()
            log_accumulator.append(f"INFO [{filename_for_log}]: Finished all scores (total for image: {current_img_total_time_end - current_img_total_time_start:.2f}s)")
            all_results.append({
                "Filename": filename_for_log, "Prompt": prompt if prompt else "N/A", "Model Name": model_n, "Model Hash": model_h,
                "ImageReward": reward, "AnimeAesthetic_dg": anime_aes_deepghs, "MANIQA_TQ": maniqa,
                "CLIPScore": clip_val, "SDXL_Detector_AI_Prob": sdxl_detect, "AnimeAI_Check_dg_Prob": anime_ai_chk_deepghs,
            })
            df_so_far = pd.DataFrame(all_results)
            progress((i + 1.0) / len(files), desc=f"Img {i+1}/{len(files)}: Done {filename_for_log}")
            yield (df_so_far,
                   gr.Image(visible=False), gr.Image(visible=False),
                   gr.File(visible=False), gr.File(visible=False),
                   f"Processed image {i+1}/{len(files)}: {filename_for_log}", "\n".join(log_accumulator))
        except Exception as e:
            log_accumulator.append(f"CRITICAL ERROR processing {filename_for_log}: {e}")
            print(f"CRITICAL ERROR processing {filename_for_log}: {e}")
            all_results.append({
                "Filename": filename_for_log, "Prompt": "Critical Error", "Model Name": "Error", "Model Hash": "Error",
                "ImageReward": "Error", "AnimeAesthetic_dg": "Error", "MANIQA_TQ": "Error",
                "CLIPScore": "Error", "SDXL_Detector_AI_Prob": "Error", "AnimeAI_Check_dg_Prob": "Error"
            })
            df_so_far = pd.DataFrame(all_results)
            yield (df_so_far,
                   gr.Image(visible=False), gr.Image(visible=False),
                   gr.File(visible=False), gr.File(visible=False),
                   f"Error on image {i+1}/{len(files)}: {filename_for_log}", "\n".join(log_accumulator))
    log_accumulator.append("--- Generating final plots and download files ---")
    progress(1.0, desc="Generating final plots...")
    yield (pd.DataFrame(all_results),
           gr.Image(visible=False), gr.Image(visible=False),
           gr.File(visible=False), gr.File(visible=False),
           "Generating final plots...", "\n".join(log_accumulator))
    df = pd.DataFrame(all_results)
    plot_model_avg_scores_buffer, plot_prompt_clip_scores_buffer = None, None
    csv_file_path_out, json_file_path_out = None, None
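    # Post-processing: coerce score columns to numeric, render the two summary
    # plots into in-memory PNG buffers, and write CSV/JSON files for download.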
    if not df.empty:
        numeric_cols = ["ImageReward", "AnimeAesthetic_dg", "MANIQA_TQ", "CLIPScore"]
        for col in numeric_cols: df[col] = pd.to_numeric(df[col], errors='coerce')
        df_model_plot = df[(df["Model Name"] != "N/A") & (df["Model Name"].notna())]
        if not df_model_plot.empty and df_model_plot["Model Name"].nunique() > 0:
            try:
                model_avg_scores = df_model_plot.groupby("Model Name")[numeric_cols].mean().dropna(how='all')
                if not model_avg_scores.empty:
                    fig1, ax1 = plt.subplots(figsize=(12, 7)); model_avg_scores.plot(kind="bar", ax=ax1)
                    ax1.set_title("Average Scores per Model"); ax1.set_ylabel("Average Score")
                    ax1.tick_params(axis='x', rotation=45, labelsize=8); plt.tight_layout()
                    plot_model_avg_scores_buffer = io.BytesIO(); fig1.savefig(plot_model_avg_scores_buffer, format="png"); plot_model_avg_scores_buffer.seek(0); plt.close(fig1)
                    log_accumulator.append("INFO: Model average scores plot generated.")
            except Exception as e: log_accumulator.append(f"ERROR: Failed to generate model average scores plot: {e}")
        # .copy() avoids pandas' SettingWithCopyWarning when the "Short Prompt" column is added below.
        df_prompt_plot = df[(df["Prompt"] != "N/A") & (df["Prompt"].notna()) & (df["CLIPScore"].notna())].copy()
        if not df_prompt_plot.empty and df_prompt_plot["Prompt"].nunique() > 0:
            try:
                df_prompt_plot["Short Prompt"] = df_prompt_plot["Prompt"].apply(lambda x: (str(x)[:30] + '...') if len(str(x)) > 33 else str(x))
                prompt_clip_scores = df_prompt_plot.groupby("Short Prompt")["CLIPScore"].mean().sort_values(ascending=False)
                if not prompt_clip_scores.empty and len(prompt_clip_scores) >= 1:
                    fig2, ax2 = plt.subplots(figsize=(12, max(7, min(len(prompt_clip_scores)*0.5, 15))))
                    prompt_clip_scores.head(20).plot(kind="barh", ax=ax2)
                    ax2.set_title("Average CLIPScore per Prompt (Top 20 unique prompts)"); ax2.set_xlabel("Average CLIPScore")
                    plt.tight_layout(); plot_prompt_clip_scores_buffer = io.BytesIO(); fig2.savefig(plot_prompt_clip_scores_buffer, format="png"); plot_prompt_clip_scores_buffer.seek(0); plt.close(fig2)
                    log_accumulator.append("INFO: Prompt CLIP scores plot generated.")
            except Exception as e: log_accumulator.append(f"ERROR: Failed to generate prompt CLIP scores plot: {e}")
        try:
            with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".csv", encoding='utf-8') as tmp_csv:
                df.to_csv(tmp_csv, index=False); csv_file_path_out = tmp_csv.name
            with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json", encoding='utf-8') as tmp_json:
                df.to_json(tmp_json, orient='records', indent=4); json_file_path_out = tmp_json.name
            log_accumulator.append("INFO: CSV and JSON data prepared for download.")
        except Exception as e: log_accumulator.append(f"ERROR preparing download files: {e}")
    final_status = f"Finished processing {len(all_results)} images."
    log_accumulator.append(final_status)
    # Convert BytesIO buffers to PIL.Image before handing them to gr.Image
    pil_plot_model_avg = Image.open(plot_model_avg_scores_buffer) if plot_model_avg_scores_buffer and plot_model_avg_scores_buffer.getbuffer().nbytes > 0 else None
    pil_plot_prompt_clip = Image.open(plot_prompt_clip_scores_buffer) if plot_prompt_clip_scores_buffer and plot_prompt_clip_scores_buffer.getbuffer().nbytes > 0 else None
    if pil_plot_model_avg or pil_plot_prompt_clip:
        log_accumulator.append("INFO: Plots converted to PIL Images for display.")
    else:
        log_accumulator.append("INFO: No plots were generated or plots are empty.")
    yield (
        df,
        gr.Image(value=pil_plot_model_avg, visible=pil_plot_model_avg is not None),
        gr.Image(value=pil_plot_prompt_clip, visible=pil_plot_prompt_clip is not None),
        gr.File(value=csv_file_path_out, visible=csv_file_path_out is not None),
        gr.File(value=json_file_path_out, visible=json_file_path_out is not None),
        final_status,
        "\n".join(log_accumulator)
    )
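# --- Gradio UI: uploader and button on top, status/log boxes, the results
# table, download slots, and the two plot panels, all driven by the generator. ---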
with gr.Blocks(css="footer {display: none !important}") as demo:
    gr.Markdown("# AI Image Model Evaluation Tool")
    gr.Markdown("Upload PNG images (ideally with Stable Diffusion metadata) to evaluate them...")
    with gr.Row(): image_uploader = gr.Files(label="Upload Images (PNG)", file_count="multiple", file_types=["image"])
    process_button = gr.Button("Evaluate Images", variant="primary")
    status_textbox = gr.Textbox(label="Overall Status", interactive=False)
    log_output_textbox = gr.Textbox(label="Detailed Logs", lines=15, interactive=False, autoscroll=True)
    gr.Markdown("## Evaluation Results Table")
    results_table = gr.DataFrame(headers=[
        "Filename", "Prompt", "Model Name", "Model Hash", "ImageReward", "AnimeAesthetic_dg",
        "MANIQA_TQ", "CLIPScore", "SDXL_Detector_AI_Prob", "AnimeAI_Check_dg_Prob"
    ], wrap=True)
    with gr.Row():
        download_csv_button = gr.File(label="Download CSV Results", interactive=False)
        download_json_button = gr.File(label="Download JSON Results", interactive=False)
    gr.Markdown("## Visualizations")
    with gr.Row():
        plot_output_model_avg = gr.Image(label="Average Scores per Model", type="pil", interactive=False)
        plot_output_prompt_clip = gr.Image(label="Average CLIPScore per Prompt", type="pil", interactive=False)
    process_button.click(
        fn=process_images_generator, inputs=[image_uploader],
        outputs=[results_table, plot_output_model_avg, plot_output_prompt_clip,
                 download_csv_button, download_json_button, status_textbox, log_output_textbox]
    )
gr.Markdown("""**Metric Explanations:** ... (без изменений)""") | |
if __name__ == "__main__":
    print("--- Initializing models, please wait... ---")
    initial_dummy_logs = []
    if onnxruntime:
        get_onnx_session_and_meta(ANIME_AESTHETIC_REPO, ANIME_AESTHETIC_SUBFOLDER, initial_dummy_logs)
        get_onnx_session_and_meta(ANIME_AI_CHECK_REPO, ANIME_AI_CHECK_SUBFOLDER, initial_dummy_logs)
    if initial_dummy_logs:
        print("--- Initial ONNX loading attempts log: ---")
        for log_line in initial_dummy_logs: print(log_line)
        print("-----------------------------------------")
    print("--- Model initialization attempt complete. Launching Gradio. ---")
    demo.queue().launch(debug=True)