Delete utils/scoring.py
utils/scoring.py +0 -77
DELETED
@@ -1,77 +0,0 @@
import numpy as np
import logging

logger = logging.getLogger(__name__)

def calculate_final_score(
    quality_score: float,
    aesthetics_score: float,
    prompt_score: float,
    ai_detection_score: float,
    has_prompt: bool = True
) -> float:
    """
    Calculate weighted composite score for image evaluation.

    Args:
        quality_score: Technical image quality (0-10)
        aesthetics_score: Visual appeal score (0-10)
        prompt_score: Prompt adherence score (0-10)
        ai_detection_score: AI generation probability (0-1)
        has_prompt: Whether prompt metadata is available

    Returns:
        Final composite score (0-10)
    """
    try:
        # Validate and clamp input scores
        quality_score = max(0.0, min(10.0, quality_score))
        aesthetics_score = max(0.0, min(10.0, aesthetics_score))
        prompt_score = max(0.0, min(10.0, prompt_score))
        ai_detection_score = max(0.0, min(1.0, ai_detection_score))

        # FIX: Invert and scale the AI detection score to a 0-10 range.
        # A low AI detection probability (good) results in a high score.
        inverted_ai_score = (1 - ai_detection_score) * 10

        if has_prompt:
            # Standard weights when prompt is available
            weights = {
                'quality': 0.25,       # 25% - Technical quality
                'aesthetics': 0.35,    # 35% - Visual appeal (highest weight)
                'prompt': 0.25,        # 25% - Prompt following
                'ai_detection': 0.15   # 15% - Authenticity (inverted detection score)
            }

            # FIX: Correctly calculate the weighted score. The sum of weights is 1.0.
            score = (
                quality_score * weights['quality'] +
                aesthetics_score * weights['aesthetics'] +
                prompt_score * weights['prompt'] +
                inverted_ai_score * weights['ai_detection']
            )
        else:
            # Redistribute prompt weight when no prompt available
            weights = {
                'quality': 0.375,      # 25% + 12.5% from prompt
                'aesthetics': 0.475,   # 35% + 12.5% from prompt
                'ai_detection': 0.15   # 15% - Authenticity
            }

            # FIX: Correctly calculate the weighted score without prompt. Sum of weights is 1.0.
            score = (
                quality_score * weights['quality'] +
                aesthetics_score * weights['aesthetics'] +
                inverted_ai_score * weights['ai_detection']
            )

        # Ensure final score is within the valid 0-10 range
        final_score = max(0.0, min(10.0, score))

        logger.debug(f"Score calculation - Final: {final_score:.2f}")

        return final_score

    except Exception as e:
        logger.error(f"Error calculating final score: {str(e)}")
        return 0.0  # Return 0.0 on error to clearly indicate failure