Update app.py
app.py
CHANGED
@@ -27,7 +27,7 @@ class AIEvaluationForm:
         components = {}
 
         with gr.Group():
-            gr.Markdown("## System Information")
+            gr.Markdown("## π AI System Information")
             gr.Markdown("*Please provide basic information about the AI system being evaluated.*")
 
             components['name'] = gr.Textbox(
@@ -61,7 +61,7 @@ class AIEvaluationForm:
                 info="Primary category of the AI system"
             )
 
-            components['
+            components['input modalities'] = gr.CheckboxGroup(
                 choices=[
                     "Text",
                     "Image",
@@ -71,10 +71,10 @@ class AIEvaluationForm:
                 ],
                 label="Input modalities (select all that apply)",
                 value=["Text"],
-                info="
+                info="input modalities supported by the system"
             )
 
-            components['
+            components['output modalities'] = gr.CheckboxGroup(
                 choices=[
                     "Text",
                     "Image",
@@ -84,7 +84,7 @@ class AIEvaluationForm:
                 ],
                 label="Output Modalities (select all that apply)",
                 value=["Text"],
-                info="
+                info="output modalities supported by the system"
             )
 
         return list(components.values()), components
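The two new CheckboxGroup fields follow the form's components-dict pattern: every widget is stored under a string key and the values are also returned as a list, so event handlers can be wired positionally. A minimal self-contained sketch of that pattern — only the dict keys ('input modalities', 'output modalities') come from this change; the trimmed choice list and the helper name are illustrative:

    import gradio as gr

    def build_modality_components():
        components = {}
        with gr.Group():
            for key in ("input modalities", "output modalities"):
                components[key] = gr.CheckboxGroup(
                    choices=["Text", "Image"],  # the full app lists more modalities
                    label=f"{key.title()} (select all that apply)",
                    value=["Text"],             # default selection
                    info=f"{key} supported by the system",
                )
        # list form for positional event wiring, dict form for named lookup
        return list(components.values()), components

    with gr.Blocks() as demo:
        build_modality_components()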
@@ -163,13 +163,13 @@ class AIEvaluationForm:
 
             # Determine source type based on content
             if line.startswith('http'):
-                source_type = "
+                source_type = "π"
                 name = line.split('/')[-1] if '/' in line else line
             elif 'internal' in line.lower() or 'proprietary' in line.lower():
-                source_type = "
+                source_type = "π’"
                 name = line
             else:
-                source_type = "
+                source_type = "π"
                 name = line
 
             sources.append({
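The source-type heuristic above is easiest to read as a standalone function. The three tag strings in the commit are emoji that render as mojibake on this page, so plain labels stand in for them below; everything else mirrors the hunk:

    def classify_source(line: str) -> dict:
        """Tag an evaluation source by where it appears to come from."""
        if line.startswith('http'):
            source_type = "url"       # stand-in for the emoji tag in the commit
            name = line.split('/')[-1] if '/' in line else line
        elif 'internal' in line.lower() or 'proprietary' in line.lower():
            source_type = "internal"  # stand-in
            name = line
        else:
            source_type = "other"     # stand-in
            name = line
        return {"type": source_type, "name": name}

    assert classify_source("https://example.com/evals/mmlu")["name"] == "mmlu"
    assert classify_source("proprietary red-team suite")["type"] == "internal"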
@@ -179,17 +179,6 @@ class AIEvaluationForm:
             })
 
         return sources
-
-    def load_uploaded_json(self, file):
-        """Load JSON from uploaded file"""
-        if file is None:
-            return {}
-        try:
-            with open(file.name, 'r') as f:
-                return json.load(f)
-        except Exception as e:
-            return {"error": str(e)}
-
 
     def generate_scorecard(self, *args) -> Tuple[Dict, str]:
         """Generate scorecard JSON from form inputs"""
@@ -198,9 +187,10 @@ class AIEvaluationForm:
         for i, arg in enumerate(args[:10]):  # Print first 10 for debugging
             print(f"Arg {i}: {type(arg)} = {arg}")
 
-        # Extract system info (first
-
-
+        # Extract system info (first num_args arguments)
+        num_args = 6
+        name, provider, url, sys_type, inp_modalities, out_modalities = args[:num_args]
+        remaining_args = list(args[num_args:])
 
         # Build metadata
         metadata = {
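The new unpacking replaces the truncated extraction logic: Gradio passes component values to a callback positionally, in creation order, so the first six arguments are always the system-info fields. A sketch of just that step — the helper name and the demo call at the bottom are illustrative:

    def split_args(*args):
        # first six positional values are the system-info components,
        # exactly as unpacked in generate_scorecard above
        num_args = 6
        name, provider, url, sys_type, inp_modalities, out_modalities = args[:num_args]
        remaining_args = list(args[num_args:])  # evaluation answers
        return (name, provider, url, sys_type, inp_modalities, out_modalities), remaining_args

    system_info, answers = split_args(
        "StarCoder2", "BigCode", "https://huggingface.co/bigcode/starcoder2-15b",
        "Generative Model", ["Text"], ["Text"],
        "N/A", "N/A",  # two example evaluation answers
    )
    assert system_info[0] == "StarCoder2" and len(answers) == 2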
@@ -318,7 +308,7 @@ class AIEvaluationForm:
 
         # Header
         gr.Markdown("""
-        # AI System Evaluation Scorecard
+        # π AI System Evaluation Scorecard
 
         This comprehensive evaluation form helps you assess AI systems across multiple dimensions including bias,
         cultural sensitivity, environmental impact, privacy, and more. Complete the sections relevant to your system
@@ -336,17 +326,17 @@ class AIEvaluationForm:
 
         # Generate button and outputs
         with gr.Group():
-            gr.Markdown("## Generate Scorecard")
+            gr.Markdown("## π Generate Scorecard")
 
             with gr.Row():
                 generate_btn = gr.Button(
-                    "Generate Evaluation Scorecard",
+                    "π Generate Evaluation Scorecard",
                     variant="primary",
                     size="lg",
                     scale=2
                 )
                 clear_btn = gr.Button(
-                    "Clear Form",
+                    "ποΈ Clear Form",
                     variant="secondary",
                     scale=1
                 )
@@ -356,7 +346,7 @@ class AIEvaluationForm:
 
         # Outputs
        with gr.Group():
-            gr.Markdown("### Generated Scorecard")
+            gr.Markdown("### π Generated Scorecard")
 
            with gr.Row():
                json_output = gr.JSON(
@@ -389,9 +379,9 @@ class AIEvaluationForm:
 
            return (
                scorecard,  # JSON display
-                gr.File(value=filename, visible=True)  # File for download
+                gr.File(value=filename, visible=True),  # File for download
            )
-
+
        def clear_form():
            """Clear all form inputs"""
            return [None] * len(all_inputs)
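The added trailing comma is cosmetic, but the return pattern is worth spelling out: a handler can return a component constructor such as gr.File(...) as an in-place update, here revealing a previously hidden file component once the JSON has been written. A minimal wiring sketch — the filename, payload, and handler name are illustrative:

    import json
    import gradio as gr

    def make_scorecard():
        scorecard = {"metadata": {"name": "example"}}  # illustrative payload
        filename = "scorecard.json"
        with open(filename, "w") as f:
            json.dump(scorecard, f, indent=2)
        # returning gr.File(...) updates the existing component in place
        return scorecard, gr.File(value=filename, visible=True)

    with gr.Blocks() as demo:
        btn = gr.Button("Generate")
        json_output = gr.JSON()
        download = gr.File(visible=False)
        btn.click(fn=make_scorecard, outputs=[json_output, download])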
@@ -411,8 +401,8 @@ class AIEvaluationForm:
 
        # Add example data button
        with gr.Group():
-            gr.Markdown("### Quick Start")
-            example_btn = gr.Button("Load Example Data", variant="secondary")
+            gr.Markdown("### π Quick Start")
+            example_btn = gr.Button("π Load Example Data", variant="secondary")
 
        def load_example():
            """Load example data for StarCoder2-like system"""
@@ -421,8 +411,8 @@ class AIEvaluationForm:
                "BigCode",  # provider
                "https://huggingface.co/bigcode/starcoder2-15b",  # url
                "Generative Model",  # type
-                ["Text"],  # input modalities
-                ["Text"]
+                ["Text"],  # input modalities
+                ["Text"],  # output modalities
            ]
            # Add default values for evaluation sections (all N/A initially)
            remaining_defaults = []
@@ -440,21 +430,28 @@ class AIEvaluationForm:
            fn=load_example,
            outputs=all_inputs
        )
-
        with gr.Group():
-            gr.Markdown("### Upload Completed Evaluation JSON")
+            gr.Markdown("### π€ Upload Completed Evaluation JSON")
            uploaded_file = gr.File(label="Upload JSON File", file_types=[".json"])
            uploaded_preview = gr.JSON(label="Preview of Uploaded Content")
-            uploaded_file.change(fn=
+            uploaded_file.change(fn=load_uploaded_json, inputs=uploaded_file, outputs=uploaded_preview)
 
            gr.Markdown("""
-            ### Submit Your Scorecard to the Eval Cards Repository
+            ### π¬ Submit Your Scorecard to the Eval Cards Repository
            Once downloaded, you can contribute by submitting a pull request to [Eval Cards GitHub](https://github.com/evaleval/Eval_Cards).
            Place your file in the `submissions/` directory.
            """)
 
        return demo
-
+
+def load_uploaded_json(file):
+    if file is None:
+        return {}
+    try:
+        with open(file.name, 'r') as f:
+            return json.load(f)
+    except Exception as e:
+        return {"error": str(e)}
 
 def main():
     """Main function to run the application"""
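This hunk both completes the truncated .change() hookup and relocates load_uploaded_json from a class method (removed in the -179,17 hunk above) to a module-level function, so it can be passed as fn= without an instance. The wiring in isolation, assembled from this commit's own pieces:

    import json
    import gradio as gr

    def load_uploaded_json(file):
        """Parse an uploaded JSON file, returning {} or an error dict."""
        if file is None:
            return {}
        try:
            with open(file.name, 'r') as f:
                return json.load(f)
        except Exception as e:
            return {"error": str(e)}

    with gr.Blocks() as demo:
        uploaded_file = gr.File(label="Upload JSON File", file_types=[".json"])
        uploaded_preview = gr.JSON(label="Preview of Uploaded Content")
        # fires whenever the file value changes, including on clear (file is None)
        uploaded_file.change(fn=load_uploaded_json, inputs=uploaded_file,
                             outputs=uploaded_preview)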
@@ -465,9 +462,9 @@ def main():
        # Create and launch the interface
        demo = eval_form.create_interface()
 
-        print("Launching AI Evaluation Scorecard...")
-        print(f"Loading questions from: {eval_form.template_file}")
-        print(f"Found {len(eval_form.template)} evaluation categories")
+        print("π Launching AI Evaluation Scorecard...")
+        print(f"π Loading questions from: {eval_form.template_file}")
+        print(f"π Found {len(eval_form.template)} evaluation categories")
 
        # Count total questions
        total_questions = sum(
@@ -475,7 +472,7 @@ def main():
            for section in eval_form.template.values()
            for subsection in section.values()
        )
-        print(f"Total evaluation questions: {total_questions}")
+        print(f"β Total evaluation questions: {total_questions}")
 
        demo.launch(
            ssr_mode=False,
@@ -486,10 +483,10 @@ def main():
        )
 
    except FileNotFoundError as e:
-        print(f"Error: {e}")
+        print(f"β Error: {e}")
        print("Please ensure 'questions.yaml' exists in the current directory.")
    except Exception as e:
-        print(f"Unexpected error: {e}")
+        print(f"β Unexpected error: {e}")
 
 if __name__ == "__main__":
     main()
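For reference, the question count printed in main() relies on the template shape section -> subsection -> questions. The summed expression itself sits outside this diff, so len(subsection) below is an assumption, and the toy template is purely illustrative:

    template = {
        "Bias": {"Representation": ["q1", "q2"], "Harms": ["q3"]},
        "Privacy": {"Data collection": ["q4", "q5"]},
    }
    total_questions = sum(
        len(subsection)  # assumed: each subsection holds a list of questions
        for section in template.values()
        for subsection in section.values()
    )
    assert total_questions == 5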