Update src/aibom_generator/utils.py

src/aibom_generator/utils.py (CHANGED: +290 -139)
@@ -371,7 +371,7 @@ def apply_completeness_penalties(original_score: float, missing_fields: Dict[str
     adjusted_score = original_score * penalty_factor

     return {
-        "adjusted_score": round(adjusted_score,
+        "adjusted_score": round(adjusted_score, 1),  # Round to 1 decimal place
         "penalty_applied": penalty_reason is not None,
         "penalty_reason": penalty_reason,
         "penalty_factor": penalty_factor
@@ -404,15 +404,11 @@ def generate_field_recommendations(missing_fields: Dict[str, List[str]]) -> List
                 "priority": "high",
                 "field": field,
                 "message": f"Missing critical field: {field}",
-                "recommendation": f"Add {field}
+                "recommendation": f"Add {field} to improve documentation completeness"
             })

-    # Then
-    important_count = 0
+    # Then important fields
     for field in missing_fields["important"]:
-        if important_count >= 3:
-            break
-
         if field in VALIDATION_MESSAGES:
             recommendations.append({
                 "priority": "medium",
@@ -420,15 +416,27 @@ def generate_field_recommendations(missing_fields: Dict[str, List[str]]) -> List
                 "message": VALIDATION_MESSAGES[field]["missing"],
                 "recommendation": VALIDATION_MESSAGES[field]["recommendation"]
             })
-            important_count += 1
         else:
             recommendations.append({
                 "priority": "medium",
                 "field": field,
                 "message": f"Missing important field: {field}",
-                "recommendation": f"Consider adding {field}
+                "recommendation": f"Consider adding {field} for better documentation"
             })
-
+
+    # Finally supplementary fields (limit to top 5)
+    supplementary_count = 0
+    for field in missing_fields["supplementary"]:
+        if supplementary_count >= 5:
+            break
+
+        recommendations.append({
+            "priority": "low",
+            "field": field,
+            "message": f"Missing supplementary field: {field}",
+            "recommendation": f"Consider adding {field} for comprehensive documentation"
+        })
+        supplementary_count += 1

     return recommendations

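Note (illustration, not part of this commit): with the reordered priorities above, a caller might surface the recommendations roughly like this; only the tier keys ("critical", "important", "supplementary") come from the code, the field names are made up.

    missing = {
        "critical": ["name"],
        "important": ["license"],
        "supplementary": ["tag1", "tag2", "tag3", "tag4", "tag5", "tag6"],
    }
    for rec in generate_field_recommendations(missing):
        print(f"[{rec['priority']}] {rec.get('field', '')}: {rec['recommendation']}")
    # Supplementary suggestions are capped at five, so the sixth entry is never reported.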
@@ -444,142 +452,260 @@ def _validate_ai_requirements(aibom: Dict[str, Any]) -> List[Dict[str, Any]]:
         List of validation issues
     """
     issues = []
+    issue_codes = set()
+
+    # Check required fields
+    for field in ["bomFormat", "specVersion", "serialNumber", "version"]:
+        if field not in aibom:
+            issues.append({
+                "severity": ValidationSeverity.ERROR.value,
+                "code": f"MISSING_{field.upper()}",
+                "message": f"Missing required field: {field}",
+                "path": f"$.{field}"
+            })
+            issue_codes.add(f"MISSING_{field.upper()}")
+
+    # Check bomFormat
+    if "bomFormat" in aibom and aibom["bomFormat"] != "CycloneDX":
+        issues.append({
+            "severity": ValidationSeverity.ERROR.value,
+            "code": "INVALID_BOM_FORMAT",
+            "message": f"Invalid bomFormat: {aibom['bomFormat']}. Must be 'CycloneDX'",
+            "path": "$.bomFormat"
+        })
+        issue_codes.add("INVALID_BOM_FORMAT")
+
+    # Check specVersion
+    if "specVersion" in aibom and aibom["specVersion"] != "1.6":
+        issues.append({
+            "severity": ValidationSeverity.ERROR.value,
+            "code": "INVALID_SPEC_VERSION",
+            "message": f"Invalid specVersion: {aibom['specVersion']}. Must be '1.6'",
+            "path": "$.specVersion"
+        })
+        issue_codes.add("INVALID_SPEC_VERSION")
+
+    # Check serialNumber
+    if "serialNumber" in aibom and not aibom["serialNumber"].startswith("urn:uuid:"):
+        issues.append({
+            "severity": ValidationSeverity.ERROR.value,
+            "code": "INVALID_SERIAL_NUMBER",
+            "message": f"Invalid serialNumber format: {aibom['serialNumber']}. Must start with 'urn:uuid:'",
+            "path": "$.serialNumber"
+        })
+        issue_codes.add("INVALID_SERIAL_NUMBER")
+
+    # Check version
+    if "version" in aibom:
+        if not isinstance(aibom["version"], int):
+            issues.append({
+                "severity": ValidationSeverity.ERROR.value,
+                "code": "INVALID_VERSION_TYPE",
+                "message": f"Invalid version type: {type(aibom['version'])}. Must be an integer",
+                "path": "$.version"
+            })
+            issue_codes.add("INVALID_VERSION_TYPE")
+        elif aibom["version"] <= 0:
+            issues.append({
+                "severity": ValidationSeverity.ERROR.value,
+                "code": "INVALID_VERSION_VALUE",
+                "message": f"Invalid version value: {aibom['version']}. Must be positive",
+                "path": "$.version"
+            })
+            issue_codes.add("INVALID_VERSION_VALUE")
+
+    # Check metadata
+    if "metadata" not in aibom:
+        issues.append({
+            "severity": ValidationSeverity.ERROR.value,
+            "code": "MISSING_METADATA",
+            "message": "Missing metadata section",
+            "path": "$.metadata"
+        })
+        issue_codes.add("MISSING_METADATA")
+    else:
+        metadata = aibom["metadata"]
+
+        # Check timestamp
+        if "timestamp" not in metadata:
+            issues.append({
+                "severity": ValidationSeverity.WARNING.value,
+                "code": "MISSING_TIMESTAMP",
+                "message": "Missing timestamp in metadata",
+                "path": "$.metadata.timestamp"
+            })
+            issue_codes.add("MISSING_TIMESTAMP")
+
+        # Check tools
+        if "tools" not in metadata or not metadata["tools"] or len(metadata["tools"]) == 0:
+            issues.append({
+                "severity": ValidationSeverity.WARNING.value,
+                "code": "MISSING_TOOLS",
+                "message": "Missing tools in metadata",
+                "path": "$.metadata.tools"
+            })
+            issue_codes.add("MISSING_TOOLS")
+
+        # Check authors
+        if "authors" not in metadata or not metadata["authors"] or len(metadata["authors"]) == 0:
+            issues.append({
+                "severity": ValidationSeverity.WARNING.value,
+                "code": "MISSING_AUTHORS",
+                "message": "Missing authors in metadata",
+                "path": "$.metadata.authors"
+            })
+            issue_codes.add("MISSING_AUTHORS")
+        else:
+            # Check author properties
+            for i, author in enumerate(metadata["authors"]):
+                if "url" in author:
+                    issues.append({
+                        "severity": ValidationSeverity.ERROR.value,
+                        "code": "INVALID_AUTHOR_PROPERTY",
+                        "message": "Author objects should not contain 'url' property, use 'email' instead",
+                        "path": f"$.metadata.authors[{i}].url"
+                    })
+                    issue_codes.add("INVALID_AUTHOR_PROPERTY")
+
+        # Check properties
+        if "properties" not in metadata or not metadata["properties"] or len(metadata["properties"]) == 0:
+            issues.append({
+                "severity": ValidationSeverity.INFO.value,
+                "code": "MISSING_PROPERTIES",
+                "message": "Missing properties in metadata",
+                "path": "$.metadata.properties"
+            })
+            issue_codes.add("MISSING_PROPERTIES")

-    # Check
-    if "components" not in aibom or not aibom["components"]:
+    # Check components
+    if "components" not in aibom or not aibom["components"] or len(aibom["components"]) == 0:
         issues.append({
             "severity": ValidationSeverity.ERROR.value,
             "code": "MISSING_COMPONENTS",
-            "message": "
+            "message": "Missing components section or empty components array",
             "path": "$.components"
         })
-
-
-
-    if "metadata" in aibom and "authors" in aibom["metadata"]:
-        for i, author in enumerate(aibom["metadata"]["authors"]):
-            if "url" in author:
-                issues.append({
-                    "severity": ValidationSeverity.ERROR.value,
-                    "code": "INVALID_AUTHOR_PROPERTY",
-                    "message": "Author object contains 'url' property which is not allowed in CycloneDX schema. Use 'email' instead.",
-                    "path": f"$.metadata.authors[{i}].url"
-                })
+        issue_codes.add("MISSING_COMPONENTS")
+    else:
+        components = aibom["components"]

-
-
-        component_path = f"$.components[{i}]"
+        # Check first component (AI model)
+        component = components[0]

-        # Check
+        # Check type
         if "type" not in component:
             issues.append({
                 "severity": ValidationSeverity.ERROR.value,
                 "code": "MISSING_COMPONENT_TYPE",
-                "message": "
-                "path":
+                "message": "Missing type in first component",
+                "path": "$.components[0].type"
             })
-
+            issue_codes.add("MISSING_COMPONENT_TYPE")
+        elif component["type"] != "machine-learning-model":
             issues.append({
-                "severity": ValidationSeverity.
+                "severity": ValidationSeverity.ERROR.value,
                 "code": "INVALID_COMPONENT_TYPE",
-                "message": "
-                "path":
+                "message": f"Invalid type in first component: {component['type']}. Must be 'machine-learning-model'",
+                "path": "$.components[0].type"
             })
-
-
-
+            issue_codes.add("INVALID_COMPONENT_TYPE")
+
+        # Check name
+        if "name" not in component or not component["name"]:
             issues.append({
                 "severity": ValidationSeverity.ERROR.value,
+                "code": "MISSING_COMPONENT_NAME",
+                "message": "Missing name in first component",
+                "path": "$.components[0].name"
+            })
+            issue_codes.add("MISSING_COMPONENT_NAME")
+
+        # Check bom-ref
+        if "bom-ref" not in component or not component["bom-ref"]:
+            issues.append({
+                "severity": ValidationSeverity.ERROR.value,
+                "code": "MISSING_BOM_REF",
+                "message": "Missing bom-ref in first component",
+                "path": "$.components[0].bom-ref"
+            })
+            issue_codes.add("MISSING_BOM_REF")
+
+        # Check purl
+        if "purl" not in component or not component["purl"]:
+            issues.append({
+                "severity": ValidationSeverity.WARNING.value,
                 "code": "MISSING_PURL",
-                "message": "
-                "path":
+                "message": "Missing purl in first component",
+                "path": "$.components[0].purl"
             })
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            issue_codes.add("MISSING_PURL")
+        elif not component["purl"].startswith("pkg:"):
+            issues.append({
+                "severity": ValidationSeverity.ERROR.value,
+                "code": "INVALID_PURL_FORMAT",
+                "message": f"Invalid purl format: {component['purl']}. Must start with 'pkg:'",
+                "path": "$.components[0].purl"
+            })
+            issue_codes.add("INVALID_PURL_FORMAT")
+        elif "@" not in component["purl"]:
+            issues.append({
+                "severity": ValidationSeverity.WARNING.value,
+                "code": "MISSING_VERSION_IN_PURL",
+                "message": f"Missing version in purl: {component['purl']}. Should include version after '@'",
+                "path": "$.components[0].purl"
+            })
+            issue_codes.add("MISSING_VERSION_IN_PURL")
+
+        # Check description
+        if "description" not in component or not component["description"]:
+            issues.append({
+                "severity": ValidationSeverity.WARNING.value,
+                "code": "MISSING_DESCRIPTION",
+                "message": "Missing description in first component",
+                "path": "$.components[0].description"
+            })
+            issue_codes.add("MISSING_DESCRIPTION")
+        elif len(component["description"]) < 20:
+            issues.append({
+                "severity": ValidationSeverity.INFO.value,
+                "code": "SHORT_DESCRIPTION",
+                "message": f"Description is too short: {len(component['description'])} characters. Recommended minimum is 20 characters",
+                "path": "$.components[0].description"
+            })
+            issue_codes.add("SHORT_DESCRIPTION")
+
+        # Check modelCard
+        if "modelCard" not in component or not component["modelCard"]:
             issues.append({
                 "severity": ValidationSeverity.WARNING.value,
                 "code": "MISSING_MODEL_CARD",
-                "message": "
-                "path":
+                "message": "Missing modelCard in first component",
+                "path": "$.components[0].modelCard"
             })
+            issue_codes.add("MISSING_MODEL_CARD")
         else:
             model_card = component["modelCard"]
-            model_card_path = f"{component_path}.modelCard"

-            # Check
-            if "modelParameters" not in model_card:
+            # Check modelParameters
+            if "modelParameters" not in model_card or not model_card["modelParameters"]:
                 issues.append({
                     "severity": ValidationSeverity.WARNING.value,
                     "code": "MISSING_MODEL_PARAMETERS",
-                    "message": "
-                    "path":
+                    "message": "Missing modelParameters in modelCard",
+                    "path": "$.components[0].modelCard.modelParameters"
                 })
-
+                issue_codes.add("MISSING_MODEL_PARAMETERS")
+
             # Check considerations
-            if "considerations" not in model_card:
+            if "considerations" not in model_card or not model_card["considerations"]:
                 issues.append({
-                    "severity": ValidationSeverity.
+                    "severity": ValidationSeverity.WARNING.value,
                     "code": "MISSING_CONSIDERATIONS",
-                    "message": "
-                    "path":
+                    "message": "Missing considerations in modelCard",
+                    "path": "$.components[0].modelCard.considerations"
                 })
-
-
-    if "metadata" not in aibom:
-        issues.append({
-            "severity": ValidationSeverity.ERROR.value,
-            "code": "MISSING_METADATA",
-            "message": "AIBOM must contain metadata",
-            "path": "$.metadata"
-        })
-    else:
-        metadata = aibom["metadata"]
-        metadata_path = "$.metadata"
-
-        # Check tools
-        if "tools" not in metadata or not metadata["tools"]:
-            issues.append({
-                "severity": ValidationSeverity.WARNING.value,
-                "code": "MISSING_TOOLS",
-                "message": "Metadata should include tools that generated the AIBOM",
-                "path": f"{metadata_path}.tools"
-            })
-
-        # Check authors
-        if "authors" not in metadata or not metadata["authors"]:
-            issues.append({
-                "severity": ValidationSeverity.INFO.value,
-                "code": "MISSING_AUTHORS",
-                "message": "Metadata should include authors information",
-                "path": f"{metadata_path}.authors"
-            })
-
-        # Check properties
-        if "properties" not in metadata or not metadata["properties"]:
-            issues.append({
-                "severity": ValidationSeverity.INFO.value,
-                "code": "MISSING_PROPERTIES",
-                "message": "Metadata should include properties for additional information",
-                "path": f"{metadata_path}.properties"
-            })
-
+                issue_codes.add("MISSING_CONSIDERATIONS")
+
     return issues

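Note (illustration, not part of this commit): a minimal sketch of how the expanded validator behaves on a small, hand-written AIBOM; the document below is invented for the example and deliberately has no model card.

    minimal_aibom = {
        "bomFormat": "CycloneDX",
        "specVersion": "1.6",
        "serialNumber": "urn:uuid:00000000-0000-0000-0000-000000000000",
        "version": 1,
        "metadata": {"timestamp": "2025-01-01T00:00:00Z", "tools": [], "authors": [], "properties": []},
        "components": [{
            "type": "machine-learning-model",
            "name": "demo-model",
            "bom-ref": "demo-model-1.0",
            "purl": "pkg:huggingface/demo/demo-model@1.0",
        }],
    }
    issues = _validate_ai_requirements(minimal_aibom)
    by_severity = {}
    for issue in issues:
        by_severity.setdefault(issue["severity"], []).append(issue["code"])
    # Expect no errors here: warnings such as MISSING_TOOLS, MISSING_AUTHORS, MISSING_DESCRIPTION
    # and MISSING_MODEL_CARD, plus an INFO entry for MISSING_PROPERTIES.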
@@ -594,8 +720,6 @@ def _generate_validation_recommendations(issues: List[Dict[str, Any]]) -> List[s
         List of recommendations
     """
     recommendations = []
-
-    # Group issues by code
     issue_codes = set(issue["code"] for issue in issues)

     # Generate recommendations based on issue codes
@@ -747,12 +871,16 @@ def calculate_industry_neutral_score(aibom: Dict[str, Any]) -> Dict[str, Any]:

     # Score each field based on classification
     scores_by_category = {category: 0 for category in max_scores.keys()}
+    max_possible_by_category = {category: 0 for category in max_scores.keys()}

     for field, classification in FIELD_CLASSIFICATION.items():
         tier = classification["tier"]
         weight = classification["weight"]
         category = classification["category"]

+        # Add to max possible score for this category
+        max_possible_by_category[category] += weight
+
         # Check if field is present
         is_present = check_field_in_aibom(aibom, field)

@@ -766,16 +894,27 @@ def calculate_industry_neutral_score(aibom: Dict[str, Any]) -> Dict[str, Any]:
         field_checklist[field] = f"{'β' if is_present else 'β'} {importance_indicator}"

     # Normalize category scores to max_scores
+    normalized_scores = {}
     for category in scores_by_category:
-
+        if max_possible_by_category[category] > 0:
+            # Normalize to the max score for this category
+            normalized_score = (scores_by_category[category] / max_possible_by_category[category]) * max_scores[category]
+            normalized_scores[category] = min(normalized_score, max_scores[category])
+        else:
+            normalized_scores[category] = 0

-    # Calculate total score (sum of weighted
+    # Calculate total score (sum of weighted normalized scores)
     total_score = 0
-    for category in
+    for category, score in normalized_scores.items():
+        # Each category contributes its percentage to the total
         category_weight = max_scores[category] / sum(max_scores.values())
-        total_score +=
+        total_score += score * category_weight
+
+    # Round to one decimal place
+    total_score = round(total_score, 1)

-
+    # Ensure score is between 0 and 100
+    total_score = max(0, min(total_score, 100))

     # Determine completeness profile
     profile = determine_completeness_profile(aibom, total_score)
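Note (illustration, not part of this commit): the normalization above can be checked with made-up numbers; the category names below are placeholders, not the real FIELD_CLASSIFICATION categories.

    max_scores = {"basic": 20, "model_card": 30}
    scores_by_category = {"basic": 7, "model_card": 12}
    max_possible_by_category = {"basic": 10, "model_card": 24}
    normalized_scores = {
        c: min((scores_by_category[c] / max_possible_by_category[c]) * max_scores[c], max_scores[c])
        for c in max_scores
    }  # {"basic": 14.0, "model_card": 15.0}
    total_score = sum(
        normalized_scores[c] * (max_scores[c] / sum(max_scores.values())) for c in max_scores
    )  # 14.0 * 0.4 + 15.0 * 0.6 = 14.6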
@@ -788,9 +927,10 @@ def calculate_industry_neutral_score(aibom: Dict[str, Any]) -> Dict[str, Any]:

     return {
         "total_score": penalty_result["adjusted_score"],
-        "section_scores":
+        "section_scores": normalized_scores,
         "max_scores": max_scores,
         "field_checklist": field_checklist,
+        "field_tiers": {field: info["tier"] for field, info in FIELD_CLASSIFICATION.items()},
         "missing_fields": missing_fields,
         "completeness_profile": profile,
         "penalty_applied": penalty_result["penalty_applied"],
@@ -831,12 +971,12 @@ def calculate_completeness_score(aibom: Dict[str, Any], validate: bool = True, u
     if error_count > 0:
         # Severe penalty for errors (up to 50% reduction)
         error_penalty = min(0.5, error_count * 0.1)
-        result["total_score"] = round(result["total_score"] * (1 - error_penalty),
+        result["total_score"] = round(result["total_score"] * (1 - error_penalty), 1)
         result["validation_penalty"] = f"-{int(error_penalty * 100)}% due to {error_count} schema errors"
     elif warning_count > 0:
         # Minor penalty for warnings (up to 20% reduction)
         warning_penalty = min(0.2, warning_count * 0.05)
-        result["total_score"] = round(result["total_score"] * (1 - warning_penalty),
+        result["total_score"] = round(result["total_score"] * (1 - warning_penalty), 1)
         result["validation_penalty"] = f"-{int(warning_penalty * 100)}% due to {warning_count} schema warnings"

     return result
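Note (illustration, not part of this commit): the schema-validation penalty saturates quickly; for example, with three errors and a made-up base score:

    error_count = 3
    error_penalty = min(0.5, error_count * 0.1)          # 0.3
    total_score = round(78.4 * (1 - error_penalty), 1)   # 54.9
    # reported as "-30% due to 3 schema errors"; five or more errors hit the 50% cap.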
@@ -900,15 +1040,17 @@ def calculate_completeness_score(aibom: Dict[str, Any], validate: bool = True, u
         model_card_score = sum([
             10 if card.get("modelParameters") else 0,
             10 if card.get("quantitativeAnalysis") else 0,
-            10 if card.get("considerations") and len(card["considerations"]) > 50 else 0
+            10 if card.get("considerations") and isinstance(card["considerations"], dict) and len(str(card["considerations"])) > 50 else 0
         ])
         for field in card_fields:
             field_checklist[f"modelCard.{field}"] = "β" if field in card else "β"
-            if field == "considerations" and field in card and len(card["considerations"]) <= 50:
+            if field == "considerations" and field in card and (not isinstance(card["considerations"], dict) or len(str(card["considerations"])) <= 50):
                 field_checklist[f"modelCard.{field}"] = "β"

     # External References (10 points max)
-    ext_refs =
+    ext_refs = []
+    if components and components[0].get("externalReferences"):
+        ext_refs = components[0].get("externalReferences")
     ext_score = 0
     for ref in ext_refs:
         url = ref.get("url", "").lower()
@@ -922,23 +1064,32 @@ def calculate_completeness_score(aibom: Dict[str, Any], validate: bool = True, u
     field_checklist["externalReferences"] = "β" if ext_refs else "β"

     # Calculate total score
+    section_scores = {
+        "required_fields": required_score,
+        "metadata": metadata_score,
+        "component_basic": component_score,
+        "component_model_card": model_card_score,
+        "external_references": ext_score
+    }
+
+    # Calculate weighted total score
     total_score = (
-        (
-        (
-        (
-        (
-        (
+        (section_scores["required_fields"] / max_scores["required_fields"]) * 20 +
+        (section_scores["metadata"] / max_scores["metadata"]) * 20 +
+        (section_scores["component_basic"] / max_scores["component_basic"]) * 20 +
+        (section_scores["component_model_card"] / max_scores["component_model_card"]) * 30 +
+        (section_scores["external_references"] / max_scores["external_references"]) * 10
     )
+
+    # Round to one decimal place
+    total_score = round(total_score, 1)
+
+    # Ensure score is between 0 and 100
+    total_score = max(0, min(total_score, 100))

     result = {
-        "total_score":
-        "section_scores":
-            "required_fields": required_score,
-            "metadata": metadata_score,
-            "component_basic": component_score,
-            "component_model_card": model_card_score,
-            "external_references": ext_score
-        },
+        "total_score": total_score,
+        "section_scores": section_scores,
         "max_scores": max_scores,
         "field_checklist": field_checklist
     }
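Note (illustration, not part of this commit): a quick check of the weighted total above, assuming max_scores carries the same 20/20/20/30/10 caps as the multipliers; the section scores are made up.

    max_scores = {"required_fields": 20, "metadata": 20, "component_basic": 20,
                  "component_model_card": 30, "external_references": 10}
    section_scores = {"required_fields": 15, "metadata": 10, "component_basic": 18,
                      "component_model_card": 12, "external_references": 5}
    weights = {"required_fields": 20, "metadata": 20, "component_basic": 20,
               "component_model_card": 30, "external_references": 10}
    total_score = sum((section_scores[k] / max_scores[k]) * weights[k] for k in weights)
    # 15 + 10 + 18 + 12 + 5 = 60.0; with matching caps each section contributes its raw points,
    # and the clamp to [0, 100] only matters if a section ever exceeds its maximum.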
@@ -958,12 +1109,12 @@ def calculate_completeness_score(aibom: Dict[str, Any], validate: bool = True, u
     if error_count > 0:
         # Severe penalty for errors (up to 50% reduction)
         error_penalty = min(0.5, error_count * 0.1)
-        result["total_score"] = round(result["total_score"] * (1 - error_penalty),
+        result["total_score"] = round(result["total_score"] * (1 - error_penalty), 1)
         result["validation_penalty"] = f"-{int(error_penalty * 100)}% due to {error_count} schema errors"
     elif warning_count > 0:
         # Minor penalty for warnings (up to 20% reduction)
         warning_penalty = min(0.2, warning_count * 0.05)
-        result["total_score"] = round(result["total_score"] * (1 - warning_penalty),
+        result["total_score"] = round(result["total_score"] * (1 - warning_penalty), 1)
         result["validation_penalty"] = f"-{int(warning_penalty * 100)}% due to {warning_count} schema warnings"

     return result