a1c00l committed (verified)
Commit 6c4b1a0
1 Parent(s): ea07650

Update src/aibom_generator/api.py

Files changed (1)
  1. src/aibom_generator/api.py +64 -423
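For orientation before reading the diff: this commit strips the module down to the HTML form flow, dropping the JSON API endpoints (/api/generate, /api/generate-with-report, /api/models/{model_id}/score), the batch job endpoints, and the model-card upload endpoint, and folds generation directly into the /generate form handler. Below is a minimal sketch of how the surviving routes might be exercised once this revision is deployed; the base URL and the example model id are assumptions, while the paths and form fields are taken from the handlers in the diff.

import requests

BASE = "http://localhost:7860"  # assumed local deployment; adjust to the actual host

# /status is kept and returns a small JSON payload
print(requests.get(f"{BASE}/status").json())

# /generate is an HTML form handler (not JSON): it expects form fields and renders
# result.html; the field names below match the Form(...) parameters in the diff
resp = requests.post(
    f"{BASE}/generate",
    data={
        "model_id": "org/example-model",  # hypothetical model id
        "include_inference": "false",
        "use_best_practices": "true",
    },
)
resp.raise_for_status()

# Generated files are written to /tmp/aibom_output and served under /output/, using the
# "<model_id with '/' replaced by '_'>_aibom.json" naming introduced in the new code
sbom = requests.get(f"{BASE}/output/org_example-model_aibom.json").json()
print(list(sbom.keys()))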
src/aibom_generator/api.py CHANGED
@@ -2,16 +2,11 @@ import os
  import json
  import logging
  import sys
- import uuid
- import asyncio
- import shutil
- from datetime import datetime
- from typing import List, Dict, Any, Optional
- from fastapi import FastAPI, HTTPException, Request, Form, File, UploadFile, BackgroundTasks
  from fastapi.responses import HTMLResponse, JSONResponse, FileResponse
  from fastapi.staticfiles import StaticFiles
  from fastapi.templating import Jinja2Templates
- from pydantic import BaseModel, Field

  # Configure logging
  logging.basicConfig(level=logging.INFO)
@@ -20,8 +15,6 @@ logger = logging.getLogger(__name__)
  # Define directories
  templates_dir = "templates"
  OUTPUT_DIR = "/tmp/aibom_output"
- UPLOAD_DIR = "/tmp/aibom_uploads"
- BATCH_DIR = "/tmp/aibom_batch_jobs"

  # Initialize templates
  templates = Jinja2Templates(directory=templates_dir)
@@ -29,10 +22,8 @@ templates = Jinja2Templates(directory=templates_dir)
  # Create app
  app = FastAPI(title="AI SBOM Generator API")

- # Ensure directories exist
  os.makedirs(OUTPUT_DIR, exist_ok=True)
- os.makedirs(UPLOAD_DIR, exist_ok=True)
- os.makedirs(BATCH_DIR, exist_ok=True)

  # Mount output directory as static files
  app.mount("/output", StaticFiles(directory=OUTPUT_DIR), name="output")
@@ -43,41 +34,10 @@ class StatusResponse(BaseModel):
  version: str
  generator_version: str

- # Generate request model
- class GenerateRequest(BaseModel):
- model_id: str
- include_inference: bool = True
- use_best_practices: bool = True
- hf_token: Optional[str] = None
-
- # Batch request model
- class BatchRequest(BaseModel):
- model_ids: List[str]
- include_inference: bool = True
- use_best_practices: bool = True
- hf_token: Optional[str] = None
-
- # Batch job status model
- class BatchJobStatus(BaseModel):
- job_id: str
- status: str
- model_ids: List[str]
- created_at: str
- completed: int = 0
- total: int = 0
- results: Dict[str, Any] = {}
-
- # Dictionary to store batch jobs
- batch_jobs = {}
-
  @app.on_event("startup")
  async def startup_event():
  os.makedirs(OUTPUT_DIR, exist_ok=True)
- os.makedirs(UPLOAD_DIR, exist_ok=True)
- os.makedirs(BATCH_DIR, exist_ok=True)
  logger.info(f"Output directory ready at {OUTPUT_DIR}")
- logger.info(f"Upload directory ready at {UPLOAD_DIR}")
- logger.info(f"Batch directory ready at {BATCH_DIR}")
  logger.info(f"Registered routes: {[route.path for route in app.routes]}")

  @app.get("/", response_class=HTMLResponse)
@@ -92,11 +52,6 @@ async def root(request: Request):
  async def get_status():
  return StatusResponse(status="operational", version="1.0.0", generator_version="1.0.0")

- # API version of status endpoint
- @app.get("/api/status", response_model=StatusResponse)
- async def api_get_status():
- return StatusResponse(status="operational", version="1.0.0", generator_version="1.0.0")
-
  # Import utils module for completeness score calculation
  def import_utils():
  """Import utils module with fallback paths."""
@@ -145,7 +100,7 @@ def create_comprehensive_completeness_score(aibom=None):
  If aibom is provided and calculate_completeness_score is available, use it to calculate the score.
  Otherwise, return a default score structure.
  """
- # If we have the calculate_completeness_score function and an AI SBOM, use it
  if calculate_completeness_score and aibom:
  try:
  return calculate_completeness_score(aibom, validate=True, use_best_practices=True)
@@ -305,132 +260,55 @@ def create_comprehensive_completeness_score(aibom=None):
  ]
  }

- # Helper function to get AI SBOM generator
- def get_generator():
- """Get AI SBOM generator with fallback paths."""
  try:
  # Try different import paths for AIBOMGenerator
  try:
  from src.aibom_generator.generator import AIBOMGenerator
- return AIBOMGenerator()
  except ImportError:
  try:
  from aibom_generator.generator import AIBOMGenerator
- return AIBOMGenerator()
  except ImportError:
  try:
  from generator import AIBOMGenerator
- return AIBOMGenerator()
  except ImportError:
  logger.error("Could not import AIBOMGenerator from any known location")
  raise ImportError("Could not import AIBOMGenerator from any known location")
- except Exception as e:
- logger.error(f"Error getting generator: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Error getting generator: {str(e)}")

- # Helper function to generate AI SBOM
- async def generate_aibom(model_id: str, include_inference: bool = True, use_best_practices: bool = True, hf_token: Optional[str] = None):
- """Generate AI SBOM for a model."""
- try:
- generator = get_generator()
-
- # Generate AI SBOM
  aibom = generator.generate_aibom(
  model_id=model_id,
  include_inference=include_inference,
- use_best_practices=use_best_practices,
- hf_token=hf_token
  )
  enhancement_report = generator.get_enhancement_report()
-
- # Save AI SBOM to file
- request_id = str(uuid.uuid4())
- filename = f"{model_id.replace('/', '_')}_{request_id}.json"
  filepath = os.path.join(OUTPUT_DIR, filename)
-
  with open(filepath, "w") as f:
  json.dump(aibom, f, indent=2)
-
  download_url = f"/output/{filename}"
-
- # Get completeness score or create a comprehensive one if not available
- completeness_score = None
- if hasattr(generator, 'get_completeness_score'):
- try:
- completeness_score = generator.get_completeness_score(model_id)
- logger.info("Successfully retrieved completeness_score from generator")
- except Exception as e:
- logger.error(f"Completeness score error from generator: {str(e)}")
-
- # If completeness_score is None or doesn't have field_checklist, use comprehensive one
- if completeness_score is None or not isinstance(completeness_score, dict) or 'field_checklist' not in completeness_score:
- logger.info("Using comprehensive completeness_score with field_checklist")
- completeness_score = create_comprehensive_completeness_score(aibom)
-
- # Ensure enhancement_report has the right structure
- if enhancement_report is None:
- enhancement_report = {
- "ai_enhanced": False,
- "ai_model": None,
- "original_score": {"total_score": 0, "completeness_score": 0},
- "final_score": {"total_score": 0, "completeness_score": 0},
- "improvement": 0
- }
- else:
- # Ensure original_score has completeness_score
- if "original_score" not in enhancement_report or enhancement_report["original_score"] is None:
- enhancement_report["original_score"] = {"total_score": 0, "completeness_score": 0}
- elif "completeness_score" not in enhancement_report["original_score"]:
- enhancement_report["original_score"]["completeness_score"] = enhancement_report["original_score"].get("total_score", 0)
-
- # Ensure final_score has completeness_score
- if "final_score" not in enhancement_report or enhancement_report["final_score"] is None:
- enhancement_report["final_score"] = {"total_score": 0, "completeness_score": 0}
- elif "completeness_score" not in enhancement_report["final_score"]:
- enhancement_report["final_score"]["completeness_score"] = enhancement_report["final_score"].get("total_score", 0)
-
- return {
- "aibom": aibom,
- "model_id": model_id,
- "generated_at": datetime.now().isoformat(),
- "request_id": request_id,
- "download_url": download_url,
- "enhancement_report": enhancement_report,
- "completeness_score": completeness_score
- }
- except Exception as e:
- logger.error(f"Error generating AI SBOM: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Error generating AI SBOM: {str(e)}")

- @app.post("/generate", response_class=HTMLResponse)
- async def generate_form(
- request: Request,
- model_id: str = Form(...),
- include_inference: bool = Form(False),
- use_best_practices: bool = Form(True),
- hf_token: Optional[str] = Form(None)
- ):
- try:
- # Generate AI SBOM
- result = await generate_aibom(
- model_id=model_id,
- include_inference=include_inference,
- use_best_practices=use_best_practices,
- hf_token=hf_token
- )
-
- aibom = result["aibom"]
- enhancement_report = result["enhancement_report"]
- completeness_score = result["completeness_score"]
- download_url = result["download_url"]
-
  # Create download and UI interaction scripts
  download_script = f"""
  <script>
  function downloadJSON() {{
  const a = document.createElement('a');
  a.href = '{download_url}';
- a.download = '{os.path.basename(download_url)}';
  document.body.appendChild(a);
  a.click();
  document.body.removeChild(a);
@@ -467,7 +345,43 @@ async def generate_form(
  }}
  </script>
  """

  # Add display names and tooltips for score sections
  display_names = {
  "required_fields": "Required Fields",
@@ -478,8 +392,8 @@ async def generate_form(
  }

  tooltips = {
- "required_fields": "Basic required fields for a valid AI SBOM",
- "metadata": "Information about the AI SBOM itself",
  "component_basic": "Basic information about the AI model component",
  "component_model_card": "Detailed model card information",
  "external_references": "Links to external resources"
@@ -492,7 +406,7 @@ async def generate_form(
  "component_model_card": 30,
  "external_references": 10
  }
-
  # Render the template with all necessary data
  return templates.TemplateResponse(
  "result.html",
@@ -515,285 +429,12 @@ async def generate_form(
  "error.html", {"request": request, "error": str(e)}
  )

- @app.post("/api/generate", response_model=Dict[str, Any])
- async def api_generate(request: GenerateRequest):
- """
- Generate an AI SBOM for a specified Hugging Face model.
-
- This endpoint generates an AI SBOM for the specified model and returns it as JSON.
- """
- try:
- # Generate AI SBOM
- result = await generate_aibom(
- model_id=request.model_id,
- include_inference=request.include_inference,
- use_best_practices=request.use_best_practices,
- hf_token=request.hf_token
- )
-
- # Return result without completeness_score to keep response size manageable
- return {
- "aibom": result["aibom"],
- "model_id": result["model_id"],
- "generated_at": result["generated_at"],
- "request_id": result["request_id"],
- "download_url": result["download_url"]
- }
- except Exception as e:
- logger.error(f"Error generating AI SBOM: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Error generating AI SBOM: {str(e)}")
-
- @app.post("/api/generate-with-report", response_model=Dict[str, Any])
- async def api_generate_with_report(request: GenerateRequest):
- """
- Generate an AI SBOM with a detailed enhancement report.
-
- This endpoint generates an AI SBOM for the specified model and returns it as JSON
- along with an enhancement report.
- """
- try:
- # Generate AI SBOM
- result = await generate_aibom(
- model_id=request.model_id,
- include_inference=request.include_inference,
- use_best_practices=request.use_best_practices,
- hf_token=request.hf_token
- )
-
- # Return result with enhancement report but without completeness_score
- return {
- "aibom": result["aibom"],
- "model_id": result["model_id"],
- "generated_at": result["generated_at"],
- "request_id": result["request_id"],
- "download_url": result["download_url"],
- "enhancement_report": result["enhancement_report"]
- }
- except Exception as e:
- logger.error(f"Error generating AI SBOM: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Error generating AI SBOM: {str(e)}")
-
- @app.get("/api/models/{model_id}/score", response_model=Dict[str, Any])
- async def get_model_score(
- model_id: str,
- use_best_practices: bool = True,
- hf_token: Optional[str] = None
- ):
- """
- Get the completeness score for a model without generating a full AI SBOM.
-
- This endpoint calculates the completeness score for the specified model and returns it as JSON.
- """
- try:
- # Get generator
- generator = get_generator()
-
- # Generate minimal AI SBOM to get score
- aibom = generator.generate_aibom(
- model_id=model_id,
- include_inference=False, # No need for inference just for scoring
- use_best_practices=use_best_practices,
- hf_token=hf_token
- )
-
- # Get completeness score or create a comprehensive one if not available
- completeness_score = None
- if hasattr(generator, 'get_completeness_score'):
- try:
- completeness_score = generator.get_completeness_score(model_id)
- logger.info("Successfully retrieved completeness_score from generator")
- except Exception as e:
- logger.error(f"Completeness score error from generator: {str(e)}")
-
- # If completeness_score is None or doesn't have field_checklist, use comprehensive one
- if completeness_score is None or not isinstance(completeness_score, dict) or 'field_checklist' not in completeness_score:
- logger.info("Using comprehensive completeness_score with field_checklist")
- completeness_score = create_comprehensive_completeness_score(aibom)
-
- # Return only the score information
- return {
- "total_score": completeness_score["total_score"],
- "section_scores": completeness_score["section_scores"],
- "max_scores": completeness_score["max_scores"],
- "model_id": model_id
- }
- except Exception as e:
- logger.error(f"Error getting model score: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Error getting model score: {str(e)}")
-
- # Function to process a batch job
- async def process_batch_job(job_id: str, model_ids: List[str], include_inference: bool, use_best_practices: bool, hf_token: Optional[str] = None):
- """Process a batch job to generate AI SBOMs for multiple models."""
- try:
- # Update job status to processing
- batch_jobs[job_id]["status"] = "processing"
-
- # Process each model
- for model_id in model_ids:
- try:
- # Generate AI SBOM
- result = await generate_aibom(
- model_id=model_id,
- include_inference=include_inference,
- use_best_practices=use_best_practices,
- hf_token=hf_token
- )
-
- # Update job results
- batch_jobs[job_id]["results"][model_id] = {
- "status": "completed",
- "download_url": result["download_url"],
- "enhancement_report": result["enhancement_report"]
- }
-
- # Update completed count
- batch_jobs[job_id]["completed"] += 1
- except Exception as e:
- logger.error(f"Error processing model {model_id} in batch job {job_id}: {str(e)}")
- batch_jobs[job_id]["results"][model_id] = {
- "status": "failed",
- "error": str(e)
- }
- batch_jobs[job_id]["completed"] += 1
-
- # Update job status to completed
- batch_jobs[job_id]["status"] = "completed"
- except Exception as e:
- logger.error(f"Error processing batch job {job_id}: {str(e)}")
- batch_jobs[job_id]["status"] = "failed"
- batch_jobs[job_id]["error"] = str(e)
-
- @app.post("/api/batch", response_model=BatchJobStatus)
- async def create_batch_job(request: BatchRequest, background_tasks: BackgroundTasks):
- """
- Start a batch job to generate AI SBOMs for multiple models.
-
- This endpoint starts a batch job to generate AI SBOMs for the specified models
- and returns a job ID that can be used to check the status of the job.
- """
- try:
- # Create job ID
- job_id = str(uuid.uuid4())
-
- # Create job directory
- job_dir = os.path.join(BATCH_DIR, job_id)
- os.makedirs(job_dir, exist_ok=True)
-
- # Create job status
- job_status = BatchJobStatus(
- job_id=job_id,
- status="queued",
- model_ids=request.model_ids,
- created_at=datetime.now().isoformat(),
- total=len(request.model_ids)
- )
-
- # Store job status
- batch_jobs[job_id] = job_status.dict()
-
- # Start batch job in background
- background_tasks.add_task(
- process_batch_job,
- job_id,
- request.model_ids,
- request.include_inference,
- request.use_best_practices,
- request.hf_token
- )
-
- return job_status
- except Exception as e:
- logger.error(f"Error creating batch job: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Error creating batch job: {str(e)}")
-
- @app.get("/api/batch/{job_id}", response_model=BatchJobStatus)
- async def get_batch_job_status(job_id: str):
- """
- Get the status of a batch job.
-
- This endpoint returns the current status of the specified batch job.
- """
- try:
- # Check if job exists
- if job_id not in batch_jobs:
- raise HTTPException(status_code=404, detail=f"Batch job {job_id} not found")
-
- # Return job status
- return BatchJobStatus(**batch_jobs[job_id])
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting batch job status: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Error getting batch job status: {str(e)}")
-
- @app.post("/api/upload-model-card", response_model=Dict[str, Any])
- async def upload_model_card(
- model_id: str = Form(...),
- model_card: UploadFile = File(...),
- include_inference: bool = Form(True),
- use_best_practices: bool = Form(True),
- hf_token: Optional[str] = Form(None)
- ):
- """
- Upload a custom model card and generate an AI SBOM.
-
- This endpoint allows uploading a custom model card file and generates an AI SBOM
- using the uploaded model card.
- """
- try:
- # Create upload directory for this model
- model_upload_dir = os.path.join(UPLOAD_DIR, model_id.replace("/", "_"))
- os.makedirs(model_upload_dir, exist_ok=True)
-
- # Save uploaded model card
- model_card_path = os.path.join(model_upload_dir, "model_card.md")
- with open(model_card_path, "wb") as f:
- content = await model_card.read()
- f.write(content)
-
- # Get generator
- generator = get_generator()
-
- # Generate AI SBOM with custom model card
- aibom = generator.generate_aibom(
- model_id=model_id,
- include_inference=include_inference,
- use_best_practices=use_best_practices,
- hf_token=hf_token,
- custom_model_card_path=model_card_path
- )
- enhancement_report = generator.get_enhancement_report()
-
- # Save AI SBOM to file
- request_id = str(uuid.uuid4())
- filename = f"{model_id.replace('/', '_')}_{request_id}.json"
- filepath = os.path.join(OUTPUT_DIR, filename)
-
- with open(filepath, "w") as f:
- json.dump(aibom, f, indent=2)
-
- download_url = f"/output/{filename}"
-
- # Return result
- return {
- "aibom": aibom,
- "model_id": model_id,
- "generated_at": datetime.now().isoformat(),
- "request_id": request_id,
- "download_url": download_url,
- "enhancement_report": enhancement_report,
- "model_card_uploaded": True
- }
- except Exception as e:
- logger.error(f"Error uploading model card: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Error uploading model card: {str(e)}")
-
  @app.get("/download/{filename}")
  async def download_file(filename: str):
  """
- Download a generated AI SBOM file.

- This endpoint serves the generated AI SBOM JSON files for download.
  """
  file_path = os.path.join(OUTPUT_DIR, filename)
  if not os.path.exists(file_path):
 
  import json
  import logging
  import sys
+ from fastapi import FastAPI, HTTPException, Request, Form
  from fastapi.responses import HTMLResponse, JSONResponse, FileResponse
  from fastapi.staticfiles import StaticFiles
  from fastapi.templating import Jinja2Templates
+ from pydantic import BaseModel

  # Configure logging
  logging.basicConfig(level=logging.INFO)

  # Define directories
  templates_dir = "templates"
  OUTPUT_DIR = "/tmp/aibom_output"

  # Initialize templates
  templates = Jinja2Templates(directory=templates_dir)

  # Create app
  app = FastAPI(title="AI SBOM Generator API")

+ # Ensure output directory exists
  os.makedirs(OUTPUT_DIR, exist_ok=True)

  # Mount output directory as static files
  app.mount("/output", StaticFiles(directory=OUTPUT_DIR), name="output")
 
  version: str
  generator_version: str

  @app.on_event("startup")
  async def startup_event():
  os.makedirs(OUTPUT_DIR, exist_ok=True)
  logger.info(f"Output directory ready at {OUTPUT_DIR}")
  logger.info(f"Registered routes: {[route.path for route in app.routes]}")

  @app.get("/", response_class=HTMLResponse)

  async def get_status():
  return StatusResponse(status="operational", version="1.0.0", generator_version="1.0.0")

  # Import utils module for completeness score calculation
  def import_utils():
  """Import utils module with fallback paths."""
 
  If aibom is provided and calculate_completeness_score is available, use it to calculate the score.
  Otherwise, return a default score structure.
  """
+ # If we have the calculate_completeness_score function and an AIBOM, use it
  if calculate_completeness_score and aibom:
  try:
  return calculate_completeness_score(aibom, validate=True, use_best_practices=True)
 
  ]
  }

+ @app.post("/generate", response_class=HTMLResponse)
+ async def generate_form(
+ request: Request,
+ model_id: str = Form(...),
+ include_inference: bool = Form(False),
+ use_best_practices: bool = Form(True)
+ ):
  try:
  # Try different import paths for AIBOMGenerator
+ generator = None
  try:
  from src.aibom_generator.generator import AIBOMGenerator
+ generator = AIBOMGenerator()
  except ImportError:
  try:
  from aibom_generator.generator import AIBOMGenerator
+ generator = AIBOMGenerator()
  except ImportError:
  try:
  from generator import AIBOMGenerator
+ generator = AIBOMGenerator()
  except ImportError:
  logger.error("Could not import AIBOMGenerator from any known location")
  raise ImportError("Could not import AIBOMGenerator from any known location")

+ # Generate AIBOM
  aibom = generator.generate_aibom(
  model_id=model_id,
  include_inference=include_inference,
+ use_best_practices=use_best_practices
  )
  enhancement_report = generator.get_enhancement_report()
+
+ # Save AIBOM to file
+ filename = f"{model_id.replace('/', '_')}_aibom.json"
  filepath = os.path.join(OUTPUT_DIR, filename)
+
  with open(filepath, "w") as f:
  json.dump(aibom, f, indent=2)
+
  download_url = f"/output/{filename}"

  # Create download and UI interaction scripts
  download_script = f"""
  <script>
  function downloadJSON() {{
  const a = document.createElement('a');
  a.href = '{download_url}';
+ a.download = '{filename}';
  document.body.appendChild(a);
  a.click();
  document.body.removeChild(a);
 
  }}
  </script>
  """
+
+ # Get completeness score or create a comprehensive one if not available
+ completeness_score = None
+ if hasattr(generator, 'get_completeness_score'):
+ try:
+ completeness_score = generator.get_completeness_score(model_id)
+ logger.info("Successfully retrieved completeness_score from generator")
+ except Exception as e:
+ logger.error(f"Completeness score error from generator: {str(e)}")

+ # If completeness_score is None or doesn't have field_checklist, use comprehensive one
+ if completeness_score is None or not isinstance(completeness_score, dict) or 'field_checklist' not in completeness_score:
+ logger.info("Using comprehensive completeness_score with field_checklist")
+ completeness_score = create_comprehensive_completeness_score(aibom)
+
+ # Ensure enhancement_report has the right structure
+ if enhancement_report is None:
+ enhancement_report = {
+ "ai_enhanced": False,
+ "ai_model": None,
+ "original_score": {"total_score": 0, "completeness_score": 0},
+ "final_score": {"total_score": 0, "completeness_score": 0},
+ "improvement": 0
+ }
+ else:
+ # Ensure original_score has completeness_score
+ if "original_score" not in enhancement_report or enhancement_report["original_score"] is None:
+ enhancement_report["original_score"] = {"total_score": 0, "completeness_score": 0}
+ elif "completeness_score" not in enhancement_report["original_score"]:
+ enhancement_report["original_score"]["completeness_score"] = enhancement_report["original_score"].get("total_score", 0)
+
+ # Ensure final_score has completeness_score
+ if "final_score" not in enhancement_report or enhancement_report["final_score"] is None:
+ enhancement_report["final_score"] = {"total_score": 0, "completeness_score": 0}
+ elif "completeness_score" not in enhancement_report["final_score"]:
+ enhancement_report["final_score"]["completeness_score"] = enhancement_report["final_score"].get("total_score", 0)
+
  # Add display names and tooltips for score sections
  display_names = {
  "required_fields": "Required Fields",
 
  }

  tooltips = {
+ "required_fields": "Basic required fields for a valid AIBOM",
+ "metadata": "Information about the AIBOM itself",
  "component_basic": "Basic information about the AI model component",
  "component_model_card": "Detailed model card information",
  "external_references": "Links to external resources"

  "component_model_card": 30,
  "external_references": 10
  }
+
  # Render the template with all necessary data
  return templates.TemplateResponse(
  "result.html",
 
  "error.html", {"request": request, "error": str(e)}
  )

  @app.get("/download/{filename}")
  async def download_file(filename: str):
  """
+ Download a generated AIBOM file.

+ This endpoint serves the generated AIBOM JSON files for download.
  """
  file_path = os.path.join(OUTPUT_DIR, filename)
  if not os.path.exists(file_path):
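As a quick smoke test of the slimmed-down module, a minimal sketch is below; the import path and the presence of a templates/ directory are assumptions taken from the repo layout rather than from this diff, and only the /status route (unchanged by this commit) is exercised.

from fastapi.testclient import TestClient

from src.aibom_generator.api import app  # import path assumed from the repo layout

client = TestClient(app)

def test_status_reports_operational():
    # /status returns the StatusResponse model defined earlier in the file
    r = client.get("/status")
    assert r.status_code == 200
    assert r.json()["status"] == "operational"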