a1c00l committed
Commit 5cbef81 · verified · 1 Parent(s): 98b3f9b

Update src/aibom_generator/api.py

Files changed (1):
  1. src/aibom_generator/api.py +146 -50
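The core of this change is a usage counter backed by a private Hugging Face dataset: every successful generation appends a row, and the row count is shown in the UI. As a minimal sketch of that append-and-push pattern (assumptions: the `datasets` package is installed, `HF_TOKEN` has write access to the repo named in the diff, and `append_log` is an illustrative name rather than the helper defined below):

# Illustrative sketch only; mirrors the logging helper added in this commit.
import os
from datetime import datetime
from datasets import Dataset, load_dataset, concatenate_datasets

HF_REPO = "aetheris-ai/aisbom-usage-log"   # private dataset repo (from the diff)
HF_TOKEN = os.getenv("HF_TOKEN")           # assumed to be set in the environment

def append_log(model_id: str) -> None:
    # Build a one-row dataset for this event.
    new_row = Dataset.from_dict({
        "timestamp": [datetime.utcnow().isoformat()],
        "event": ["generated"],
        "model_id": [model_id],
    })
    try:
        # Append to the existing log if it can be loaded.
        existing = load_dataset(HF_REPO, token=HF_TOKEN, split="train")
        combined = concatenate_datasets([existing, new_row])
    except Exception:
        combined = new_row  # first push creates the dataset
    combined.push_to_hub(HF_REPO, token=HF_TOKEN, private=True)

The count shown in the templates is then simply the length of the "train" split, as in the diff below.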
src/aibom_generator/api.py CHANGED
@@ -1,3 +1,4 @@
 import os
 import json
 import logging
@@ -7,11 +8,20 @@ from fastapi.responses import HTMLResponse, JSONResponse, FileResponse
 from fastapi.staticfiles import StaticFiles
 from fastapi.templating import Jinja2Templates
 from pydantic import BaseModel

 # Configure logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)

 # Define directories
 templates_dir = "templates"
 OUTPUT_DIR = "/tmp/aibom_output"
@@ -34,6 +44,68 @@ class StatusResponse(BaseModel):
     version: str
     generator_version: str

 @app.on_event("startup")
 async def startup_event():
     os.makedirs(OUTPUT_DIR, exist_ok=True)
@@ -42,11 +114,18 @@ async def startup_event():

 @app.get("/", response_class=HTMLResponse)
 async def root(request: Request):
     try:
-        return templates.TemplateResponse("index.html", {"request": request})
     except Exception as e:
         logger.error(f"Error rendering template: {str(e)}")
-        raise HTTPException(status_code=500, detail=f"Template rendering error: {str(e)}")

 @app.get("/status", response_model=StatusResponse)
 async def get_status():
@@ -58,7 +137,7 @@ def import_utils():
     try:
         # Try different import paths
         sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
         # Try direct import first
         try:
             from utils import calculate_completeness_score
@@ -66,7 +145,7 @@
             return calculate_completeness_score
         except ImportError:
             pass
-
         # Try from src
         try:
             from src.aibom_generator.utils import calculate_completeness_score
@@ -74,7 +153,7 @@
             return calculate_completeness_score
         except ImportError:
             pass
-
         # Try from aibom_generator
         try:
             from aibom_generator.utils import calculate_completeness_score
@@ -82,7 +161,7 @@
             return calculate_completeness_score
         except ImportError:
             pass
-
         # If all imports fail, use the default implementation
         logger.warning("Could not import calculate_completeness_score, using default implementation")
         return None
@@ -106,7 +185,7 @@ def create_comprehensive_completeness_score(aibom=None):
             return calculate_completeness_score(aibom, validate=True, use_best_practices=True)
         except Exception as e:
             logger.error(f"Error calculating completeness score: {str(e)}")
-
     # Otherwise, return a default comprehensive structure
     return {
         "total_score": 75.5,  # Default score for better UI display
@@ -134,7 +213,7 @@
             "metadata.tools": "✔ ★★",
             "metadata.authors": "✔ ★★",
             "metadata.component": "✔ ★★",
-
             # Component basic info
             "component.type": "✔ ★★",
             "component.name": "✔ ★★★",
@@ -142,15 +221,15 @@
             "component.purl": "✔ ★★",
             "component.description": "✔ ★★",
             "component.licenses": "✔ ★★",
-
             # Model card
             "modelCard.modelParameters": "✔ ★★",
             "modelCard.quantitativeAnalysis": "✘ ★★",
             "modelCard.considerations": "✔ ★★",
-
             # External references
             "externalReferences": "✔ ★",
-
             # Additional fields from FIELD_CLASSIFICATION
             "name": "✔ ★★★",
             "downloadLocation": "✔ ★★★",
@@ -184,7 +263,7 @@
             "metadata.tools": "important",
             "metadata.authors": "important",
             "metadata.component": "important",
-
             # Component basic info
             "component.type": "important",
             "component.name": "critical",
@@ -192,15 +271,15 @@
             "component.purl": "important",
             "component.description": "important",
             "component.licenses": "important",
-
             # Model card
             "modelCard.modelParameters": "important",
             "modelCard.quantitativeAnalysis": "important",
             "modelCard.considerations": "important",
-
             # External references
             "externalReferences": "supplementary",
-
             # Additional fields from FIELD_CLASSIFICATION
             "name": "critical",
             "downloadLocation": "critical",
@@ -227,8 +306,8 @@
         "missing_fields": {
             "critical": [],
             "important": ["modelCard.quantitativeAnalysis", "energyConsumption", "safetyRiskAssessment"],
-            "supplementary": ["modelExplainability", "standardCompliance", "energyQuantity", "energyUnit",
-                              "metric", "metricDecisionThreshold", "modelDataPreprocessing",
                               "autonomyType", "useSensitivePersonalInformation"]
         },
         "completeness_profile": {
@@ -267,6 +346,7 @@ async def generate_form(
     include_inference: bool = Form(False),
     use_best_practices: bool = Form(True)
 ):
     try:
         # Try different import paths for AIBOMGenerator
         generator = None
@@ -294,53 +374,58 @@ async def generate_form(
         enhancement_report = generator.get_enhancement_report()

         # Save AIBOM to file
-        filename = f"{model_id.replace('/', '_')}_aibom.json"
         filepath = os.path.join(OUTPUT_DIR, filename)

         with open(filepath, "w") as f:
             json.dump(aibom, f, indent=2)

         download_url = f"/output/{filename}"

         # Create download and UI interaction scripts
         download_script = f"""
         <script>
         function downloadJSON() {{
-            const a = document.createElement('a');
-            a.href = '{download_url}';
-            a.download = '{filename}';
             document.body.appendChild(a);
             a.click();
             document.body.removeChild(a);
         }}
-
         function switchTab(tabId) {{
             // Hide all tabs
-            document.querySelectorAll('.tab-content').forEach(tab => {{
-                tab.classList.remove('active');
             }});
-
             // Deactivate all tab buttons
-            document.querySelectorAll('.aibom-tab').forEach(button => {{
-                button.classList.remove('active');
             }});
-
             // Show the selected tab
-            document.getElementById(tabId).classList.add('active');
-
             // Activate the clicked button
-            event.currentTarget.classList.add('active');
         }}
-
         function toggleCollapsible(element) {{
-            element.classList.toggle('active');
             var content = element.nextElementSibling;
             if (content.style.maxHeight) {{
                 content.style.maxHeight = null;
-                content.classList.remove('active');
             }} else {{
                 content.style.maxHeight = content.scrollHeight + "px";
-                content.classList.add('active');
             }}
         }}
         </script>
@@ -348,15 +433,15 @@ async def generate_form(

         # Get completeness score or create a comprehensive one if not available
         completeness_score = None
-        if hasattr(generator, 'get_completeness_score'):
             try:
                 completeness_score = generator.get_completeness_score(model_id)
                 logger.info("Successfully retrieved completeness_score from generator")
             except Exception as e:
                 logger.error(f"Completeness score error from generator: {str(e)}")
-
-        # If completeness_score is None or doesn't have field_checklist, use comprehensive one
-        if completeness_score is None or not isinstance(completeness_score, dict) or 'field_checklist' not in completeness_score:
             logger.info("Using comprehensive completeness_score with field_checklist")
             completeness_score = create_comprehensive_completeness_score(aibom)

@@ -375,7 +460,7 @@
             enhancement_report["original_score"] = {"total_score": 0, "completeness_score": 0}
         elif "completeness_score" not in enhancement_report["original_score"]:
             enhancement_report["original_score"]["completeness_score"] = enhancement_report["original_score"].get("total_score", 0)
-
         # Ensure final_score has completeness_score
         if "final_score" not in enhancement_report or enhancement_report["final_score"] is None:
             enhancement_report["final_score"] = {"total_score": 0, "completeness_score": 0}
@@ -390,15 +475,15 @@ async def generate_form(
             "component_model_card": "Model Card",
             "external_references": "External References"
         }
-
         tooltips = {
-            "required_fields": "Basic required fields for a valid AI SBOM",
-            "metadata": "Information about the AI SBOM itself",
             "component_basic": "Basic information about the AI model component",
             "component_model_card": "Detailed model card information",
             "external_references": "Links to external resources"
         }
-
         weights = {
             "required_fields": 20,
             "metadata": 20,
@@ -420,28 +505,39 @@ async def generate_form(
                 "download_script": download_script,
                 "display_names": display_names,
                 "tooltips": tooltips,
-                "weights": weights
             }
         )
     except Exception as e:
         logger.error(f"Error generating AI SBOM: {str(e)}")
         return templates.TemplateResponse(
-            "error.html", {"request": request, "error": str(e)}
         )

 @app.get("/download/{filename}")
 async def download_file(filename: str):
     """
-    Download a generated AI SBOM file.
-
-    This endpoint serves the generated AI SBOM JSON files for download.
     """
     file_path = os.path.join(OUTPUT_DIR, filename)
     if not os.path.exists(file_path):
         raise HTTPException(status_code=404, detail="File not found")
-
     return FileResponse(
         file_path,
         media_type="application/json",
         filename=filename
     )

+#!/usr/bin/env python
 import os
 import json
 import logging

 from fastapi.staticfiles import StaticFiles
 from fastapi.templating import Jinja2Templates
 from pydantic import BaseModel
+from datetime import datetime
+from datasets import Dataset, load_dataset, concatenate_datasets
+import os
+import logging

 # Configure logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)

+# --- Add Counter Configuration ---
+HF_REPO = "aetheris-ai/aisbom-usage-log"  # User needs to create this private repo
+HF_TOKEN = os.getenv("HF_TOKEN")  # User must set this environment variable
+# --- End Counter Configuration ---
+
 # Define directories
 templates_dir = "templates"
 OUTPUT_DIR = "/tmp/aibom_output"

     version: str
     generator_version: str

+# --- Add Counter Helper Functions ---
+def log_sbom_generation(model_id: str):
+    """Logs a successful SBOM generation event to the Hugging Face dataset."""
+    if not HF_TOKEN:
+        logger.warning("HF_TOKEN not set. Skipping SBOM generation logging.")
+        return
+
+    try:
+        log_data = {
+            "timestamp": [datetime.utcnow().isoformat()],
+            "event": ["generated"],
+            "model_id": [model_id]  # Log the model ID
+        }
+        ds_new_log = Dataset.from_dict(log_data)
+
+        # Try to load existing dataset to append
+        try:
+            # Use trust_remote_code=True if required by the dataset/model on HF
+            existing_ds = load_dataset(HF_REPO, token=HF_TOKEN, split='train', trust_remote_code=True)
+            # Check if dataset is empty or has different columns (handle initial creation)
+            if len(existing_ds) > 0 and set(existing_ds.column_names) == set(log_data.keys()):
+                ds_to_push = concatenate_datasets([existing_ds, ds_new_log])
+            elif len(existing_ds) == 0:
+                logger.info(f"Dataset {HF_REPO} is empty. Pushing initial data.")
+                ds_to_push = ds_new_log
+            else:
+                logger.warning(f"Dataset {HF_REPO} has unexpected columns {existing_ds.column_names} vs {list(log_data.keys())}. Appending new log anyway, structure might differ.")
+                # Attempt concatenation even if columns differ slightly, HF might handle it
+                # Or consider more robust schema migration/handling if needed
+                ds_to_push = concatenate_datasets([existing_ds, ds_new_log])
+
+        except Exception as load_err:
+            # Handle case where dataset doesn't exist yet or other loading errors
+            logger.info(f"Could not load existing dataset {HF_REPO} (may not exist yet): {load_err}. Pushing new dataset.")
+            ds_to_push = ds_new_log  # ds is already prepared with the new log entry
+
+        # Push the updated or new dataset
+        ds_to_push.push_to_hub(HF_REPO, token=HF_TOKEN, private=True)  # Ensure it's private
+        logger.info(f"Successfully logged SBOM generation for {model_id} to {HF_REPO}")
+
+    except Exception as e:
+        logger.error(f"Failed to log SBOM generation to {HF_REPO}: {e}")
+
+def get_sbom_count() -> str:
+    """Retrieves the total count of generated SBOMs from the Hugging Face dataset."""
+    if not HF_TOKEN:
+        logger.warning("HF_TOKEN not set. Cannot retrieve SBOM count.")
+        return "N/A"
+    try:
+        # Load the dataset - assumes 'train' split exists after first push
+        # Use trust_remote_code=True if required by the dataset/model on HF
+        ds = load_dataset(HF_REPO, token=HF_TOKEN, split='train', trust_remote_code=True)
+        count = len(ds)
+        logger.info(f"Retrieved SBOM count: {count} from {HF_REPO}")
+        # Format count for display (e.g., add commas for large numbers)
+        return f"{count:,}"
+    except Exception as e:
+        logger.error(f"Failed to retrieve SBOM count from {HF_REPO}: {e}")
+        # Return "N/A" or similar indicator on error
+        return "N/A"
+# --- End Counter Helper Functions ---
+
 @app.on_event("startup")
 async def startup_event():
     os.makedirs(OUTPUT_DIR, exist_ok=True)

  @app.get("/", response_class=HTMLResponse)
116
  async def root(request: Request):
117
+ sbom_count = get_sbom_count() # Get count
118
  try:
119
+ return templates.TemplateResponse("index.html", {"request": request, "sbom_count": sbom_count}) # Pass to template
120
  except Exception as e:
121
  logger.error(f"Error rendering template: {str(e)}")
122
+ # Attempt to render error page even if main page fails
123
+ try:
124
+ return templates.TemplateResponse("error.html", {"request": request, "error": f"Template rendering error: {str(e)}", "sbom_count": sbom_count})
125
+ except Exception as template_err:
126
+ # Fallback if error template also fails
127
+ logger.error(f"Error rendering error template: {template_err}")
128
+ raise HTTPException(status_code=500, detail=f"Template rendering error: {str(e)}")
129
 
130
  @app.get("/status", response_model=StatusResponse)
131
  async def get_status():
 
137
     try:
         # Try different import paths
         sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
         # Try direct import first
         try:
             from utils import calculate_completeness_score

             return calculate_completeness_score
         except ImportError:
             pass
+
         # Try from src
         try:
             from src.aibom_generator.utils import calculate_completeness_score

             return calculate_completeness_score
         except ImportError:
             pass
+
         # Try from aibom_generator
         try:
             from aibom_generator.utils import calculate_completeness_score

             return calculate_completeness_score
         except ImportError:
             pass
+
         # If all imports fail, use the default implementation
         logger.warning("Could not import calculate_completeness_score, using default implementation")
         return None

             return calculate_completeness_score(aibom, validate=True, use_best_practices=True)
         except Exception as e:
             logger.error(f"Error calculating completeness score: {str(e)}")
+
     # Otherwise, return a default comprehensive structure
     return {
         "total_score": 75.5,  # Default score for better UI display

  "metadata.tools": "βœ” β˜…β˜…",
214
  "metadata.authors": "βœ” β˜…β˜…",
215
  "metadata.component": "βœ” β˜…β˜…",
216
+
217
  # Component basic info
218
  "component.type": "βœ” β˜…β˜…",
219
  "component.name": "βœ” β˜…β˜…β˜…",
 
221
  "component.purl": "βœ” β˜…β˜…",
222
  "component.description": "βœ” β˜…β˜…",
223
  "component.licenses": "βœ” β˜…β˜…",
224
+
225
  # Model card
226
  "modelCard.modelParameters": "βœ” β˜…β˜…",
227
  "modelCard.quantitativeAnalysis": "✘ β˜…β˜…",
228
  "modelCard.considerations": "βœ” β˜…β˜…",
229
+
230
  # External references
231
  "externalReferences": "βœ” β˜…",
232
+
233
  # Additional fields from FIELD_CLASSIFICATION
234
  "name": "βœ” β˜…β˜…β˜…",
235
  "downloadLocation": "βœ” β˜…β˜…β˜…",
 
263
  "metadata.tools": "important",
264
  "metadata.authors": "important",
265
  "metadata.component": "important",
266
+
267
  # Component basic info
268
  "component.type": "important",
269
  "component.name": "critical",
 
271
  "component.purl": "important",
272
  "component.description": "important",
273
  "component.licenses": "important",
274
+
275
  # Model card
276
  "modelCard.modelParameters": "important",
277
  "modelCard.quantitativeAnalysis": "important",
278
  "modelCard.considerations": "important",
279
+
280
  # External references
281
  "externalReferences": "supplementary",
282
+
283
  # Additional fields from FIELD_CLASSIFICATION
284
  "name": "critical",
285
  "downloadLocation": "critical",
 
306
  "missing_fields": {
307
  "critical": [],
308
  "important": ["modelCard.quantitativeAnalysis", "energyConsumption", "safetyRiskAssessment"],
309
+ "supplementary": ["modelExplainability", "standardCompliance", "energyQuantity", "energyUnit",
310
+ "metric", "metricDecisionThreshold", "modelDataPreprocessing",
311
  "autonomyType", "useSensitivePersonalInformation"]
312
  },
313
  "completeness_profile": {
 
346
     include_inference: bool = Form(False),
     use_best_practices: bool = Form(True)
 ):
+    sbom_count = get_sbom_count()  # Get count early for context
     try:
         # Try different import paths for AIBOMGenerator
         generator = None

         enhancement_report = generator.get_enhancement_report()

         # Save AIBOM to file
+        filename = f"{model_id.replace('/', '_')}_aibom.json"
         filepath = os.path.join(OUTPUT_DIR, filename)

         with open(filepath, "w") as f:
             json.dump(aibom, f, indent=2)

+        # --- Log Generation Event ---
+        log_sbom_generation(model_id)
+        sbom_count = get_sbom_count()  # Refresh count after logging
+        # --- End Log ---
+
         download_url = f"/output/{filename}"

         # Create download and UI interaction scripts
         download_script = f"""
         <script>
         function downloadJSON() {{
+            const a = document.createElement('a');
+            a.href = '{download_url}';
+            a.download = '{filename}';
             document.body.appendChild(a);
             a.click();
             document.body.removeChild(a);
         }}
+
         function switchTab(tabId) {{
             // Hide all tabs
+            document.querySelectorAll('.tab-content').forEach(tab => {{
+                tab.classList.remove('active');
             }});
+
             // Deactivate all tab buttons
+            document.querySelectorAll('.aibom-tab').forEach(button => {{
+                button.classList.remove('active');
             }});
+
             // Show the selected tab
+            document.getElementById(tabId).classList.add('active');
+
             // Activate the clicked button
+            event.currentTarget.classList.add('active');
         }}
+
         function toggleCollapsible(element) {{
+            element.classList.toggle('active');
             var content = element.nextElementSibling;
             if (content.style.maxHeight) {{
                 content.style.maxHeight = null;
+                content.classList.remove('active');
             }} else {{
                 content.style.maxHeight = content.scrollHeight + "px";
+                content.classList.add('active');
             }}
         }}
         </script>

         # Get completeness score or create a comprehensive one if not available
         completeness_score = None
+        if hasattr(generator, 'get_completeness_score'):
             try:
                 completeness_score = generator.get_completeness_score(model_id)
                 logger.info("Successfully retrieved completeness_score from generator")
             except Exception as e:
                 logger.error(f"Completeness score error from generator: {str(e)}")
+
+        # If completeness_score is None or doesn't have field_checklist, use comprehensive one
+        if completeness_score is None or not isinstance(completeness_score, dict) or 'field_checklist' not in completeness_score:
             logger.info("Using comprehensive completeness_score with field_checklist")
             completeness_score = create_comprehensive_completeness_score(aibom)

  enhancement_report["original_score"] = {"total_score": 0, "completeness_score": 0}
461
  elif "completeness_score" not in enhancement_report["original_score"]:
462
  enhancement_report["original_score"]["completeness_score"] = enhancement_report["original_score"].get("total_score", 0)
463
+
464
  # Ensure final_score has completeness_score
465
  if "final_score" not in enhancement_report or enhancement_report["final_score"] is None:
466
  enhancement_report["final_score"] = {"total_score": 0, "completeness_score": 0}
 
475
  "component_model_card": "Model Card",
476
  "external_references": "External References"
477
  }
478
+
479
  tooltips = {
480
+ "required_fields": "Basic required fields for a valid AIBOM",
481
+ "metadata": "Information about the AIBOM itself",
482
  "component_basic": "Basic information about the AI model component",
483
  "component_model_card": "Detailed model card information",
484
  "external_references": "Links to external resources"
485
  }
486
+
487
  weights = {
488
  "required_fields": 20,
489
  "metadata": 20,
 
505
  "download_script": download_script,
506
  "display_names": display_names,
507
  "tooltips": tooltips,
508
+ "weights": weights,
509
+ "sbom_count": sbom_count # Pass count
510
  }
511
  )
512
  except Exception as e:
513
  logger.error(f"Error generating AI SBOM: {str(e)}")
514
+ # Ensure count is passed to error template as well
515
+ sbom_count = get_sbom_count() # Refresh count just in case
516
  return templates.TemplateResponse(
517
+ "error.html", {"request": request, "error": str(e), "sbom_count": sbom_count} # Pass count
518
  )
519
 
520
  @app.get("/download/{filename}")
521
  async def download_file(filename: str):
522
  """
523
+ Download a generated AIBOM file.
524
+
525
+ This endpoint serves the generated AIBOM JSON files for download.
526
  """
527
  file_path = os.path.join(OUTPUT_DIR, filename)
528
  if not os.path.exists(file_path):
529
  raise HTTPException(status_code=404, detail="File not found")
530
+
531
  return FileResponse(
532
  file_path,
533
  media_type="application/json",
534
  filename=filename
535
  )
536
+
537
+ # If running directly (for local testing)
538
+ if __name__ == "__main__":
539
+ import uvicorn
540
+ # Ensure HF_TOKEN is set for local testing if needed
541
+ if not HF_TOKEN:
542
+ print("Warning: HF_TOKEN environment variable not set. SBOM count will show N/A and logging will be skipped.")
543
+ uvicorn.run(app, host="0.0.0.0", port=8000)
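For a quick local check of the updated endpoints, a minimal smoke-test sketch (assumptions: the server is started via the `__main__` block above on port 8000, and the third-party `requests` package is installed; neither is part of this commit):

# Illustrative smoke test; run in a separate process while the app is serving.
import requests

# Landing page should render and now receives the SBOM usage count.
print(requests.get("http://localhost:8000/").status_code)

# Status endpoint declared with response_model=StatusResponse.
print(requests.get("http://localhost:8000/status").json())

Without HF_TOKEN set, the page should still render, with the count shown as "N/A" and generation logging skipped, matching the fallbacks in get_sbom_count() and log_sbom_generation().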