a1c00l committed
Commit 0c757a9 (verified)
Parent(s): d2a45b4

Update src/aibom_generator/generator.py

Files changed (1)
  1. src/aibom_generator/generator.py +166 -231
src/aibom_generator/generator.py CHANGED
@@ -1,7 +1,7 @@
 import json
 import uuid
 import datetime
-from typing import Dict, Optional, Any, List
 
 from huggingface_hub import HfApi, ModelCard
 from .utils import calculate_completeness_score
@@ -19,7 +19,6 @@ class AIBOMGenerator:
         self.inference_model_url = inference_model_url
         self.use_inference = use_inference
         self.cache_dir = cache_dir
-        self.enhancement_report = None  # Store enhancement report as instance variable
 
     def generate_aibom(
         self,
@@ -27,124 +26,82 @@ class AIBOMGenerator:
         output_file: Optional[str] = None,
         include_inference: Optional[bool] = None,
     ) -> Dict[str, Any]:
-        try:
-            use_inference = include_inference if include_inference is not None else self.use_inference
-            model_info = self._fetch_model_info(model_id)
-            model_card = self._fetch_model_card(model_id)
-
-            # Store original metadata before any AI enhancement
-            original_metadata = self._extract_structured_metadata(model_id, model_info, model_card)
-
-            # Create initial AIBOM with original metadata
-            original_aibom = self._create_aibom_structure(model_id, original_metadata)
-
-            # Calculate initial score
-            original_score = calculate_completeness_score(original_aibom)
-
-            # Final metadata starts with original metadata
-            final_metadata = original_metadata.copy() if original_metadata else {}
-
-            # Apply AI enhancement if requested
-            ai_enhanced = False
-            ai_model_name = None
-
-            if use_inference and self.inference_model_url:
-                try:
-                    # Extract additional metadata using AI
-                    enhanced_metadata = self._extract_unstructured_metadata(model_card, model_id)
 
-                    # If we got enhanced metadata, merge it with original
-                    if enhanced_metadata:
-                        ai_enhanced = True
-                        ai_model_name = "BERT-base-uncased"  # Will be replaced with actual model name
-
-                        # Merge enhanced metadata with original (enhanced takes precedence)
-                        for key, value in enhanced_metadata.items():
-                            if value is not None and (key not in final_metadata or not final_metadata[key]):
-                                final_metadata[key] = value
-                except Exception as e:
-                    print(f"Error during AI enhancement: {e}")
-                    # Continue with original metadata if enhancement fails
-
-            # Create final AIBOM with potentially enhanced metadata
-            aibom = self._create_aibom_structure(model_id, final_metadata)
-
-            # Calculate final score
-            final_score = calculate_completeness_score(aibom)
-
-            # Add score and enhancement info to metadata properties
-            if "metadata" in aibom and "properties" not in aibom["metadata"]:
-                aibom["metadata"]["properties"] = []
 
-            if "metadata" in aibom and "properties" in aibom["metadata"]:
-                # Add score information
-                aibom["metadata"]["properties"].append({"name": "aibom:quality-score", "value": str(final_score["total_score"])})
-                aibom["metadata"]["properties"].append({"name": "aibom:quality-breakdown", "value": json.dumps(final_score["section_scores"])})
-                aibom["metadata"]["properties"].append({"name": "aibom:max-scores", "value": json.dumps(final_score["max_scores"])})
-
-                # Add AI enhancement information
-                if ai_enhanced:
-                    aibom["metadata"]["properties"].append({"name": "aibom:ai-enhanced", "value": "true"})
-                    aibom["metadata"]["properties"].append({"name": "aibom:ai-model", "value": ai_model_name})
-                    aibom["metadata"]["properties"].append({"name": "aibom:original-score", "value": str(original_score["total_score"])})
-                    aibom["metadata"]["properties"].append({"name": "aibom:score-improvement",
-                                                            "value": str(round(final_score["total_score"] - original_score["total_score"], 2))})
-
-            if output_file:
-                with open(output_file, 'w') as f:
-                    json.dump(aibom, f, indent=2)
-
-            # Create enhancement report for UI display and store as instance variable
-            self.enhancement_report = {
-                "ai_enhanced": ai_enhanced,
-                "ai_model": ai_model_name if ai_enhanced else None,
-                "original_score": original_score,
-                "final_score": final_score,
-                "improvement": round(final_score["total_score"] - original_score["total_score"], 2) if ai_enhanced else 0
-            }
-
-            # Return only the AIBOM to maintain compatibility with existing code
-            return aibom
-        except Exception as e:
-            print(f"Error generating AIBOM: {e}")
-            # Return a minimal valid AIBOM structure in case of error
-            return self._create_minimal_aibom(model_id)
-
-    def _create_minimal_aibom(self, model_id: str) -> Dict[str, Any]:
-        """Create a minimal valid AIBOM structure in case of errors"""
-        return {
-            "bomFormat": "CycloneDX",
-            "specVersion": "1.6",
-            "serialNumber": f"urn:uuid:{str(uuid.uuid4())}",
-            "version": 1,
-            "metadata": {
-                "timestamp": datetime.datetime.utcnow().isoformat() + "Z",
-                "tools": [{
-                    "vendor": "Aetheris AI",
-                    "name": "aibom-generator",
-                    "version": "0.1.0"
-                }],
-                "component": {
-                    "type": "machine-learning-model",
-                    "name": model_id.split("/")[-1],
-                    "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}"
-                },
-                "properties": [
-                    {"name": "aibom:error", "value": "Error generating complete AIBOM"}
-                ]
-            },
-            "components": [{
-                "type": "machine-learning-model",
-                "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}",
-                "name": model_id.split("/")[-1],
-                "purl": f"pkg:huggingface/{model_id.replace('/', '/')}"
-            }],
-            "dependencies": []
         }
 
-    def get_enhancement_report(self):
-        """Return the enhancement report from the last generate_aibom call"""
-        return self.enhancement_report
 
     def _fetch_model_info(self, model_id: str) -> Dict[str, Any]:
         try:
@@ -191,37 +148,31 @@ class AIBOMGenerator:
         metadata = {}
 
         if model_info:
-            try:
-                metadata.update({
-                    "name": model_info.modelId.split("/")[-1] if hasattr(model_info, "modelId") else model_id.split("/")[-1],
-                    "author": model_info.author if hasattr(model_info, "author") else None,
-                    "tags": model_info.tags if hasattr(model_info, "tags") else [],
-                    "pipeline_tag": model_info.pipeline_tag if hasattr(model_info, "pipeline_tag") else None,
-                    "downloads": model_info.downloads if hasattr(model_info, "downloads") else 0,
-                    "last_modified": model_info.lastModified if hasattr(model_info, "lastModified") else None,
-                    "commit": model_info.sha[:7] if hasattr(model_info, "sha") and model_info.sha else None,
-                    "commit_url": f"https://huggingface.co/{model_id}/commit/{model_info.sha}" if hasattr(model_info, "sha") and model_info.sha else None,
-                })
-            except Exception as e:
-                print(f"Error extracting model info metadata: {e}")
 
-        if model_card and hasattr(model_card, "data") and model_card.data:
-            try:
-                card_data = model_card.data.to_dict() if hasattr(model_card.data, "to_dict") else {}
-                metadata.update({
-                    "language": card_data.get("language"),
-                    "license": card_data.get("license"),
-                    "library_name": card_data.get("library_name"),
-                    "base_model": card_data.get("base_model"),
-                    "datasets": card_data.get("datasets"),
-                    "model_name": card_data.get("model_name"),
-                    "tags": card_data.get("tags", metadata.get("tags", [])),
-                    "description": card_data.get("model_summary", None)
-                })
-                if hasattr(model_card.data, "eval_results") and model_card.data.eval_results:
-                    metadata["eval_results"] = model_card.data.eval_results
-            except Exception as e:
-                print(f"Error extracting model card metadata: {e}")
 
         metadata["ai:type"] = "Transformer"
         metadata["ai:task"] = metadata.get("pipeline_tag", "Text Generation")
@@ -247,46 +198,43 @@ class AIBOMGenerator:
         # Since we can't install the required libraries due to space constraints,
         # we'll simulate the enhancement with a placeholder implementation
 
-        if model_card and hasattr(model_card, "text") and model_card.text:
-            try:
-                card_text = model_card.text
-
-                # Simulate BERT extraction with basic text analysis
-                # In reality, this would be done with NLP models
-
-                # Extract description if missing
-                if card_text and "description" not in enhanced_metadata:
-                    # Take first paragraph that's longer than 20 chars as description
-                    paragraphs = [p.strip() for p in card_text.split('\n\n')]
-                    for p in paragraphs:
-                        if len(p) > 20 and not p.startswith('#'):
-                            enhanced_metadata["description"] = p
                             break
-
-                # Extract limitations if present
-                if "limitations" not in enhanced_metadata:
-                    if "## Limitations" in card_text:
-                        limitations_section = card_text.split("## Limitations")[1].split("##")[0].strip()
-                        if limitations_section:
-                            enhanced_metadata["limitations"] = limitations_section
-
-                # Extract ethical considerations if present
-                if "ethical_considerations" not in enhanced_metadata:
-                    for heading in ["## Ethical Considerations", "## Ethics", "## Bias"]:
-                        if heading in card_text:
-                            section = card_text.split(heading)[1].split("##")[0].strip()
-                            if section:
-                                enhanced_metadata["ethical_considerations"] = section
-                                break
-
-                # Extract risks if present
-                if "risks" not in enhanced_metadata:
-                    if "## Risks" in card_text:
-                        risks_section = card_text.split("## Risks")[1].split("##")[0].strip()
-                        if risks_section:
-                            enhanced_metadata["risks"] = risks_section
-            except Exception as e:
-                print(f"Error extracting unstructured metadata: {e}")
 
         return enhanced_metadata
 
@@ -294,12 +242,12 @@ class AIBOMGenerator:
         timestamp = datetime.datetime.utcnow().isoformat() + "Z"
         tools = [{
             "vendor": "Aetheris AI",
-            "name": "aibom-generator",
             "version": "0.1.0"
         }]
 
         authors = []
-        if metadata and "author" in metadata and metadata["author"]:
             authors.append({
                 "name": metadata["author"],
                 "url": f"https://huggingface.co/{metadata['author']}"
@@ -307,20 +255,16 @@ class AIBOMGenerator:
 
         component = {
             "type": "machine-learning-model",
-            "name": metadata.get("name", model_id.split("/")[-1]) if metadata else model_id.split("/")[-1],
             "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}"
         }
 
         properties = []
-        if metadata:
-            for key, value in metadata.items():
-                if key not in ["name", "author", "license"] and value is not None:
-                    try:
-                        if isinstance(value, (list, dict)):
-                            value = json.dumps(value)
-                        properties.append({"name": key, "value": str(value)})
-                    except Exception as e:
-                        print(f"Error processing metadata property {key}: {e}")
 
         metadata_section = {
             "timestamp": timestamp,
@@ -336,27 +280,32 @@ class AIBOMGenerator:
         return metadata_section
 
     def _create_component_section(self, model_id: str, metadata: Dict[str, Any]) -> Dict[str, Any]:
         component = {
             "type": "machine-learning-model",
             "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}",
-            "name": metadata.get("name", model_id.split("/")[-1]) if metadata else model_id.split("/")[-1],
-            "purl": f"pkg:huggingface/{model_id.replace('/', '/')}"
         }
 
-        if metadata and "description" in metadata:
             component["description"] = metadata["description"]
 
-        if metadata and "commit" in metadata:
             component["version"] = metadata["commit"]
 
-        if metadata and "license" in metadata:
             component["licenses"] = [{"license": {"id": metadata["license"]}}]
 
         external_refs = [{
             "type": "website",
             "url": f"https://huggingface.co/{model_id}"
         }]
-        if metadata and "commit_url" in metadata:
             external_refs.append({
                 "type": "vcs",
                 "url": metadata["commit_url"]
@@ -369,32 +318,18 @@ class AIBOMGenerator:
 
     def _create_model_card_section(self, metadata: Dict[str, Any]) -> Dict[str, Any]:
         model_card_section = {}
-
-        if not metadata:
-            return model_card_section
-
-        try:
-            # Safely extract model parameters
-            model_parameters = {}
-            for k in ["base_model", "library_name", "pipeline_tag"]:
-                if k in metadata:
-                    model_parameters[k] = metadata[k]
-
-            if model_parameters:
-                model_card_section["modelParameters"] = model_parameters
-
-            # Safely extract evaluation results
-            if "eval_results" in metadata:
-                model_card_section["quantitativeAnalysis"] = {"performanceMetrics": metadata["eval_results"]}
-
-            # Safely extract considerations
-            considerations = {}
-            for k in ["limitations", "ethical_considerations", "bias", "risks"]:
-                if k in metadata:
-                    considerations[k] = metadata[k]
-            if considerations:
-                model_card_section["considerations"] = considerations
-        except Exception as e:
-            print(f"Error creating model card section: {e}")
-
         return model_card_section
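The replacement code below (the right-hand side of this diff) removes the outer try/except, the _create_minimal_aibom fallback, and the get_enhancement_report() accessor: generate_aibom now lets errors propagate and hands back the enhancement report directly. A minimal caller-side sketch under that assumption; the import path, constructor defaults, and model id are illustrative and not part of this commit:

    from aibom_generator.generator import AIBOMGenerator  # assumed import path for src/aibom_generator/generator.py

    generator = AIBOMGenerator()  # assumes the default constructor arguments
    try:
        aibom, report = generator.generate_aibom("org/model-name")  # hypothetical model id
    except Exception as exc:
        # The old code swallowed this and returned _create_minimal_aibom(model_id) instead.
        print(f"AIBOM generation failed: {exc}")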
 
 import json
 import uuid
 import datetime
+from typing import Dict, Optional, Any
 
 from huggingface_hub import HfApi, ModelCard
 from .utils import calculate_completeness_score

         self.inference_model_url = inference_model_url
         self.use_inference = use_inference
         self.cache_dir = cache_dir

 
     def generate_aibom(
         self,

         output_file: Optional[str] = None,
         include_inference: Optional[bool] = None,
     ) -> Dict[str, Any]:
+        use_inference = include_inference if include_inference is not None else self.use_inference
+        model_info = self._fetch_model_info(model_id)
+        model_card = self._fetch_model_card(model_id)
+
+        # Store original metadata before any AI enhancement
+        original_metadata = self._extract_structured_metadata(model_id, model_info, model_card)
+
+        # Create initial AIBOM with original metadata
+        original_aibom = self._create_aibom_structure(model_id, original_metadata)
+
+        # Calculate initial score
+        original_score = calculate_completeness_score(original_aibom)
+
+        # Final metadata starts with original metadata
+        final_metadata = original_metadata.copy()
+
+        # Apply AI enhancement if requested
+        ai_enhanced = False
+        ai_model_name = None
+
+        if use_inference and self.inference_model_url:
+            try:
+                # Extract additional metadata using AI
+                enhanced_metadata = self._extract_unstructured_metadata(model_card, model_id)
+
+                # If we got enhanced metadata, merge it with original
+                if enhanced_metadata:
+                    ai_enhanced = True
+                    ai_model_name = "BERT-base-uncased"  # Will be replaced with actual model name
 
+                    # Merge enhanced metadata with original (enhanced takes precedence)
+                    for key, value in enhanced_metadata.items():
+                        if value is not None and (key not in final_metadata or not final_metadata[key]):
+                            final_metadata[key] = value
+            except Exception as e:
+                print(f"Error during AI enhancement: {e}")
+                # Continue with original metadata if enhancement fails
+
+        # Create final AIBOM with potentially enhanced metadata
+        aibom = self._create_aibom_structure(model_id, final_metadata)
+
+        # Calculate final score
+        final_score = calculate_completeness_score(aibom)
+
+        # Add score and enhancement info to metadata properties
+        if "metadata" in aibom and "properties" not in aibom["metadata"]:
+            aibom["metadata"]["properties"] = []
+
+        if "metadata" in aibom and "properties" in aibom["metadata"]:
+            # Add score information
+            aibom["metadata"]["properties"].append({"name": "aibom:quality-score", "value": str(final_score["total_score"])})
+            aibom["metadata"]["properties"].append({"name": "aibom:quality-breakdown", "value": json.dumps(final_score["section_scores"])})
+            aibom["metadata"]["properties"].append({"name": "aibom:max-scores", "value": json.dumps(final_score["max_scores"])})
 
+            # Add AI enhancement information
+            if ai_enhanced:
+                aibom["metadata"]["properties"].append({"name": "aibom:ai-enhanced", "value": "true"})
+                aibom["metadata"]["properties"].append({"name": "aibom:ai-model", "value": ai_model_name})
+                aibom["metadata"]["properties"].append({"name": "aibom:original-score", "value": str(original_score["total_score"])})
+                aibom["metadata"]["properties"].append({"name": "aibom:score-improvement",
+                                                        "value": str(round(final_score["total_score"] - original_score["total_score"], 2))})
+
+        if output_file:
+            with open(output_file, 'w') as f:
+                json.dump(aibom, f, indent=2)
+
+        # Create enhancement report for UI display
+        enhancement_report = {
+            "ai_enhanced": ai_enhanced,
+            "ai_model": ai_model_name if ai_enhanced else None,
+            "original_score": original_score,
+            "final_score": final_score,
+            "improvement": round(final_score["total_score"] - original_score["total_score"], 2) if ai_enhanced else 0
         }
 
+        return aibom, enhancement_report

 
     def _fetch_model_info(self, model_id: str) -> Dict[str, Any]:
         try:

         metadata = {}
 
         if model_info:
+            metadata.update({
+                "name": model_info.modelId.split("/")[-1] if hasattr(model_info, "modelId") else model_id.split("/")[-1],
+                "author": model_info.author if hasattr(model_info, "author") else None,
+                "tags": model_info.tags if hasattr(model_info, "tags") else [],
+                "pipeline_tag": model_info.pipeline_tag if hasattr(model_info, "pipeline_tag") else None,
+                "downloads": model_info.downloads if hasattr(model_info, "downloads") else 0,
+                "last_modified": model_info.lastModified if hasattr(model_info, "lastModified") else None,
+                "commit": model_info.sha[:7] if hasattr(model_info, "sha") and model_info.sha else None,
+                "commit_url": f"https://huggingface.co/{model_id}/commit/{model_info.sha}" if hasattr(model_info, "sha") and model_info.sha else None,
+            })
 
+        if model_card and model_card.data:
+            card_data = model_card.data.to_dict() if hasattr(model_card.data, "to_dict") else {}
+            metadata.update({
+                "language": card_data.get("language"),
+                "license": card_data.get("license"),
+                "library_name": card_data.get("library_name"),
+                "base_model": card_data.get("base_model"),
+                "datasets": card_data.get("datasets"),
+                "model_name": card_data.get("model_name"),
+                "tags": card_data.get("tags", metadata.get("tags", [])),
+                "description": card_data.get("model_summary", None)
+            })
+            if hasattr(model_card.data, "eval_results") and model_card.data.eval_results:
+                metadata["eval_results"] = model_card.data.eval_results
 
         metadata["ai:type"] = "Transformer"
         metadata["ai:task"] = metadata.get("pipeline_tag", "Text Generation")

         # Since we can't install the required libraries due to space constraints,
         # we'll simulate the enhancement with a placeholder implementation
 
+        if model_card and hasattr(model_card, "text"):
+            card_text = model_card.text
+
+            # Simulate BERT extraction with basic text analysis
+            # In reality, this would be done with NLP models
+
+            # Extract description if missing
+            if card_text and "description" not in enhanced_metadata:
+                # Take first paragraph that's longer than 20 chars as description
+                paragraphs = [p.strip() for p in card_text.split('\n\n')]
+                for p in paragraphs:
+                    if len(p) > 20 and not p.startswith('#'):
+                        enhanced_metadata["description"] = p
+                        break
+
+            # Extract limitations if present
+            if "limitations" not in enhanced_metadata:
+                if "## Limitations" in card_text:
+                    limitations_section = card_text.split("## Limitations")[1].split("##")[0].strip()
+                    if limitations_section:
+                        enhanced_metadata["limitations"] = limitations_section
+
+            # Extract ethical considerations if present
+            if "ethical_considerations" not in enhanced_metadata:
+                for heading in ["## Ethical Considerations", "## Ethics", "## Bias"]:
+                    if heading in card_text:
+                        section = card_text.split(heading)[1].split("##")[0].strip()
+                        if section:
+                            enhanced_metadata["ethical_considerations"] = section
                             break
+
+            # Extract risks if present
+            if "risks" not in enhanced_metadata:
+                if "## Risks" in card_text:
+                    risks_section = card_text.split("## Risks")[1].split("##")[0].strip()
+                    if risks_section:
+                        enhanced_metadata["risks"] = risks_section
 
         return enhanced_metadata

         timestamp = datetime.datetime.utcnow().isoformat() + "Z"
         tools = [{
             "vendor": "Aetheris AI",
+            "name": "aetheris-aibom-generator",
             "version": "0.1.0"
         }]
 
         authors = []
+        if "author" in metadata and metadata["author"]:
             authors.append({
                 "name": metadata["author"],
                 "url": f"https://huggingface.co/{metadata['author']}"

 
         component = {
             "type": "machine-learning-model",
+            "name": metadata.get("name", model_id.split("/")[-1]),
             "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}"
         }
 
         properties = []
+        for key, value in metadata.items():
+            if key not in ["name", "author", "license"] and value is not None:
+                if isinstance(value, (list, dict)):
+                    value = json.dumps(value)
+                properties.append({"name": key, "value": str(value)})
 
         metadata_section = {
             "timestamp": timestamp,

         return metadata_section
 
     def _create_component_section(self, model_id: str, metadata: Dict[str, Any]) -> Dict[str, Any]:
+        # Create PURL with version information if commit is available
+        purl = f"pkg:huggingface/{model_id.replace('/', '/')}"
+        if "commit" in metadata:
+            purl = f"{purl}@{metadata['commit']}"
+
         component = {
             "type": "machine-learning-model",
             "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}",
+            "name": metadata.get("name", model_id.split("/")[-1]),
+            "purl": purl
         }
 
+        if "description" in metadata:
             component["description"] = metadata["description"]
 
+        if "commit" in metadata:
             component["version"] = metadata["commit"]
 
+        if "license" in metadata:
             component["licenses"] = [{"license": {"id": metadata["license"]}}]
 
         external_refs = [{
             "type": "website",
             "url": f"https://huggingface.co/{model_id}"
         }]
+        if "commit_url" in metadata:
             external_refs.append({
                 "type": "vcs",
                 "url": metadata["commit_url"]

 
     def _create_model_card_section(self, metadata: Dict[str, Any]) -> Dict[str, Any]:
         model_card_section = {}
+        model_parameters = {k: metadata[k] for k in ["base_model", "library_name", "pipeline_tag"] if k in metadata}
+        if model_parameters:
+            model_card_section["modelParameters"] = model_parameters
+
+        if "eval_results" in metadata:
+            model_card_section["quantitativeAnalysis"] = {"performanceMetrics": metadata["eval_results"]}
+
+        considerations = {}
+        for k in ["limitations", "ethical_considerations", "bias", "risks"]:
+            if k in metadata:
+                considerations[k] = metadata[k]
+        if considerations:
+            model_card_section["considerations"] = considerations
+
         return model_card_section
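Because generate_aibom now returns the enhancement report alongside the AIBOM instead of storing it on the instance, callers switch from get_enhancement_report() to tuple unpacking. A short usage sketch assuming the new signature shown above; the model id and output path are placeholders:

    from aibom_generator.generator import AIBOMGenerator  # assumed import path

    generator = AIBOMGenerator()
    aibom, enhancement_report = generator.generate_aibom(
        "org/model-name",          # hypothetical Hugging Face model id (first positional argument)
        output_file="aibom.json",  # generate_aibom also writes the AIBOM here
    )

    # Keys taken from the enhancement_report dict built in generate_aibom.
    print(enhancement_report["final_score"]["total_score"])
    print(enhancement_report["improvement"])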