Upload api.py

src/aibom_generator/api.py (ADDED, +416 -0)
@@ -0,0 +1,416 @@
from fastapi import FastAPI, HTTPException, Depends, BackgroundTasks, Query, File, UploadFile, Form
from fastapi.responses import JSONResponse, FileResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel, Field
from typing import Optional, Dict, Any, List
import uvicorn
import json
import os
import sys
import uuid
import shutil
from datetime import datetime

# Add parent directory to path to import generator module
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Import the AIBOM generator
try:
    from aibom_fix.final_generator import AIBOMGenerator
except ImportError:
    # If not found, try the mapping directory
    try:
        from aibom_mapping.final_generator import AIBOMGenerator
    except ImportError:
        # If still not found, use the original generator
        try:
            from aibom_fix.generator import AIBOMGenerator
        except ImportError:
            try:
                from generator import AIBOMGenerator
            except ImportError:
                # Last resort: try to import from the aibom_generator module
                try:
                    from aibom_generator.generator import AIBOMGenerator
                except ImportError:
                    raise ImportError("Could not import AIBOMGenerator from any known location")

# Create FastAPI app
app = FastAPI(
    title="Aetheris AI SBOM Generator API",
    description="API for generating CycloneDX JSON AI SBOMs for machine learning models",
    version="1.0.0",
)

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allow all origins in development
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Create output directory for AIBOMs
output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "output")
os.makedirs(output_dir, exist_ok=True)
app.mount("/output", StaticFiles(directory=output_dir), name="output")

# Create a global generator instance
generator = AIBOMGenerator(use_best_practices=True)

# Define request models
class GenerateAIBOMRequest(BaseModel):
    model_id: str = Field(..., description="The Hugging Face model ID (e.g., 'meta-llama/Llama-4-Scout-17B-16E-Instruct')")
    hf_token: Optional[str] = Field(None, description="Optional Hugging Face API token for accessing private models")
    include_inference: Optional[bool] = Field(True, description="Whether to use AI inference to enhance the AIBOM")
    use_best_practices: Optional[bool] = Field(True, description="Whether to use industry best practices for scoring")

class AIBOMResponse(BaseModel):
    aibom: Dict[str, Any] = Field(..., description="The generated AIBOM in CycloneDX JSON format")
    model_id: str = Field(..., description="The model ID for which the AIBOM was generated")
    generated_at: str = Field(..., description="Timestamp when the AIBOM was generated")
    request_id: str = Field(..., description="Unique ID for this request")
    download_url: Optional[str] = Field(None, description="URL to download the AIBOM JSON file")

class EnhancementReport(BaseModel):
    ai_enhanced: bool = Field(..., description="Whether AI enhancement was applied")
    ai_model: Optional[str] = Field(None, description="The AI model used for enhancement, if any")
    original_score: Dict[str, Any] = Field(..., description="Original completeness score before enhancement")
    final_score: Dict[str, Any] = Field(..., description="Final completeness score after enhancement")
    improvement: float = Field(..., description="Score improvement from enhancement")

class AIBOMWithReportResponse(AIBOMResponse):
    enhancement_report: Optional[EnhancementReport] = Field(None, description="Report on AI enhancement results")

class StatusResponse(BaseModel):
    status: str = Field(..., description="API status")
    version: str = Field(..., description="API version")
    generator_version: str = Field(..., description="AIBOM generator version")

class BatchGenerateRequest(BaseModel):
    model_ids: List[str] = Field(..., description="List of Hugging Face model IDs to generate AIBOMs for")
    hf_token: Optional[str] = Field(None, description="Optional Hugging Face API token for accessing private models")
    include_inference: Optional[bool] = Field(True, description="Whether to use AI inference to enhance the AIBOM")
    use_best_practices: Optional[bool] = Field(True, description="Whether to use industry best practices for scoring")

class BatchJobResponse(BaseModel):
    job_id: str = Field(..., description="Unique ID for the batch job")
    status: str = Field(..., description="Job status (e.g., 'queued', 'processing', 'completed')")
    model_ids: List[str] = Field(..., description="List of model IDs in the batch")
    created_at: str = Field(..., description="Timestamp when the job was created")

# In-memory storage for batch jobs
batch_jobs = {}

# Define API endpoints
@app.get("/", response_model=StatusResponse)
async def get_status():
    """Get the API status and version information."""
    return {
        "status": "operational",
        "version": "1.0.0",
        "generator_version": "0.1.0",
    }

@app.post("/generate", response_model=AIBOMResponse)
async def generate_aibom(request: GenerateAIBOMRequest):
    """
    Generate a CycloneDX JSON AI SBOM for a Hugging Face model.

    This endpoint takes a model ID and optional parameters to generate
    a comprehensive AI SBOM in CycloneDX format.
    """
    try:
        # Create a new generator instance with the provided token if available
        gen = AIBOMGenerator(
            hf_token=request.hf_token,
            use_inference=request.include_inference,
            use_best_practices=request.use_best_practices
        )

        # Generate a request ID
        request_id = str(uuid.uuid4())

        # Create output file path
        safe_model_id = request.model_id.replace("/", "_")
        output_file = os.path.join(output_dir, f"{safe_model_id}_{request_id}.json")

        # Generate the AIBOM
        aibom = gen.generate_aibom(
            model_id=request.model_id,
            include_inference=request.include_inference,
            use_best_practices=request.use_best_practices,
            output_file=output_file
        )

        # Create download URL
        download_url = f"/output/{os.path.basename(output_file)}"

        # Create response
        response = {
            "aibom": aibom,
            "model_id": request.model_id,
            "generated_at": datetime.utcnow().isoformat() + "Z",
            "request_id": request_id,
            "download_url": download_url
        }

        return response
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error generating AIBOM: {str(e)}")

@app.post("/generate-with-report", response_model=AIBOMWithReportResponse)
async def generate_aibom_with_report(request: GenerateAIBOMRequest):
    """
    Generate a CycloneDX JSON AI SBOM with an enhancement report.

    This endpoint is similar to /generate but also includes a report
    on the AI enhancement results, including before/after scores.
    """
    try:
        # Create a new generator instance with the provided token if available
        gen = AIBOMGenerator(
            hf_token=request.hf_token,
            use_inference=request.include_inference,
            use_best_practices=request.use_best_practices
        )

        # Generate a request ID
        request_id = str(uuid.uuid4())

        # Create output file path
        safe_model_id = request.model_id.replace("/", "_")
        output_file = os.path.join(output_dir, f"{safe_model_id}_{request_id}.json")

        # Generate the AIBOM
        aibom = gen.generate_aibom(
            model_id=request.model_id,
            include_inference=request.include_inference,
            use_best_practices=request.use_best_practices,
            output_file=output_file
        )

        # Get the enhancement report
        enhancement_report = gen.get_enhancement_report()

        # Create download URL
        download_url = f"/output/{os.path.basename(output_file)}"

        # Create response
        response = {
            "aibom": aibom,
            "model_id": request.model_id,
            "generated_at": datetime.utcnow().isoformat() + "Z",
            "request_id": request_id,
            "download_url": download_url,
            "enhancement_report": enhancement_report
        }

        return response
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error generating AIBOM: {str(e)}")

@app.get("/models/{model_id}/score", response_model=Dict[str, Any])
async def get_model_score(
    model_id: str,
    hf_token: Optional[str] = Query(None, description="Optional Hugging Face API token for accessing private models"),
    use_best_practices: bool = Query(True, description="Whether to use industry best practices for scoring")
):
    """
    Get the completeness score for a model without generating a full AIBOM.

    This is a lightweight endpoint that only returns the scoring information.
    """
    try:
        # Create a new generator instance with the provided token if available
        gen = AIBOMGenerator(
            hf_token=hf_token,
            use_inference=False,  # Don't use inference for scoring only
            use_best_practices=use_best_practices
        )

        # Generate the AIBOM (needed to calculate score)
        aibom = gen.generate_aibom(
            model_id=model_id,
            include_inference=False,  # Don't use inference for scoring only
            use_best_practices=use_best_practices
        )

        # Get the enhancement report for the score
        enhancement_report = gen.get_enhancement_report()

        if enhancement_report and "final_score" in enhancement_report:
            return enhancement_report["final_score"]
        else:
            raise HTTPException(status_code=500, detail="Failed to calculate score")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error calculating score: {str(e)}")

@app.post("/batch", response_model=BatchJobResponse)
async def batch_generate(request: BatchGenerateRequest, background_tasks: BackgroundTasks):
    """
    Start a batch job to generate AIBOMs for multiple models.

    This endpoint queues a background task to generate AIBOMs for all the
    specified model IDs and returns a job ID that can be used to check status.
    """
    try:
        # Generate a job ID
        job_id = str(uuid.uuid4())

        # Create job record
        job = {
            "job_id": job_id,
            "status": "queued",
            "model_ids": request.model_ids,
            "created_at": datetime.utcnow().isoformat() + "Z",
            "completed": 0,
            "total": len(request.model_ids),
            "results": {}
        }

        # Store job in memory
        batch_jobs[job_id] = job

        # Add background task to process the batch
        background_tasks.add_task(
            process_batch_job,
            job_id=job_id,
            model_ids=request.model_ids,
            hf_token=request.hf_token,
            include_inference=request.include_inference,
            use_best_practices=request.use_best_practices
        )

        # Return job info
        return {
            "job_id": job_id,
            "status": "queued",
            "model_ids": request.model_ids,
            "created_at": job["created_at"]
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error starting batch job: {str(e)}")

@app.get("/batch/{job_id}", response_model=Dict[str, Any])
async def get_batch_status(job_id: str):
    """
    Get the status of a batch job.

    This endpoint returns the current status of a batch job, including
    progress information and results for completed models.
    """
    if job_id not in batch_jobs:
        raise HTTPException(status_code=404, detail="Batch job not found")

    return batch_jobs[job_id]

@app.post("/upload-model-card")
async def upload_model_card(
    model_id: str = Form(...),
    model_card: UploadFile = File(...),
    include_inference: bool = Form(True),
    use_best_practices: bool = Form(True)
):
    """
    Generate an AIBOM from an uploaded model card file.

    This endpoint allows users to upload a model card file directly
    instead of requiring the model to be on Hugging Face.
    """
    try:
        # Create a temporary directory to store the uploaded file
        temp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "temp")
        os.makedirs(temp_dir, exist_ok=True)

        # Save the uploaded file, keeping only the base name so a crafted
        # filename cannot write outside the temp directory
        file_path = os.path.join(temp_dir, os.path.basename(model_card.filename))
        with open(file_path, "wb") as f:
            shutil.copyfileobj(model_card.file, f)

        # TODO: Implement custom model card processing
        # This would require modifying the AIBOMGenerator to accept a file path
        # instead of a model ID, which is beyond the scope of this example

        # For now, return a placeholder response
        return {
            "status": "not_implemented",
            "message": "Custom model card processing is not yet implemented"
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing uploaded model card: {str(e)}")

@app.get("/download/{filename}")
async def download_aibom(filename: str):
    """
    Download a previously generated AIBOM file.

    This endpoint allows downloading AIBOM files by filename.
    """
    # Resolve against the output directory using the base name only,
    # so path traversal via "../" in the filename is not possible
    file_path = os.path.join(output_dir, os.path.basename(filename))
    if not os.path.exists(file_path):
        raise HTTPException(status_code=404, detail="File not found")

    return FileResponse(file_path, media_type="application/json", filename=filename)

# Background task function for batch processing
async def process_batch_job(job_id: str, model_ids: List[str], hf_token: Optional[str], include_inference: bool, use_best_practices: bool):
    """Process a batch job in the background."""
    # Update job status
    batch_jobs[job_id]["status"] = "processing"

    # Create output directory
    batch_output_dir = os.path.join(output_dir, job_id)
    os.makedirs(batch_output_dir, exist_ok=True)

    # Process each model
    for model_id in model_ids:
        try:
            # Create a new generator instance
            gen = AIBOMGenerator(
                hf_token=hf_token,
                use_inference=include_inference,
                use_best_practices=use_best_practices
            )

            # Create output file path
            safe_model_id = model_id.replace("/", "_")
            output_file = os.path.join(batch_output_dir, f"{safe_model_id}.json")

            # Generate the AIBOM
            aibom = gen.generate_aibom(
                model_id=model_id,
                include_inference=include_inference,
                use_best_practices=use_best_practices,
                output_file=output_file
            )

            # Get the enhancement report
            enhancement_report = gen.get_enhancement_report()

            # Create download URL
            download_url = f"/output/{job_id}/{safe_model_id}.json"

            # Store result
            batch_jobs[job_id]["results"][model_id] = {
                "status": "completed",
                "download_url": download_url,
                "enhancement_report": enhancement_report
            }
        except Exception as e:
            # Store error
            batch_jobs[job_id]["results"][model_id] = {
                "status": "error",
                "error": str(e)
            }

        # Update progress
        batch_jobs[job_id]["completed"] += 1

    # Update job status
    batch_jobs[job_id]["status"] = "completed"

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
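
For reference, a minimal client sketch for exercising the endpoints above. It assumes the server is running locally on port 8000 (as in the __main__ block), that the third-party requests library is installed, and uses "bert-base-uncased" and "gpt2" purely as example model IDs:

import time

import requests  # third-party HTTP client, assumed installed

BASE_URL = "http://localhost:8000"

# Single-model generation: POST /generate returns the AIBOM inline
# plus a download URL for the saved JSON file.
resp = requests.post(f"{BASE_URL}/generate", json={"model_id": "bert-base-uncased"})
resp.raise_for_status()
result = resp.json()
print("Download URL:", result["download_url"])

# Batch generation: POST /batch queues a background job; poll
# GET /batch/{job_id} until the job reports completion.
resp = requests.post(f"{BASE_URL}/batch", json={"model_ids": ["bert-base-uncased", "gpt2"]})
resp.raise_for_status()
job_id = resp.json()["job_id"]

while True:
    status = requests.get(f"{BASE_URL}/batch/{job_id}").json()
    if status["status"] == "completed":
        break
    time.sleep(2)

print(status["results"])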