from fastapi import APIRouter, Form, BackgroundTasks
from config import settings
import os
import json
from routers.donut_evaluate import run_evaluate_donut
from routers.donut_training import run_training_donut
import utils


router = APIRouter()


def invoke_training(max_epochs, val_check_interval, warmup_steps, model_in_use, sparrow_key):
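    """Validate the Sparrow key and run Donut training synchronously.

    Intended to run as a FastAPI background task; BackgroundTasks discards the
    return value, so an invalid key simply skips training without notifying the caller.
    """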
    if sparrow_key != settings.sparrow_key:
        return {"error": "Invalid Sparrow key."}

    if model_in_use == 'donut':
        processing_time = run_training_donut(max_epochs, val_check_interval, warmup_steps)
        utils.log_stats(settings.training_stats_file, [processing_time, settings.model])
        print(f"Processing time training: {processing_time:.2f} seconds")


@router.post("/training")
async def run_training(background_tasks: BackgroundTasks,
                       max_epochs: int = Form(30),
                       val_check_interval: float = Form(0.4),
                       warmup_steps: int = Form(81),
                       model_in_use: str = Form('donut'),
                       sparrow_key: str = Form(None)):
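    """Queue a Donut training run as a background task and return immediately."""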

    background_tasks.add_task(invoke_training, max_epochs, val_check_interval, warmup_steps, model_in_use, sparrow_key)

    return {"message": "Sparrow ML training started in the background"}


def invoke_evaluate(model_in_use, sparrow_key):
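    """Validate the Sparrow key, run Donut evaluation, and log scores, accuracy, and timing."""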
    if sparrow_key != settings.sparrow_key:
        return {"error": "Invalid Sparrow key."}

    if model_in_use == 'donut':
        scores, accuracy, processing_time = run_evaluate_donut()
        utils.log_stats(settings.evaluate_stats_file, [processing_time, scores, accuracy, settings.model])
        print(f"Processing time evaluate: {processing_time:.2f} seconds")


@router.post("/evaluate")
async def run_evaluate(background_tasks: BackgroundTasks,
                       model_in_use: str = Form('donut'),
                       sparrow_key: str = Form(None)):
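    """Queue a Donut evaluation run as a background task and return immediately."""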

    background_tasks.add_task(invoke_evaluate, model_in_use, sparrow_key)

    return {"message": "Sparrow ML model evaluation started in the background"}


@router.get("/statistics/training")
async def get_statistics_training():
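    """Return the logged training statistics, or an empty list if none exist yet."""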
    file_path = settings.training_stats_file

    # Check if the file exists, and read its content
    if os.path.exists(file_path):
        with open(file_path, 'r') as file:
            try:
                content = json.load(file)
            except json.JSONDecodeError:
                content = []
    else:
        content = []

    return content


@router.get("/statistics/evaluate")
async def get_statistics_evaluate():
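    """Return the logged evaluation statistics, or an empty list if none exist yet."""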
    file_path = settings.evaluate_stats_file

    # Check if the file exists, and read its content
    if os.path.exists(file_path):
        with open(file_path, 'r') as file:
            try:
                content = json.load(file)
            except json.JSONDecodeError:
                content = []
    else:
        content = []

    return content
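

# Minimal usage sketch for these endpoints. Assumptions: the host, port, and any
# route prefix depend on how this router is included in the application, so the
# base URL below is illustrative only.
#
#   curl -X POST http://localhost:8000/training \
#        -F max_epochs=30 -F val_check_interval=0.4 -F warmup_steps=81 \
#        -F model_in_use=donut -F sparrow_key=<your_key>
#
#   curl -X POST http://localhost:8000/evaluate \
#        -F model_in_use=donut -F sparrow_key=<your_key>
#
#   curl http://localhost:8000/statistics/training
#   curl http://localhost:8000/statistics/evaluate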