File size: 5,642 Bytes
a5b0db7
9ab539a
 
 
124bec5
9ab539a
 
 
 
 
124bec5
9ab539a
 
 
 
 
 
 
124bec5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9ab539a
 
 
 
124bec5
 
 
 
 
 
9ab539a
bccaf50
9ab539a
bccaf50
9ab539a
 
 
 
 
bccaf50
a1cd618
124bec5
 
 
 
bccaf50
9ab539a
 
 
 
 
124bec5
a1cd618
bccaf50
124bec5
bccaf50
124bec5
bccaf50
124bec5
bccaf50
124bec5
bccaf50
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9ab539a
 
124bec5
 
9ab539a
124bec5
 
9ab539a
5fc842f
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
from dataclasses import dataclass
from enum import Enum
from typing import ClassVar

from pydantic import BaseModel

from src.about import Tasks


# These classes are for user-facing column names,
# to avoid having to change them all around the code
# when a modification is needed
class ColumnContent(BaseModel):
    """Display metadata for a single leaderboard column."""
    name: str  # user-facing column header, e.g. "Trust Score"
    type: str  # UI column type; values used below are "str", "number", "markdown", "bool"
    displayed_by_default: bool  # whether the column is visible without the user enabling it
    hidden: bool = False  # if True, excluded from the COLS selection at the bottom of this file
    never_hidden: bool = False  # presumably cannot be deselected in the UI — TODO confirm against app code

## Leaderboard columns
class AutoEvalColumn(BaseModel):
    """Declares every column of the main leaderboard table.

    The concrete values (display names, types) are supplied by the
    ``auto_eval_column_attrs`` instance below; ``model_fields`` iteration
    order drives the column order used to build COLS.
    """
    library_type_symbol: ColumnContent  # emoji marker column (header "T")
    library: ColumnContent  # library name, rendered as markdown
    overall_risk: ColumnContent  # aggregated "Trust Score"
    # Task columns
    license: ColumnContent
    security: ColumnContent
    maintenance: ColumnContent
    dependency: ColumnContent
    regulatory: ColumnContent
    # Library information
    library_type: ColumnContent
    framework: ColumnContent
    version: ColumnContent
    language: ColumnContent
    license_name: ColumnContent
    stars: ColumnContent
    availability: ColumnContent
    report_url: ColumnContent
    last_update: ColumnContent
    verified: ColumnContent

# Singleton instance holding the concrete, user-facing definition of each
# leaderboard column declared in AutoEvalColumn above.
auto_eval_column_attrs = AutoEvalColumn(
    library_type_symbol=ColumnContent(name="T", type="str", displayed_by_default=True, never_hidden=True),
    library=ColumnContent(name="Library", type="markdown", displayed_by_default=True, never_hidden=True),
    overall_risk=ColumnContent(name="Trust Score", type="number", displayed_by_default=True),
    # Task columns from Tasks enum
    license=ColumnContent(name="License Risk", type="number", displayed_by_default=True),
    security=ColumnContent(name="Security Risk", type="number", displayed_by_default=True),
    maintenance=ColumnContent(name="Maintenance Risk", type="number", displayed_by_default=True),
    dependency=ColumnContent(name="Dependency Risk", type="number", displayed_by_default=True),
    regulatory=ColumnContent(name="Regulatory Risk", type="number", displayed_by_default=True),
    # Library information
    library_type=ColumnContent(name="Type", type="str", displayed_by_default=False),
    framework=ColumnContent(name="Framework", type="str", displayed_by_default=False),
    version=ColumnContent(name="Version", type="str", displayed_by_default=False, hidden=True),
    language=ColumnContent(name="Language", type="str", displayed_by_default=False),
    license_name=ColumnContent(name="License", type="str", displayed_by_default=True),
    stars=ColumnContent(name="GitHub ⭐", type="number", displayed_by_default=False),
    availability=ColumnContent(name="Active Maintenance", type="bool", displayed_by_default=True),
    report_url=ColumnContent(name="Report", type="markdown", displayed_by_default=True),
    last_update=ColumnContent(name="Last Update", type="str", displayed_by_default=False),
    verified=ColumnContent(name="Verified", type="bool", displayed_by_default=False),
)


## For the queue columns in the submission tab
@dataclass(frozen=True)
class EvalQueueColumn:  # Queue column
    """Column definitions for the evaluation-queue table in the submission tab.

    NOTE(review): the attributes are class-level constants, so they are now
    annotated with ClassVar. In the previous un-annotated form,
    @dataclass(frozen=True) silently registered zero fields (dataclass only
    picks up annotated attributes), so the decorator was a no-op. Behavior is
    unchanged: these remain class attributes, still discovered via vars() by
    EVAL_COLS / EVAL_TYPES at the bottom of this file.
    """
    library: ClassVar[ColumnContent] = ColumnContent(name="library", type="markdown", displayed_by_default=True)
    version: ClassVar[ColumnContent] = ColumnContent(name="version", type="str", displayed_by_default=True)
    language: ClassVar[ColumnContent] = ColumnContent(name="language", type="str", displayed_by_default=True)
    framework: ClassVar[ColumnContent] = ColumnContent(name="framework", type="str", displayed_by_default=True)
    library_type: ClassVar[ColumnContent] = ColumnContent(name="library_type", type="str", displayed_by_default=True)
    status: ClassVar[ColumnContent] = ColumnContent(name="status", type="str", displayed_by_default=True)

## All the library information that we might need
@dataclass
class LibraryDetails:
    """Display payload stored as the value of the enum members below."""
    name: str  # human-readable label, e.g. "ML Framework" or "Python"
    display_name: str = ""  # optional alternate label — unused in this file; TODO confirm external use
    symbol: str = "" # emoji


class LibraryType(Enum):
    """Categories of libraries tracked on the leaderboard.

    Each member's value is a LibraryDetails carrying the human-readable
    category name and an emoji symbol.
    """

    ML = LibraryDetails(name="ML Framework", symbol="🟒")
    LLM = LibraryDetails(name="LLM Framework", symbol="πŸ”Ά")
    AGENT = LibraryDetails(name="Agent Framework", symbol="β­•")
    VIS = LibraryDetails(name="LLM Inference", symbol="🟦")
    GENERAL = LibraryDetails(name="LLM Orchestration", symbol="🟣")
    Unknown = LibraryDetails(name="", symbol="?")

    def to_str(self, separator=" "):
        """Render this type as "<symbol><separator><name>"."""
        details = self.value
        return "{}{}{}".format(details.symbol, separator, details.name)

    @staticmethod
    def from_str(type: str) -> "LibraryType":
        """Map a display string back to a LibraryType.

        A member matches when the string contains either its category name
        or its symbol; members are tried in declaration order, with Unknown
        as the fallback (Unknown itself is excluded — its empty name would
        match everything).
        """
        for candidate in (
            LibraryType.ML,
            LibraryType.LLM,
            LibraryType.AGENT,
            LibraryType.VIS,
            LibraryType.GENERAL,
        ):
            details = candidate.value
            if details.name in type or details.symbol in type:
                return candidate
        return LibraryType.Unknown

class Language(Enum):
    """Implementation languages a library can be written in."""

    Python = LibraryDetails(name="Python")
    JavaScript = LibraryDetails(name="JavaScript")
    TypeScript = LibraryDetails(name="TypeScript")
    Java = LibraryDetails(name="Java")
    CPP = LibraryDetails(name="C++")
    Other = LibraryDetails(name="Other")

class AssessmentStatus(Enum):
    """Verification state of a library's assessment."""

    Verified = LibraryDetails(name="Verified")
    Unverified = LibraryDetails(name="Unverified")
    Disputed = LibraryDetails(name="Disputed")

# Column selection
# Leaderboard columns: every non-hidden AutoEvalColumn field, in declaration
# order (pydantic's model_fields preserves it). getattr is done once per field
# instead of twice as before.
COLS = [
    column.name
    for column in (getattr(auto_eval_column_attrs, attr) for attr in AutoEvalColumn.model_fields)
    if not column.hidden
]
# Kept for backward compatibility in case other modules import it — TODO confirm.
fields = AutoEvalColumn.model_fields

# Queue-table columns: collect the public ColumnContent attributes in a single
# pass instead of traversing vars(EvalQueueColumn) separately for names and types.
_eval_queue_columns = [
    getattr(EvalQueueColumn, attr) for attr in vars(EvalQueueColumn) if not attr.startswith("_")
]
EVAL_COLS = [column.name for column in _eval_queue_columns]
EVAL_TYPES = [column.type for column in _eval_queue_columns]

# Task columns for benchmarking - use the display column names from the Tasks enum
BENCHMARK_COLS = [task.value.col_name for task in Tasks]