wu981526092 committed on
Commit
fbd403a
·
1 Parent(s): 505dacc
app.py CHANGED
@@ -3,6 +3,7 @@ from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
3
  import pandas as pd
4
  from apscheduler.schedulers.background import BackgroundScheduler
5
  from huggingface_hub import snapshot_download
 
6
 
7
  from src.about import (
8
  CITATION_BUTTON_LABEL,
@@ -24,33 +25,62 @@ from src.display.utils import (
24
  Language,
25
  AssessmentStatus
26
  )
27
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
28
  from src.populate import get_evaluation_queue_df, get_leaderboard_df
29
  from src.submission.submit import add_new_eval
30
 
31
 
32
  def restart_space():
33
- API.restart_space(repo_id=REPO_ID)
 
 
 
 
 
 
 
 
 
34
 
35
  ### Space initialisation
36
- try:
37
- print(EVAL_REQUESTS_PATH)
38
- snapshot_download(
39
- repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
40
- )
41
- except Exception:
42
- restart_space()
43
- try:
44
- print(EVAL_RESULTS_PATH)
45
- snapshot_download(
46
- repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
47
- )
48
- except Exception:
49
- restart_space()
50
-
51
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
53
 
 
54
  (
55
  finished_eval_queue_df,
56
  running_eval_queue_df,
@@ -58,8 +88,13 @@ LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS,
58
  ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
59
 
60
  def init_leaderboard(dataframe):
 
61
  if dataframe is None or dataframe.empty:
62
- raise ValueError("Leaderboard DataFrame is empty or None.")
 
 
 
 
63
  return Leaderboard(
64
  value=dataframe,
65
  datatype=[c.type for c in fields(AutoEvalColumn)],
@@ -192,7 +227,11 @@ with demo:
192
  show_copy_button=True,
193
  )
194
 
195
- scheduler = BackgroundScheduler()
196
- scheduler.add_job(restart_space, "interval", seconds=1800)
197
- scheduler.start()
 
 
 
 
198
  demo.queue(default_concurrency_limit=40).launch()
 
3
  import pandas as pd
4
  from apscheduler.schedulers.background import BackgroundScheduler
5
  from huggingface_hub import snapshot_download
6
+ import os
7
 
8
  from src.about import (
9
  CITATION_BUTTON_LABEL,
 
25
  Language,
26
  AssessmentStatus
27
  )
28
+ from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN, LOCAL_MODE
29
  from src.populate import get_evaluation_queue_df, get_leaderboard_df
30
  from src.submission.submit import add_new_eval
31
 
32
 
33
  def restart_space():
34
+ """Restart the Hugging Face space"""
35
+ if LOCAL_MODE:
36
+ print("Running in local mode, skipping space restart")
37
+ return
38
+
39
+ try:
40
+ API.restart_space(repo_id=REPO_ID)
41
+ except Exception as e:
42
+ print(f"Failed to restart space: {e}")
43
+ print("Continuing without restart")
44
 
45
  ### Space initialisation
46
def initialize_data_directories():
    """Initialize directories for assessment data.

    Always creates the local request/result directories. Unless
    LOCAL_MODE is set, each directory is then synced from its Hugging
    Face dataset repo; download failures are logged and ignored so the
    app falls back to whatever data already exists locally.
    """
    # Create local directories if they don't exist
    os.makedirs(EVAL_REQUESTS_PATH, exist_ok=True)
    os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)

    if LOCAL_MODE:
        print("Running in local mode, using local directories only")
        return

    # Both datasets sync identically; drive them from one table instead
    # of duplicating the try/except block per repo.
    for label, repo_id, local_dir in (
        ("request", QUEUE_REPO, EVAL_REQUESTS_PATH),
        ("result", RESULTS_REPO, EVAL_RESULTS_PATH),
    ):
        try:
            print(f"Downloading {label} data from {repo_id} to {local_dir}")
            snapshot_download(
                repo_id=repo_id, local_dir=local_dir, repo_type="dataset",
                tqdm_class=None, etag_timeout=30, token=TOKEN
            )
        except Exception as e:
            print(f"Failed to download {label} data: {e}")
            print("Using local data only")
76
+
77
+ # Initialize data
78
+ initialize_data_directories()
79
+
80
+ # Load data for leaderboard
81
  LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
82
 
83
+ # Load queue data
84
  (
85
  finished_eval_queue_df,
86
  running_eval_queue_df,
 
88
  ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
89
 
90
  def init_leaderboard(dataframe):
91
+ """Initialize the leaderboard component"""
92
  if dataframe is None or dataframe.empty:
93
+ # Create an empty dataframe with the expected columns
94
+ empty_df = pd.DataFrame(columns=COLS)
95
+ print("Warning: Leaderboard DataFrame is empty. Using empty dataframe.")
96
+ dataframe = empty_df
97
+
98
  return Leaderboard(
99
  value=dataframe,
100
  datatype=[c.type for c in fields(AutoEvalColumn)],
 
227
  show_copy_button=True,
228
  )
229
 
230
+ # Only schedule space restarts if not in local mode
231
+ if not LOCAL_MODE:
232
+ scheduler = BackgroundScheduler()
233
+ scheduler.add_job(restart_space, "interval", seconds=1800)
234
+ scheduler.start()
235
+
236
+ # Launch the app
237
  demo.queue(default_concurrency_limit=40).launch()
assessment-queue/langchain-ai_langchain_request.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "library": "langchain-ai/langchain",
3
+ "version": "v0.1.0",
4
+ "repository_url": "https://github.com/langchain-ai/langchain",
5
+ "language": "Python",
6
+ "framework": "Python SDK",
7
+ "library_type": "llm framework",
8
+ "license": "MIT",
9
+ "stars": 74500,
10
+ "status": "FINISHED",
11
+ "submitted_time": "2025-04-30T10:00:00Z",
12
+ "last_updated": "2025-05-01T12:00:00Z",
13
+ "assessment_id": "abc123"
14
+ }
assessment-queue/microsoft_autogen_request.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "library": "microsoft/autogen",
3
+ "version": "v0.2.0",
4
+ "repository_url": "https://github.com/microsoft/autogen",
5
+ "language": "Python",
6
+ "framework": "Agent Framework",
7
+ "library_type": "agent framework",
8
+ "license": "MIT",
9
+ "stars": 48700,
10
+ "status": "FINISHED",
11
+ "submitted_time": "2025-05-02T08:15:00Z",
12
+ "last_updated": "2025-05-03T09:15:00Z",
13
+ "assessment_id": "ghi789"
14
+ }
assessment-queue/pytorch_pytorch_request.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "library": "pytorch/pytorch",
3
+ "version": "v2.1.0",
4
+ "repository_url": "https://github.com/pytorch/pytorch",
5
+ "language": "Python",
6
+ "framework": "Machine Learning",
7
+ "library_type": "machine learning",
8
+ "license": "BSD-3",
9
+ "stars": 72300,
10
+ "status": "FINISHED",
11
+ "submitted_time": "2025-05-01T16:30:00Z",
12
+ "last_updated": "2025-05-02T14:30:00Z",
13
+ "assessment_id": "def456"
14
+ }
assessment-results/sample_assessment.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "assessment": {
3
+ "library_name": "langchain-ai/langchain",
4
+ "version": "v0.1.0",
5
+ "language": "Python",
6
+ "framework": "Python SDK",
7
+ "completed_time": "2025-05-01T12:00:00Z",
8
+ "last_updated": "2025-05-01T12:00:00Z",
9
+ "active_maintenance": true,
10
+ "independently_verified": true,
11
+ "scores": {
12
+ "license_validation": 2.5,
13
+ "security_assessment": 4.8,
14
+ "maintenance_health": 1.2,
15
+ "dependency_management": 3.7,
16
+ "regulatory_compliance": 5.2
17
+ },
18
+ "details": {
19
+ "license_validation": {
20
+ "license_type": "MIT",
21
+ "compatibility": "High",
22
+ "issues": "None"
23
+ },
24
+ "security_assessment": {
25
+ "known_vulnerabilities": 3,
26
+ "patch_responsiveness": "Medium",
27
+ "last_security_review": "2025-03-15"
28
+ },
29
+ "maintenance_health": {
30
+ "active_contributors": 42,
31
+ "release_frequency": "High",
32
+ "issue_response_time": "1.2 days"
33
+ },
34
+ "dependency_management": {
35
+ "vulnerable_dependencies": 2,
36
+ "dependency_freshness": "Good",
37
+ "supply_chain_security": "Medium"
38
+ },
39
+ "regulatory_compliance": {
40
+ "documentation_quality": "Medium",
41
+ "data_privacy_features": "Basic",
42
+ "audit_readiness": "Low"
43
+ }
44
+ }
45
+ }
46
+ }
assessment-results/sample_assessment2.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "assessment": {
3
+ "library_name": "pytorch/pytorch",
4
+ "version": "v2.1.0",
5
+ "language": "Python",
6
+ "framework": "Machine Learning",
7
+ "completed_time": "2025-05-02T14:30:00Z",
8
+ "last_updated": "2025-05-02T14:30:00Z",
9
+ "active_maintenance": true,
10
+ "independently_verified": false,
11
+ "scores": {
12
+ "license_validation": 1.8,
13
+ "security_assessment": 3.2,
14
+ "maintenance_health": 2.0,
15
+ "dependency_management": 2.5,
16
+ "regulatory_compliance": 4.1
17
+ },
18
+ "details": {
19
+ "license_validation": {
20
+ "license_type": "BSD-3",
21
+ "compatibility": "High",
22
+ "issues": "None"
23
+ },
24
+ "security_assessment": {
25
+ "known_vulnerabilities": 2,
26
+ "patch_responsiveness": "High",
27
+ "last_security_review": "2025-04-10"
28
+ },
29
+ "maintenance_health": {
30
+ "active_contributors": 156,
31
+ "release_frequency": "Medium",
32
+ "issue_response_time": "2.5 days"
33
+ },
34
+ "dependency_management": {
35
+ "vulnerable_dependencies": 1,
36
+ "dependency_freshness": "Very Good",
37
+ "supply_chain_security": "High"
38
+ },
39
+ "regulatory_compliance": {
40
+ "documentation_quality": "High",
41
+ "data_privacy_features": "Medium",
42
+ "audit_readiness": "Medium"
43
+ }
44
+ }
45
+ }
46
+ }
assessment-results/sample_assessment3.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "assessment": {
3
+ "library_name": "microsoft/autogen",
4
+ "version": "v0.2.0",
5
+ "language": "Python",
6
+ "framework": "Agent Framework",
7
+ "completed_time": "2025-05-03T09:15:00Z",
8
+ "last_updated": "2025-05-03T09:15:00Z",
9
+ "active_maintenance": true,
10
+ "independently_verified": true,
11
+ "scores": {
12
+ "license_validation": 3.1,
13
+ "security_assessment": 6.7,
14
+ "maintenance_health": 2.8,
15
+ "dependency_management": 5.5,
16
+ "regulatory_compliance": 7.2
17
+ },
18
+ "details": {
19
+ "license_validation": {
20
+ "license_type": "MIT",
21
+ "compatibility": "High",
22
+ "issues": "None"
23
+ },
24
+ "security_assessment": {
25
+ "known_vulnerabilities": 5,
26
+ "patch_responsiveness": "Medium",
27
+ "last_security_review": "2025-02-20"
28
+ },
29
+ "maintenance_health": {
30
+ "active_contributors": 28,
31
+ "release_frequency": "High",
32
+ "issue_response_time": "1.8 days"
33
+ },
34
+ "dependency_management": {
35
+ "vulnerable_dependencies": 4,
36
+ "dependency_freshness": "Medium",
37
+ "supply_chain_security": "Low"
38
+ },
39
+ "regulatory_compliance": {
40
+ "documentation_quality": "Low",
41
+ "data_privacy_features": "Minimal",
42
+ "audit_readiness": "Low"
43
+ }
44
+ }
45
+ }
46
+ }
src/envs.py CHANGED
@@ -2,9 +2,14 @@ import os
2
 
3
  from huggingface_hub import HfApi
4
 
 
 
 
 
5
  # Info to change for your repository
6
  # ----------------------------------
7
- TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
 
8
 
9
  OWNER = "libvulnwatch" # Change to your org - don't forget to create a results and request dataset, with the correct format!
10
  # ----------------------------------
@@ -22,4 +27,5 @@ EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "assessment-results")
22
  EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "assessment-queue-bk")
23
  EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "assessment-results-bk")
24
 
 
25
  API = HfApi(token=TOKEN)
 
2
 
3
  from huggingface_hub import HfApi
4
 
5
+ # Run in local mode (no Hugging Face connection required)
6
+ # Set to True when developing locally without HF credentials
7
+ LOCAL_MODE = True
8
+
9
  # Info to change for your repository
10
  # ----------------------------------
11
+ # Get token from environment or use None in local mode
12
+ TOKEN = os.environ.get("HF_TOKEN") if not LOCAL_MODE else None
13
 
14
  OWNER = "libvulnwatch" # Change to your org - don't forget to create a results and request dataset, with the correct format!
15
  # ----------------------------------
 
27
  EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "assessment-queue-bk")
28
  EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "assessment-results-bk")
29
 
30
+ # Initialize API with token if available
31
  API = HfApi(token=TOKEN)
src/submission/submit.py CHANGED
@@ -11,7 +11,8 @@ import requests
11
  from huggingface_hub import HfApi
12
 
13
  from src.display.utils import LibraryType, Language, AssessmentStatus
14
- from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN
 
15
  from src.submission.check_validity import is_repository_valid, get_library_info
16
 
17
 
@@ -41,12 +42,12 @@ def add_new_eval(
41
  is_valid, validity_message, library_info = is_repository_valid(library_name, repository_url)
42
 
43
  if not is_valid:
44
- return f"⚠️ Invalid submission: {validity_message}"
45
 
46
  # Parse library type
47
  library_type = LibraryType.from_str(library_type_str)
48
  if library_type == LibraryType.Unknown:
49
- return "⚠️ Please select a valid library type."
50
 
51
  # Create a unique identifier for the submission
52
  uid = uuid.uuid4().hex[:6]
@@ -73,14 +74,22 @@ def add_new_eval(
73
  "assessment_id": uid
74
  }
75
 
76
- # Save the request
77
  os.makedirs(EVAL_REQUESTS_PATH, exist_ok=True)
78
- with open(os.path.join(EVAL_REQUESTS_PATH, request_filename), "w") as f:
 
 
 
79
  json.dump(assessment_request, f, indent=2)
80
 
 
 
 
 
 
81
  try:
82
  # Push the file to the HF repo
83
- path = Path(os.path.join(EVAL_REQUESTS_PATH, request_filename))
84
  API.upload_file(
85
  path_or_fileobj=path,
86
  path_in_repo=request_filename,
@@ -88,7 +97,7 @@ def add_new_eval(
88
  repo_type="dataset",
89
  )
90
 
91
- return f"Library '{library_name}' (version {library_version}) has been added to the assessment queue! Assessment ID: {uid}"
92
 
93
  except Exception as e:
94
- return f"Error uploading assessment request: {str(e)}"
 
11
  from huggingface_hub import HfApi
12
 
13
  from src.display.utils import LibraryType, Language, AssessmentStatus
14
+ from src.display.formatting import styled_error, styled_warning, styled_message
15
+ from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN, LOCAL_MODE
16
  from src.submission.check_validity import is_repository_valid, get_library_info
17
 
18
 
 
42
  is_valid, validity_message, library_info = is_repository_valid(library_name, repository_url)
43
 
44
  if not is_valid:
45
+ return styled_error(f"Invalid submission: {validity_message}")
46
 
47
  # Parse library type
48
  library_type = LibraryType.from_str(library_type_str)
49
  if library_type == LibraryType.Unknown:
50
+ return styled_error("Please select a valid library type.")
51
 
52
  # Create a unique identifier for the submission
53
  uid = uuid.uuid4().hex[:6]
 
74
  "assessment_id": uid
75
  }
76
 
77
+ # Ensure directory exists
78
  os.makedirs(EVAL_REQUESTS_PATH, exist_ok=True)
79
+
80
+ # Save the request locally
81
+ request_file_path = os.path.join(EVAL_REQUESTS_PATH, request_filename)
82
+ with open(request_file_path, "w") as f:
83
  json.dump(assessment_request, f, indent=2)
84
 
85
+ # If in local mode, don't try to upload to HF
86
+ if LOCAL_MODE:
87
+ return styled_message(f"Library '{library_name}' (version {library_version}) has been added to the local assessment queue! Assessment ID: {uid}")
88
+
89
+ # Try to upload to HF if not in local mode
90
  try:
91
  # Push the file to the HF repo
92
+ path = Path(request_file_path)
93
  API.upload_file(
94
  path_or_fileobj=path,
95
  path_in_repo=request_filename,
 
97
  repo_type="dataset",
98
  )
99
 
100
+ return styled_message(f"Library '{library_name}' (version {library_version}) has been added to the assessment queue! Assessment ID: {uid}")
101
 
102
  except Exception as e:
103
+ return styled_warning(f"Saved locally but failed to upload to Hugging Face: {str(e)}")