seonglae-holistic committed
Commit fdddab8 · 1 Parent(s): dc165c1

fix: submit library with minimal information

app.py CHANGED
@@ -24,7 +24,6 @@ from src.display.utils import (
     EVAL_TYPES,
     AutoEvalColumn,
     auto_eval_column_attrs,
-    LibraryType,
     Language,
 )
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN, LOCAL_MODE
@@ -229,15 +228,7 @@ with demo:
 
     with gr.Row():
         with gr.Column():
-            library_name_textbox = gr.Textbox(label="Library name (org/repo format)")
-            library_version_textbox = gr.Textbox(label="Version", placeholder="v1.0.0")
-            library_type = gr.Dropdown(
-                choices=[t.to_str(" : ") for t in LibraryType if t != LibraryType.Unknown],
-                label="Library type",
-                multiselect=False,
-                value=None,
-                interactive=True,
-            )
+            library_name_textbox = gr.Textbox(label="Library name")
 
         with gr.Column():
             language = gr.Dropdown(
@@ -247,8 +238,6 @@ with demo:
                 value=["Python"],
                 interactive=True,
             )
-            framework = gr.Textbox(label="Framework/Ecosystem (e.g., PyTorch, React)")
-            repository_url = gr.Textbox(label="Repository URL")
 
     submit_button = gr.Button("Submit for Assessment")
     submission_result = gr.Markdown()
@@ -256,11 +245,7 @@ with demo:
         add_new_eval,
         [
             library_name_textbox,
-            library_version_textbox,
-            repository_url,
             language,
-            framework,
-            library_type,
         ],
         submission_result,
     )
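
For context, a minimal sketch of the trimmed-down submission form after this change. The widget names and the `.click` wiring come from the diff; the surrounding `gr.Blocks` layout, the dropdown label, and the language choices are assumptions, since those lines are not shown in the hunks, and the sketch assumes the Space's `src` package is importable.

import gradio as gr

from src.submission.submit import add_new_eval

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Only the library name is collected now; the version, library type,
            # framework, and repository URL fields were removed in this commit.
            library_name_textbox = gr.Textbox(label="Library name")
        with gr.Column():
            language = gr.Dropdown(
                choices=["Python", "JavaScript", "Rust"],  # placeholder; the app derives choices from its Language enum
                label="Language",                          # assumed label (not shown in the diff)
                multiselect=True,
                value=["Python"],
                interactive=True,
            )

    submit_button = gr.Button("Submit for Assessment")
    submission_result = gr.Markdown()
    submit_button.click(
        add_new_eval,
        [library_name_textbox, language],  # minimal inputs after this commit
        submission_result,
    )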
src/leaderboard/read_evals.py CHANGED
@@ -19,7 +19,7 @@ class AssessmentResult(BaseModel):
     """Represents one full vulnerability assessment. Built from a combination of the result and request file for a given library.
     """
     assessment_id: str # Unique identifier
-    library_name: str # org/repo
+    library_name: str
     org: str
     repo: str
     version: str
src/submission/check_validity.py CHANGED
@@ -1,58 +1,22 @@
 import json
 import os
-import re
-import requests
 from collections import defaultdict
-from datetime import datetime, timedelta, timezone
 from typing import Dict, Tuple, Any, List, Set
 
-def is_repository_valid(repo_name: str, repo_url: str) -> Tuple[bool, str, Dict[str, Any]]:
+def is_repository_valid(repo_name: str) -> Tuple[bool, str, Dict[str, Any]]:
     """
     Checks if a GitHub repository is valid and accessible.
 
     Args:
-        repo_name: The name of the repository (org/repo format)
-        repo_url: URL to the repository
+        repo_name: The name of the repository
 
     Returns:
        Tuple of (is_valid, error_message, library_info)
     """
     # Basic format validation
-    if not repo_name or "/" not in repo_name:
-        return False, "Repository name must be in the format 'organization/repository'", {}
-
-    # Check if GitHub URL
-    if repo_url and "github.com" in repo_url:
-        # Extract org and repo from URL if provided
-        try:
-            parts = repo_url.split("github.com/")[1].split("/")
-            org = parts[0]
-            repo = parts[1].split(".")[0] if "." in parts[1] else parts[1]
-            url_repo_name = f"{org}/{repo}"
-
-            # Check if URL matches repo_name
-            if url_repo_name != repo_name:
-                return False, f"Repository name ({repo_name}) doesn't match the URL ({url_repo_name})", {}
-        except:
-            pass # Fall back to using repo_name
-
-    # Get repository information from GitHub API
-    org, repo = repo_name.split("/")
-    api_url = f"https://api.github.com/repos/{org}/{repo}"
-
-    try:
-        response = requests.get(api_url)
-        if response.status_code != 200:
-            return False, f"Repository not found or not accessible: {response.json().get('message', 'Unknown error')}", {}
-
-        # Parse repository data
-        repo_data = response.json()
-        library_info = get_library_info(repo_data)
-
-        return True, "", library_info
-
-    except Exception as e:
-        return False, f"Error accessing repository: {str(e)}", {}
+    if not repo_name:
+        return False, "Repository name is required", {}
+    return True, "", {}
 
 def get_library_info(repo_data: Dict[str, Any]) -> Dict[str, Any]:
     """
src/submission/submit.py CHANGED
@@ -4,7 +4,6 @@ import uuid
 from datetime import datetime
 from pathlib import Path
 
-from src.display.utils import LibraryType
 from src.display.formatting import styled_error, styled_warning, styled_message
 from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, LOCAL_MODE
 from src.submission.check_validity import is_repository_valid
@@ -12,37 +11,24 @@ from src.submission.check_validity import is_repository_valid
 
 def add_new_eval(
     library_name,
-    library_version,
-    repository_url,
     language,
-    framework,
-    library_type_str,
 ) -> str:
     """
     Adds a new library to the assessment queue.
 
     Args:
-        library_name: Name of the library (org/repo format)
-        library_version: Version of the library
-        repository_url: URL to the repository
+        library_name: Name of the library
         language: Programming language
-        framework: Related framework/ecosystem
-        library_type_str: Type of AI library
 
     Returns:
        A message indicating the status of the submission
     """
     # Check if valid repository
-    is_valid, validity_message, library_info = is_repository_valid(library_name, repository_url)
+    is_valid, validity_message, library_info = is_repository_valid(library_name)
 
     if not is_valid:
         return styled_error(f"Invalid submission: {validity_message}")
 
-    # Parse library type
-    library_type = LibraryType.from_str(library_type_str)
-    if library_type == LibraryType.Unknown:
-        return styled_error("Please select a valid library type.")
-
     # Create a unique identifier for the submission
     uid = uuid.uuid4().hex[:6]
     timestamp = datetime.now().isoformat()
@@ -55,11 +41,7 @@ def add_new_eval(
     # Create the assessment request JSON
     assessment_request = {
         "library": library_name,
-        "version": library_version,
-        "repository_url": repository_url,
         "language": "/".join(language) if isinstance(language, list) else language,
-        "framework": framework,
-        "library_type": library_type.value.name,
         "license": license_name,
         "stars": stars,
         "status": "PENDING",
@@ -78,7 +60,7 @@ def add_new_eval(
 
     # If in local mode, don't try to upload to HF
     if LOCAL_MODE:
-        return styled_message(f"Library '{library_name}' (version {library_version}) has been added to the local assessment queue! Assessment ID: {uid}")
+        return styled_message(f"Library '{library_name}' has been added to the local assessment queue! Assessment ID: {uid}")
 
     # Try to upload to HF if not in local mode
     try:
@@ -91,7 +73,7 @@ def add_new_eval(
             repo_type="dataset",
         )
 
-        return styled_message(f"Library '{library_name}' (version {library_version}) has been added to the assessment queue! Assessment ID: {uid}")
+        return styled_message(f"Library '{library_name}' has been added to the assessment queue! Assessment ID: {uid}")
 
     except Exception as e:
         return styled_warning(f"Saved locally but failed to upload to Hugging Face: {str(e)}")
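
Taken together, `add_new_eval` now needs only the library name and the language(s). Because `is_repository_valid` no longer queries GitHub, `library_info` is always empty, so the `license` and `stars` fields are presumably filled with defaults by lines elided from this diff. A hedged usage sketch, assuming the Space's `src` package is importable and its queue paths/environment are configured:

from src.submission.submit import add_new_eval

# Minimal submission after this commit: just a name and a list of languages.
result_html = add_new_eval("transformers", ["Python"])
print(result_html)  # styled success message with the assessment ID, or a styled error/warning

# An empty name is rejected by is_repository_valid before any request file is written.
print(add_new_eval("", ["Python"]))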