submission-fix (#1039)
- Use uv for dependencies and runtime (2620acfe2080a0b2b8b6139e9f1d1eba6ea3e0f7)
- Switch to uv in dev Dockerfile (b91f2b791918c02db1b7635d21b7dbde284938ba)
- Set logging level to WARNING (f6dd42cae1290960ee07f7caf9e8d20aa8eec6b1)
- Refactor model validation logic (0e60add1c9df5f43c2e3e3b920e5fcb0e58bd5f1)
- Improve model size calculation (9469eaec18aff6a0e88bc9361fb6bd6b637ac706)
- Replace Poetry with uv (d27998844b63b2eacb0fa32114cd7079c20d6639)
- Update backend to use uv (28fd56c9b310ceae547022f9d21d9375fa592e90)
- Fix weightsType casing (7f1a54e79105b3af55a31b7aa3160e222381e486)
- Minor changes (581b4b19243fa8975439b49ab07f8da8a88a1bb6)
- Correct text (deb47717036353ee8a8340b2ff28f6479580b72d)
- Dockerfile +8 -9
- backend/Dockerfile.dev +22 -12
- backend/__init__.py +0 -0
- backend/app/asgi.py +3 -3
- backend/app/services/models.py +32 -22
- backend/app/utils/model_validation.py +42 -40
- backend/pyproject.toml +49 -25
- backend/uv.lock +0 -0
- docker-compose.yml +1 -3
- frontend/src/pages/AddModelPage/components/ModelSubmissionForm/ModelSubmissionForm.js +7 -7
Dockerfile
CHANGED
@@ -8,23 +8,22 @@ COPY frontend/ ./
 RUN npm run build
 
 # Build backend
-FROM …
+FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim
+
 WORKDIR /app
 
 # Create non-root user
 RUN useradd -m -u 1000 user
 
-# Install poetry
-RUN pip install poetry
-
 # Create and configure cache directory
 RUN mkdir -p /app/.cache && \
     chown -R user:user /app
 
-# Copy …
-COPY backend/pyproject.toml backend/…
-
-
+# Copy uv configuration files
+COPY backend/pyproject.toml backend/uv.lock ./
+
+# Install dependencies using uv
+RUN uv sync --all-extras --frozen
 
 # Copy backend code
 COPY backend/ .
@@ -60,4 +59,4 @@ USER user
 EXPOSE 7860
 
 # Start both servers with wait-for
-CMD ["sh", "-c", "uvicorn app.asgi:app --host 0.0.0.0 --port 7861 & while ! nc -z localhost 7861; do sleep 1; done && cd frontend && npm run serve"]
+CMD ["sh", "-c", "uv run uvicorn app.asgi:app --host 0.0.0.0 --port 7861 & while ! nc -z localhost 7861; do sleep 1; done && cd frontend && npm run serve"]
backend/Dockerfile.dev
CHANGED
@@ -1,25 +1,35 @@
-FROM …
+# Use a Python image with uv pre-installed
+FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim
 
+# Set the working directory
 WORKDIR /app
 
+# Enable bytecode compilation
+ENV UV_COMPILE_BYTECODE=1
+
+# Copy from the cache instead of linking since it's a mounted volume
+ENV UV_LINK_MODE=copy
+
+# Environment variables configuration for logs
+ENV PYTHONUNBUFFERED=1
+ENV LOG_LEVEL=INFO
+
 # Install required system dependencies
 RUN apt-get update && apt-get install -y \
     build-essential \
     && rm -rf /var/lib/apt/lists/*
 
-# …
-…
+# Copy uv configuration files
+COPY pyproject.toml uv.lock ./
 
-# …
-…
+# Install dependencies using uv
+RUN uv sync --frozen --no-install-project --no-dev
 
-# …
-…
-    poetry install --no-interaction --no-ansi --no-root
+# Place executables in the environment at the front of the path
+ENV PATH="/app/.venv/bin:$PATH"
 
-# …
-…
-ENV LOG_LEVEL=INFO
+# Reset the entrypoint, don't invoke `uv`
+ENTRYPOINT []
 
 # In dev, mount volume directly
-CMD ["uvicorn", "app.asgi:app", "--host", "0.0.0.0", "--port", "7860", "--reload", "--log-level", "warning", "--no-access-log"]
+CMD ["uv", "run", "uvicorn", "app.asgi:app", "--host", "0.0.0.0", "--port", "7860", "--reload", "--log-level", "warning", "--no-access-log"]
backend/__init__.py
ADDED
File without changes
backend/app/asgi.py
CHANGED
@@ -44,18 +44,18 @@ LOGGING_CONFIG = {
         },
         "uvicorn.access": {
             "handlers": ["default"],
-            "level": "…",
+            "level": "WARNING",
             "propagate": False,
         },
         "app": {
             "handlers": ["default"],
-            "level": "…",
+            "level": "WARNING",
             "propagate": False,
         }
     },
     "root": {
         "handlers": ["default"],
-        "level": "…",
+        "level": "WARNING",
     }
 }
 
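For reference, the net effect of the asgi.py change is that the uvicorn access logger, the application logger, and the root logger are all capped at WARNING. Below is a minimal standalone sketch of an equivalent dictConfig; the handler definition is a placeholder for illustration, not the actual configuration from asgi.py.

import logging.config

# Sketch only: three loggers capped at WARNING, as in the diff.
# The "default" handler here is a stand-in, not the real asgi.py handler.
LOGGING_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {
        "default": {"class": "logging.StreamHandler"},
    },
    "loggers": {
        "uvicorn.access": {"handlers": ["default"], "level": "WARNING", "propagate": False},
        "app": {"handlers": ["default"], "level": "WARNING", "propagate": False},
    },
    "root": {"handlers": ["default"], "level": "WARNING"},
}

logging.config.dictConfig(LOGGING_CONFIG)

On the server side this pairs with the dev Dockerfile CMD, which already passes --log-level warning --no-access-log to uvicorn.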
backend/app/services/models.py
CHANGED
@@ -382,26 +382,6 @@ class ModelService(HuggingFaceService):
             if field not in model_data:
                 raise ValueError(f"Missing required field: {field}")
 
-        # Check if model already exists in the system
-        try:
-            logger.info(LogFormatter.subsection("CHECKING EXISTING SUBMISSIONS"))
-            existing_models = await self.get_models()
-
-            # Check in all statuses (pending, evaluating, finished)
-            for status, models in existing_models.items():
-                for model in models:
-                    if model["name"] == model_data["model_id"]:
-                        error_msg = f"Model {model_data['model_id']} is already in the system with status: {status}"
-                        logger.error(LogFormatter.error("Submission rejected", error_msg))
-                        raise ValueError(error_msg)
-
-            logger.info(LogFormatter.success("No existing submission found"))
-        except ValueError:
-            raise
-        except Exception as e:
-            logger.error(LogFormatter.error("Failed to check existing submissions", e))
-            raise
-
         # Get model info and validate it exists on HuggingFace
         try:
             logger.info(LogFormatter.subsection("MODEL VALIDATION"))
@@ -412,6 +392,7 @@ class ModelService(HuggingFaceService):
                 revision=model_data["revision"],
                 token=self.token
             )
+
             if not model_info:
                 raise Exception(f"Model {model_data['model_id']} not found on HuggingFace Hub")
 
@@ -420,6 +401,29 @@ class ModelService(HuggingFaceService):
         except Exception as e:
             logger.error(LogFormatter.error("Model validation failed", e))
             raise
+
+        # Update model revision with commit sha
+        model_data["revision"] = model_info.sha
+
+        # Check if model already exists in the system
+        try:
+            logger.info(LogFormatter.subsection("CHECKING EXISTING SUBMISSIONS"))
+            existing_models = await self.get_models()
+
+            # Check in all statuses (pending, evaluating, finished)
+            for status, models in existing_models.items():
+                for model in models:
+                    if model["name"] == model_data["model_id"] and model["revision"] == model_data["revision"]:
+                        error_msg = f"Model {model_data['model_id']} revision {model_data['revision']} is already in the system with status: {status}"
+                        logger.error(LogFormatter.error("Submission rejected", error_msg))
+                        raise ValueError(error_msg)
+
+            logger.info(LogFormatter.success("No existing submission found"))
+        except ValueError:
+            raise
+        except Exception as e:
+            logger.error(LogFormatter.error("Failed to check existing submissions", e))
+            raise
 
         # Validate model card
         valid, error, model_card = await self.validator.check_model_card(
@@ -434,7 +438,8 @@ class ModelService(HuggingFaceService):
             model_size, error = await self.validator.get_model_size(
                 model_info,
                 model_data["precision"],
-                model_data["base_model"]
+                model_data["base_model"],
+                revision=model_data["revision"]
             )
             if model_size is None:
                 logger.error(LogFormatter.error("Model size validation failed", error))
@@ -458,6 +463,11 @@ class ModelService(HuggingFaceService):
                 raise Exception(error)
             logger.info(LogFormatter.success("Chat template validation passed"))
 
+
+        architectures = model_info.config.get("architectures", "")
+        if architectures:
+            architectures = ";".join(architectures)
+
         # Create eval entry
         eval_entry = {
             "model": model_data["model_id"],
@@ -465,7 +475,7 @@ class ModelService(HuggingFaceService):
             "revision": model_info.sha,
             "precision": model_data["precision"],
             "params": model_size,
-            "architectures": …,
+            "architectures": architectures,
             "weight_type": model_data["weight_type"],
             "status": "PENDING",
             "submitted_time": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
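Taken together, the models.py changes reorder the submission flow: the model is first resolved on the Hugging Face Hub, model_data["revision"] is pinned to the resolved commit sha, and only then is the duplicate check run, now keyed on name and revision rather than name alone. A rough standalone sketch of that duplicate check follows; the function name, queue shape, and example values are illustrative and not the service's real API.

from typing import Any, Dict, List

def check_duplicate(existing: Dict[str, List[Dict[str, Any]]], model_id: str, revision: str) -> None:
    """Reject a submission only if the same model *and* revision is already queued.

    `existing` maps a status ("pending", "evaluating", "finished") to entries
    with "name" and "revision" keys -- an illustrative stand-in for get_models().
    """
    for status, models in existing.items():
        for model in models:
            if model["name"] == model_id and model["revision"] == revision:
                raise ValueError(
                    f"Model {model_id} revision {revision} is already in the system with status: {status}"
                )

# Example: resubmitting the same model at a new commit sha is accepted.
queue = {"pending": [{"name": "org/model", "revision": "abc123"}]}
check_duplicate(queue, "org/model", "def456")    # passes
# check_duplicate(queue, "org/model", "abc123")  # would raise ValueError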
backend/app/utils/model_validation.py
CHANGED
@@ -5,10 +5,12 @@ import re
 from typing import Tuple, Optional, Dict, Any
 import aiohttp
 from huggingface_hub import HfApi, ModelCard, hf_hub_download
+from huggingface_hub import hf_api
 from transformers import AutoConfig, AutoTokenizer
 from app.config.base import HF_TOKEN, API
 from app.utils.logging import LogFormatter
 
+
 logger = logging.getLogger(__name__)
 
 class ModelValidator:
@@ -54,78 +56,78 @@ class ModelValidator:
             logger.error(LogFormatter.error(error_msg, e))
             return False, str(e), None
 
-    async def get_safetensors_metadata(self, model_id: str, …) -> Optional[Dict]:
+    async def get_safetensors_metadata(self, model_id: str, is_adapter: bool = False, revision: str = "main") -> Optional[Dict]:
         """Get metadata from a safetensors file"""
         try:
-            …
+            if is_adapter:
+                metadata = await asyncio.to_thread(
+                    hf_api.parse_safetensors_file_metadata,
+                    model_id,
+                    "adapter_model.safetensors",
+                    token=self.token,
+                    revision=revision,
+                )
+            else:
+                metadata = await asyncio.to_thread(
+                    hf_api.get_safetensors_metadata,
+                    repo_id=model_id,
+                    token=self.token,
+                    revision=revision,
+                )
+            return metadata
+
         except Exception as e:
-            logger.…
+            logger.error(f"Failed to get safetensors metadata: {str(e)}")
             return None
-
+
     async def get_model_size(
         self,
         model_info: Any,
         precision: str,
-        base_model: str
+        base_model: str,
+        revision: str
     ) -> Tuple[Optional[float], Optional[str]]:
         """Get model size in billions of parameters"""
         try:
             logger.info(LogFormatter.info(f"Checking model size for {model_info.modelId}"))
-
+
             # Check if model is adapter
             is_adapter = any(s.rfilename == "adapter_config.json" for s in model_info.siblings if hasattr(s, 'rfilename'))
-
+
             # Try to get size from safetensors first
             model_size = None
-
+
             if is_adapter and base_model:
                 # For adapters, we need both adapter and base model sizes
-                adapter_meta = await self.get_safetensors_metadata(model_info.id, …)
-                base_meta = await self.get_safetensors_metadata(base_model)
-
+                adapter_meta = await self.get_safetensors_metadata(model_info.id, is_adapter=True, revision=revision)
+                base_meta = await self.get_safetensors_metadata(base_model, revision="main")
+
                 if adapter_meta and base_meta:
-                    adapter_size = sum(…)
-                    base_size = sum(…)
+                    adapter_size = sum(adapter_meta.parameter_count.values())
+                    base_size = sum(base_meta.parameter_count.values())
                     model_size = (adapter_size + base_size) / (2 * 1e9)  # Convert to billions, assuming float16
             else:
                 # For regular models, just get the model size
-                meta = await self.get_safetensors_metadata(model_info.id)
+                meta = await self.get_safetensors_metadata(model_info.id, revision=revision)
                 if meta:
-                    total_params = sum(…)
+                    total_params = sum(meta.parameter_count.values())
                     model_size = total_params / (2 * 1e9)  # Convert to billions, assuming float16
-
+
             if model_size is None:
-                # …
-                …
-                if size_match:
-                    size_str = size_match.group(1)
-                    model_size = float(size_str)
-                else:
-                    return None, "Could not determine model size from safetensors or model name"
-
+                # If model size could not be determined, return an error
+                return None, "Model size could not be determined"
+
             # Adjust size for GPTQ models
             size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.id.lower()) else 1
             model_size = round(size_factor * model_size, 3)
-
+
             logger.info(LogFormatter.success(f"Model size: {model_size}B parameters"))
             return model_size, None
-
+
         except Exception as e:
-            …
-            logger.error(LogFormatter.error(error_msg, e))
+            logger.error(LogFormatter.error(f"Error while determining model size: {e}"))
             return None, str(e)
 
     async def check_chat_template(
         self,
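A worked example of the new size calculation may help: the parameter counts from the safetensors metadata are summed and divided by 2 * 1e9 (the diff's comment calls this a float16 assumption), then scaled by 8 for GPTQ models and rounded to three decimals. The sketch below mirrors that arithmetic with made-up numbers; in the real code the parameter_count mapping comes from hf_api.get_safetensors_metadata or hf_api.parse_safetensors_file_metadata as shown in the diff.

# Hypothetical parameter_count mapping, shaped like the safetensors metadata
# the validator sums over; the numbers are invented for illustration.
parameter_count = {"F16": 13_000_000_000, "F32": 0}

total = sum(parameter_count.values())
model_size = total / (2 * 1e9)  # same arithmetic as the diff: "convert to billions, assuming float16"

precision = "GPTQ"
size_factor = 8 if precision == "GPTQ" else 1  # GPTQ models are scaled up by 8, as in the diff
model_size = round(size_factor * model_size, 3)

print(model_size)  # 52.0 for these made-up numbers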
backend/pyproject.toml
CHANGED
@@ -1,30 +1,54 @@
-[…
+[project]
 name = "llm-leaderboard-backend"
 version = "0.1.0"
 description = "Backend for the Open LLM Leaderboard"
-…
-
-[…
-…
-
-[…
-…
+requires-python = "==3.12.1"
+
+dependencies = [
+    "fastapi >=0.115.6",
+    "uvicorn >=0.34.0",
+    "numpy >=2.2.0",
+    "pandas >=2.2.3",
+    "datasets >=3.2.0",
+    "pyarrow >=18.1.0",
+    "python-multipart >=0.0.20",
+    "huggingface-hub >=0.27.0",
+    "transformers >=4.47.0",
+    "safetensors >=0.4.5",
+    "aiofiles >=24.1.0",
+    "fastapi-cache2 >=0.2.1",
+]
+
+[project.optional-dependencies]
+dev = [
+    "pytest >=8.3.4",
+    "black >=24.10.0",
+    "isort >=5.13.2",
+    "flake8 >=7.1.1",
+]
+
+[tool.ruff]
+line-length = 120
+target-version = "py312"
+include = ["*.py", "*.pyi", "**/pyproject.toml", "*.ipynb"]
+ignore=["I","EM","FBT","TRY003","S101","D101","D102","D103","D104","D105","G004","D107","FA102"]
+fixable=["ALL"]
+select=["ALL"]
+
+[tool.ruff.lint]
+select = ["E", "F"]
+fixable = ["ALL"]
+ignore = ["E501"] # line too long (black is taking care of this)
+
+[tool.isort]
+profile = "black"
+
+[tool.black]
+line-length = 119
 
 [build-system]
-requires = ["…
-build-backend = "…
+requires = ["hatchling>=1.0.0"]
+build-backend = "hatchling.build"
+
+[tool.hatch.build.targets.wheel]
+packages = ["backend"]
backend/uv.lock
ADDED
The diff for this file is too large to render. See raw diff.
docker-compose.yml
CHANGED
@@ -1,5 +1,3 @@
-version: '3.8'
-
 services:
   backend:
     build:
@@ -15,7 +13,7 @@ services:
       - ENVIRONMENT=${ENVIRONMENT:-development}
       - HF_TOKEN=${HF_TOKEN}
       - HF_HOME=${HF_HOME:-/.cache}
-    command: uvicorn app.asgi:app --host 0.0.0.0 --port 8000 --reload
+    command: uv run uvicorn app.asgi:app --host 0.0.0.0 --port 8000 --reload
 
   frontend:
     build:
frontend/src/pages/AddModelPage/components/ModelSubmissionForm/ModelSubmissionForm.js
CHANGED
@@ -113,7 +113,7 @@ const HELP_TEXTS = {
         Chat Template Support
       </Typography>
       <Typography variant="body2" sx={{ opacity: 0.9, lineHeight: 1.4 }}>
-        Activates automatically for chat models. …
+        Activates automatically for chat models. It uses the standardized Hugging
         Face chat template for consistent prompt formatting during evaluation.
         Required for models using RLHF, DPO, or instruction fine-tuning.
       </Typography>
@@ -137,7 +137,7 @@ function ModelSubmissionForm({ user, isAuthenticated }) {
     isChatModel: false,
     useChatTemplate: false,
     precision: "float16",
-    weightsType: "…",
+    weightsType: "Original",
     baseModel: "",
   });
   const [error, setError] = useState(null);
@@ -333,7 +333,7 @@ function ModelSubmissionForm({ user, isAuthenticated }) {
                 isChatModel: false,
                 useChatTemplate: false,
                 precision: "float16",
-                weightsType: "…",
+                weightsType: "Original",
                 baseModel: "",
               });
             }}
@@ -482,7 +482,7 @@ function ModelSubmissionForm({ user, isAuthenticated }) {
                     onChange={handleChange}
                   />
                 }
-                label="…"
+                label="Use Chat Template"
               />
               <InfoIconWithTooltip tooltip={HELP_TEXTS.chatTemplate} />
             </Stack>
@@ -536,13 +536,13 @@ function ModelSubmissionForm({ user, isAuthenticated }) {
             </FormControl>
           </Grid>
 
-          {formData.weightsType !== "…" && (
+          {formData.weightsType !== "Original" && (
             <Grid item xs={12}>
               <TextField
                 fullWidth
                 required={
-                  formData.weightsType === "…" ||
-                  formData.weightsType === "…"
+                  formData.weightsType === "Delta" ||
+                  formData.weightsType === "Adapter"
                 }
                 name="baseModel"
                 label="Base Model"