# Clean up any previous runs
rm -rf venv eval-queue/* eval-results/* __pycache__ src/__pycache__ src/*/__pycache__
# Create virtual environment
python3 -m venv venv
# Ensure we're using the virtual environment's Python and pip
PYTHON="./venv/bin/python3"
PIP="./venv/bin/pip"
# Install dependencies
$PYTHON -m pip install --upgrade pip
$PIP install -r requirements.txt
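# Optional: "pip check" verifies that the installed packages have compatible
# dependencies. An extra guard, assuming requirements.txt lists everything the app needs.
$PIP check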
# Create necessary directories
mkdir -p eval-queue eval-results
# Create sample data files with correct column names matching Tasks definitions
cat > eval-queue/test_model_eval_request_float16.json << EOL
{
    "model": "test/model",
    "precision": "float16",
    "model_type": "pretrained 🟢",
    "weight_type": "Safetensors",
    "license": "MIT",
    "likes": 100,
    "params": 7,
    "submitted_time": "2024-01-01",
    "status": "FINISHED"
}
EOL
cat > eval-results/results_1.json << EOL
{
    "config": {
        "model_name": "test/model",
        "model_dtype": "float16",
        "model_sha": "main"
    },
    "results": {
        "secure_coding": {
            "security_score": 0.85
        },
        "safetensors_check": {
            "compliant": true
        }
    }
}
EOL
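# Optional: validate the sample files with Python's built-in json.tool module, so a
# typo in the heredocs above is caught here rather than inside the app at startup.
$PYTHON -m json.tool eval-queue/test_model_eval_request_float16.json > /dev/null
$PYTHON -m json.tool eval-results/results_1.json > /dev/null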
# Set environment variables
export HF_HOME="."
export HF_TOKEN="dummy-token" # The app will work locally without a real token
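# Optional guard (assumes app.py sits in the repository root next to this script):
# fail with a clear message instead of a Python traceback if the entry point is missing.
if [ ! -f app.py ]; then
    echo "app.py not found in the current directory" >&2
    exit 1
fi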
# Run the app
echo "Starting the app..."
$PYTHON app.py