#!/bin/bash
# Bootstrap a Python virtual environment for the local leaderboard app.
# First run: create venv and install pinned requirements.
# Subsequent runs: reuse the existing venv.
if [ ! -d "venv" ]; then
  python3 -m venv venv || { echo "error: failed to create venv" >&2; exit 1; }
  ./venv/bin/python -m pip install --upgrade pip || { echo "error: pip upgrade failed" >&2; exit 1; }
  ./venv/bin/pip install -r requirements.txt || { echo "error: requirements install failed" >&2; exit 1; }
fi
# Activation is needed on every run, so do it once outside the branch.
# shellcheck disable=SC1091
source ./venv/bin/activate
# Wipe any leftover eval artifacts and Python bytecode caches from prior runs.
for stale in eval-queue/* eval-results/* __pycache__ src/__pycache__ src/*/__pycache__; do
  rm -rf -- "$stale"
done
# Recreate the directory layout the sample data files are written into.
mkdir -p -- "eval-queue/test" "eval-results"
# Create a sample eval-request file; field names must match the Tasks
# definitions the app reads (precision/model_type/weight_type/etc.).
# Guard the target directory so this step does not silently fail if the
# earlier mkdir was skipped or removed.
mkdir -p "eval-queue/test"
# Quoted 'EOL' delimiter: the JSON payload is literal — prevents any
# accidental $-expansion or command substitution inside the here-doc.
cat > "eval-queue/test/model_eval_request_float16.json" << 'EOL'
{
"model": "test/model",
"precision": "float16",
"model_type": "pretrained 🟢",
"weight_type": "Safetensors",
"license": "MIT",
"likes": 100,
"params": 7,
"submitted_time": "2024-01-01",
"status": "FINISHED"
}
EOL
# Create a matching results file containing every benchmark the app expects
# (secure_coding score plus the safetensors compliance flag).
# Guard the target directory so this step is standalone-safe.
mkdir -p "eval-results"
# Quoted 'EOL' delimiter: JSON is written verbatim, no shell expansion.
cat > "eval-results/results_20240101_000000.json" << 'EOL'
{
"config": {
"model_name": "test/model",
"model_dtype": "float16",
"model_sha": "main"
},
"results": {
"secure_coding": {
"security_score": 0.85
},
"safetensors_check": {
"compliant": true
}
}
}
EOL
# Show what was just created, then launch the app with the repo root on
# PYTHONPATH so local packages under src/ resolve.
echo "Current directory structure:"
if command -v tree >/dev/null 2>&1; then
  tree eval-queue eval-results
else
  # 'tree' is not installed on many systems; fall back to a plain listing.
  find eval-queue eval-results -print
fi
# printf instead of 'echo -e': echo's -e flag is not portable across shells.
printf '\nStarting the app...\n'
PYTHONPATH=. ./venv/bin/python app_local.py