#!/bin/bash
# Render the model card to stdout: YAML front matter assembled from
# manual-metadata.yaml and ../model-cache.json, followed by body.md.
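# Example invocation (script name and output path are assumptions, not taken
# from this repo):
#   ./update-model-card.sh > ../README.md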
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
pushd "$SCRIPT_DIR" > /dev/null
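# The unquoted heredoc below expands its command substitutions:
#   - jq drops cache entries whose keys mention 32b/70b/72b/8x22b/405b,
#     flattens the remaining value arrays into one sorted list, and wraps
#     it as { models: [...] }
#   - node converts that JSON to YAML (requires the 'yaml' npm package)
#   - a few extra models are then appended by hand before the closing '---'
# Assumed shape of ../model-cache.json (illustrative, not from this file):
#   { "some-70b-key": ["org/Model-70B-Instruct"], "some-8b-key": [ ... ] }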
cat << MARKDOWN
---
$(cat manual-metadata.yaml)
$(cat ../model-cache.json \
| jq '[to_entries[] | select(.key | test("32b|70b|72b|8x22b|405b") | not) | .value[]] | sort | { models: . }' \
| node -e "
const fs = require('fs');
const YAML = require('yaml');
const input = fs.readFileSync(0, 'utf-8'); // Read from stdin
const json = JSON.parse(input);
const yamlStr = YAML.stringify(json);
console.log(yamlStr);"
)
- Qwen/Qwen2.5-72B
- nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
- Qwen/QwQ-32B-Preview
- meta-llama/Llama-3.3-70B-Instruct
---
$(cat body.md)
MARKDOWN
popd > /dev/null