LlamaEdge / run.sh
#!/bin/bash
# Load the WasmEdge environment (adds wasmedge to PATH).
source /home/user/.wasmedge/env

# Positional arguments:
#   $1 - chat model file name
#   $2 - embedding model file name (optional)
#   $3 - prompt template name
#   $4 - chat context size (default: 8192)
#   $5 - embedding context size (default: 0 = chat model only)
chat_file_name=$1
embedding_file_name=$2
prompt_template=$3
chat_ctx_size=$4
embedding_ctx_size=$5

if [ -z "$chat_ctx_size" ]; then
    chat_ctx_size=8192
fi
if [ -z "$embedding_ctx_size" ]; then
    embedding_ctx_size=0
fi
if [ "$embedding_ctx_size" -eq "0" ]; then
wasmedge --dir .:. --env LLAMA_LOG=trace --nn-preload default:GGML:AUTO:/home/user/models/$chat_file_name.gguf llama-api-server.wasm --prompt-template $prompt_template --ctx-size $chat_ctx_size --model-name $chat_file_name ##--socket-addr 0.0.0.0:8080
else
wasmedge --dir .:. --env LLAMA_LOG=trace --nn-preload default:GGML:AUTO:/home/user/models/$chat_file_name --nn-preload embedding:GGML:AUTO:/home/user/models/$embedding_file_name llama-api-server.wasm --prompt-template $prompt_template,embedding --ctx-size $chat_ctx_size,$embedding_ctx_size --model-name $chat_file_name,$embedding_file_name --socket-addr 0.0.0.0:8080
fi
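
A minimal usage sketch. The model file and prompt template names below are hypothetical placeholders (adjust them to whatever GGUF files actually sit in /home/user/models), and the curl call assumes llama-api-server.wasm is listening on its usual default of 0.0.0.0:8080 and exposing the OpenAI-compatible /v1/chat/completions endpoint.

# Chat model only: the script appends .gguf, so pass the bare name.
./run.sh Meta-Llama-3-8B-Instruct-Q5_K_M "" llama-3-chat 4096

# Chat plus embedding model: pass the full file names including .gguf.
./run.sh Meta-Llama-3-8B-Instruct-Q5_K_M.gguf nomic-embed-text-v1.5.f16.gguf llama-3-chat 4096 512

# Query the running server (model name should match the --model-name passed above).
curl -X POST http://localhost:8080/v1/chat/completions \
    -H 'Content-Type: application/json' \
    -d '{"model": "Meta-Llama-3-8B-Instruct-Q5_K_M", "messages": [{"role": "user", "content": "Hello"}]}'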