Duplicated from limcheekin/CodeLlama-13B-oasst-sft-v10-GGUF
import subprocess

# Start the llama-cpp-python server as a background process, serving the
# GGUF model on all network interfaces at port 2600.
command = ["python3", "-m", "llama_cpp.server",
           "--model", "model/gguf-model.bin",
           "--host", "0.0.0.0", "--port", "2600"]
subprocess.Popen(command)
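
Once the server process is running, llama_cpp.server exposes an OpenAI-compatible HTTP API. The sketch below is a minimal client example, assuming the default /v1/completions route and that the server is reachable on localhost at the port configured above; the prompt text and sampling parameters are placeholders.

import requests

# Minimal client sketch: post a completion request to the OpenAI-compatible
# endpoint served by llama_cpp.server. Prompt and parameters are illustrative.
response = requests.post(
    "http://localhost:2600/v1/completions",
    json={
        "prompt": "Write a Python function that reverses a string.",
        "max_tokens": 128,
        "temperature": 0.2,
    },
    timeout=120,
)
response.raise_for_status()
print(response.json()["choices"][0]["text"])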