# Inference-API / src/config.yaml
# Author: AurelioAguirre — commit "Fixing dockerfile v2" (1eab622, 272 bytes)
---
# Configuration for the Inference-API service and its upstream LLM Server.

server:
  port: 8001    # Port the Inference-API listens on
  timeout: 60   # Request timeout for this server, in seconds

llm_server:
  # URL of your LLM Server.
  # NOTE(review): port 7680 is unusual — Gradio/HF Spaces apps default to
  # 7860; confirm this is not a digit transposition.
  base_url: "https://teamgenki-llmserver.hf.space:7680"
  timeout: 60   # Timeout for requests to LLM Server, in seconds

logging:
  level: "INFO"
  # Python logging format string (asctime/name/levelname/message).
  format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"