# --- Hugging Face blob-viewer metadata (scrape artifact, not part of this config) ---
# michaelthwan's picture
# import project
# 6f61bb9
# raw
# history blame
# 258 Bytes
# Gradio web-UI settings.
gradio:
  # NOTE(review): presumably the max number of concurrent requests the UI
  # serves (Gradio queue/worker limit) — confirm against the launcher code.
  concurrent: 20
  port: 7860  # port the Gradio app listens on (7860 is Gradio's default)

# OpenAI API settings.
openai:
  # Chat Completions endpoint the app posts to.
  api_url: "https://api.openai.com/v1/chat/completions"
  # Token budget per content_main (e.g. a transcript). If exceeded, the
  # content is split into chunks and processed iteratively.
  content_token: 3200
  timeout_sec: 25  # presumably the per-request timeout in seconds — verify in the HTTP client
  max_retry: 2  # presumably retry attempts after a failed request — verify in the caller
  # OpenAI API key. Kept empty in version control; supply the real key at
  # deploy time (quoted so tooling always treats it as a string).
  api_key: ""