# LibreChat configuration — librechat.yaml
# Maintainer: Surbao — "Update librechat.yaml" (commit 7e42590, verified)
version: 1.1.6
cache: true
fileStrategy: "firebase"
interface:
privacyPolicy:
externalUrl: 'https://librechat.ai/privacy-policy'
openNewTab: true
termsOfService:
externalUrl: 'https://librechat.ai/tos'
openNewTab: true
registration:
socialLogins: ["discord", "facebook", "github", "google", "openid"]
endpoints:
custom:
# Anyscale
# Model list: https://console.anyscale.com/v2/playground
- name: "Anyscale"
apiKey: "user_provided"
baseURL: "https://api.endpoints.anyscale.com/v1"
models:
default: [
"google/gemma-7b-it",
"llava-hf/llava-v1.6-mistral-7b-hf",
"meta-llama/Meta-Llama-3-70B-Instruct",
"meta-llama/Meta-Llama-3-8B-Instruct",
"mistralai/Mistral-7B-Instruct-v0.1",
"mistralai/Mixtral-8x22B-Instruct-v0.1",
"mistralai/Mixtral-8x7B-Instruct-v0.1",
"mlabonne/NeuralHermes-2.5-Mistral-7B",
]
fetch: false
titleConvo: true
titleModel: "meta-llama/Meta-Llama-3-8B-Instruct"
summarize: false
summaryModel: "meta-llama/Meta-Llama-3-8B-Instruct"
forcePrompt: false
modelDisplayLabel: "Anyscale"
# APIpie
# https://apipie.ai/dashboard/
# Script to fetch models: https://github.com/LibreChat-AI/librechat-config-yaml/blob/main/scripts/apipie.py
- name: "APIpie"
apiKey: "user_provided"
baseURL: "https://apipie.ai/v1/"
models:
default: [
"GPT-JT-Moderation-6B",
"Hermes-2-Theta-Llama-3-70B",
"Koala-13B",
"Koala-7B",
"LLaMA-2-7B-32K",
"Llama-2-13b-chat-hf",
"Llama-2-13b-hf",
"Llama-2-70b-chat-hf",
"Llama-2-70b-hf",
"Llama-2-7b-chat-hf",
"Llama-2-7b-hf",
"Llama-3-70B-Instruct-Gradient-1048k",
"Llama-3-70b-chat-hf",
"Llama-3-70b-hf",
"Llama-3-8b-chat-hf",
"Llama-3-8b-hf",
"Llama-Rank-V1",
"Meta-Llama-3-70B",
"Meta-Llama-3-70B-Instruct",
"Meta-Llama-3-70B-Instruct-Lite",
"Meta-Llama-3-70B-Instruct-Turbo",
"Meta-Llama-3-8B-Instruct",
"Meta-Llama-3-8B-Instruct-Lite",
"Meta-Llama-3-8B-Instruct-Turbo",
"Meta-Llama-3.1-405B-Instruct-Lite-Pro",
"Meta-Llama-3.1-405B-Instruct-Turbo",
"Meta-Llama-3.1-70B-Instruct-Reference",
"Meta-Llama-3.1-70B-Instruct-Turbo",
"Meta-Llama-3.1-70B-Reference",
"Meta-Llama-3.1-8B-Instruct-Turbo",
"Meta-Llama-3.1-8B-Reference",
"Mistral-7B-Instruct-v0.1",
"Mistral-7B-Instruct-v0.2",
"Mistral-7B-Instruct-v0.3",
"Mistral-7B-OpenOrca",
"Mistral-7B-v0.1",
"Mixtral-8x22B",
"Mixtral-8x22B-Instruct-v0.1",
"Mixtral-8x22B-v0.1",
"Mixtral-8x7B-Instruct-v0.1",
"MythoMax-L2-13b",
"MythoMax-L2-13b-Lite",
"NexusRaven-V2-13B",
"Nous-Capybara-7B-V1p9",
"Nous-Hermes-13b",
"Nous-Hermes-2-Mistral-7B-DPO",
"Nous-Hermes-2-Mixtral-8x7B-DPO",
"Nous-Hermes-2-Mixtral-8x7B-SFT",
"Nous-Hermes-Llama2-13b",
"Nous-Hermes-Llama2-70b",
"Nous-Hermes-llama-2-7b",
"OLMo-7B",
"OLMo-7B-Instruct",
"OpenHermes-2-Mistral-7B",
"OpenHermes-2p5-Mistral-7B",
"Platypus2-70B-instruct",
"Qwen1.5-0.5B",
"Qwen1.5-0.5B-Chat",
"Qwen1.5-1.8B",
"Qwen1.5-1.8B-Chat",
"Qwen1.5-110B-Chat",
"Qwen1.5-14B",
"Qwen1.5-14B-Chat",
"Qwen1.5-32B",
"Qwen1.5-32B-Chat",
"Qwen1.5-4B",
"Qwen1.5-4B-Chat",
"Qwen1.5-72B",
"Qwen1.5-72B-Chat",
"Qwen1.5-7B",
"Qwen1.5-7B-Chat",
"Qwen2-1.5B",
"Qwen2-1.5B-Instruct",
"Qwen2-72B",
"Qwen2-72B-Instruct",
"Qwen2-7B",
"Qwen2-7B-Instruct",
"ReMM-SLERP-L2-13B",
"RedPajama-INCITE-7B-Base",
"RedPajama-INCITE-7B-Chat",
"RedPajama-INCITE-Chat-3B-v1",
"SOLAR-10.7B-Instruct-v1.0",
"SOLAR-10.7B-Instruct-v1.0-int4",
"Snorkel-Mistral-PairRM-DPO",
"StripedHyena-Hessian-7B",
"Toppy-M-7B",
"WizardLM-13B-V1.2",
"WizardLM-2-7B",
"WizardLM-2-8x22B",
"WizardLM-70B-V1.0",
"Yi-34B",
"Yi-34B-Chat",
"Yi-6B",
"airoboros-70b",
"airoboros-l2-70b",
"alpaca-7b",
"babbage-002",
"chat-bison",
"chatgpt-4o-latest",
"chatx_cheap_128k",
"chatx_cheap_32k",
"chatx_cheap_4k",
"chatx_cheap_64k",
"chatx_cheap_8k",
"chatx_mids_4k",
"chatx_premium_128k",
"chatx_premium_32k",
"chatx_premium_4k",
"chatx_premium_8k",
"chronos-hermes-13b",
"chronos-hermes-13b-v2",
"claude-1",
"claude-1.2",
"claude-2",
"claude-2.0",
"claude-2.1",
"claude-3-5-haiku",
"claude-3-5-haiku-20241022",
"claude-3-5-sonnet",
"claude-3-haiku",
"claude-3-opus",
"claude-3-sonnet",
"claude-instant-1",
"claude-instant-1.0",
"claude-instant-1.1",
"claude-instant-1.2",
"claude2_4k",
"command",
"command-light",
"command-light-nightly",
"command-light-text-v14",
"command-r",
"command-r-03-2024",
"command-r-08-2024",
"command-r-plus",
"command-r-plus-04-2024",
"command-r-plus-08-2024",
"command-r-plus-v1",
"command-r-v1",
"command-text-v14",
"davinci-002",
"dbrx-instruct",
"deepseek-chat",
"deepseek-llm-67b-chat",
"dolphin-2.5-mixtral-8x7b",
"dolphin-2.6-mixtral-8x7b",
"dolphin-mixtral-8x22b",
"dolphin-mixtral-8x7b",
"eva-qwen-2.5-14b",
"eva-qwen-2.5-32b",
"evo-1-131k-base",
"evo-1-8k-base",
"fimbulvetr-11b-v2",
"gemini-1.5-flash",
"gemini-1.5-pro",
"gemini-exp-1114",
"gemini-flash-1.5",
"gemini-flash-1.5-8b",
"gemini-flash-1.5-8b-exp",
"gemini-flash-1.5-exp",
"gemini-flash_8k",
"gemini-pro",
"gemini-pro-1.5",
"gemini-pro-1.5-exp",
"gemini-pro-vision",
"gemini-pro_8k",
"gemma-1.1-7b-it",
"gemma-2-27b-it",
"gemma-2-9b-it",
"gemma-2b",
"gemma-2b-it",
"gemma-7b",
"gemma-7b-it",
"general_32k",
"goliath-120b",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-instruct-0914",
"gpt-3.5_4k",
"gpt-4",
"gpt-4-0125-preview",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-1106-vision-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
"gpt-4-vision-preview",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-audio-preview",
"gpt-4o-audio-preview-2024-10-01",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt4o-mini_16k",
"gpt4o_16k",
"grok-2",
"grok-beta",
"grok-vision-beta",
"guanaco-13b",
"guanaco-33b",
"guanaco-65b",
"guanaco-7b",
"hermes-2-pro-llama-3-8b",
"hermes-2-theta-llama-3-8b",
"hermes-3-llama-3.1-405b",
"hermes-3-llama-3.1-70b",
"inflection-3-pi",
"inflection-3-productivity",
"j2-grande-instruct",
"j2-jumbo-instruct",
"j2-mid",
"j2-mid-v1",
"j2-ultra",
"j2-ultra-v1",
"jamba-1-5-large",
"jamba-1-5-large-v1",
"jamba-1-5-mini",
"jamba-1-5-mini-v1",
"jamba-instruct",
"jamba-instruct-v1",
"l3-euryale-70b",
"l3-lunaris-8b",
"l3.1-euryale-70b",
"large-latest",
"lfm-40b",
"llama-13b",
"llama-2-13b",
"llama-2-13b-chat",
"llama-2-70b-chat",
"llama-2-7b",
"llama-2-7b-chat",
"llama-3-70b-instruct",
"llama-3-8b-instruct",
"llama-3-lumimaid-70b",
"llama-3-lumimaid-8b",
"llama-3-sonar-large-32k-chat",
"llama-3-sonar-large-32k-online",
"llama-3-sonar-small-32k-chat",
"llama-3.1-405b",
"llama-3.1-405b-instruct",
"llama-3.1-70b-instruct",
"llama-3.1-8b-instruct",
"llama-3.1-lumimaid-70b",
"llama-3.1-lumimaid-8b",
"llama-3.1-nemotron-70b-instruct",
"llama-3.1-sonar-huge-128k-online",
"llama-3.1-sonar-large-128k-chat",
"llama-3.1-sonar-large-128k-online",
"llama-3.1-sonar-small-128k-chat",
"llama-3.1-sonar-small-128k-online",
"llama-3.2-11b-vision-instruct",
"llama-3.2-1b-instruct",
"llama-3.2-3b-instruct",
"llama-3.2-90b-vision-instruct",
"llama-30b",
"llama-65b",
"llama-7b",
"llama-guard-2-8b",
"llama2-13b-chat-v1",
"llama2-70b-chat-v1",
"llama3-1_128k",
"llama3-2_128k",
"llama3-70b-instruct-v1",
"llama3-8b-instruct-v1",
"llemma_7b",
"lzlv-70b-fp16-hf",
"magnum-72b",
"magnum-v2-72b",
"magnum-v4-72b",
"medium",
"meta-llama-3.1-8b-instruct",
"midnight-rose-70b",
"ministral-3b",
"ministral-8b",
"mistral-7b-instruct",
"mistral-7b-instruct-v0",
"mistral-7b-instruct-v0.1",
"mistral-7b-instruct-v0.2",
"mistral-7b-instruct-v0.3",
"mistral-large",
"mistral-medium",
"mistral-nemo",
"mistral-small",
"mistral-small-2402-v1",
"mistral-tiny",
"mistral_32k",
"mixtral-8x22b-instruct",
"mixtral-8x7b",
"mixtral-8x7b-instruct",
"mixtral-8x7b-instruct-v0",
"mixtral_32k",
"mn-celeste-12b",
"mn-inferor-12b",
"mn-starcannon-12b",
"mythalion-13b",
"mythomax-l2-13b",
"mythomist-7b",
"noromaid-20b",
"nous-hermes-2-mixtral-8x7b-dpo",
"nous-hermes-2-vision-7b",
"nous-hermes-llama2-13b",
"o1-mini",
"o1-mini-2024-09-12",
"o1-preview",
"o1-preview-2024-09-12",
"olympus-premier-v1",
"online-llama_128k",
"openchat-3.5-1210",
"openchat-7b",
"openchat_3.5",
"openhermes-2.5-mistral-7b",
"palm-2-chat-bison",
"palm-2-chat-bison-32k",
"phi-2",
"phi-3-medium-128k-instruct",
"phi-3-mini-128k-instruct",
"phi-3.5-mini-128k-instruct",
"pixtral-12b",
"pixtral-large-2411",
"qwen-110b-chat",
"qwen-2-72b-instruct",
"qwen-2-7b-instruct",
"qwen-2-vl-72b-instruct",
"qwen-2-vl-7b-instruct",
"qwen-2.5-72b-instruct",
"qwen-2.5-7b-instruct",
"qwen-72b-chat",
"qwen1-5_32k",
"qwen2_32k",
"remm-slerp-l2-13b",
"rocinante-12b",
"snowflake-arctic-instruct",
"sorcererlm-8x22b",
"titan-text-express-v1",
"titan-text-lite-v1",
"titan-text-premier-v1",
"titan-tg1-large",
"titan_32k",
"titan_4k",
"toppy-m-7b",
"unslopnemo-12b",
"vicuna-13b-v1.3",
"vicuna-13b-v1.5",
"vicuna-13b-v1.5-16k",
"vicuna-7b-v1.3",
"vicuna-7b-v1.5",
"weaver",
"wizardlm-2-7b",
"wizardlm-2-8x22b",
"xwin-lm-70b",
"yi-large",
"yi-vision",
"zephyr-7b-beta",
"zephyr-orpo-141b-A35b-v0.1"
]
fetch: false
titleConvo: true
titleModel: "claude-3-haiku"
summarize: false
summaryModel: "claude-3-haiku"
modelDisplayLabel: "APIpie"
# cohere
# Model list: https://dashboard.cohere.com/playground/chat
- name: "cohere"
apiKey: "user_provided"
baseURL: "https://api.cohere.ai/v1"
models:
default: [
"c4ai-aya-23-35b",
"c4ai-aya-23-8b",
"command",
"command-light",
"command-light-nightly",
"command-nightly",
"command-r",
"command-r-plus",
]
fetch: false
modelDisplayLabel: "cohere"
titleModel: "command"
dropParams: ["stop", "user", "frequency_penalty", "presence_penalty", "temperature", "top_p"]
# DEEPNIGHT
# https://github.com/brahmai-research/aiforcause
# Model list: https://aiforcause.deepnight.tech/models
- name: "DEEPNIGHT"
apiKey: "sk-free1234"
baseURL: "https://aiforcause.deepnight.tech/openai/"
models:
default: [
"gpt-35-turbo",
"gpt-35-turbo-16k",
"gpt-4-turbo"
]
fetch: false
titleConvo: true
titleModel: "gpt-35-turbo"
summarize: false
summaryModel: "gpt-35-turbo"
forcePrompt: false
modelDisplayLabel: "DEEPNIGHT"
addParams:
stream: true
iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/DEEPNIGHT.png"
# deepseek
# https://platform.deepseek.com/api_keys
# Model list: https://platform.deepseek.com/api-docs/pricing
- name: "deepseek"
apiKey: "user_provided"
baseURL: "https://api.deepseek.com"
models:
default: [
"deepseek-chat",
"deepseek-coder"
]
fetch: false
titleConvo: true
titleModel: "deepseek-chat"
summarize: false
summaryModel: "deepseek-chat"
forcePrompt: false
modelDisplayLabel: "DeepSeek"
# Fireworks.ai
# Models: https://fireworks.ai/models?show=Serverless
- name: "Fireworks"
apiKey: "user_provided"
baseURL: "https://api.fireworks.ai/inference/v1"
models:
default: [
"accounts/fireworks/models/devashisht-test-v2",
"accounts/fireworks/models/dt-fc-rc-v1",
"accounts/fireworks/models/firefunction-v1",
"accounts/fireworks/models/firefunction-v2",
"accounts/fireworks/models/firellava-13b",
"accounts/devashisht-72fdad/models/function-calling-v11",
"accounts/fireworks/models/fw-function-call-34b-v0",
"accounts/stability/models/japanese-stablelm-instruct-beta-70b",
"accounts/stability/models/japanese-stablelm-instruct-gamma-7b",
"accounts/fireworks/models/japanese-stable-vlm",
"accounts/fireworks/models/gemma2-9b-it",
"accounts/fireworks/models/llama-v3p1-405b-instruct",
"accounts/fireworks/models/llama-v3p1-70b-instruct",
"accounts/fireworks/models/llama-v3p1-8b-instruct",
"accounts/fireworks/models/llama-v3-70b-instruct",
"accounts/fireworks/models/llama-v3-70b-instruct-hf",
"accounts/fireworks/models/llama-v3-8b-hf",
"accounts/fireworks/models/llama-v3-8b-instruct",
"accounts/fireworks/models/llama-v3-8b-instruct-hf",
"accounts/fireworks/models/llama-v2-13b-chat",
"accounts/fireworks/models/llama-v2-13b-code-instruct",
"accounts/fireworks/models/llama-v2-34b-code-instruct",
"accounts/fireworks/models/llama-v2-70b-chat",
"accounts/fireworks/models/llama-v2-70b-code-instruct",
"accounts/fireworks/models/llama-v2-7b-chat",
"accounts/fireworks/models/deepseek-coder-v2-instruct",
"accounts/fireworks/models/deepseek-coder-v2-lite-instruct",
"accounts/fireworks/models/llava-v15-13b-fireworks",
"accounts/fireworks/models/mistral-7b-instruct-4k",
"accounts/dev-e24710/models/mistral-spellbound-format",
"accounts/fireworks/models/mixtral-8x22b-instruct",
"accounts/fireworks/models/mixtral-8x7b-instruct",
"accounts/fireworks/models/mixtral-8x7b-instruct-hf",
"accounts/fireworks/models/new-mixtral-chat",
"accounts/fireworks/models/qwen-14b-chat",
"accounts/fireworks/models/qwen-1-8b-chat",
"accounts/fireworks/models/qwen-72b-chat",
"accounts/stability/models/stablelm-zephyr-3b",
"accounts/fireworks/models/yi-34b-200k-capybara",
]
fetch: false
titleConvo: true
titleModel: "accounts/fireworks/models/llama-v2-7b-chat"
summarize: false
summaryModel: "accounts/fireworks/models/llama-v2-7b-chat"
forcePrompt: false
modelDisplayLabel: "Fireworks"
dropParams: ["user"]
# GitHub
- name: "Github Models"
iconURL: "https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png"
apiKey: "user_provided"
baseURL: "https://models.inference.ai.azure.com"
models:
default: ["gpt-4o","Phi-3.5-MoE-instruct","Phi-3.5-mini-instruct","Phi-3.5-vision-instruct"]
fetch: false
titleConvo: true
titleModel: "gpt-4o-mini"
# groq
# Model list: https://console.groq.com/settings/limits
- name: "groq"
apiKey: "user_provided"
baseURL: "https://api.groq.com/openai/v1/"
models:
default: [
"llama-3.1-405b-reasoning",
"llama-3.1-70b-versatile",
"llama-3.1-8b-instant",
"llama3-groq-70b-8192-tool-use-preview",
"llama3-groq-8b-8192-tool-use-preview",
"llama3-70b-8192",
"llama3-8b-8192",
"mixtral-8x7b-32768",
"gemma-7b-it",
"gemma2-9b-it"
]
fetch: false
titleConvo: true
titleModel: "mixtral-8x7b-32768"
modelDisplayLabel: "groq"
# HuggingFace
# https://huggingface.co/settings/tokens
- name: 'HuggingFace'
apiKey: "user_provided"
baseURL: 'https://api-inference.huggingface.co/v1'
models:
default: [
"gemma2-9b-it",
"gemma-7b-it",
"llama-3.1-8b-instant",
"llama3-groq-70b-8192-tool-use-preview",
"llama3-groq-8b-8192-tool-use-preview",
"llama-3.1-70b-versatile",
"llama-3.1-70b-specdec",
"llama-3.2-1b-preview",
"llama-3.2-3b-preview",
"llama-3.2-11b-vision-preview",
"llama-3.2-90b-vision-preview",
"llama3-70b-8192",
"llama3-8b-8192",
"mixtral-8x7b-32768",
]
fetch: true
titleConvo: true
titleModel: "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
dropParams: ["top_p"]
# Hyperbolic
# https://app.hyperbolic.xyz/models
- name: 'Hyperbolic'
apiKey: 'user_provided'
baseURL: 'https://api.hyperbolic.xyz/v1/'
models:
default: [
"deepseek-ai/DeepSeek-V2.5",
"meta-llama/Llama-3.2-3B-Instruct",
"meta-llama/Meta-Llama-3-70B-Instruct",
"meta-llama/Meta-Llama-3.1-405B",
"meta-llama/Meta-Llama-3.1-405B-FP8",
"meta-llama/Meta-Llama-3.1-405B-Instruct",
"meta-llama/Meta-Llama-3.1-70B-Instruct",
"meta-llama/Meta-Llama-3.1-8B-Instruct",
"NousResearch/Hermes-3-Llama-3.1-70B",
"Qwen/Qwen2.5-Coder-32B-Instruct",
"Qwen/Qwen2.5-72B-Instruct",
]
fetch: false
titleConvo: true
titleModel: "meta-llama/Meta-Llama-3.1-8B-Instruct"
modelDisplayLabel: "Hyperbolic"
iconURL: "https://pbs.twimg.com/profile_images/1775708849707819008/1RRWsmmg_400x400.jpg"
# Mistral AI API
# Model list: https://docs.mistral.ai/getting-started/models/
- name: "Mistral"
apiKey: "user_provided"
baseURL: "https://api.mistral.ai/v1"
models:
default: [
"mistral-large-latest",
"pixtral-large-latest",
"ministral-3b-latest",
"ministral-8b-latest",
"mistral-small-latest",
"codestral-latest",
"pixtral-12b-2409",
"open-mistral-nemo",
"open-codestral-mamba",
"open-mistral-7b",
"open-mixtral-8x7b",
"open-mixtral-8x22b"
]
fetch: false
titleConvo: true
titleMethod: "completion"
titleModel: "mistral-tiny"
summarize: false
summaryModel: "mistral-tiny"
forcePrompt: false
modelDisplayLabel: "Mistral"
dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]
# NVIDIA
# https://build.nvidia.com/explore/discover
- name: "Nvidia"
apiKey: "user_provided"
baseURL: "https://integrate.api.nvidia.com/v1/"
models:
default: [
"nvidia/llama-3.1-nemotron-51b-instruct",
"nvidia/llama-3.1-nemotron-70b-instruct",
"nvidia/nemotron-mini-4b-instruct",
"nvidia/nemotron-4-340b-instruct",
]
fetch: false
titleConvo: true
titleModel: "nvidia/nemotron-mini-4b-instruct"
modelDisplayLabel: "Nvidia"
iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/refs/heads/main/icons/nvidia.png"
# OpenRouter.ai
# Model list: https://openrouter.ai/models
# Script to fetch models: https://github.com/LibreChat-AI/librechat-config-yaml/blob/main/scripts/openrouter.py
- name: "OpenRouter"
apiKey: "user_provided"
baseURL: "https://openrouter.ai/api/v1"
models:
default: [
"openrouter/auto",
"---FREE---",
"google/gemma-2-9b-it:free",
"gryphe/mythomax-l2-13b:free",
"huggingfaceh4/zephyr-7b-beta:free",
"liquid/lfm-40b:free",
"meta-llama/llama-3-8b-instruct:free",
"meta-llama/llama-3.1-405b-instruct:free",
"meta-llama/llama-3.1-70b-instruct:free",
"meta-llama/llama-3.1-8b-instruct:free",
"meta-llama/llama-3.2-11b-vision-instruct:free",
"meta-llama/llama-3.2-1b-instruct:free",
"meta-llama/llama-3.2-3b-instruct:free",
"meta-llama/llama-3.2-90b-vision-instruct:free",
"microsoft/phi-3-medium-128k-instruct:free",
"microsoft/phi-3-mini-128k-instruct:free",
"mistralai/mistral-7b-instruct:free",
"nousresearch/hermes-3-llama-3.1-405b:free",
"openchat/openchat-7b:free",
"qwen/qwen-2-7b-instruct:free",
"undi95/toppy-m-7b:free",
"---NITRO---",
"gryphe/mythomax-l2-13b:nitro",
"meta-llama/llama-3-70b-instruct:nitro",
"meta-llama/llama-3-8b-instruct:nitro",
"meta-llama/llama-3.1-405b-instruct:nitro",
"meta-llama/llama-3.1-70b-instruct:nitro",
"mistralai/mistral-7b-instruct:nitro",
"mistralai/mixtral-8x7b-instruct:nitro",
"undi95/toppy-m-7b:nitro",
"---BETA---",
"anthropic/claude-2.0:beta",
"anthropic/claude-2.1:beta",
"anthropic/claude-2:beta",
"anthropic/claude-3-5-haiku-20241022:beta",
"anthropic/claude-3-5-haiku:beta",
"anthropic/claude-3-haiku:beta",
"anthropic/claude-3-opus:beta",
"anthropic/claude-3-sonnet:beta",
"anthropic/claude-3.5-sonnet-20240620:beta",
"anthropic/claude-3.5-sonnet:beta",
"---EXTENDED---",
"gryphe/mythomax-l2-13b:extended",
"meta-llama/llama-3-8b-instruct:extended",
"neversleep/llama-3-lumimaid-8b:extended",
"openai/gpt-4o:extended",
"undi95/remm-slerp-l2-13b:extended",
"---AI21---",
"ai21/jamba-1-5-large",
"ai21/jamba-1-5-mini",
"ai21/jamba-instruct",
"---ANTHROPIC---",
"anthropic/claude-2",
"anthropic/claude-2.0",
"anthropic/claude-2.1",
"anthropic/claude-3-5-haiku",
"anthropic/claude-3-5-haiku-20241022",
"anthropic/claude-3-haiku",
"anthropic/claude-3-opus",
"anthropic/claude-3-sonnet",
"anthropic/claude-3.5-sonnet",
"anthropic/claude-3.5-sonnet-20240620",
"---COHERE---",
"cohere/command",
"cohere/command-r",
"cohere/command-r-03-2024",
"cohere/command-r-08-2024",
"cohere/command-r-plus",
"cohere/command-r-plus-04-2024",
"cohere/command-r-plus-08-2024",
"---GOOGLE---",
"google/gemini-exp-1114",
"google/gemini-flash-1.5",
"google/gemini-flash-1.5-8b",
"google/gemini-flash-1.5-8b-exp",
"google/gemini-flash-1.5-exp",
"google/gemini-pro",
"google/gemini-pro-1.5",
"google/gemini-pro-1.5-exp",
"google/gemini-pro-vision",
"google/gemma-2-27b-it",
"google/gemma-2-9b-it",
"google/palm-2-chat-bison",
"google/palm-2-chat-bison-32k",
"google/palm-2-codechat-bison",
"google/palm-2-codechat-bison-32k",
"---META-LLAMA---",
"meta-llama/llama-2-13b-chat",
"meta-llama/llama-3-70b-instruct",
"meta-llama/llama-3-8b-instruct",
"meta-llama/llama-3.1-405b",
"meta-llama/llama-3.1-405b-instruct",
"meta-llama/llama-3.1-70b-instruct",
"meta-llama/llama-3.1-8b-instruct",
"meta-llama/llama-3.2-11b-vision-instruct",
"meta-llama/llama-3.2-1b-instruct",
"meta-llama/llama-3.2-3b-instruct",
"meta-llama/llama-3.2-90b-vision-instruct",
"meta-llama/llama-guard-2-8b",
"---MICROSOFT---",
"microsoft/phi-3-medium-128k-instruct",
"microsoft/phi-3-mini-128k-instruct",
"microsoft/phi-3.5-mini-128k-instruct",
"microsoft/wizardlm-2-7b",
"microsoft/wizardlm-2-8x22b",
"---MISTRALAI---",
"mistralai/codestral-mamba",
"mistralai/ministral-3b",
"mistralai/ministral-8b",
"mistralai/mistral-7b-instruct",
"mistralai/mistral-7b-instruct-v0.1",
"mistralai/mistral-7b-instruct-v0.2",
"mistralai/mistral-7b-instruct-v0.3",
"mistralai/mistral-large",
"mistralai/mistral-large-2407",
"mistralai/mistral-large-2411",
"mistralai/mistral-medium",
"mistralai/mistral-nemo",
"mistralai/mistral-small",
"mistralai/mistral-tiny",
"mistralai/mixtral-8x22b-instruct",
"mistralai/mixtral-8x7b",
"mistralai/mixtral-8x7b-instruct",
"mistralai/pixtral-12b",
"mistralai/pixtral-large-2411",
"---NEVERSLEEP---",
"neversleep/llama-3-lumimaid-70b",
"neversleep/llama-3-lumimaid-8b",
"neversleep/llama-3.1-lumimaid-70b",
"neversleep/llama-3.1-lumimaid-8b",
"neversleep/noromaid-20b",
"---NOUSRESEARCH---",
"nousresearch/hermes-2-pro-llama-3-8b",
"nousresearch/hermes-3-llama-3.1-405b",
"nousresearch/hermes-3-llama-3.1-70b",
"nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
"nousresearch/nous-hermes-llama2-13b",
"---OPENAI---",
"openai/chatgpt-4o-latest",
"openai/gpt-3.5-turbo",
"openai/gpt-3.5-turbo-0125",
"openai/gpt-3.5-turbo-0613",
"openai/gpt-3.5-turbo-1106",
"openai/gpt-3.5-turbo-16k",
"openai/gpt-3.5-turbo-instruct",
"openai/gpt-4",
"openai/gpt-4-0314",
"openai/gpt-4-1106-preview",
"openai/gpt-4-32k",
"openai/gpt-4-32k-0314",
"openai/gpt-4-turbo",
"openai/gpt-4-turbo-preview",
"openai/gpt-4-vision-preview",
"openai/gpt-4o",
"openai/gpt-4o-2024-05-13",
"openai/gpt-4o-2024-08-06",
"openai/gpt-4o-2024-11-20",
"openai/gpt-4o-mini",
"openai/gpt-4o-mini-2024-07-18",
"openai/o1-mini",
"openai/o1-mini-2024-09-12",
"openai/o1-preview",
"openai/o1-preview-2024-09-12",
"---PERPLEXITY---",
"perplexity/llama-3-sonar-large-32k-chat",
"perplexity/llama-3-sonar-large-32k-online",
"perplexity/llama-3-sonar-small-32k-chat",
"perplexity/llama-3.1-sonar-huge-128k-online",
"perplexity/llama-3.1-sonar-large-128k-chat",
"perplexity/llama-3.1-sonar-large-128k-online",
"perplexity/llama-3.1-sonar-small-128k-chat",
"perplexity/llama-3.1-sonar-small-128k-online",
"---QWEN---",
"qwen/qwen-2-72b-instruct",
"qwen/qwen-2-7b-instruct",
"qwen/qwen-2-vl-72b-instruct",
"qwen/qwen-2-vl-7b-instruct",
"qwen/qwen-2.5-72b-instruct",
"qwen/qwen-2.5-7b-instruct",
"qwen/qwen-2.5-coder-32b-instruct",
"---OTHERS---",
"01-ai/yi-large",
"alpindale/goliath-120b",
"alpindale/magnum-72b",
"anthracite-org/magnum-v4-72b",
"cognitivecomputations/dolphin-mixtral-8x22b",
"cognitivecomputations/dolphin-mixtral-8x7b",
"databricks/dbrx-instruct",
"deepseek/deepseek-chat",
"eva-unit-01/eva-qwen-2.5-32b",
"gryphe/mythomax-l2-13b",
"infermatic/mn-inferor-12b",
"inflection/inflection-3-pi",
"inflection/inflection-3-productivity",
"jondurbin/airoboros-l2-70b",
"liquid/lfm-40b",
"lizpreciatior/lzlv-70b-fp16-hf",
"mancer/weaver",
"nvidia/llama-3.1-nemotron-70b-instruct",
"openchat/openchat-7b",
"pygmalionai/mythalion-13b",
"raifle/sorcererlm-8x22b",
"sao10k/l3-euryale-70b",
"sao10k/l3.1-euryale-70b",
"sophosympatheia/midnight-rose-70b",
"teknium/openhermes-2.5-mistral-7b",
"thedrummer/rocinante-12b",
"thedrummer/unslopnemo-12b",
"undi95/remm-slerp-l2-13b",
"undi95/toppy-m-7b",
"x-ai/grok-beta",
"x-ai/grok-vision-beta",
"xwin-lm/xwin-lm-70b"
]
fetch: false
dropParams: ["stop"]
titleConvo: true
titleModel: "gpt-3.5-turbo"
summarize: false
summaryModel: "gpt-3.5-turbo"
forcePrompt: false
modelDisplayLabel: "OpenRouter"
# Perplexity
# Model list: https://docs.perplexity.ai/docs/model-cards
- name: "Perplexity"
apiKey: "user_provided"
baseURL: "https://api.perplexity.ai/"
models:
default: [
"llama-3.1-sonar-small-128k-chat",
"llama-3.1-sonar-small-128k-online",
"llama-3.1-sonar-large-128k-chat",
"llama-3.1-sonar-large-128k-online",
"llama-3.1-sonar-huge-128k-online",
"llama-3.1-8b-instruct",
"llama-3.1-70b-instruct"
]
fetch: false # fetching list of models is not supported
titleConvo: true
titleModel: "llama-3.1-sonar-small-128k-chat"
summarize: false
summaryModel: "llama-3.1-sonar-small-128k-chat"
forcePrompt: false
dropParams: ["stop", "frequency_penalty"]
modelDisplayLabel: "Perplexity"
# SambaNova
# https://cloud.sambanova.ai/apis
- name: "SambaNova"
apiKey: "user_provided"
baseURL: "https://api.sambanova.ai/v1/"
models:
default: [
"Meta-Llama-3.1-8B-Instruct",
"Meta-Llama-3.1-70B-Instruct",
"Meta-Llama-3.1-405B-Instruct",
"Meta-Llama-3.2-1B-Instruct",
"Meta-Llama-3.2-3B-Instruct",
"Llama-3.2-11B-Vision-Instruct",
"Llama-3.2-90B-Vision-Instruct",
]
fetch: false
titleConvo: true
titleModel: "Meta-Llama-3.1-8B-Instruct"
modelDisplayLabel: "SambaNova"
iconURL: "https://global.discourse-cdn.com/sambanova/original/1X/f5ea7759d23daaad4f91a387079b8a8a71cae3f6.webp"
# ShuttleAI API
# Model list: https://shuttleai.com/models
- name: "ShuttleAI"
apiKey: "user_provided"
baseURL: "https://api.shuttleai.com/v1"
models:
default: [
"shuttleai/shuttle-3",
"shuttleai/shuttle-3-mini",
"shuttleai/s1",
"shuttleai/s1-mini",
"openai/o1-preview-2024-09-12",
"openai/o1-mini-2024-09-12",
"openai/gpt-4o-mini-2024-07-18",
"openai/chatgpt-4o-latest",
"openai/gpt-4o-2024-08-06",
"openai/gpt-4o-2024-05-13",
"openai/gpt-4-turbo-2024-04-09",
"openai/gpt-4-0125-preview",
"openai/gpt-4-1106-preview",
"openai/gpt-4-0613",
"openai/gpt-3.5-turbo-0125",
"openai/gpt-3.5-turbo-1106",
"anthropic/claude-3-5-sonnet-20240620",
"anthropic/claude-3-opus-20240229",
"anthropic/claude-3-haiku-20240307",
"google/gemini-1.5-pro",
"google/gemini-1.5-pro-exp-0827",
"google/gemini-1.5-flash",
"google/gemini-1.5-flash-exp-0827",
"google/gemini-1.5-flash-8b-exp-0924",
"meta-llama/meta-llama-3.2-90b-vision-instruct",
"meta-llama/meta-llama-3.1-405b-instruct",
"meta-llama/meta-llama-3.1-70b-instruct",
"meta-llama/meta-llama-3.1-8b-instruct",
"mattshumer/reflection-llama-3.1-70b",
"perplexity/llama-3.1-sonar-large-128k-online",
"perplexity/llama-3.1-sonar-small-128k-online",
"perplexity/llama-3.1-sonar-large-128k-chat",
"perplexity/llama-3.1-sonar-small-128k-chat",
"mistralai/mistral-nemo-instruct-2407",
"mistralai/codestral-2405",
"alibaba-cloud/qwen-2.5-72b-instruct",
"alibaba-cloud/qwen-2.5-coder-7b",
"alibaba-cloud/qwen-2.5-math-72b",
"cohere/command-r-plus-08-2024",
"cohere/command-r-plus",
"cohere/command-r-08-2024",
"cohere/command-r"
]
fetch: true
titleConvo: true
titleModel: "shuttleai/shuttle-3-mini"
summarize: false
summaryModel: "shuttleai/shuttle-3-mini"
forcePrompt: false
dropParams: ["user", "frequency_penalty", "presence_penalty", "repetition_penalty"]
modelDisplayLabel: "ShuttleAI"
# together.ai
# https://api.together.ai/settings/api-keys
# Model list: https://docs.together.ai/docs/inference-models
- name: "together.ai"
apiKey: "user_provided"
baseURL: "https://api.together.xyz"
models:
default: [
"Austism/chronos-hermes-13b",
"Gryphe/MythoMax-L2-13b",
"Gryphe/MythoMax-L2-13b-Lite",
"HuggingFaceH4/zephyr-7b-beta",
"NousResearch/Hermes-2-Theta-Llama-3-70B",
"NousResearch/Nous-Capybara-7B-V1p9",
"NousResearch/Nous-Hermes-2-Mistral-7B-DPO",
"NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
"NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT",
"NousResearch/Nous-Hermes-Llama2-13b",
"NousResearch/Nous-Hermes-Llama2-70b",
"NousResearch/Nous-Hermes-llama-2-7b",
"Open-Orca/Mistral-7B-OpenOrca",
"Qwen/Qwen1.5-0.5B-Chat",
"Qwen/Qwen1.5-1.8B-Chat",
"Qwen/Qwen1.5-14B-Chat",
"Qwen/Qwen1.5-32B-Chat",
"Qwen/Qwen1.5-4B-Chat",
"Qwen/Qwen1.5-7B-Chat",
"Qwen/Qwen2-1.5B-Instruct",
"Qwen/Qwen2-72B-Instruct",
"Qwen/Qwen2-7B-Instruct",
"Qwen/Qwen2.5-72B-Instruct-Turbo",
"Qwen/Qwen2.5-7B-Instruct-Turbo",
"Qwen/Qwen2.5-Coder-32B-Instruct",
"Snowflake/snowflake-arctic-instruct",
"Undi95/ReMM-SLERP-L2-13B",
"Undi95/Toppy-M-7B",
"WizardLM/WizardLM-13B-V1.2",
"allenai/OLMo-7B-Instruct",
"carson/ml318br",
"codellama/CodeLlama-13b-Instruct-hf",
"codellama/CodeLlama-34b-Instruct-hf",
"codellama/CodeLlama-70b-Instruct-hf",
"codellama/CodeLlama-7b-Instruct-hf",
"cognitivecomputations/dolphin-2.5-mixtral-8x7b",
"databricks/dbrx-instruct",
"deepseek-ai/deepseek-coder-33b-instruct",
"deepseek-ai/deepseek-llm-67b-chat",
"garage-bAInd/Platypus2-70B-instruct",
"google/gemma-2-27b-it",
"google/gemma-2-9b-it",
"google/gemma-2b-it",
"google/gemma-7b-it",
"gradientai/Llama-3-70B-Instruct-Gradient-1048k",
"llava-hf/llava-v1.6-mistral-7b-hf",
"lmsys/vicuna-13b-v1.3",
"lmsys/vicuna-13b-v1.5",
"lmsys/vicuna-13b-v1.5-16k",
"lmsys/vicuna-7b-v1.3",
"lmsys/vicuna-7b-v1.5",
"meta-llama/Llama-2-13b-chat-hf",
"meta-llama/Llama-2-70b-chat-hf",
"meta-llama/Llama-2-7b-chat-hf",
"meta-llama/Llama-3-70b-chat-hf",
"meta-llama/Llama-3-8b-chat-hf",
"meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
"meta-llama/Llama-3.2-3B-Instruct-Turbo",
"meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
"meta-llama/Llama-Vision-Free",
"meta-llama/Meta-Llama-3-70B-Instruct",
"meta-llama/Meta-Llama-3-70B-Instruct-Lite",
"meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
"meta-llama/Meta-Llama-3-8B-Instruct",
"meta-llama/Meta-Llama-3-8B-Instruct-Lite",
"meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro",
"meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Reference",
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
"meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",
"meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
"microsoft/WizardLM-2-8x22B",
"mistralai/Mistral-7B-Instruct-v0.1",
"mistralai/Mistral-7B-Instruct-v0.2",
"mistralai/Mistral-7B-Instruct-v0.3",
"mistralai/Mixtral-8x22B-Instruct-v0.1",
"mistralai/Mixtral-8x7B-Instruct-v0.1",
"nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
"openchat/openchat-3.5-1210",
"scb10x/scb10x-llama3-typhoon-v1-5-8b-instruct",
"scb10x/scb10x-llama3-typhoon-v1-5x-4f316",
"snorkelai/Snorkel-Mistral-PairRM-DPO",
"teknium/OpenHermes-2-Mistral-7B",
"teknium/OpenHermes-2p5-Mistral-7B",
"test/test11",
"togethercomputer/CodeLlama-13b-Instruct",
"togethercomputer/CodeLlama-34b-Instruct",
"togethercomputer/CodeLlama-7b-Instruct",
"togethercomputer/Koala-13B",
"togethercomputer/Koala-7B",
"togethercomputer/Llama-3-8b-chat-hf-int4",
"togethercomputer/Llama-3-8b-chat-hf-int8",
"togethercomputer/SOLAR-10.7B-Instruct-v1.0-int4",
"togethercomputer/alpaca-7b",
"togethercomputer/guanaco-13b",
"togethercomputer/guanaco-33b",
"togethercomputer/guanaco-65b",
"togethercomputer/guanaco-7b",
"togethercomputer/llama-2-13b-chat",
"togethercomputer/llama-2-70b-chat",
"togethercomputer/llama-2-7b-chat",
"upstage/SOLAR-10.7B-Instruct-v1.0",
"zero-one-ai/Yi-34B-Chat"
]
fetch: false
titleConvo: true
titleModel: "openchat/openchat-3.5-1210"
summarize: false
summaryModel: "openchat/openchat-3.5-1210"
forcePrompt: false
modelDisplayLabel: "together.ai"
# Unify
# Model list: https://unify.ai/chat
    # Unify routes one API key to many upstream providers; the "@provider"
    # suffix in each model id selects the backend (e.g. "@openai", "@groq").
    - name: "Unify"
      apiKey: "user_provided"
      baseURL: "https://api.unify.ai/v0/"
      models:
        default: [
          # Unify auto-router: quality/cost/time/input-cost weights are
          # encoded in the id (q, c, t, i).
          "router@q:1|c:2.12e-01|t:5.00e-04|i:2.78e-04",
          # OpenAI
          "chatgpt-4o-latest@openai",
          "gpt-3.5-turbo@openai",
          "gpt-4-turbo@openai",
          "gpt-4@openai",
          "gpt-4o-2024-05-13@openai",
          "gpt-4o-2024-08-06@openai",
          "gpt-4o-mini@openai",
          "gpt-4o@openai",
          "o1-mini@openai",
          "o1-preview@openai",
          # Anthropic (direct)
          "claude-3-haiku@anthropic",
          "claude-3-opus@anthropic",
          "claude-3-sonnet@anthropic",
          "claude-3.5-haiku@anthropic",
          "claude-3.5-sonnet-20240620@anthropic",
          "claude-3.5-sonnet@anthropic",
          # AWS Bedrock
          "claude-3-haiku@aws-bedrock",
          "claude-3-opus@aws-bedrock",
          "claude-3-sonnet@aws-bedrock",
          "claude-3.5-haiku@aws-bedrock",
          "claude-3.5-sonnet-20240620@aws-bedrock",
          "claude-3.5-sonnet@aws-bedrock",
          "command-r-plus@aws-bedrock",
          "llama-3-70b-chat@aws-bedrock",
          "llama-3-8b-chat@aws-bedrock",
          "llama-3.1-405b-chat@aws-bedrock",
          "llama-3.1-70b-chat@aws-bedrock",
          "llama-3.1-8b-chat@aws-bedrock",
          "llama-3.2-1b-chat@aws-bedrock",
          "llama-3.2-3b-chat@aws-bedrock",
          "mistral-7b-instruct-v0.2@aws-bedrock",
          "mistral-large@aws-bedrock",
          "mixtral-8x7b-instruct-v0.1@aws-bedrock",
          # Google Vertex AI
          "claude-3-haiku@vertex-ai",
          "claude-3-opus@vertex-ai",
          "claude-3-sonnet@vertex-ai",
          "claude-3.5-haiku@vertex-ai",
          "claude-3.5-sonnet-20240620@vertex-ai",
          "claude-3.5-sonnet@vertex-ai",
          "gemini-1.0-pro-001@vertex-ai",
          "gemini-1.0-pro-002@vertex-ai",
          "gemini-1.0-pro@vertex-ai",
          "gemini-1.5-flash-001@vertex-ai",
          "gemini-1.5-flash-002@vertex-ai",
          "gemini-1.5-flash@vertex-ai",
          "gemini-1.5-pro-001@vertex-ai",
          "gemini-1.5-pro-002@vertex-ai",
          "gemini-1.5-pro@vertex-ai",
          "llama-3.1-405b-chat@vertex-ai",
          "llama-3.1-70b-chat@vertex-ai",
          "llama-3.1-8b-chat@vertex-ai",
          "llama-3.2-11b-chat@vertex-ai",
          "llama-3.2-90b-chat@vertex-ai",
          "mistral-large@vertex-ai",
          "mistral-nemo@vertex-ai",
          # DeepInfra
          "gemma-2-27b-it@deepinfra",
          "gemma-2-9b-it@deepinfra",
          "llama-3-70b-chat@deepinfra",
          "llama-3-8b-chat@deepinfra",
          "llama-3.1-405b-chat@deepinfra",
          "llama-3.1-70b-chat@deepinfra",
          "llama-3.1-8b-chat@deepinfra",
          "llama-3.1-nemotron-70b-chat@deepinfra",
          "llama-3.2-11b-chat@deepinfra",
          "llama-3.2-1b-chat@deepinfra",
          "llama-3.2-3b-chat@deepinfra",
          "llama-3.2-90b-chat@deepinfra",
          "mistral-7b-instruct-v0.3@deepinfra",
          "mistral-nemo@deepinfra",
          "mixtral-8x7b-instruct-v0.1@deepinfra",
          "qwen-2.5-72b-instruct@deepinfra",
          # Together AI
          "gemma-2-27b-it@together-ai",
          "gemma-2-9b-it@together-ai",
          "llama-3-70b-chat@together-ai",
          "llama-3-8b-chat@together-ai",
          "llama-3.1-405b-chat@together-ai",
          "llama-3.1-70b-chat@together-ai",
          "llama-3.1-8b-chat@together-ai",
          "llama-3.2-11b-chat@together-ai",
          "llama-3.2-3b-chat@together-ai",
          "llama-3.2-90b-chat@together-ai",
          "mistral-7b-instruct-v0.3@together-ai",
          "mixtral-8x22b-instruct-v0.1@together-ai",
          "mixtral-8x7b-instruct-v0.1@together-ai",
          "qwen-2-72b-instruct@together-ai",
          "qwen-2.5-72b-instruct@together-ai",
          "qwen-2.5-7b-instruct@together-ai",
          # Groq
          "gemma-2-9b-it@groq",
          "gemma-7b-it@groq",
          "llama-3-70b-chat@groq",
          "llama-3-8b-chat@groq",
          "llama-3.1-70b-chat@groq",
          "llama-3.1-8b-chat@groq",
          "llama-3.2-1b-chat@groq",
          "llama-3.2-3b-chat@groq",
          "mixtral-8x7b-instruct-v0.1@groq",
          # Lepton AI
          "gemma-2-9b-it@lepton-ai",
          "llama-3-70b-chat@lepton-ai",
          "llama-3-8b-chat@lepton-ai",
          "llama-3.1-405b-chat@lepton-ai",
          "llama-3.1-70b-chat@lepton-ai",
          "llama-3.1-8b-chat@lepton-ai",
          "llama-3.2-3b-chat@lepton-ai",
          "mistral-7b-instruct-v0.3@lepton-ai",
          "mistral-nemo@lepton-ai",
          "mixtral-8x7b-instruct-v0.1@lepton-ai",
          "qwen-2-72b-instruct@lepton-ai",
          # Azure AI
          "gpt-4o-2024-05-13@azure-ai",
          "gpt-4o-2024-08-06@azure-ai",
          "gpt-4o-mini@azure-ai",
          "gpt-4o@azure-ai",
          "llama-3.1-405b-chat@azure-ai",
          "llama-3.1-70b-chat@azure-ai",
          "llama-3.1-8b-chat@azure-ai",
          "llama-3.2-11b-chat@azure-ai",
          "llama-3.2-90b-chat@azure-ai",
          "mistral-large@azure-ai",
          "mistral-nemo@azure-ai",
          # Fireworks AI
          "llama-3-70b-chat@fireworks-ai",
          "llama-3-8b-chat@fireworks-ai",
          "llama-3.1-405b-chat@fireworks-ai",
          "llama-3.1-70b-chat@fireworks-ai",
          "llama-3.1-8b-chat@fireworks-ai",
          "llama-3.2-11b-chat@fireworks-ai",
          "llama-3.2-1b-chat@fireworks-ai",
          "llama-3.2-3b-chat@fireworks-ai",
          "llama-3.2-90b-chat@fireworks-ai",
          "mistral-nemo@fireworks-ai",
          "mixtral-8x22b-instruct-v0.1@fireworks-ai",
          "mixtral-8x7b-instruct-v0.1@fireworks-ai",
          "qwen-2.5-14b-instruct@fireworks-ai",
          "qwen-2.5-72b-instruct@fireworks-ai",
          "qwen-2.5-7b-instruct@fireworks-ai",
          # Replicate
          "llama-3-70b-chat@replicate",
          "llama-3-8b-chat@replicate",
          "llama-3.1-405b-chat@replicate",
          "mixtral-8x7b-instruct-v0.1@replicate",
          # Perplexity
          "llama-3.1-70b-chat@perplexity-ai",
          "llama-3.1-8b-chat@perplexity-ai",
          # Mistral AI
          "ministral-3b@mistral-ai",
          "ministral-8b@mistral-ai",
          "mistral-7b-instruct-v0.3@mistral-ai",
          "mistral-large@mistral-ai",
          "mistral-nemo@mistral-ai",
          "mistral-small@mistral-ai",
          "mixtral-8x22b-instruct-v0.1@mistral-ai",
          "mixtral-8x7b-instruct-v0.1@mistral-ai",
        ]
      fetch: false
      titleConvo: true
      titleModel: "router@q:1|c:2.12e-01|t:5.00e-04|i:2.78e-04"
      # NOTE(review): these OpenAI-style params are stripped before forwarding —
      # presumably Unify's API rejects them; confirm against Unify docs.
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]
    # xAI (Grok)
    - name: "xai"
      apiKey: "user_provided"
      baseURL: "https://api.x.ai/v1"
      models:
        default: ["grok-beta"]
        fetch: false
      titleConvo: true
      # NOTE(review): title generation forced to plain "completion" mode here —
      # presumably the default title method misbehaves with Grok; confirm.
      titleMethod: "completion"
      titleModel: "grok-beta"
      summarize: false
      summaryModel: "grok-beta"
      forcePrompt: false
      modelDisplayLabel: "Grok"
# REVERSE PROXY
# ConvoAI
    # ConvoAI reverse proxy — mixed catalog of OpenAI, Anthropic, Google and
    # open-weight model ids exposed behind one OpenAI-compatible API.
    - name: "ConvoAI"
      apiKey: "user_provided"
      baseURL: "https://api.convoai.tech/v1/"
      models:
        default: [
          "gpt-3.5-turbo",
          "gpt-3.5-turbo-1106",
          "gpt-3.5-turbo-0125",
          "gpt-3.5-turbo-16k",
          "gpt-4",
          "gpt-4-0613",
          "gpt-4-1106-preview",
          "gpt-4-0125-preview",
          "gpt-4-vision-preview",
          "gpt-4-turbo-2024-04-09",
          # "convoai-pro" is a proxy-specific model id, not an upstream one.
          "convoai-pro",
          "mixtral-8x22b",
          "gpt-3.5-turbo-16k-0613",
          "gpt-3.5-turbo-0613",
          "gpt-4-32k",
          "gpt-4-1106-vision-preview",
          "claude-2",
          "claude-3-haiku",
          "claude-3-sonnet",
          "claude-3-opus",
          "claude-instant-1.2",
          "gemma-2b",
          "gemma-2b-it",
          "gemma-7b",
          "gemma-7b-it",
          "gemini-1.0-pro-001",
          "gemini-pro",
          "gemini-1.0-pro",
          "gemini-1.0-pro-latest",
          "gemini-1.5-pro",
          "gemini-1.5-pro-latest",
          "gemini-pro-vision",
          "mistral-7b",
          "mixtral-8x7b-Instruct-v0.1",
          "mistral-7b-instruct-v0.1",
          "mistral-7b-instruct-v0.2",
          "mixtral-8x7b",
          "dolphin-mixtral-8x7b",
          "mistral-tiny",
          "mistral-small",
          "mistral-medium",
          "mistral-large",
          "llama2-7b",
          "llama2-70b",
          "llama2-13b",
          "code-llama-7b",
          "code-llama-70b-instruct",
          "code-llama-13b",
          "code-llama-34b",
          "code-llama-34b-instruct",
          "openchat-3.5",
          "yi-34b-chat",
          "yi-34b-200k",
          "command-r-plus",
          "command-r-plus-4bit",
          "aya-101",
          "dbrx-instruct",
        ]
        fetch: false
      titleConvo: true
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
      modelDisplayLabel: "ConvoAI"
      iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/ConvoAI.png"
# FreeGPT-4
    # FreeGPT-4 reverse proxy — small fixed catalog; model list is pinned
    # (fetch: false), so update it by hand when the proxy changes.
    - name: "FreeGPT-4"
      apiKey: "user_provided"
      baseURL: "https://api.freegpt4.tech/v1/"
      models:
        default: [
          "gpt-3.5-turbo",
          "gpt-3.5-turbo-1106",
          "gpt-3.5-turbo-0125",
          "gpt-3.5-turbo-16k",
          "gpt-4",
          "gpt-4-1106-preview",
          "gpt-4-0125-preview",
          # NOTE(review): bare "claude" / "gemini-pro" ids — proxy-specific
          # aliases; confirm against the provider's model list.
          "claude",
          "gemini-pro"
        ]
        fetch: false
      titleConvo: true
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
      modelDisplayLabel: "FreeGPT-4"
      iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/FreeGPT-4.png"
# Mandrill
    # Mandrill reverse proxy — OpenAI-compatible endpoint with a small set of
    # GPT, Claude and Gemini aliases.
    - name: "Mandrill"
      apiKey: "user_provided"
      baseURL: "https://api.mandrillai.tech/v1"
      models:
        default: [
          "gpt-4o",
          "gpt-4-turbo",
          "gpt-4-0125-preview",
          "gpt-4-1106-preview",
          "gpt-4",
          "gpt-3.5-turbo",
          "gpt-3.5-turbo-1106",
          "gpt-3.5-turbo-0613",
          "gpt-3.5-turbo-0301",
          "claude-3-opus",
          "gemini-pro",
          "gemini-pro-vision"
        ]
        fetch: false
      titleConvo: true
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
      modelDisplayLabel: "Mandrill"
      iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/Mandrill.png"
# NagaAI
    # NagaAI reverse proxy — pinned list mixing dated and undated aliases for
    # the same upstream models (e.g. "claude-3-opus" / "claude-3-opus-20240229").
    - name: "NagaAI"
      apiKey: "user_provided"
      baseURL: "https://api.naga.ac/v1"
      models:
        default: [
          "gpt-4",
          "gpt-4-vision-preview",
          "gpt-4-turbo-preview",
          "gpt-4-0125-preview",
          "gpt-4-1106-preview",
          "gpt-4-0613",
          "mistral-large",
          "mistral-large-2402",
          "mistral-next",
          "mistral-small",
          "mistral-small-2402",
          "gpt-3.5-turbo",
          "gpt-3.5-turbo-0125",
          "gpt-3.5-turbo-1106",
          "gpt-3.5-turbo-0613",
          "claude-3-opus",
          "claude-3-opus-20240229",
          "claude-3-sonnet",
          "claude-3-sonnet-20240229",
          "claude-3-haiku",
          "claude-3-haiku-20240307",
          "claude-2.1",
          "claude-instant",
          "gemini-pro",
          "gemini-pro-vision",
          "llama-2-70b-chat",
          "llama-2-13b-chat",
          "llama-2-7b-chat",
          "mistral-7b",
          "mixtral-8x7b"
        ]
        fetch: false
      titleConvo: true
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
      modelDisplayLabel: "NagaAI"
      iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/NagaAI.png"
# Pawan
- name: "Pawan"
apiKey: "user_provided"
baseURL: "https://api.pawan.krd/pai-001-rp/v1"
models:
default: [
"pai-001-rp"
]
fetch: false
titleConvo: true
titleModel: "pai-001-rp"
summarize: false
summaryModel: "pai-001-rp"
forcePrompt: false
modelDisplayLabel: "Pawan"
iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/Pawan.png"
# Pawan light
- name: "Pawan light"
apiKey: "user_provided"
baseURL: "https://api.pawan.krd/pai-001-light-rp/v1"
models:
default: [
"pai-001-light-rp"
]
fetch: false
titleConvo: true
titleModel: "pai-001-light-rp"
summarize: false
summaryModel: "pai-001-light-rp"
forcePrompt: false
modelDisplayLabel: "Pawan light"
iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/Pawan.png"
    # Shard
- name: "Shard"
apiKey: "user_provided"
baseURL: "https://api.shard-ai.xyz/v1/"
models:
default: [
'gpt-3.5-turbo-0301',
'gpt-3.5-turbo-0613',
'gpt-3.5-turbo',
'gpt-3.5-turbo-1106',
'gpt-3.5-turbo-0125',
'gpt-3.5-turbo-instruct',
'gpt-3.5-turbo-instruct-0914',
'gpt-3.5-turbo-16k',
'gpt-4-0613',
'gpt-4',
'gpt-4-turbo',
'gpt-4-turbo-2024-04-09',
'gpt-4-1106-preview',
'gpt-4-0125-preview',
'gpt-4-turbo-preview',
'gpt-4-vision-preview',
'command-r',
'command-r-plus',
'command-light-nightly',
'command',
'command-light',
'c4ai-aya',
'claude',
'claude-1.2',
'claude-2',
'claude-2.1',
'claude-3-haiku',
'claude-3-sonnet',
'claude-3-opus',
'claude-instant-v1',
'claude-instant-v1-100k',
'palm-2',
'dbrx-instruct',
'gemini-pro',
'gemini-1.5-pro',
'mixtral-8x7b-instruct',
'mixtral-8x7b',
'mixtral-8x22b',
'mixtral-8x22b-finetuned',
'zephyr-8x22b',
'zephyr-7b',
'mistral-tiny',
'mistral-small',
'mistral-medium',
'mistral-large',
'mistral-next',
'mistral-7b-instruct',
'yi-34b',
'gemma-2b',
'gemma-7b', 'gemma-1.1-7b',
'llamaguard-7b',
'llama-2-7b',
'llama-2-13b',
'llama-2-70b',
'llama-3-8b',
'llama-3-70b',
'openchat-3.5',
'phind-codellama-34b',
'llava-1.5',
'llava-1.6-34b',
'llava-1.6-7b',
'lzlv-70b',
'airoboros-70b',
'airoboros-70b-gpt4',
'cinematika-7b',
'toppy-7b',
'codellama-7b-instruct',
'codellama-13b-instruct',
'codellama-34b-instruct',
'codellama-70b-instruct',
'dolphine-mixtral',
'pi', 'mythomax-l2-13b',
'nous-capybara-7b',
'sonar-small-chat',
'sonar-medium-chat',
'sonar-small-online',
'sonar-medium-online',
'perplexity-related',
'hermes-2',
'hermes-2-pro',
'qwen-1.5-32b-chat'
]
fetch: false
titleConvo: true
titleModel: "gpt-3.5-turbo"
modelDisplayLabel: "Shard"
iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/shard.png"
# Zukijourney
- name: "Zukijourney"
apiKey: "user_provided"
baseURL: "https://zukijourney.xyzbot.net/unf/"
models:
default: [
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-16k",
"gpt-4",
"gpt-4o",
"gpt-4-32k",
"gpt-4-1106-preview",
"gpt-4-0125-preview",
"gpt-4-vision-preview",
"claude",
"claude-2",
"claude-2.1",
"claude-instant-v1",
"claude-instant-v1-100k",
"claude-3-opus",
"claude-3-sonnet",
"claude-3.5-sonnet",
"pplx-70b-online",
"palm-2",
"bard",
"gemini-pro",
"gemini-pro-vision",
"mixtral-8x7b",
"mixtral-8x7b-instruct",
"mistral-tiny",
"mistral-small",
"mistral-medium",
"mistral-7b-instruct",
"codellama-7b-instruct",
"llama-2-7b",
"llama-2-70b-chat",
"mythomax-l2-13b-8k",
"sheep-duck-llama",
"goliath-120b",
"nous-llama",
"yi-34b",
"openchat",
"solar10-7b",
"pi"
]
fetch: true
titleConvo: true
titleModel: "gpt-3.5-turbo"
summarize: false
summaryModel: "gpt-3.5-turbo"
forcePrompt: false
dropParams: ["stream"]
iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/zuki.png"
    # yi-lightning served through a personal Cloudflare Workers proxy
    # (workers.dev URL) — availability depends on that worker staying up.
    - name: "yi-lightning"
      apiKey: "user_provided"
      baseURL: "https://wispy-poetry-d145.baochao0912.workers.dev/web/v1"
      models:
        default: ["yi-lightning"]
        fetch: false
      # Title generation disabled for this endpoint; titleModel is kept so it
      # can be re-enabled by flipping titleConvo alone.
      titleConvo: false
      titleModel: "yi-lightning"
      modelDisplayLabel: "yi-lightning"
# addParams:
# safe_prompt: true # Mistral specific value for moderating messages
# NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error: