|
/**
 * Catalog of human-readable display names keyed by OpenRouter-style model
 * identifier ("vendor/model-slug", optionally suffixed with a variant tag
 * such as ":free", ":beta", ":thinking", or ":extended").
 *
 * Paid and ":free" variants of the same model deliberately map to the same
 * display name (e.g. "deepseek/deepseek-r1" and "deepseek/deepseek-r1:free"),
 * so duplicate VALUES are expected; every KEY is unique.
 *
 * NOTE(review): entries appear to be ordered roughly newest-first — confirm
 * before relying on iteration order anywhere.
 */
export const modelNames: Record<string, string> = {
  "allenai/molmo-7b-d:free": "AllenAI Molmo 7B D",
  "bytedance-research/ui-tars-72b:free": "Bytedance UI-TARS 72B",
  "qwen/qwen2.5-vl-3b-instruct:free": "Qwen Qwen2.5 VL 3B Instruct",
  "google/gemini-2.5-pro-exp-03-25:free": "Google Gemini Pro 2.5 Experimental",
  "qwen/qwen2.5-vl-32b-instruct:free": "Qwen Qwen2.5 VL 32B Instruct",
  "deepseek/deepseek-chat-v3-0324:free": "DeepSeek V3 0324",
  "deepseek/deepseek-chat-v3-0324": "DeepSeek V3 0324",
  "featherless/qwerky-72b:free": "Qwerky 72b",
  "openai/o1-pro": "OpenAI o1-pro",
  "mistralai/mistral-small-3.1-24b-instruct:free": "Mistral Small 3.1 24B",
  "mistralai/mistral-small-3.1-24b-instruct": "Mistral Small 3.1 24B",
  "open-r1/olympiccoder-7b:free": "OlympicCoder 7B",
  "open-r1/olympiccoder-32b:free": "OlympicCoder 32B",
  "steelskull/l3.3-electra-r1-70b": "SteelSkull L3.3 Electra R1 70B",
  "allenai/olmo-2-0325-32b-instruct": "AllenAI Olmo 2 32B Instruct",
  "google/gemma-3-1b-it:free": "Google Gemma 3 1B",
  "google/gemma-3-4b-it:free": "Google Gemma 3 4B",
  "ai21/jamba-1.6-large": "AI21 Jamba 1.6 Large",
  "ai21/jamba-1.6-mini": "AI21 Jamba Mini 1.6",
  "google/gemma-3-12b-it:free": "Google Gemma 3 12B",
  "cohere/command-a": "Cohere Command A",
  "openai/gpt-4o-mini-search-preview": "OpenAI GPT-4o-mini Search Preview",
  "openai/gpt-4o-search-preview": "OpenAI GPT-4o Search Preview",
  "tokyotech-llm/llama-3.1-swallow-70b-instruct-v0.3": "Swallow Llama 3.1 Swallow 70B Instruct V0.3",
  "rekaai/reka-flash-3:free": "Reka Flash 3",
  "google/gemma-3-27b-it:free": "Google Gemma 3 27B",
  "google/gemma-3-27b-it": "Google Gemma 3 27B",
  "thedrummer/anubis-pro-105b-v1": "TheDrummer Anubis Pro 105B V1",
  "latitudegames/wayfarer-large-70b-llama-3.3": "LatitudeGames Wayfarer Large 70B Llama 3.3",
  "thedrummer/skyfall-36b-v2": "TheDrummer Skyfall 36B V2",
  "microsoft/phi-4-multimodal-instruct": "Microsoft Phi 4 Multimodal Instruct",
  "perplexity/sonar-reasoning-pro": "Perplexity Sonar Reasoning Pro",
  "perplexity/sonar-pro": "Perplexity Sonar Pro",
  "perplexity/sonar-deep-research": "Perplexity Sonar Deep Research",
  "deepseek/deepseek-r1-zero:free": "DeepSeek R1 Zero",
  "qwen/qwq-32b:free": "Qwen QwQ 32B",
  "qwen/qwq-32b": "Qwen QwQ 32B",
  "qwen/qwen2.5-32b-instruct": "Qwen Qwen2.5 32B Instruct",
  "moonshotai/moonlight-16b-a3b-instruct:free": "Moonshot AI Moonlight 16B A3B Instruct",
  "nousresearch/deephermes-3-llama-3-8b-preview:free": "Nous DeepHermes 3 Llama 3 8B Preview",
  "openai/gpt-4.5-preview": "OpenAI GPT-4.5 (Preview)",
  "google/gemini-2.0-flash-lite-001": "Google Gemini 2.0 Flash Lite",
  "anthropic/claude-3.7-sonnet:beta": "Anthropic Claude 3.7 Sonnet (self-moderated)",
  "anthropic/claude-3.7-sonnet": "Anthropic Claude 3.7 Sonnet",
  "anthropic/claude-3.7-sonnet:thinking": "Anthropic Claude 3.7 Sonnet (thinking)",
  "perplexity/r1-1776": "Perplexity R1 1776",
  "mistralai/mistral-saba": "Mistral Saba",
  "cognitivecomputations/dolphin3.0-r1-mistral-24b:free": "Dolphin3.0 R1 Mistral 24B",
  "cognitivecomputations/dolphin3.0-mistral-24b:free": "Dolphin3.0 Mistral 24B",
  "meta-llama/llama-guard-3-8b": "Llama Guard 3 8B",
  "openai/o3-mini-high": "OpenAI o3 Mini High",
  "allenai/llama-3.1-tulu-3-405b": "Llama 3.1 Tulu 3 405B",
  "deepseek/deepseek-r1-distill-llama-8b": "DeepSeek R1 Distill Llama 8B",
  "google/gemini-2.0-flash-001": "Google Gemini Flash 2.0",
  "google/gemini-2.0-flash-lite-preview-02-05:free": "Google Gemini Flash Lite 2.0 Preview",
  "google/gemini-2.0-pro-exp-02-05:free": "Google Gemini Pro 2.0 Experimental",
  "qwen/qwen-vl-plus": "Qwen VL Plus",
  "aion-labs/aion-1.0": "AionLabs Aion-1.0",
  "aion-labs/aion-1.0-mini": "AionLabs Aion-1.0-Mini",
  "aion-labs/aion-rp-llama-3.1-8b": "AionLabs Aion-RP 1.0 (8B)",
  "qwen/qwen-vl-max": "Qwen VL Max",
  "qwen/qwen-turbo": "Qwen Qwen-Turbo",
  "qwen/qwen2.5-vl-72b-instruct:free": "Qwen Qwen2.5 VL 72B Instruct",
  "qwen/qwen2.5-vl-72b-instruct": "Qwen Qwen2.5 VL 72B Instruct",
  "qwen/qwen-plus": "Qwen Qwen-Plus",
  "qwen/qwen-max": "Qwen Qwen-Max",
  "openai/o3-mini": "OpenAI o3 Mini",
  "deepseek/deepseek-r1-distill-qwen-1.5b": "DeepSeek R1 Distill Qwen 1.5B",
  "mistralai/mistral-small-24b-instruct-2501:free": "Mistral Small 3",
  "mistralai/mistral-small-24b-instruct-2501": "Mistral Small 3",
  "deepseek/deepseek-r1-distill-qwen-32b:free": "DeepSeek R1 Distill Qwen 32B",
  "deepseek/deepseek-r1-distill-qwen-32b": "DeepSeek R1 Distill Qwen 32B",
  "deepseek/deepseek-r1-distill-qwen-14b:free": "DeepSeek R1 Distill Qwen 14B",
  "deepseek/deepseek-r1-distill-qwen-14b": "DeepSeek R1 Distill Qwen 14B",
  "perplexity/sonar-reasoning": "Perplexity Sonar Reasoning",
  "perplexity/sonar": "Perplexity Sonar",
  "liquid/lfm-7b": "Liquid LFM 7B",
  "liquid/lfm-3b": "Liquid LFM 3B",
  "deepseek/deepseek-r1-distill-llama-70b:free": "DeepSeek R1 Distill Llama 70B",
  "deepseek/deepseek-r1-distill-llama-70b": "DeepSeek R1 Distill Llama 70B",
  "google/gemini-2.0-flash-thinking-exp:free": "Google Gemini 2.0 Flash Thinking Experimental 01-21",
  "deepseek/deepseek-r1:free": "DeepSeek R1",
  "deepseek/deepseek-r1": "DeepSeek R1",
  "sophosympatheia/rogue-rose-103b-v0.2:free": "Rogue Rose 103B v0.2",
  "minimax/minimax-01": "MiniMax MiniMax-01",
  "mistralai/codestral-2501": "Mistral Codestral 2501",
  "microsoft/phi-4": "Microsoft Phi 4",
  "sao10k/l3.1-70b-hanami-x1": "Sao10K Llama 3.1 70B Hanami x1",
  "deepseek/deepseek-chat:free": "DeepSeek V3",
  "deepseek/deepseek-chat": "DeepSeek V3",
  "google/gemini-2.0-flash-thinking-exp-1219:free": "Google Gemini 2.0 Flash Thinking Experimental",
  "sao10k/l3.3-euryale-70b": "Sao10K Llama 3.3 Euryale 70B",
  "openai/o1": "OpenAI o1",
  "eva-unit-01/eva-llama-3.33-70b": "EVA Llama 3.33 70B",
  "x-ai/grok-2-vision-1212": "xAI Grok 2 Vision 1212",
  "x-ai/grok-2-1212": "xAI Grok 2 1212",
  "cohere/command-r7b-12-2024": "Cohere Command R7B (12-2024)",
  "google/gemini-2.0-flash-exp:free": "Google Gemini Flash 2.0 Experimental",
  "meta-llama/llama-3.3-70b-instruct:free": "Meta Llama 3.3 70B Instruct",
  "meta-llama/llama-3.3-70b-instruct": "Meta Llama 3.3 70B Instruct",
  "amazon/nova-lite-v1": "Amazon Nova Lite 1.0",
  "amazon/nova-micro-v1": "Amazon Nova Micro 1.0",
  "amazon/nova-pro-v1": "Amazon Nova Pro 1.0",
  "qwen/qwq-32b-preview:free": "Qwen QwQ 32B Preview",
  "qwen/qwq-32b-preview": "Qwen QwQ 32B Preview",
  "google/learnlm-1.5-pro-experimental:free": "Google LearnLM 1.5 Pro Experimental",
  "eva-unit-01/eva-qwen-2.5-72b": "EVA Qwen2.5 72B",
  "openai/gpt-4o-2024-11-20": "OpenAI GPT-4o (2024-11-20)",
  "mistralai/mistral-large-2411": "Mistral Large 2411",
  "mistralai/mistral-large-2407": "Mistral Large 2407",
  "mistralai/pixtral-large-2411": "Mistral Pixtral Large 2411",
  "x-ai/grok-vision-beta": "xAI Grok Vision Beta",
  "infermatic/mn-inferor-12b": "Infermatic Mistral Nemo Inferor 12B",
  "qwen/qwen-2.5-coder-32b-instruct:free": "Qwen2.5 Coder 32B Instruct",
  "qwen/qwen-2.5-coder-32b-instruct": "Qwen2.5 Coder 32B Instruct",
  "raifle/sorcererlm-8x22b": "SorcererLM 8x22B",
  "eva-unit-01/eva-qwen-2.5-32b": "EVA Qwen2.5 32B",
  "thedrummer/unslopnemo-12b": "Unslopnemo 12B",
  "anthropic/claude-3.5-haiku:beta": "Anthropic Claude 3.5 Haiku (self-moderated)",
  "anthropic/claude-3.5-haiku": "Anthropic Claude 3.5 Haiku",
  "anthropic/claude-3.5-haiku-20241022:beta": "Anthropic Claude 3.5 Haiku (2024-10-22) (self-moderated)",
  "anthropic/claude-3.5-haiku-20241022": "Anthropic Claude 3.5 Haiku (2024-10-22)",
  "anthropic/claude-3.5-sonnet:beta": "Anthropic Claude 3.5 Sonnet (self-moderated)",
  "anthropic/claude-3.5-sonnet": "Anthropic Claude 3.5 Sonnet",
  "anthracite-org/magnum-v4-72b": "Magnum v4 72B",
  "neversleep/llama-3.1-lumimaid-70b": "NeverSleep Lumimaid v0.2 70B",
  "x-ai/grok-beta": "xAI Grok Beta",
  "mistralai/ministral-3b": "Mistral Ministral 3B",
  "mistralai/ministral-8b": "Mistral Ministral 8B",
  "qwen/qwen-2.5-7b-instruct": "Qwen2.5 7B Instruct",
  "nvidia/llama-3.1-nemotron-70b-instruct:free": "NVIDIA Llama 3.1 Nemotron 70B Instruct",
  "nvidia/llama-3.1-nemotron-70b-instruct": "NVIDIA Llama 3.1 Nemotron 70B Instruct",
  "inflection/inflection-3-pi": "Inflection 3 Pi",
  "inflection/inflection-3-productivity": "Inflection 3 Productivity",
  "google/gemini-flash-1.5-8b": "Google Gemini Flash 1.5 8B",
  "liquid/lfm-40b": "Liquid LFM 40B MoE",
  "thedrummer/rocinante-12b": "Rocinante 12B",
  "anthracite-org/magnum-v2-72b": "Magnum v2 72B",
  "meta-llama/llama-3.2-90b-vision-instruct": "Meta Llama 3.2 90B Vision Instruct",
  "meta-llama/llama-3.2-1b-instruct:free": "Meta Llama 3.2 1B Instruct",
  "meta-llama/llama-3.2-1b-instruct": "Meta Llama 3.2 1B Instruct",
  "meta-llama/llama-3.2-3b-instruct:free": "Meta Llama 3.2 3B Instruct",
  "meta-llama/llama-3.2-3b-instruct": "Meta Llama 3.2 3B Instruct",
  "meta-llama/llama-3.2-11b-vision-instruct:free": "Meta Llama 3.2 11B Vision Instruct",
  "meta-llama/llama-3.2-11b-vision-instruct": "Meta Llama 3.2 11B Vision Instruct",
  "qwen/qwen-2.5-72b-instruct:free": "Qwen2.5 72B Instruct",
  "qwen/qwen-2.5-72b-instruct": "Qwen2.5 72B Instruct",
  "qwen/qwen-2.5-vl-72b-instruct": "Qwen Qwen2.5-VL 72B Instruct",
  "neversleep/llama-3.1-lumimaid-8b": "NeverSleep Lumimaid v0.2 8B",
  "openai/o1-preview-2024-09-12": "OpenAI o1-preview (2024-09-12)",
  "openai/o1-mini": "OpenAI o1-mini",
  "openai/o1-mini-2024-09-12": "OpenAI o1-mini (2024-09-12)",
  "openai/o1-preview": "OpenAI o1-preview",
  "mistralai/pixtral-12b": "Mistral Pixtral 12B",
  "cohere/command-r-plus-08-2024": "Cohere Command R+ (08-2024)",
  "cohere/command-r-08-2024": "Cohere Command R (08-2024)",
  "sao10k/l3.1-euryale-70b": "Sao10K Llama 3.1 Euryale 70B v2.2",
  "qwen/qwen-2.5-vl-7b-instruct:free": "Qwen Qwen2.5-VL 7B Instruct",
  "qwen/qwen-2.5-vl-7b-instruct": "Qwen Qwen2.5-VL 7B Instruct",
  "google/gemini-flash-1.5-8b-exp": "Google Gemini Flash 1.5 8B Experimental",
  "ai21/jamba-1-5-large": "AI21 Jamba 1.5 Large",
  "ai21/jamba-1-5-mini": "AI21 Jamba 1.5 Mini",
  "microsoft/phi-3.5-mini-128k-instruct": "Microsoft Phi-3.5 Mini 128K Instruct",
  "nousresearch/hermes-3-llama-3.1-70b": "Nous Hermes 3 70B Instruct",
  "nousresearch/hermes-3-llama-3.1-405b": "Nous Hermes 3 405B Instruct",
  "openai/chatgpt-4o-latest": "OpenAI ChatGPT-4o",
  "aetherwiing/mn-starcannon-12b": "Aetherwiing Starcannon 12B",
  "sao10k/l3-lunaris-8b": "Sao10K Llama 3 8B Lunaris",
  "openai/gpt-4o-2024-08-06": "OpenAI GPT-4o (2024-08-06)",
  "nothingiisreal/mn-celeste-12b": "Mistral Nemo 12B Celeste",
  "meta-llama/llama-3.1-405b": "Meta Llama 3.1 405B (base)",
  "perplexity/llama-3.1-sonar-large-128k-online": "Perplexity Llama 3.1 Sonar 70B Online",
  "perplexity/llama-3.1-sonar-small-128k-online": "Perplexity Llama 3.1 Sonar 8B Online",
  "meta-llama/llama-3.1-8b-instruct:free": "Meta Llama 3.1 8B Instruct",
  "meta-llama/llama-3.1-8b-instruct": "Meta Llama 3.1 8B Instruct",
  "meta-llama/llama-3.1-405b-instruct": "Meta Llama 3.1 405B Instruct",
  "meta-llama/llama-3.1-70b-instruct": "Meta Llama 3.1 70B Instruct",
  "mistralai/codestral-mamba": "Mistral Codestral Mamba",
  "mistralai/mistral-nemo:free": "Mistral Nemo",
  "mistralai/mistral-nemo": "Mistral Nemo",
  "openai/gpt-4o-mini": "OpenAI GPT-4o-mini",
  "openai/gpt-4o-mini-2024-07-18": "OpenAI GPT-4o-mini (2024-07-18)",
  "qwen/qwen-2-7b-instruct:free": "Qwen 2 7B Instruct",
  "qwen/qwen-2-7b-instruct": "Qwen 2 7B Instruct",
  "google/gemma-2-27b-it": "Google Gemma 2 27B",
  "alpindale/magnum-72b": "Magnum 72B",
  "google/gemma-2-9b-it:free": "Google Gemma 2 9B",
  "google/gemma-2-9b-it": "Google Gemma 2 9B",
  "01-ai/yi-large": "01.AI Yi Large",
  "ai21/jamba-instruct": "AI21 Jamba Instruct",
  "anthropic/claude-3.5-sonnet-20240620:beta": "Anthropic Claude 3.5 Sonnet (2024-06-20) (self-moderated)",
  "anthropic/claude-3.5-sonnet-20240620": "Anthropic Claude 3.5 Sonnet (2024-06-20)",
  "sao10k/l3-euryale-70b": "Sao10k Llama 3 Euryale 70B v2.1",
  "cognitivecomputations/dolphin-mixtral-8x22b": "Dolphin 2.9.2 Mixtral 8x22B 🐬",
  "qwen/qwen-2-72b-instruct": "Qwen 2 72B Instruct",
  "mistralai/mistral-7b-instruct-v0.3": "Mistral 7B Instruct v0.3",
  "nousresearch/hermes-2-pro-llama-3-8b": "NousResearch Hermes 2 Pro - Llama-3 8B",
  "mistralai/mistral-7b-instruct:free": "Mistral 7B Instruct",
  "mistralai/mistral-7b-instruct": "Mistral 7B Instruct",
  "microsoft/phi-3-mini-128k-instruct:free": "Microsoft Phi-3 Mini 128K Instruct",
  "microsoft/phi-3-mini-128k-instruct": "Microsoft Phi-3 Mini 128K Instruct",
  "microsoft/phi-3-medium-128k-instruct:free": "Microsoft Phi-3 Medium 128K Instruct",
  "microsoft/phi-3-medium-128k-instruct": "Microsoft Phi-3 Medium 128K Instruct",
  "neversleep/llama-3-lumimaid-70b": "NeverSleep Llama 3 Lumimaid 70B",
  "google/gemini-flash-1.5": "Google Gemini Flash 1.5",
  "openai/gpt-4o-2024-05-13": "OpenAI GPT-4o (2024-05-13)",
  "meta-llama/llama-guard-2-8b": "Meta LlamaGuard 2 8B",
  "openai/gpt-4o": "OpenAI GPT-4o",
  "openai/gpt-4o:extended": "OpenAI GPT-4o (extended)",
  "neversleep/llama-3-lumimaid-8b:extended": "NeverSleep Llama 3 Lumimaid 8B (extended)",
  "neversleep/llama-3-lumimaid-8b": "NeverSleep Llama 3 Lumimaid 8B",
  "sao10k/fimbulvetr-11b-v2": "Fimbulvetr 11B v2",
  "meta-llama/llama-3-8b-instruct:free": "Meta Llama 3 8B Instruct",
  "meta-llama/llama-3-8b-instruct": "Meta Llama 3 8B Instruct",
  "meta-llama/llama-3-70b-instruct": "Meta Llama 3 70B Instruct",
  "mistralai/mixtral-8x22b-instruct": "Mistral Mixtral 8x22B Instruct",
  "microsoft/wizardlm-2-7b": "WizardLM-2 7B",
  "microsoft/wizardlm-2-8x22b": "WizardLM-2 8x22B",
  "google/gemini-pro-1.5": "Google Gemini Pro 1.5",
  "openai/gpt-4-turbo": "OpenAI GPT-4 Turbo",
  "cohere/command-r-plus": "Cohere Command R+",
  "cohere/command-r-plus-04-2024": "Cohere Command R+ (04-2024)",
  "sophosympatheia/midnight-rose-70b": "Midnight Rose 70B",
  "cohere/command": "Cohere Command",
  "cohere/command-r": "Cohere Command R",
  "anthropic/claude-3-haiku:beta": "Anthropic Claude 3 Haiku (self-moderated)",
  "anthropic/claude-3-haiku": "Anthropic Claude 3 Haiku",
  "anthropic/claude-3-sonnet:beta": "Anthropic Claude 3 Sonnet (self-moderated)",
  "anthropic/claude-3-sonnet": "Anthropic Claude 3 Sonnet",
  "anthropic/claude-3-opus:beta": "Anthropic Claude 3 Opus (self-moderated)",
  "anthropic/claude-3-opus": "Anthropic Claude 3 Opus",
  "cohere/command-r-03-2024": "Cohere Command R (03-2024)",
  "mistralai/mistral-large": "Mistral Large",
  "google/gemma-7b-it": "Google Gemma 7B",
  "openai/gpt-4-turbo-preview": "OpenAI GPT-4 Turbo Preview",
  "openai/gpt-3.5-turbo-0613": "OpenAI GPT-3.5 Turbo (older v0613)",
  "nousresearch/nous-hermes-2-mixtral-8x7b-dpo": "Nous Hermes 2 Mixtral 8x7B DPO",
  "mistralai/mistral-tiny": "Mistral Tiny",
  "mistralai/mistral-medium": "Mistral Medium",
  "mistralai/mistral-small": "Mistral Small",
  "mistralai/mistral-7b-instruct-v0.2": "Mistral 7B Instruct v0.2",
  "cognitivecomputations/dolphin-mixtral-8x7b": "Dolphin 2.6 Mixtral 8x7B 🐬",
  "google/gemini-pro": "Google Gemini Pro 1.0",
  "google/gemini-pro-vision": "Google Gemini Pro Vision 1.0",
  "mistralai/mixtral-8x7b": "Mistral Mixtral 8x7B (base)",
  "mistralai/mixtral-8x7b-instruct": "Mistral Mixtral 8x7B Instruct",
  "openchat/openchat-7b:free": "OpenChat 3.5 7B",
  "openchat/openchat-7b": "OpenChat 3.5 7B",
  "neversleep/noromaid-20b": "Noromaid 20B",
  "anthropic/claude-2:beta": "Anthropic Claude v2 (self-moderated)",
  "anthropic/claude-2": "Anthropic Claude v2",
  "anthropic/claude-2.1:beta": "Anthropic Claude v2.1 (self-moderated)",
  "anthropic/claude-2.1": "Anthropic Claude v2.1",
  "teknium/openhermes-2.5-mistral-7b": "OpenHermes 2.5 Mistral 7B",
  "alpindale/goliath-120b": "Goliath 120B",
  "undi95/toppy-m-7b:free": "Toppy M 7B",
  "undi95/toppy-m-7b": "Toppy M 7B",
  "openrouter/auto": "Auto Router",
  "openai/gpt-4-1106-preview": "OpenAI GPT-4 Turbo (older v1106)",
  "openai/gpt-3.5-turbo-1106": "OpenAI GPT-3.5 Turbo 16k (older v1106)",
  "google/palm-2-chat-bison-32k": "Google PaLM 2 Chat 32k",
  "google/palm-2-codechat-bison-32k": "Google PaLM 2 Code Chat 32k",
  "jondurbin/airoboros-l2-70b": "Airoboros 70B",
  "xwin-lm/xwin-lm-70b": "Xwin 70B",
  "openai/gpt-3.5-turbo-instruct": "OpenAI GPT-3.5 Turbo Instruct",
  "mistralai/mistral-7b-instruct-v0.1": "Mistral 7B Instruct v0.1",
  "pygmalionai/mythalion-13b": "Pygmalion Mythalion 13B",
  "openai/gpt-4-32k-0314": "OpenAI GPT-4 32k (older v0314)",
  "openai/gpt-3.5-turbo-16k": "OpenAI GPT-3.5 Turbo 16k",
  "openai/gpt-4-32k": "OpenAI GPT-4 32k",
  "nousresearch/nous-hermes-llama2-13b": "Nous Hermes 13B",
  "mancer/weaver": "Mancer Weaver (alpha)",
  "huggingfaceh4/zephyr-7b-beta:free": "Hugging Face Zephyr 7B",
  "anthropic/claude-2.0:beta": "Anthropic Claude v2.0 (self-moderated)",
  "anthropic/claude-2.0": "Anthropic Claude v2.0",
  "undi95/remm-slerp-l2-13b": "ReMM SLERP 13B",
  "google/palm-2-codechat-bison": "Google PaLM 2 Code Chat",
  "google/palm-2-chat-bison": "Google PaLM 2 Chat",
  "gryphe/mythomax-l2-13b:free": "MythoMax 13B",
  "gryphe/mythomax-l2-13b": "MythoMax 13B",
  "meta-llama/llama-2-70b-chat": "Meta Llama 2 70B Chat",
  "meta-llama/llama-2-13b-chat": "Meta Llama 2 13B Chat",
  "openai/gpt-4-0314": "OpenAI GPT-4 (older v0314)",
  "openai/gpt-4": "OpenAI GPT-4",
  "openai/gpt-3.5-turbo": "OpenAI GPT-3.5 Turbo",
  "openai/gpt-3.5-turbo-0125": "OpenAI GPT-3.5 Turbo 16k"
};
|
|
|
/**
 * App-specific short-name overrides, keyed by the same OpenRouter model IDs
 * as `modelNames`. Entries here take precedence over `modelNames` when
 * resolving a display name via `getModelDisplayName`, collapsing the full
 * catalog names down to a single brand word (e.g. "Gemini", "Grok").
 */
export const customModelNames: Record<string, string> = {
  "google/gemini-2.0-flash-exp:free": "Gemini",
  "google/gemini-2.0-flash-lite-001": "Gemini",
  "x-ai/grok-2-1212": "Grok",
  "deepseek/deepseek-chat:free": "DeepSeek",
  "meta-llama/llama-3.3-70b-instruct:free": "Llama"
};
|
|
|
export const getModelDisplayName = (modelId: string): string => { |
|
return customModelNames[modelId] || modelNames[modelId] || modelId; |
|
}; |