roxky committed on
Commit 9f34763 · 1 Parent(s): 1533f23

Save log to host

logging/2025-03-09.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
logging/2025-03-10.jsonl ADDED
@@ -0,0 +1,114 @@
+ {"type": "message", "message": "AttributeError: 'SimpleCookie' object has no attribute 'jar'", "error": "AttributeError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3-7-sonnet-20250219-thinking"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "claude-3-7-sonnet-20250219"}}
+ {"type": "message", "message": "ResponseStatusError: Response 403: ", "error": "ResponseStatusError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 403: ", "error": "ResponseStatusError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 403: ", "error": "ResponseStatusError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4o"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":\"Portkey Gateway API error: 500 Internal Server Error\",\"status\":500,\"details\":{\"error\":{\"message\":\"azure-openai error: The server had an error while processing your request. Sorry about that!\",\"type\":\"server_error\",\"param\":null,\"code\":null},\"provider\":\"azure-openai\"}}", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai-audio"}}
+ {"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 403: ", "error": "ResponseStatusError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 403: ", "error": "ResponseStatusError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
+ {"type": "message", "message": "AttributeError: 'SimpleCookie' object has no attribute 'jar'", "error": "AttributeError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
+ {"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "TimeoutError: Request timed out: ", "error": "TimeoutError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
+ {"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
+ {"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 524: HTML content", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "flux-dev"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 524: HTML content", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "turbo"}}
+ {"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
+ {"type": "error", "error": "KeyError", "message": "KeyError: 'openai-reasoning'", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai-reasoning"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":\"Portkey Gateway API error: 429 Too Many Requests\",\"status\":500,\"details\":{\"error\":{\"message\":\"azure-openai error: Requests to the ChatCompletions_Create Operation under Azure OpenAI API version 2025-01-01-preview have exceeded call rate limit of your current OpenAI S0 pricing tier. Please retry after 2 seconds. Please go here: https://aka.ms/oai/quotaincrease if you would like to further increase the default rate limit.\",\"type\":null,\"param\":null,\"code\":\"429\"},\"provider\":\"azure-openai\"}}", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai-reasoning"}}
+ {"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "message", "message": "ResponseStatusError: Response 400: {\"error\": {\"code\": 400, \"message\": \"Invalid model/host combination /inferd\"}}", "error": "ResponseStatusError", "provider": {"name": "AllenAI", "url": "https://playground.allenai.org", "label": "Ai2 Playground", "model": "tulu3-405b"}}
+ {"type": "message", "message": "ResponseError: Error 403: Access forbidden: IP address 10.104.156.5 not allowed.", "error": "ResponseError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
+ {"type": "message", "message": "ResponseError: Error model_not_found: The model `deepseek-ai/DeepSeek-V3-Turbo` does not exist", "error": "ResponseError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "deepseek-ai/DeepSeek-V3-Turbo"}}
+ {"type": "error", "error": "ContentTypeError", "message": "ContentTypeError: 200, message='Attempt to decode JSON with unexpected mimetype: text/plain; charset=utf-8', url='https://text.pollinations.ai'", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "deepseek-reasoner"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "error", "error": "IndexError", "message": "IndexError: list index out of range", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "error", "error": "IndexError", "message": "IndexError: list index out of range", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":\"Portkey Gateway API error: 429 Too Many Requests\",\"status\":500,\"details\":{\"error\":{\"message\":\"azure-openai error: Requests to the ChatCompletions_Create Operation under Azure OpenAI API version 2025-01-01-preview have exceeded call rate limit of your current OpenAI S0 pricing tier. Please retry after 3 seconds. Please go here: https://aka.ms/oai/quotaincrease if you would like to further increase the default rate limit.\",\"type\":null,\"param\":null,\"code\":\"429\"},\"provider\":\"azure-openai\"}}", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai-reasoning"}}
+ {"type": "message", "message": "FileNotFoundError: could not find a valid chrome browser binary. please make sure chrome is installed.or use the keyword argument 'browser_executable_path=/path/to/your/browser' ", "error": "FileNotFoundError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":\"Portkey Gateway API error: 500 Internal Server Error\",\"status\":500,\"details\":{\"error\":{\"message\":\"azure-openai error: The server had an error while processing your request. Sorry about that!\",\"type\":\"server_error\",\"param\":null,\"code\":null},\"provider\":\"azure-openai\"}}", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai-audio"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":\"Portkey Gateway API error: 500 Internal Server Error\",\"status\":500,\"details\":{\"error\":{\"message\":\"azure-openai error: The server had an error while processing your request. Sorry about that!\",\"type\":\"server_error\",\"param\":null,\"code\":null},\"provider\":\"azure-openai\"}}", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai-audio"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":\"Portkey Gateway API error: 500 Internal Server Error\",\"status\":500,\"details\":{\"error\":{\"message\":\"azure-openai error: The server had an error while processing your request. Sorry about that!\",\"type\":\"server_error\",\"param\":null,\"code\":null},\"provider\":\"azure-openai\"}}", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai-audio"}}
+ {"type": "message", "message": "ResponseError: Error 403: Access forbidden: IP address 10.105.45.4 not allowed.", "error": "ResponseError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4o-mini"}}
+ {"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o-mini"}}
+ {"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o-mini"}}
+ {"type": "message", "message": "ResponseStatusError: Response 402: Error", "error": "ResponseStatusError", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4o-mini"}}
+ {"type": "message", "message": "ResponseError: Error 403: Access forbidden: IP address 10.104.159.4 not allowed.", "error": "ResponseError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini"}}
+ {"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "ClientPayloadError: Response payload is not completed: <ContentLengthError: 400, message='Not enough data for satisfy content length header.'>", "error": "ClientPayloadError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
+ {"type": "message", "message": "ClientConnectorError: Cannot connect to host duckduckgo.com:443 ssl:default [Connection reset by peer]", "error": "ClientConnectorError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
+ {"type": "message", "message": "ResponseStatusError: Response 500: ", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "flux"}}
+ {"type": "message", "message": "ClientPayloadError: Response payload is not completed: <TransferEncodingError: 400, message='Not enough data for satisfy transfer length header.'>", "error": "ClientPayloadError", "provider": {"name": "HuggingSpace", "url": "https://huggingface.co/spaces", "label": null, "model": "flux"}}
+ {"type": "message", "message": "ResponseStatusError: Response 500: ", "error": "ResponseStatusError", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "flux"}}
+ {"type": "error", "error": "ModelNotSupportedError", "message": "ModelNotSupportedError: Model is not supported: claude-3.5-sonnet in: Blackbox Valid models: ['blackboxai', 'gpt-4o', 'o1', 'o3-mini', 'gemini-pro', 'claude-sonnet-3.7', 'deepseek-v3', 'deepseek-r1', 'blackboxai-pro', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'dbrx-instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO', 'gemini-2.0-flash', 'ImageGeneration', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b', 'Python Agent', 'Java Agent', 'JavaScript Agent', 'HTML Agent', 'Google Cloud Agent', 'Android Developer', 'Swift Developer', 'Next.js Agent', 'MongoDB Agent', 'PyTorch Agent', 'React Agent', 'Xcode Agent', 'Heroku Agent', 'Godot Agent', 'Go Agent', 'Gitlab Agent', 'Git Agent', 'Flask Agent', 'Firebase Agent', 'FastAPI Agent', 'Erlang Agent', 'Electron Agent', 'Docker Agent', 'DigitalOcean Agent', 'Bitbucket Agent', 'Azure Agent', 'Flutter Agent', 'Youtube Agent', 'builder Agent']", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "claude-3.5-sonnet"}}
+ {"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "qwq-32b"}}
+ {"type": "message", "message": "ResponseError: Model busy, retry later", "error": "ResponseError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "mixtral-small-28b"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":null}", "provider": {"name": "Phi_4", "url": "https://huggingface.co/spaces/microsoft/phi-4-multimodal", "label": null, "model": "phi-4-multimodal"}}
+ {"type": "message", "message": "ResponseError: Model busy, retry later", "error": "ResponseError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "mixtral-small-28b"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":null}", "provider": {"name": "Phi_4", "url": "https://huggingface.co/spaces/microsoft/phi-4-multimodal", "label": null, "model": "phi-4-multimodal"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 500: {\"error\":null}", "provider": {"name": "Phi_4", "url": "https://huggingface.co/spaces/microsoft/phi-4-multimodal", "label": null, "model": "phi-4-multimodal"}}
+ {"type": "message", "message": "ResponseError: Model busy, retry later", "error": "ResponseError", "provider": {"name": "DeepInfraChat", "url": "https://deepinfra.com/chat", "label": null, "model": "mixtral-small-28b"}}
+ {"type": "error", "error": "ResponseError", "message": "ResponseError: Your auth method doesn't allow you to make inference requests", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "llama-3"}}
+ {"type": "error", "error": "ResponseError", "message": "ResponseError: Your auth method doesn't allow you to make inference requests", "provider": {"name": "HuggingFace", "url": "https://huggingface.co", "label": null, "model": "llama-3"}}
+ {"type": "message", "message": "ClientPayloadError: Response payload is not completed: <ContentLengthError: 400, message='Not enough data for satisfy content length header.'>", "error": "ClientPayloadError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "grok-3"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "deepseek-r1"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4o-mini-free"}}
+ {"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
+ {"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
+ {"type": "message", "message": "TimeoutError: Request timed out: ", "error": "TimeoutError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4o-2024-08-06"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "gpt-4o-2024-08-06"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 429: {\"detail\":{\"message\":\"You have sent too many messages to the model. Please try again later.\",\"code\":\"model_cap_exceeded\",\"clears_in\":0}}", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o"}}
+ {"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 429: You have reached your request limit for the day.", "provider": {"name": "Blackbox", "url": "https://www.blackbox.ai", "label": "Blackbox AI", "model": "GPT-4o (Premium)"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 429: {\"detail\":{\"message\":\"You have sent too many messages to the model. Please try again later.\",\"code\":\"model_cap_exceeded\",\"clears_in\":0}}", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 429: {\"detail\":{\"message\":\"You have sent too many messages to the model. Please try again later.\",\"code\":\"model_cap_exceeded\",\"clears_in\":0}}", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "gpt-4o"}}
+ {"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "NotImplementedError: ", "error": "NotImplementedError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
+ {"type": "error", "error": "ResponseError", "message": "ResponseError: Message: 42[\"sonar-pro_query_progress\",{\"status\":\"failed\",\"text\":\"Error in processing query\"}]", "provider": {"name": "PerplexityLabs", "url": "https://labs.perplexity.ai", "label": null, "model": "sonar-pro"}}
+ {"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "message", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/.nodriver_is_open'", "error": "PermissionError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseError: Error 403: Access forbidden: IP address 10.104.106.4 not allowed.", "error": "ResponseError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 402: Error", "provider": {"name": "Liaobots", "url": "https://liaobots.site", "label": null, "model": "grok-3"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 400: None", "provider": {"name": "AllenAI", "url": "https://playground.allenai.org", "label": "Ai2 Playground", "model": "tulu-3-405b"}}
+ {"type": "message", "message": "ResponseStatusError: Response 502: Bad gateway", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai"}}
+ {"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
+ {"type": "message", "message": "AttributeError: 'SimpleCookie' object has no attribute 'jar'", "error": "AttributeError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
+ {"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai"}}
+ {"type": "message", "message": "TimeoutError: Request timed out: ", "error": "TimeoutError", "provider": {"name": "DDG", "url": "https://duckduckgo.com/aichat", "label": "DuckDuckGo AI Chat", "model": "gpt-4o-mini"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai"}}
+ {"type": "error", "error": "ResponseStatusError", "message": "ResponseStatusError: Response 502: Bad gateway", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "claude"}}
+ {"type": "message", "message": "IndexError: list index out of range", "error": "IndexError", "provider": {"name": "ChatGptEs", "url": "https://chatgpt.es", "label": null, "model": "gpt-4o"}}
+ {"type": "message", "message": "ResponseStatusError: Response 502: Bad gateway", "error": "ResponseStatusError", "provider": {"name": "PollinationsAI", "url": "https://pollinations.ai", "label": "Pollinations AI", "model": "openai"}}
+ {"type": "message", "message": "ResponseError: Error 403: Access forbidden: IP address 10.104.40.5 not allowed.", "error": "ResponseError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
+ {"type": "message", "message": "NoValidHarFileError: No .har file found", "error": "NoValidHarFileError", "provider": {"name": "OpenaiChat", "url": "https://chatgpt.com", "label": "OpenAI ChatGPT", "model": "auto"}}
+ {"type": "message", "message": "CloudflareError: Response 403: Cloudflare detected", "error": "CloudflareError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
+ {"type": "message", "message": "ResponseStatusError: Response 503: HTML content", "error": "ResponseStatusError", "provider": {"name": "Jmuz", "url": "https://discord.gg/Ew6JzjA2NR", "label": null, "model": "gpt-4o"}}
+ {"type": "error", "error": "PermissionError", "message": "PermissionError: [Errno 13] Permission denied: 'har_and_cookies/auth_Cloudflare.json'", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
+ {"type": "message", "message": "ResponseError: Error 403: Access forbidden: IP address 10.104.113.4 not allowed.", "error": "ResponseError", "provider": {"name": "OIVSCode", "url": "https://oi-vscode-server.onrender.com", "label": "OI VSCode Server", "model": "gpt-4o-mini-2024-07-18"}}
+ {"type": "message", "message": "MissingRequirementsError: Install or update \"curl_cffi\" package | pip install -U curl_cffi", "error": "MissingRequirementsError", "provider": {"name": "Copilot", "url": "https://copilot.microsoft.com", "label": "Microsoft Copilot", "model": "Copilot"}}
+ {"type": "message", "message": "AttributeError: 'SimpleCookie' object has no attribute 'jar'", "error": "AttributeError", "provider": {"name": "Cloudflare", "url": "https://playground.ai.cloudflare.com", "label": "Cloudflare AI", "model": "@cf/meta/llama-3.3-70b-instruct-fp8-fast"}}
usage/2025-03-09.jsonl ADDED
@@ -0,0 +1,142 @@
+ {"model": "openai", "provider": "PollinationsAI", "completion_tokens": 740, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 6703, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 7443}
+ {"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 44, "completion_tokens": 0, "total_tokens": 44}
+ {"model": "flux-dev", "provider": "PollinationsImage", "prompt_tokens": 58, "completion_tokens": 0, "total_tokens": 58}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 1602, "completion_tokens": 0, "total_tokens": 1602}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 3290, "completion_tokens": 0, "total_tokens": 3290}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 3290, "completion_tokens": 0, "total_tokens": 3290}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 21, "completion_tokens": 0, "total_tokens": 21}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 3378, "completion_tokens": 0, "total_tokens": 3378}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 5014, "completion_tokens": 0, "total_tokens": 5014}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 5766, "completion_tokens": 0, "total_tokens": 5766}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 6815, "completion_tokens": 0, "total_tokens": 6815}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 7843, "completion_tokens": 0, "total_tokens": 7843}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 8420, "completion_tokens": 0, "total_tokens": 8420}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 9441, "completion_tokens": 0, "total_tokens": 9441}
+ {"model": "flux", "provider": "HuggingSpace", "prompt_tokens": 970, "completion_tokens": 0, "total_tokens": 970}
+ {"model": "flux", "provider": "PollinationsImage", "prompt_tokens": 970, "completion_tokens": 0, "total_tokens": 970}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 11780, "completion_tokens": 0, "total_tokens": 11780}
+ {"model": "voodoohop-flux-1-schnell", "provider": "VoodoohopFlux1Schnell", "prompt_tokens": 1220, "completion_tokens": 0, "total_tokens": 1220}
+ {"model": "flux", "provider": "VoodoohopFlux1Schnell", "prompt_tokens": 1305, "completion_tokens": 0, "total_tokens": 1305}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 12891, "completion_tokens": 0, "total_tokens": 12891}
+ {"model": "meta-llama/Llama-3.3-70B-Instruct-Turbo", "provider": "DeepInfraChat", "prompt_tokens": 957, "total_tokens": 1347, "completion_tokens": 390, "estimated_cost": 0.00023184000000000003}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 1409, "completion_tokens": 0, "total_tokens": 1409}
+ {"model": "meta-llama/Llama-3.3-70B-Instruct-Turbo", "provider": "DeepInfraChat", "prompt_tokens": 1766, "total_tokens": 2123, "completion_tokens": 357, "estimated_cost": 0.00031902}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 14039, "completion_tokens": 0, "total_tokens": 14039}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 15069, "completion_tokens": 0, "total_tokens": 15069}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 2183, "completion_tokens": 0, "total_tokens": 2183}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 16074, "completion_tokens": 0, "total_tokens": 16074}
+ {"model": "BLACKBOXAI", "provider": "Blackbox", "prompt_tokens": 15, "completion_tokens": 0, "total_tokens": 15}
+ {"model": "voodoohop-flux-1-schnell", "provider": "VoodoohopFlux1Schnell", "prompt_tokens": 111, "completion_tokens": 0, "total_tokens": 111}
+ {"model": "openai", "provider": "PollinationsAI", "completion_tokens": 11, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 1080, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 1091}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 16859, "completion_tokens": 0, "total_tokens": 16859}
+ {"model": "flux-dev", "provider": "PollinationsAI"}
+ {"model": "flux-dev", "provider": "PollinationsAI"}
+ {"model": "flux", "provider": "VoodoohopFlux1Schnell", "prompt_tokens": 2570, "completion_tokens": 0, "total_tokens": 2570}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 2676, "completion_tokens": 0, "total_tokens": 2676}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 3033, "completion_tokens": 0, "total_tokens": 3033}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 3529, "completion_tokens": 0, "total_tokens": 3529}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 3787, "completion_tokens": 0, "total_tokens": 3787}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 3984, "completion_tokens": 0, "total_tokens": 3984}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 4099, "completion_tokens": 0, "total_tokens": 4099}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 7083, "completion_tokens": 0, "total_tokens": 7083}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 7698, "completion_tokens": 0, "total_tokens": 7698}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 8305, "completion_tokens": 0, "total_tokens": 8305}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 9420, "completion_tokens": 0, "total_tokens": 9420}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 10078, "completion_tokens": 0, "total_tokens": 10078}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 10915, "completion_tokens": 0, "total_tokens": 10915}
+ {"model": "Copilot", "provider": "Copilot", "prompt_tokens": 11167, "completion_tokens": 0, "total_tokens": 11167}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 19443, "completion_tokens": 0, "total_tokens": 19443}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 20105, "completion_tokens": 0, "total_tokens": 20105}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 4357, "completion_tokens": 0, "total_tokens": 4357}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 20646, "completion_tokens": 0, "total_tokens": 20646}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 5410, "completion_tokens": 0, "total_tokens": 5410}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 12830, "completion_tokens": 0, "total_tokens": 12830}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 21180, "completion_tokens": 0, "total_tokens": 21180}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 21489, "completion_tokens": 0, "total_tokens": 21489}
+ {"model": "openai", "provider": "PollinationsAI", "completion_tokens": 9, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 20, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 29}
+ {"model": "openai-reasoning", "provider": "PollinationsAI", "completion_tokens": 85, "completion_tokens_details": {"reasoning_tokens": 64}, "prompt_tokens": 9, "prompt_tokens_details": {"cached_tokens": 0}, "total_tokens": 94}
+ {"provider": "Gemini"}
+ {"model": "gemini-2.0-flash-thinking-with-apps", "provider": "Gemini"}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 22165, "completion_tokens": 0, "total_tokens": 22165}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 22620, "completion_tokens": 0, "total_tokens": 22620}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 23125, "completion_tokens": 0, "total_tokens": 23125}
+ {"provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 23604, "completion_tokens": 0, "total_tokens": 23604}
+ {"provider": "Gemini"}
+ {"model": "auto", "provider": "OpenaiChat"}
+ {"model": "deepseek-v3", "provider": "DeepSeekAPI"}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingChat"}
+ {"model": "deepseek-r1", "provider": "DeepSeekAPI", "prompt_tokens": 46, "completion_tokens": 86, "total_tokens": 132}
+ {"model": "gpt-4.5", "provider": "OpenaiChat", "prompt_tokens": 72, "completion_tokens": 0, "total_tokens": 72}
+ {"model": "gpt-4.5", "provider": "OpenaiChat", "prompt_tokens": 297, "completion_tokens": 0, "total_tokens": 297}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 13260, "completion_tokens": 0, "total_tokens": 13260}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 13745, "completion_tokens": 0, "total_tokens": 13745}
+ {"model": "gpt-4", "provider": "OpenaiChat", "prompt_tokens": 3402, "completion_tokens": 0, "total_tokens": 3402}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 14245, "completion_tokens": 0, "total_tokens": 14245}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 15020, "completion_tokens": 0, "total_tokens": 15020}
+ {"provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 24841, "completion_tokens": 0, "total_tokens": 24841}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 26594, "completion_tokens": 0, "total_tokens": 26594}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 28011, "completion_tokens": 0, "total_tokens": 28011}
+ {"model": "phi-4-multimodal", "provider": "Phi_4"}
+ {"model": "phi-4-multimodal", "provider": "Phi_4"}
+ {"model": "gpt-4.5", "provider": "OpenaiChat", "prompt_tokens": 13, "completion_tokens": 0, "total_tokens": 13}
+ {"model": "gpt-4.5", "provider": "OpenaiChat", "prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 24, "completion_tokens": 0, "total_tokens": 24}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 1779, "completion_tokens": 0, "total_tokens": 1779}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 86, "completion_tokens": 322, "total_tokens": 408}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 12134, "completion_tokens": 0, "total_tokens": 12134}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 117, "completion_tokens": 452, "total_tokens": 569}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 12646, "completion_tokens": 0, "total_tokens": 12646}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 13659, "completion_tokens": 0, "total_tokens": 13659}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 174, "completion_tokens": 161, "total_tokens": 335}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 15, "completion_tokens": 0, "total_tokens": 15}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 3816, "completion_tokens": 0, "total_tokens": 3816}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 6426, "completion_tokens": 0, "total_tokens": 6426}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 7436, "completion_tokens": 0, "total_tokens": 7436}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 360, "completion_tokens": 0, "total_tokens": 360}
+ {"model": "gpt-4.5", "provider": "OpenaiChat", "prompt_tokens": 11, "completion_tokens": 0, "total_tokens": 11}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 503, "completion_tokens": 0, "total_tokens": 503}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 503, "completion_tokens": 0, "total_tokens": 503}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 580, "completion_tokens": 0, "total_tokens": 580}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 775, "completion_tokens": 264, "total_tokens": 1039}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1043, "completion_tokens": 195, "total_tokens": 1238}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 8475, "completion_tokens": 0, "total_tokens": 8475}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 9338, "completion_tokens": 0, "total_tokens": 9338}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 10363, "completion_tokens": 0, "total_tokens": 10363}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1344, "completion_tokens": 189, "total_tokens": 1533}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 11380, "completion_tokens": 0, "total_tokens": 11380}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1621, "completion_tokens": 319, "total_tokens": 1940}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1881, "completion_tokens": 276, "total_tokens": 2157}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 2196, "completion_tokens": 222, "total_tokens": 2418}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 2504, "completion_tokens": 372, "total_tokens": 2876}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 2750, "completion_tokens": 295, "total_tokens": 3045}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1881, "completion_tokens": 276, "total_tokens": 2157}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1881, "completion_tokens": 276, "total_tokens": 2157}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1881, "completion_tokens": 276, "total_tokens": 2157}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 99, "completion_tokens": 294, "total_tokens": 393}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 316, "completion_tokens": 439, "total_tokens": 755}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 316, "completion_tokens": 439, "total_tokens": 755}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 841, "completion_tokens": 330, "total_tokens": 1171}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1123, "completion_tokens": 229, "total_tokens": 1352}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1421, "completion_tokens": 224, "total_tokens": 1645}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1696, "completion_tokens": 299, "total_tokens": 1995}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1961, "completion_tokens": 243, "total_tokens": 2204}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 2284, "completion_tokens": 304, "total_tokens": 2588}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 2644, "completion_tokens": 182, "total_tokens": 2826}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 2941, "completion_tokens": 338, "total_tokens": 3279}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 32, "completion_tokens": 203, "total_tokens": 235}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 45, "completion_tokens": 152, "total_tokens": 197}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 109, "completion_tokens": 435, "total_tokens": 544}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 190, "completion_tokens": 226, "total_tokens": 416}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 190, "completion_tokens": 226, "total_tokens": 416}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 399, "completion_tokens": 204, "total_tokens": 603}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 94, "completion_tokens": 0, "total_tokens": 94}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 976, "completion_tokens": 0, "total_tokens": 976}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1221, "completion_tokens": 448, "total_tokens": 1669}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1140, "completion_tokens": 164, "total_tokens": 1304}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingFace", "prompt_tokens": 1133, "completion_tokens": 251, "total_tokens": 1384}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "Qwen_Qwen_2_72B_Instruct", "prompt_tokens": 2357, "completion_tokens": 0, "total_tokens": 2357}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 1133, "completion_tokens": 346, "total_tokens": 1479}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 1308, "completion_tokens": 374, "total_tokens": 1682}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 790, "completion_tokens": 408, "total_tokens": 1198}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 960, "completion_tokens": 489, "total_tokens": 1449}
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "provider": "HuggingFace", "prompt_tokens": 1497, "completion_tokens": 0, "total_tokens": 1497}
usage/2025-03-10.jsonl ADDED
@@ -0,0 +1,17 @@
+ {"model": "auto", "provider": "OpenaiChat", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
+ {"model": "auto", "provider": "OpenaiChat", "prompt_tokens": 13, "completion_tokens": 0, "total_tokens": 13}
+ {"model": "o3-mini", "provider": "OpenaiChat", "prompt_tokens": 13, "completion_tokens": 0, "total_tokens": 13}
+ {"model": "deepseek-v3", "provider": "DeepSeekAPI", "prompt_tokens": 13, "completion_tokens": 0, "total_tokens": 13}
+ {"provider": "Gemini", "prompt_tokens": 13, "completion_tokens": 0, "total_tokens": 13}
+ {"model": "qwen-qwen2-72b-instruct", "provider": "HuggingSpace", "prompt_tokens": 13, "completion_tokens": 0, "total_tokens": 13}
+ {"model": "qwen-2.5-1m-demo", "provider": "HuggingSpace", "prompt_tokens": 13, "completion_tokens": 0, "total_tokens": 13}
+ {"model": "Qwen/QwQ-32B", "provider": "HuggingChat", "prompt_tokens": 13, "completion_tokens": 81, "total_tokens": 94}
+ {"model": "openai", "provider": "PollinationsAI", "completion_tokens": 9, "completion_tokens_details": {"accepted_prediction_tokens": 0, "audio_tokens": 0, "reasoning_tokens": 0, "rejected_prediction_tokens": 0}, "prompt_tokens": 25, "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0}, "total_tokens": 34}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 20, "completion_tokens": 0, "total_tokens": 20}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 604, "completion_tokens": 0, "total_tokens": 604}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 1557, "completion_tokens": 0, "total_tokens": 1557}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 2200, "completion_tokens": 0, "total_tokens": 2200}
+ {"model": "Claude-Sonnet-3.5 (Premium)", "provider": "Blackbox", "prompt_tokens": 2874, "completion_tokens": 0, "total_tokens": 2874}
+ {"model": "phi-4-multimodal", "provider": "Phi_4", "prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8}
+ {"model": "phi-4-multimodal", "provider": "Phi_4", "prompt_tokens": 13, "completion_tokens": 0, "total_tokens": 13}
+ {"model": "flux", "provider": "G4F", "prompt_tokens": 27, "completion_tokens": 0, "total_tokens": 27}