diff --git a/docs/DOC_STYLE_GUIDE.md b/docs/DOC_STYLE_GUIDE.md
deleted file mode 100644
index 213e857078ce2a3919ad0e575cc4f6f5766b8b41..0000000000000000000000000000000000000000
--- a/docs/DOC_STYLE_GUIDE.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# Documentation Style Guide
-
-## General Writing Principles
-
-- **Clarity & Conciseness**: Always prioritize clarity and brevity. Avoid unnecessary jargon or overly complex explanations.
-Keep sentences short and to the point.
-- **Gradual Complexity**: Start with the simplest, most basic setup, and then gradually introduce more advanced
-concepts and configurations.
-
-## Formatting Guidelines
-
-### Headers
-
-Use **Title Case** for the first and second level headers.
-
-Example:
- - **Basic Usage**
- - **Advanced Configuration Options**
-
-### Lists
-
-When listing items or options, use bullet points to enhance readability.
-
-Example:
- - Option A
- - Option B
- - Option C
-
-### Procedures
-
-For instructions or processes that need to be followed in a specific order, use numbered steps.
-
-Example:
- 1. Step one: Do this.
- - First this sub step.
- - Then this sub step.
- 2. Step two: Complete this action.
- 3. Step three: Verify the result.
-
-### Code Blocks
-
-* Use code blocks for multi-line inputs, outputs, commands and code samples.
-
-Example:
-```bash
-docker run -it \
- -e THIS=this \
- -e THAT=that
- ...
-```
-
-### Use of Note and Warning
-
-When adding a note or warning, use the built-in note and warning syntax.
-
-Example:
-
-This section is for advanced users only.
-
-
-### Referring to UI Elements
-
-When referencing UI elements, use ``.
-
-Example:
-1. Toggle the `Advanced` option
-2. Enter your model in the `Custom Model` textbox.
diff --git a/docs/docs.json b/docs/docs.json
deleted file mode 100644
index c91d7064ed5a2442a85b766877e7fe67a11fb6d9..0000000000000000000000000000000000000000
--- a/docs/docs.json
+++ /dev/null
@@ -1,204 +0,0 @@
-{
- "$schema": "https://mintlify.com/docs.json",
- "theme": "mint",
- "name": "All Hands Docs",
- "colors": {
- "primary": "#99873c",
- "light": "#ffe165",
- "dark": "#ffe165"
- },
- "background": {
- "color": {
- "light": "#f7f3ee",
- "dark": "#0B0D0E"
- }
- },
- "appearance": {
- "default": "light"
- },
- "favicon": "/logo-square.png",
- "navigation": {
- "tabs": [
- {
- "tab": "Docs",
- "pages": [
- "index",
- "usage/installation",
- "usage/getting-started",
- "usage/key-features",
- {
- "group": "OpenHands Cloud",
- "pages": [
- "usage/cloud/openhands-cloud",
- {
- "group": "Integrations",
- "pages": [
- "usage/cloud/github-installation",
- "usage/cloud/gitlab-installation"
- ]
- },
- "usage/cloud/cloud-ui",
- "usage/cloud/cloud-api"
- ]
- },
- {
- "group": "Running OpenHands Locally",
- "pages": [
- "usage/local-setup",
- "usage/how-to/gui-mode",
- "usage/how-to/cli-mode",
- "usage/how-to/headless-mode",
- "usage/how-to/github-action"
- ]
- },
- {
- "group": "Customization",
- "pages": [
- "usage/prompting/prompting-best-practices",
- "usage/prompting/repository",
- {
- "group": "Microagents",
- "pages": [
- "usage/prompting/microagents-overview",
- "usage/prompting/microagents-repo",
- "usage/prompting/microagents-keyword",
- "usage/prompting/microagents-org",
- "usage/prompting/microagents-public"
- ]
- }
- ]
- },
- {
- "group": "Advanced Configuration",
- "pages": [
- {
- "group": "LLM Configuration",
- "pages": [
- "usage/llms/llms",
- {
- "group": "Providers",
- "pages": [
- "usage/llms/azure-llms",
- "usage/llms/google-llms",
- "usage/llms/groq",
- "usage/llms/local-llms",
- "usage/llms/litellm-proxy",
- "usage/llms/openai-llms",
- "usage/llms/openrouter"
- ]
- }
- ]
- },
- {
- "group": "Runtime Configuration",
- "pages": [
- "usage/runtimes/overview",
- {
- "group": "Providers",
- "pages": [
- "usage/runtimes/docker",
- "usage/runtimes/remote",
- "usage/runtimes/local",
- {
- "group": "Third-Party Providers",
- "pages": [
- "usage/runtimes/modal",
- "usage/runtimes/daytona",
- "usage/runtimes/runloop",
- "usage/runtimes/e2b"
- ]
- }
- ]
- }
- ]
- },
- "usage/configuration-options",
- "usage/how-to/custom-sandbox-guide",
- "usage/search-engine-setup",
- "usage/mcp"
- ]
- },
- {
- "group": "Troubleshooting & Feedback",
- "pages": [
- "usage/troubleshooting/troubleshooting",
- "usage/feedback"
- ]
- },
- {
- "group": "OpenHands Developers",
- "pages": [
- "usage/how-to/development-overview",
- {
- "group": "Architecture",
- "pages": [
- "usage/architecture/backend",
- "usage/architecture/runtime"
- ]
- },
- "usage/how-to/debugging",
- "usage/how-to/evaluation-harness",
- "usage/how-to/websocket-connection"
- ]
- }
- ]
- },
- {
- "tab": "API Reference",
- "openapi": "/openapi.json"
- }
- ],
- "global": {
- "anchors": [
- {
- "anchor": "Company",
- "href": "https://www.all-hands.dev/",
- "icon": "house"
- },
- {
- "anchor": "Blog",
- "href": "https://www.all-hands.dev/blog",
- "icon": "newspaper"
- },
- {
- "anchor": "OpenHands Cloud",
- "href": "https://app.all-hands.dev",
- "icon": "cloud"
- }
- ]
- }
- },
- "logo": {
- "light": "/logo/light.svg",
- "dark": "/logo/dark.svg"
- },
- "navbar": {
- "links": [
- ],
- "primary": {
- "type": "github",
- "href": "https://github.com/All-Hands-AI/OpenHands"
- }
- },
- "footer": {
- "socials": {
- "slack": "https://join.slack.com/t/openhands-ai/shared_invite/zt-34zm4j0gj-Qz5kRHoca8DFCbqXPS~f_A",
- "github": "https://github.com/All-Hands-AI/OpenHands",
- "discord": "https://discord.gg/ESHStjSjD4"
- }
- },
- "contextual": {
- "options": [
- "copy",
- "view",
- "chatgpt",
- "claude"
- ]
- },
- "redirects": [
- {
- "source": "/modules/:slug*",
- "destination": "/:slug*"
- }
- ]
-}
diff --git a/docs/favicon.svg b/docs/favicon.svg
deleted file mode 100644
index b785c738bf178e7072e15ad6770e13ad1032d54b..0000000000000000000000000000000000000000
--- a/docs/favicon.svg
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/index.mdx b/docs/index.mdx
deleted file mode 100644
index 4fb2874a0f5bf9abf4daabca40cd13cd6258e784..0000000000000000000000000000000000000000
--- a/docs/index.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: Introduction
-description: OpenHands - Code Less, Make More
-icon: book-open
-mode: wide
----
-Use AI to tackle the toil in your backlog. Our agents have all the same tools as a human developer: they can modify code, run commands, browse the web, call APIs, and yes-even copy code snippets from StackOverflow.
-
-VIDEO
diff --git a/docs/logo-square.png b/docs/logo-square.png
deleted file mode 100644
index 624dad2fb22d4d2ac723d8e9f81ab56e0886c36d..0000000000000000000000000000000000000000
--- a/docs/logo-square.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:de58ad6132a9afb7d4924612c5e013695658af993a81a7611a6564f6a8063d59
-size 1189506
diff --git a/docs/logo/dark.svg b/docs/logo/dark.svg
deleted file mode 100644
index 5632a89e4a7d2d7ce61c5c0e8fbf2859e26cc750..0000000000000000000000000000000000000000
--- a/docs/logo/dark.svg
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/logo/light.svg b/docs/logo/light.svg
deleted file mode 100644
index 80f4eccf394a0d90d9a1a16bd5ee44bfede63e14..0000000000000000000000000000000000000000
--- a/docs/logo/light.svg
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/openapi.json b/docs/openapi.json
deleted file mode 100644
index 954c28957810b9a7f30c0b34bd6caadbe00d2445..0000000000000000000000000000000000000000
--- a/docs/openapi.json
+++ /dev/null
@@ -1,2091 +0,0 @@
-{
- "openapi": "3.0.3",
- "info": {
- "title": "OpenHands API",
- "description": "OpenHands: Code Less, Make More",
- "version": "1.0.0"
- },
- "servers": [
- {
- "url": "https://app.all-hands.dev",
- "description": "Production server"
- },
- {
- "url": "http://localhost:3000",
- "description": "Local development server"
- }
- ],
- "paths": {
- "/health": {
- "get": {
- "summary": "Health check",
- "description": "Check if the API is running",
- "operationId": "health",
- "responses": {
- "200": {
- "description": "API is running",
- "content": {
- "text/plain": {
- "schema": {
- "type": "string",
- "example": "OK"
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}/config": {
- "get": {
- "summary": "Get runtime configuration",
- "description": "Retrieve the runtime configuration (session ID and runtime ID)",
- "operationId": "getRemoteRuntimeConfig",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- }
- ],
- "responses": {
- "200": {
- "description": "Runtime configuration",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "runtime_id": {
- "type": "string",
- "nullable": true
- },
- "session_id": {
- "type": "string",
- "nullable": true
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}/vscode-url": {
- "get": {
- "summary": "Get VSCode URL",
- "description": "Get the VSCode URL for the conversation",
- "operationId": "getVscodeUrl",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- }
- ],
- "responses": {
- "200": {
- "description": "VSCode URL",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "vscode_url": {
- "type": "string",
- "nullable": true
- }
- }
- }
- }
- }
- },
- "500": {
- "description": "Error getting VSCode URL",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "vscode_url": {
- "type": "string",
- "nullable": true
- },
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}/web-hosts": {
- "get": {
- "summary": "Get runtime hosts",
- "description": "Get the hosts used by the runtime",
- "operationId": "getHosts",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- }
- ],
- "responses": {
- "200": {
- "description": "Runtime hosts",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "hosts": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- },
- "500": {
- "description": "Error getting runtime hosts",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "hosts": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "nullable": true
- },
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}/submit-feedback": {
- "post": {
- "summary": "Submit feedback",
- "description": "Submit user feedback for a conversation",
- "operationId": "submitFeedback",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- }
- ],
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "email": {
- "type": "string",
- "format": "email"
- },
- "version": {
- "type": "string"
- },
- "permissions": {
- "type": "string",
- "default": "private"
- },
- "polarity": {
- "type": "string"
- },
- "feedback": {
- "type": "string"
- }
- }
- }
- }
- }
- },
- "responses": {
- "200": {
- "description": "Feedback submitted successfully",
- "content": {
- "application/json": {
- "schema": {
- "type": "object"
- }
- }
- }
- },
- "500": {
- "description": "Error submitting feedback",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}/list-files": {
- "get": {
- "summary": "List files",
- "description": "List files in the specified path",
- "operationId": "listFiles",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- },
- {
- "name": "path",
- "in": "query",
- "required": false,
- "schema": {
- "type": "string"
- },
- "description": "Path to list files from"
- }
- ],
- "responses": {
- "200": {
- "description": "List of files",
- "content": {
- "application/json": {
- "schema": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- }
- },
- "404": {
- "description": "Runtime not initialized",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- },
- "500": {
- "description": "Error listing files",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}/select-file": {
- "get": {
- "summary": "Get file content",
- "description": "Retrieve the content of a specified file",
- "operationId": "selectFile",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- },
- {
- "name": "file",
- "in": "query",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Path of the file to be retrieved"
- }
- ],
- "responses": {
- "200": {
- "description": "File content",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "code": {
- "type": "string"
- }
- }
- }
- }
- }
- },
- "415": {
- "description": "Unable to open binary file",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- },
- "500": {
- "description": "Error opening file",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}/zip-directory": {
- "get": {
- "summary": "Download workspace as zip",
- "description": "Download the current workspace as a zip file",
- "operationId": "zipCurrentWorkspace",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- }
- ],
- "responses": {
- "200": {
- "description": "Workspace zip file",
- "content": {
- "application/zip": {
- "schema": {
- "type": "string",
- "format": "binary"
- }
- }
- }
- },
- "500": {
- "description": "Error zipping workspace",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}/git/changes": {
- "get": {
- "summary": "Get git changes",
- "description": "Get git changes in the workspace",
- "operationId": "gitChanges",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- }
- ],
- "responses": {
- "200": {
- "description": "Git changes",
- "content": {
- "application/json": {
- "schema": {
- "type": "object"
- }
- }
- }
- },
- "500": {
- "description": "Error getting git changes",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}/git/diff": {
- "get": {
- "summary": "Get git diff",
- "description": "Get git diff for a specific file",
- "operationId": "gitDiff",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- },
- {
- "name": "path",
- "in": "query",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Path of the file to get diff for"
- }
- ],
- "responses": {
- "200": {
- "description": "Git diff",
- "content": {
- "application/json": {
- "schema": {
- "type": "string"
- }
- }
- }
- },
- "500": {
- "description": "Error getting git diff",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}/trajectory": {
- "get": {
- "summary": "Get trajectory",
- "description": "Get the conversation trajectory",
- "operationId": "getTrajectory",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- }
- ],
- "responses": {
- "200": {
- "description": "Conversation trajectory",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "trajectory": {
- "type": "array",
- "items": {
- "type": "object"
- }
- }
- }
- }
- }
- }
- },
- "500": {
- "description": "Error getting trajectory",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "trajectory": {
- "type": "array",
- "items": {
- "type": "object"
- },
- "nullable": true
- },
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}/security/{path}": {
- "get": {
- "summary": "Security analyzer API (GET)",
- "description": "Catch-all route for security analyzer API GET requests",
- "operationId": "securityApiGet",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- },
- {
- "name": "path",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Security analyzer API path"
- }
- ],
- "responses": {
- "200": {
- "description": "Security analyzer response",
- "content": {
- "application/json": {
- "schema": {
- "type": "object"
- }
- }
- }
- },
- "404": {
- "description": "Security analyzer not initialized",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "detail": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- },
- "post": {
- "summary": "Security analyzer API (POST)",
- "description": "Catch-all route for security analyzer API POST requests",
- "operationId": "securityApiPost",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- },
- {
- "name": "path",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Security analyzer API path"
- }
- ],
- "requestBody": {
- "required": false,
- "content": {
- "application/json": {
- "schema": {
- "type": "object"
- }
- }
- }
- },
- "responses": {
- "200": {
- "description": "Security analyzer response",
- "content": {
- "application/json": {
- "schema": {
- "type": "object"
- }
- }
- }
- },
- "404": {
- "description": "Security analyzer not initialized",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "detail": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- },
- "put": {
- "summary": "Security analyzer API (PUT)",
- "description": "Catch-all route for security analyzer API PUT requests",
- "operationId": "securityApiPut",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- },
- {
- "name": "path",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Security analyzer API path"
- }
- ],
- "requestBody": {
- "required": false,
- "content": {
- "application/json": {
- "schema": {
- "type": "object"
- }
- }
- }
- },
- "responses": {
- "200": {
- "description": "Security analyzer response",
- "content": {
- "application/json": {
- "schema": {
- "type": "object"
- }
- }
- }
- },
- "404": {
- "description": "Security analyzer not initialized",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "detail": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- },
- "delete": {
- "summary": "Security analyzer API (DELETE)",
- "description": "Catch-all route for security analyzer API DELETE requests",
- "operationId": "securityApiDelete",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- },
- {
- "name": "path",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Security analyzer API path"
- }
- ],
- "responses": {
- "200": {
- "description": "Security analyzer response",
- "content": {
- "application/json": {
- "schema": {
- "type": "object"
- }
- }
- }
- },
- "404": {
- "description": "Security analyzer not initialized",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "detail": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations": {
- "post": {
- "summary": "Create new conversation",
- "description": "Initialize a new conversation",
- "operationId": "newConversation",
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "repository": {
- "type": "string",
- "nullable": true,
- "description": "Full name of the repository (e.g., owner/repo)"
- },
- "git_provider": {
- "type": "string",
- "nullable": true,
- "description": "The Git provider (e.g., github or gitlab). If omitted, all configured providers are checked for the repository."
- },
- "selected_branch": {
- "type": "string",
- "nullable": true
- },
- "initial_user_msg": {
- "type": "string",
- "nullable": true
- },
- "conversation_instructions": {
- "type": "string",
- "nullable": true,
- "description": "Optional instructions the agent must follow throughout the conversation while addressing the user's initial task"
- },
- "image_urls": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "nullable": true
- },
- "replay_json": {
- "type": "string",
- "nullable": true
- }
- }
- }
- }
- }
- },
- "responses": {
- "200": {
- "description": "Conversation created successfully",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "status": {
- "type": "string",
- "example": "ok"
- },
- "conversation_id": {
- "type": "string"
- }
- }
- }
- }
- }
- },
- "400": {
- "description": "Error creating conversation",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "status": {
- "type": "string",
- "example": "error"
- },
- "message": {
- "type": "string"
- },
- "msg_id": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- },
- "get": {
- "summary": "Search conversations",
- "description": "Search for conversations",
- "operationId": "searchConversations",
- "parameters": [
- {
- "name": "page_id",
- "in": "query",
- "required": false,
- "schema": {
- "type": "string"
- },
- "description": "Page ID for pagination"
- },
- {
- "name": "limit",
- "in": "query",
- "required": false,
- "schema": {
- "type": "integer",
- "default": 20
- },
- "description": "Number of conversations to return"
- }
- ],
- "responses": {
- "200": {
- "description": "Conversations",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "results": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "conversation_id": {
- "type": "string"
- },
- "title": {
- "type": "string"
- },
- "last_updated_at": {
- "type": "string",
- "format": "date-time"
- },
- "created_at": {
- "type": "string",
- "format": "date-time"
- },
- "selected_repository": {
- "type": "string",
- "nullable": true
- },
- "status": {
- "type": "string",
- "enum": ["RUNNING", "STOPPED"]
- },
- "trigger": {
- "type": "string",
- "enum": ["GUI", "API"]
- }
- }
- }
- },
- "next_page_id": {
- "type": "string",
- "nullable": true
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/conversations/{conversation_id}": {
- "get": {
- "summary": "Get conversation",
- "description": "Get conversation details",
- "operationId": "getConversation",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- }
- ],
- "responses": {
- "200": {
- "description": "Conversation details",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "conversation_id": {
- "type": "string"
- },
- "title": {
- "type": "string"
- },
- "last_updated_at": {
- "type": "string",
- "format": "date-time"
- },
- "created_at": {
- "type": "string",
- "format": "date-time"
- },
- "selected_repository": {
- "type": "string",
- "nullable": true
- },
- "status": {
- "type": "string",
- "enum": ["RUNNING", "STOPPED"]
- },
- "trigger": {
- "type": "string",
- "enum": ["GUI", "API"]
- }
- }
- }
- }
- }
- },
- "404": {
- "description": "Conversation not found",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "nullable": true
- }
- }
- }
- }
- }
- },
- "patch": {
- "summary": "Update conversation",
- "description": "Update conversation details",
- "operationId": "updateConversation",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- }
- ],
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "title": {
- "type": "string"
- }
- }
- }
- }
- }
- },
- "responses": {
- "200": {
- "description": "Conversation updated successfully",
- "content": {
- "application/json": {
- "schema": {
- "type": "boolean"
- }
- }
- }
- }
- }
- },
- "delete": {
- "summary": "Delete conversation",
- "description": "Delete a conversation",
- "operationId": "deleteConversation",
- "parameters": [
- {
- "name": "conversation_id",
- "in": "path",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Conversation ID"
- }
- ],
- "responses": {
- "200": {
- "description": "Conversation deleted successfully",
- "content": {
- "application/json": {
- "schema": {
- "type": "boolean"
- }
- }
- }
- }
- }
- }
- },
- "/api/user/repositories": {
- "get": {
- "summary": "Get user repositories",
- "description": "Get repositories for the authenticated user",
- "operationId": "getUserRepositories",
- "parameters": [
- {
- "name": "sort",
- "in": "query",
- "required": false,
- "schema": {
- "type": "string",
- "default": "pushed"
- },
- "description": "Sort order for repositories"
- }
- ],
- "responses": {
- "200": {
- "description": "User repositories",
- "content": {
- "application/json": {
- "schema": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "full_name": {
- "type": "string"
- },
- "description": {
- "type": "string",
- "nullable": true
- },
- "html_url": {
- "type": "string"
- },
- "private": {
- "type": "boolean"
- },
- "fork": {
- "type": "boolean"
- },
- "updated_at": {
- "type": "string",
- "format": "date-time"
- }
- }
- }
- }
- }
- }
- },
- "401": {
- "description": "Authentication error",
- "content": {
- "application/json": {
- "schema": {
- "type": "string"
- }
- }
- }
- },
- "500": {
- "description": "Unknown error",
- "content": {
- "application/json": {
- "schema": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- },
- "/api/user/info": {
- "get": {
- "summary": "Get user info",
- "description": "Get information about the authenticated user",
- "operationId": "getUser",
- "responses": {
- "200": {
- "description": "User information",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "login": {
- "type": "string"
- },
- "name": {
- "type": "string",
- "nullable": true
- },
- "email": {
- "type": "string",
- "nullable": true
- },
- "avatar_url": {
- "type": "string",
- "nullable": true
- }
- }
- }
- }
- }
- },
- "401": {
- "description": "Authentication error",
- "content": {
- "application/json": {
- "schema": {
- "type": "string"
- }
- }
- }
- },
- "500": {
- "description": "Unknown error",
- "content": {
- "application/json": {
- "schema": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- },
- "/api/user/search/repositories": {
- "get": {
- "summary": "Search repositories",
- "description": "Search for repositories",
- "operationId": "searchRepositories",
- "parameters": [
- {
- "name": "query",
- "in": "query",
- "required": true,
- "schema": {
- "type": "string"
- },
- "description": "Search query"
- },
- {
- "name": "per_page",
- "in": "query",
- "required": false,
- "schema": {
- "type": "integer",
- "default": 5
- },
- "description": "Number of repositories to return per page"
- },
- {
- "name": "sort",
- "in": "query",
- "required": false,
- "schema": {
- "type": "string",
- "default": "stars"
- },
- "description": "Sort order for repositories"
- },
- {
- "name": "order",
- "in": "query",
- "required": false,
- "schema": {
- "type": "string",
- "default": "desc"
- },
- "description": "Sort direction"
- }
- ],
- "responses": {
- "200": {
- "description": "Search results",
- "content": {
- "application/json": {
- "schema": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "full_name": {
- "type": "string"
- },
- "description": {
- "type": "string",
- "nullable": true
- },
- "html_url": {
- "type": "string"
- },
- "private": {
- "type": "boolean"
- },
- "fork": {
- "type": "boolean"
- },
- "updated_at": {
- "type": "string",
- "format": "date-time"
- }
- }
- }
- }
- }
- }
- },
- "401": {
- "description": "Authentication error",
- "content": {
- "application/json": {
- "schema": {
- "type": "string"
- }
- }
- }
- },
- "500": {
- "description": "Unknown error",
- "content": {
- "application/json": {
- "schema": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- },
- "/api/user/suggested-tasks": {
- "get": {
- "summary": "Get suggested tasks",
- "description": "Get suggested tasks for the authenticated user across their most recently pushed repositories",
- "operationId": "getSuggestedTasks",
- "responses": {
- "200": {
- "description": "Suggested tasks",
- "content": {
- "application/json": {
- "schema": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "title": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "repository": {
- "type": "string"
- },
- "type": {
- "type": "string"
- },
- "created_at": {
- "type": "string",
- "format": "date-time"
- }
- }
- }
- }
- }
- }
- },
- "401": {
- "description": "Authentication error",
- "content": {
- "application/json": {
- "schema": {
- "type": "string"
- }
- }
- }
- },
- "500": {
- "description": "Unknown error",
- "content": {
- "application/json": {
- "schema": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- },
- "/api/settings": {
- "get": {
- "summary": "Get settings",
- "description": "Get user settings",
- "operationId": "loadSettings",
- "responses": {
- "200": {
- "description": "User settings",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "language": {
- "type": "string"
- },
- "agent": {
- "type": "string"
- },
- "security_analyzer": {
- "type": "string"
- },
- "confirmation_mode": {
- "type": "boolean"
- },
- "llm_model": {
- "type": "string"
- },
- "llm_api_key_set": {
- "type": "boolean"
- },
- "llm_base_url": {
- "type": "string",
- "nullable": true
- },
- "remote_runtime_resource_factor": {
- "type": "number"
- },
- "enable_default_condenser": {
- "type": "boolean"
- },
- "enable_sound_notifications": {
- "type": "boolean"
- },
- "user_consents_to_analytics": {
- "type": "boolean"
- },
- "provider_tokens_set": {
- "type": "object",
- "additionalProperties": {
- "type": "boolean"
- }
- }
- }
- }
- }
- }
- },
- "401": {
- "description": "Invalid token",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- },
- "404": {
- "description": "Settings not found",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- },
- "post": {
- "summary": "Store settings",
- "description": "Store user settings",
- "operationId": "storeSettings",
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "language": {
- "type": "string"
- },
- "agent": {
- "type": "string"
- },
- "security_analyzer": {
- "type": "string"
- },
- "confirmation_mode": {
- "type": "boolean"
- },
- "llm_model": {
- "type": "string"
- },
- "llm_api_key": {
- "type": "string"
- },
- "llm_base_url": {
- "type": "string",
- "nullable": true
- },
- "remote_runtime_resource_factor": {
- "type": "number"
- },
- "enable_default_condenser": {
- "type": "boolean"
- },
- "enable_sound_notifications": {
- "type": "boolean"
- },
- "user_consents_to_analytics": {
- "type": "boolean"
- },
- "provider_tokens": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- },
- "responses": {
- "200": {
- "description": "Settings stored successfully",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "message": {
- "type": "string"
- }
- }
- }
- }
- }
- },
- "401": {
- "description": "Invalid token",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- },
- "500": {
- "description": "Error storing settings",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/reset-settings": {
- "post": {
- "summary": "Reset settings (Deprecated)",
- "description": "This endpoint is deprecated and will return a 410 Gone error. Reset functionality has been removed.",
- "operationId": "resetSettings",
- "deprecated": true,
- "responses": {
- "410": {
- "description": "Feature removed",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string",
- "example": "Reset settings functionality has been removed."
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/unset-settings-tokens": {
- "post": {
- "summary": "Unset settings tokens",
- "description": "Unset provider tokens in settings",
- "operationId": "unsetSettingsTokens",
- "responses": {
- "200": {
- "description": "Tokens unset successfully",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "message": {
- "type": "string"
- }
- }
- }
- }
- }
- },
- "500": {
- "description": "Error unsetting tokens",
- "content": {
- "application/json": {
- "schema": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/options/models": {
- "get": {
- "summary": "Get models",
- "description": "Get all models supported by LiteLLM",
- "operationId": "getLitellmModels",
- "responses": {
- "200": {
- "description": "List of models",
- "content": {
- "application/json": {
- "schema": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/options/agents": {
- "get": {
- "summary": "Get agents",
- "description": "Get all agents supported by OpenHands",
- "operationId": "getAgents",
- "responses": {
- "200": {
- "description": "List of agents",
- "content": {
- "application/json": {
- "schema": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/options/security-analyzers": {
- "get": {
- "summary": "Get security analyzers",
- "description": "Get all supported security analyzers",
- "operationId": "getSecurityAnalyzers",
- "responses": {
- "200": {
- "description": "List of security analyzers",
- "content": {
- "application/json": {
- "schema": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- }
- }
- }
- }
- },
- "/api/options/config": {
- "get": {
- "summary": "Get config",
- "description": "Get current server configuration",
- "operationId": "getConfig",
- "responses": {
- "200": {
- "description": "Server configuration",
- "content": {
- "application/json": {
- "schema": {
- "type": "object"
- }
- }
- }
- }
- }
- }
- }
- },
- "components": {
- "schemas": {
- "Repository": {
- "type": "object",
- "properties": {
- "full_name": {
- "type": "string"
- },
- "description": {
- "type": "string",
- "nullable": true
- },
- "html_url": {
- "type": "string"
- },
- "private": {
- "type": "boolean"
- },
- "fork": {
- "type": "boolean"
- },
- "updated_at": {
- "type": "string",
- "format": "date-time"
- }
- }
- },
- "User": {
- "type": "object",
- "properties": {
- "login": {
- "type": "string"
- },
- "name": {
- "type": "string",
- "nullable": true
- },
- "email": {
- "type": "string",
- "nullable": true
- },
- "avatar_url": {
- "type": "string",
- "nullable": true
- }
- }
- },
- "SuggestedTask": {
- "type": "object",
- "properties": {
- "title": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "repository": {
- "type": "string"
- },
- "type": {
- "type": "string"
- },
- "created_at": {
- "type": "string",
- "format": "date-time"
- }
- }
- },
- "ConversationInfo": {
- "type": "object",
- "properties": {
- "conversation_id": {
- "type": "string"
- },
- "title": {
- "type": "string"
- },
- "last_updated_at": {
- "type": "string",
- "format": "date-time"
- },
- "created_at": {
- "type": "string",
- "format": "date-time"
- },
- "selected_repository": {
- "type": "string",
- "nullable": true
- },
- "status": {
- "type": "string",
- "enum": ["RUNNING", "STOPPED"]
- },
- "trigger": {
- "type": "string",
- "enum": ["GUI", "API"]
- }
- }
- },
- "ConversationInfoResultSet": {
- "type": "object",
- "properties": {
- "results": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/ConversationInfo"
- }
- },
- "next_page_id": {
- "type": "string",
- "nullable": true
- }
- }
- },
- "FeedbackDataModel": {
- "type": "object",
- "properties": {
- "email": {
- "type": "string",
- "format": "email"
- },
- "version": {
- "type": "string"
- },
- "permissions": {
- "type": "string",
- "default": "private"
- },
- "polarity": {
- "type": "string"
- },
- "feedback": {
- "type": "string"
- },
- "trajectory": {
- "type": "array",
- "items": {
- "type": "object"
- }
- }
- }
- },
- "Settings": {
- "type": "object",
- "properties": {
- "language": {
- "type": "string"
- },
- "agent": {
- "type": "string"
- },
- "security_analyzer": {
- "type": "string"
- },
- "confirmation_mode": {
- "type": "boolean"
- },
- "llm_model": {
- "type": "string"
- },
- "llm_api_key": {
- "type": "string"
- },
- "llm_base_url": {
- "type": "string",
- "nullable": true
- },
- "remote_runtime_resource_factor": {
- "type": "number"
- },
- "enable_default_condenser": {
- "type": "boolean"
- },
- "enable_sound_notifications": {
- "type": "boolean"
- },
- "user_consents_to_analytics": {
- "type": "boolean"
- }
- }
- },
- "GETSettingsModel": {
- "type": "object",
- "properties": {
- "language": {
- "type": "string"
- },
- "agent": {
- "type": "string"
- },
- "security_analyzer": {
- "type": "string"
- },
- "confirmation_mode": {
- "type": "boolean"
- },
- "llm_model": {
- "type": "string"
- },
- "llm_api_key_set": {
- "type": "boolean"
- },
- "llm_base_url": {
- "type": "string",
- "nullable": true
- },
- "remote_runtime_resource_factor": {
- "type": "number"
- },
- "enable_default_condenser": {
- "type": "boolean"
- },
- "enable_sound_notifications": {
- "type": "boolean"
- },
- "user_consents_to_analytics": {
- "type": "boolean"
- },
- "provider_tokens_set": {
- "type": "object",
- "additionalProperties": {
- "type": "boolean"
- }
- }
- }
- },
- "POSTSettingsModel": {
- "type": "object",
- "properties": {
- "language": {
- "type": "string"
- },
- "agent": {
- "type": "string"
- },
- "security_analyzer": {
- "type": "string"
- },
- "confirmation_mode": {
- "type": "boolean"
- },
- "llm_model": {
- "type": "string"
- },
- "llm_api_key": {
- "type": "string"
- },
- "llm_base_url": {
- "type": "string",
- "nullable": true
- },
- "remote_runtime_resource_factor": {
- "type": "number"
- },
- "enable_default_condenser": {
- "type": "boolean"
- },
- "enable_sound_notifications": {
- "type": "boolean"
- },
- "user_consents_to_analytics": {
- "type": "boolean"
- },
- "provider_tokens": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- }
- }
- }
- },
- "securitySchemes": {
- "bearerAuth": {
- "type": "http",
- "scheme": "bearer",
- "bearerFormat": "JWT"
- }
- }
- },
- "security": [
- {
- "bearerAuth": []
- }
- ]
-}
diff --git a/docs/static/img/backend_architecture.png b/docs/static/img/backend_architecture.png
deleted file mode 100644
index c7eae5508960e41d90447bafadefa1284bb38232..0000000000000000000000000000000000000000
--- a/docs/static/img/backend_architecture.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ec7d7be3dc2240b7beefd169d9aadeb261a885ba036d48ee1c869f2f72c93cfe
-size 273400
diff --git a/docs/static/img/backend_architecture.puml b/docs/static/img/backend_architecture.puml
deleted file mode 100644
index c96651ab469baa827cd9f0a2ff59492191f22e17..0000000000000000000000000000000000000000
--- a/docs/static/img/backend_architecture.puml
+++ /dev/null
@@ -1,201 +0,0 @@
-@startuml openhands
-!pragma useIntermediatePackages false
-
-class openhands.action.agent.AgentEchoAction {
- content: str
- runnable: bool
- action: str
-}
-class openhands.action.agent.AgentFinishAction {
- runnable: bool
- action: str
-}
-class openhands.observation.AgentMessageObservation {
- role: str
- observation: str
-}
-class openhands.action.agent.AgentSummarizeAction {
- summary: str
- action: str
-}
-class openhands.action.agent.AgentThinkAction {
- thought: str
- runnable: bool
- action: str
-}
-class openhands.action.base.ExecutableAction {
-}
-class openhands.action.base.NotExecutableAction {
-}
-class openhands.observation.Observation {
- content: str
-}
-class openhands.action.base.Action {
-}
-class openhands.action.base.NullAction {
- action: str
-}
-class openhands.action.bash.CmdRunAction {
- command: str
- action: str
-}
-class openhands.action.browse.BrowseURLAction {
- url: str
- action: str
-}
-class openhands.observation.BrowserOutputObservation {
- url: str
- status_code: int
- error: bool
- observation: str
-}
-class openhands.action.fileop.FileReadAction {
- path: str
- action: str
-}
-class openhands.observation.FileReadObservation {
- path: str
- observation: str
-}
-class openhands.action.fileop.FileWriteAction {
- path: str
- contents: str
- action: str
-}
-class openhands.observation.FileWriteObservation {
- path: str
- observation: str
-}
-class openhands.action.tasks.AddTaskAction {
- parent: str
- goal: str
- subtasks: list
- action: str
-}
-class openhands.action.tasks.ModifyTaskAction {
- id: str
- state: str
- action: str
-}
-abstract class openhands.agent.Agent {
- _registry: Dict[str, Type[Agent]] {static}
- llm: LLM
- _complete: None
-}
-class openhands.llm.llm.LLM {
- model: None
- api_key: None
- base_url: None
- _debug_dir: None
- _debug_idx: None
- _debug_id: None
- _completion: None
-}
-class openhands.controller.agent_controller.AgentController {
- agent: Agent
- max_iterations: int
- workdir: str
- command_manager: CommandManager
- state: State
- plan: Plan
- callbacks: List[Callable]
-}
-class openhands.observation.AgentErrorObservation {
- observation: str
-}
-class openhands.controller.command_manager.CommandManager {
- directory: None
- shell: None
-}
-class openhands.observation.NullObservation {
- observation: str
-}
-class openhands.plan.Plan {
- main_goal: str {static}
- task: Task {static}
- main_goal: str
- task: None
-}
-class openhands.state.State {
- plan: Plan
- iteration: int
- history: List[Tuple[Action, Observation]]
- updated_info: List[Tuple[Action, Observation]]
-}
-class openhands.observation.CmdOutputObservation {
- command: str
- exit_code: int
- observation: str
-}
-class openhands.sandbox.sandbox.DockerInteractive {
- instance_id: None
- instance_id: None
- workspace_dir: None
- workspace_dir: None
- workspace_dir: None
- timeout: int
- base_container_image: None
- container_name: None
-}
-class openhands.observation.UserMessageObservation {
- role: str
- observation: str
-}
-class openhands.plan.Task {
- id: str {static}
- goal: str {static}
- parent: Task | None {static}
- subtasks: List[Task] {static}
- id: None
- id: None
- parent: None
- goal: str
- subtasks: None
-}
-
-class openhands.server.session.Session {
- websocket: None
- controller: Optional[AgentController]
- agent: Optional[Agent]
- agent_task: None
-}
-
-openhands.action.base.ExecutableAction <|-- openhands.action.agent.AgentEchoAction
-openhands.action.base.NotExecutableAction <|-- openhands.action.agent.AgentFinishAction
-openhands.observation.Observation <|-- openhands.observation.AgentMessageObservation
-openhands.action.base.NotExecutableAction <|-- openhands.action.agent.AgentSummarizeAction
-openhands.action.base.NotExecutableAction <|-- openhands.action.agent.AgentThinkAction
-openhands.action.base.Action <|-- openhands.action.base.ExecutableAction
-openhands.action.base.Action <|-- openhands.action.base.NotExecutableAction
-openhands.action.base.NotExecutableAction <|-- openhands.action.base.NullAction
-openhands.action.base.ExecutableAction <|-- openhands.action.bash.CmdRunAction
-openhands.action.base.ExecutableAction <|-- openhands.action.browse.BrowseURLAction
-openhands.observation.Observation <|-- openhands.observation.BrowserOutputObservation
-openhands.action.base.ExecutableAction <|-- openhands.action.fileop.FileReadAction
-openhands.observation.Observation <|-- openhands.observation.FileReadObservation
-openhands.action.base.ExecutableAction <|-- openhands.action.fileop.FileWriteAction
-openhands.observation.Observation <|-- openhands.observation.FileWriteObservation
-openhands.action.base.NotExecutableAction <|-- openhands.action.tasks.AddTaskAction
-openhands.action.base.NotExecutableAction <|-- openhands.action.tasks.ModifyTaskAction
-openhands.agent.Agent *-- openhands.agent.Agent
-openhands.agent.Agent *-- openhands.llm.llm.LLM
-openhands.controller.agent_controller.AgentController *-- openhands.agent.Agent
-openhands.observation.Observation <|-- openhands.observation.AgentErrorObservation
-openhands.observation.Observation <|-- openhands.observation.NullObservation
-openhands.plan.Plan *-- openhands.plan.Task
-openhands.state.State *-- openhands.plan.Plan
-openhands.state.State *-- openhands.observation.CmdOutputObservation
-openhands.state.State *-- openhands.action.base.Action
-openhands.state.State *-- openhands.observation.Observation
-openhands.observation.Observation <|-- openhands.observation.CmdOutputObservation
-openhands.observation.Observation <|-- openhands.observation.UserMessageObservation
-openhands.plan.Task *-- openhands.plan.Task
-openhands.server.session.Session *-- openhands.controller.agent_controller.AgentController
-openhands.server.session.Session *-- openhands.agent.Agent
-openhands.controller.agent_controller.AgentController -> openhands.state.State
-openhands.controller.agent_controller.AgentController -> openhands.plan.Plan
-openhands.controller.agent_controller.AgentController -> openhands.controller.command_manager.CommandManager
-openhands.controller.command_manager.CommandManager -> openhands.sandbox.sandbox.DockerInteractive
-
-footer Based on f3fda42; Generated by //py2puml//
-@enduml
diff --git a/docs/static/img/backend_architecture.svg b/docs/static/img/backend_architecture.svg
deleted file mode 100644
index a5fc173d5a844679f0a5f2089140fa80f1dd72b9..0000000000000000000000000000000000000000
--- a/docs/static/img/backend_architecture.svg
+++ /dev/null
@@ -1 +0,0 @@
-openhands action agent base bash browse fileop tasks observation agent llm.llm controller agent_controller command_manager plan state sandbox.sandbox server.session AgentEchoAction content: str runnable: bool action: str AgentFinishAction runnable: bool action: str AgentRecallAction query: str action: str AgentSummarizeAction summary: str action: str AgentThinkAction thought: str runnable: bool action: str ExecutableAction NotExecutableAction Action NullAction action: str CmdKillAction id: int action: str CmdRunAction command: str background: bool action: str BrowseURLAction url: str action: str FileReadAction path: str action: str FileWriteAction path: str contents: str action: str AddTaskAction parent: str goal: str subtasks: list action: str ModifyTaskAction id: str state: str action: str AgentMessageObservation role: str observation: str AgentRecallObservation memories: List[str] role: str observation: str Observation content: str BrowserOutputObservation url: str status_code: int error: bool observation: str FileReadObservation path: str observation: str FileWriteObservation path: str observation: str AgentErrorObservation observation: str NullObservation observation: str CmdOutputObservation command_id: int command: str exit_code: int observation: str UserMessageObservation role: str observation: str Agent _registry: Dict[str, Type[Agent]] llm: LLM _complete: None LLM model: None api_key: None base_url: None _debug_dir: None _debug_idx: None _debug_id: None _completion: None AgentController agent: Agent max_iterations: int workdir: str command_manager: CommandManager state: State plan: Plan callbacks: List[Callable] CommandManager directory: None shell: None Plan main_goal: str task: Task main_goal: str task: None Task id: str goal: str parent: Task | None subtasks: List[Task] id: None id: None parent: None goal: str subtasks: None State plan: Plan iteration: int background_commands_obs: List[CmdOutputObservation] history: List[Tuple[Action, Observation]] 
updated_info: List[Tuple[Action, Observation]] DockerInteractive background_commands: Dict[int, BackgroundCommand] instance_id: None instance_id: None workspace_dir: None workspace_dir: None workspace_dir: None timeout: int base_container_image: None container_name: None BackgroundCommand Session websocket: None controller: Optional[AgentController] agent: Optional[Agent] agent_task: None Based on f3fda42; Generated by py2puml
diff --git a/docs/static/img/connect-repo.png b/docs/static/img/connect-repo.png
deleted file mode 100644
index 9eb86dc79a41fe5a36513115b71f8ca34bcc8ec3..0000000000000000000000000000000000000000
Binary files a/docs/static/img/connect-repo.png and /dev/null differ
diff --git a/docs/static/img/docs/api-key-generation.png b/docs/static/img/docs/api-key-generation.png
deleted file mode 100644
index 713a4a79d43abc83d1b6834bd94e6892214c157b..0000000000000000000000000000000000000000
Binary files a/docs/static/img/docs/api-key-generation.png and /dev/null differ
diff --git a/docs/static/img/logo-square.png b/docs/static/img/logo-square.png
deleted file mode 100644
index 624dad2fb22d4d2ac723d8e9f81ab56e0886c36d..0000000000000000000000000000000000000000
--- a/docs/static/img/logo-square.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:de58ad6132a9afb7d4924612c5e013695658af993a81a7611a6564f6a8063d59
-size 1189506
diff --git a/docs/static/img/logo.png b/docs/static/img/logo.png
deleted file mode 100644
index 799c7ee7392f97f50eb6b91a35aa6c74c6ba0ae9..0000000000000000000000000000000000000000
Binary files a/docs/static/img/logo.png and /dev/null differ
diff --git a/docs/static/img/oh-features.png b/docs/static/img/oh-features.png
deleted file mode 100644
index 376ef750e5baed2d874b9bd4ec0dc8c48f2f84af..0000000000000000000000000000000000000000
--- a/docs/static/img/oh-features.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0e5022b7839f81b08e06b7b4f2f6e624259aa68ed6bfd95b47026cb940bacacb
-size 147978
diff --git a/docs/static/img/results.png b/docs/static/img/results.png
deleted file mode 100644
index 7fdb83affef90ccf017c7c484f97d50c3a8cf003..0000000000000000000000000000000000000000
--- a/docs/static/img/results.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:920040c9ef19e8bed163da4b2ba337f8108e8636f55ce0a1b6388d5cd9ab32c2
-size 323430
diff --git a/docs/static/img/screenshot.png b/docs/static/img/screenshot.png
deleted file mode 100644
index 6976b12a044873c720d9e1ab4c98ddc64ce3b517..0000000000000000000000000000000000000000
--- a/docs/static/img/screenshot.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4bb621a61826a12dc8eea4fb2b8d270e8b6a12dcb77ac84665982452a48cb6c4
-size 679233
diff --git a/docs/static/img/system_architecture.png b/docs/static/img/system_architecture.png
deleted file mode 100644
index 42c05b1d4a86d8fb15e54075bb5df962c6aea7c1..0000000000000000000000000000000000000000
Binary files a/docs/static/img/system_architecture.png and /dev/null differ
diff --git a/docs/static/img/system_architecture.puml b/docs/static/img/system_architecture.puml
deleted file mode 100644
index b1b9713ef45fb55eb1cd418dbd5ae7c0c434e187..0000000000000000000000000000000000000000
--- a/docs/static/img/system_architecture.puml
+++ /dev/null
@@ -1,67 +0,0 @@
-@startuml "System Architecture"
-
-
-node frontend as frontend{
-
- component App
-
- package components{
-
- component Terminal
-
- component ChatInterface
-
- component BannerSettings
-
- }
-
- package services{
- component chatService
-
- component settingsService
-
- chatService -[hidden]u-> settingsService
- }
-
- package socket
-
- App -> Terminal
- App -> ChatInterface
- App -> BannerSettings
- ChatInterface -> chatService
- BannerSettings -> settingsService
- Terminal -> socket
- chatService -d-> socket
- settingsService -d-> socket
- services -[hidden]d-> socket
-
- Terminal -[hidden]u-> ChatInterface
- ChatInterface -[hidden]u-> BannerSettings
-
-
-
- interface "HTTP (:3001)" as HTTP
- HTTP - App
-
-}
-
-node backend{
- package server as serverpackage{
- component Server
-
- 'defined in server/server.py, port is defined at startup with uvicorn
- interface "Client WS\n(:3000/ws)" as client_socket
- client_socket - Server
-
-
- }
- node AgentController{
-
- }
- Server -d-> AgentController
-}
-
-
-socket -d-> client_socket: connects to \n VITE_TERMINAL_WS_URL
-
-@enduml
diff --git a/docs/static/img/system_architecture.svg b/docs/static/img/system_architecture.svg
deleted file mode 100644
index d259a4ca3be652065ed67f11ec352fce9faee973..0000000000000000000000000000000000000000
--- a/docs/static/img/system_architecture.svg
+++ /dev/null
@@ -1 +0,0 @@
-frontend components services backend server App socket HTTP (:3001) Terminal ChatInterface BannerSettings chatService settingsService Server Client WS (:3000/ws) AgentController connects to VITE_TERMINAL_WS_URL
diff --git a/docs/static/img/system_architecture_overview.png b/docs/static/img/system_architecture_overview.png
deleted file mode 100644
index 290972eb78592d3d88b7016e67b551b41b8ae96d..0000000000000000000000000000000000000000
--- a/docs/static/img/system_architecture_overview.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8059e09a3a6cadda2f0e19eb6d16dc05b2179c56b6604998be70076bd6bd70a6
-size 175252
diff --git a/docs/static/img/teaser.mp4 b/docs/static/img/teaser.mp4
deleted file mode 100644
index 6e7891c85d296a11fee49cdffb02098c3da2094e..0000000000000000000000000000000000000000
--- a/docs/static/img/teaser.mp4
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a2b9dfd699226a4e6d3e2bb0926941f954cc11c68024e011c861215bb81ff381
-size 36187951
diff --git a/docs/usage/about.mdx b/docs/usage/about.mdx
deleted file mode 100644
index 0d52f410ba4ac1bcd9fdc7f07e63458e1fb1a33c..0000000000000000000000000000000000000000
--- a/docs/usage/about.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: About OpenHands
----
-
-## Research Strategy
-
-Achieving full replication of production-grade applications with LLMs is a complex endeavor. Our strategy involves:
-
-- **Core Technical Research:** Focusing on foundational research to understand and improve the technical aspects of code generation and handling.
-- **Task Planning:** Developing capabilities for bug detection, codebase management, and optimization.
-- **Evaluation:** Establishing comprehensive evaluation metrics to better understand and improve our agents.
-
-## Default Agent
-
-Our default Agent is currently the [CodeActAgent](./agents), which is capable of generating code and handling files.
-
-## Built With
-
-OpenHands is built using a combination of powerful frameworks and libraries, providing a robust foundation for its
-development. Here are the key technologies used in the project:
-
-       
-
-Please note that the selection of these technologies is in progress, and additional technologies may be added or
-existing ones may be removed as the project evolves. We strive to adopt the most suitable and efficient tools to
-enhance the capabilities of OpenHands.
-
-## License
-
-Distributed under MIT [License](https://github.com/All-Hands-AI/OpenHands/blob/main/LICENSE).
diff --git a/docs/usage/agents.mdx b/docs/usage/agents.mdx
deleted file mode 100644
index ec51228bcf1638a3289b5b56d5e7c65cf1b584ba..0000000000000000000000000000000000000000
--- a/docs/usage/agents.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: Main Agent and Capabilities
----
-
-## CodeActAgent
-
-### Description
-
-This agent implements the CodeAct idea ([paper](https://arxiv.org/abs/2402.01030), [tweet](https://twitter.com/xingyaow_/status/1754556835703751087)) that consolidates LLM agents’ **act**ions into a
-unified **code** action space for both _simplicity_ and _performance_.
-
-The conceptual idea is illustrated below. At each turn, the agent can:
-
-1. **Converse**: Communicate with humans in natural language to ask for clarification, confirmation, etc.
-2. **CodeAct**: Choose to perform the task by executing code
-
-- Execute any valid Linux `bash` command
-- Execute any valid `Python` code with [an interactive Python interpreter](https://ipython.org/). This is simulated through `bash` command, see plugin system below for more details.
-
-
-
-### Demo
-
-https://github.com/All-Hands-AI/OpenHands/assets/38853559/f592a192-e86c-4f48-ad31-d69282d5f6ac
-
-_Example of CodeActAgent with `gpt-4-turbo-2024-04-09` performing a data science task (linear regression)_.
diff --git a/docs/usage/architecture/backend.mdx b/docs/usage/architecture/backend.mdx
deleted file mode 100644
index b49c5b45b250bf82cb46d46abbe666ec6872b2af..0000000000000000000000000000000000000000
--- a/docs/usage/architecture/backend.mdx
+++ /dev/null
@@ -1,56 +0,0 @@
----
-title: Backend Architecture
----
-
-
-
-
OpenHands System Architecture Diagram (July 4, 2024)
-
-
-This is a high-level overview of the system architecture. The system is divided into two main components: the frontend and the backend. The frontend is responsible for handling user interactions and displaying the results. The backend is responsible for handling the business logic and executing the agents.
-
-# Frontend architecture
-
-
-
-This Overview is simplified to show the main components and their interactions. For a more detailed view of the backend architecture, see the Backend Architecture section below.
-
-# Backend Architecture
-
-_**Disclaimer**: The backend architecture is a work in progress and is subject to change. The following diagram shows the current architecture of the backend based on the commit that is shown in the footer of the diagram._
-
-
-
-
- Updating this Diagram
-
- The generation of the backend architecture diagram is partially automated.
- The diagram is generated from the type hints in the code using the py2puml
- tool. The diagram is then manually reviewed, adjusted and exported to PNG
- and SVG.
-
- ## Prerequisites
-
- - Running python environment in which openhands is executable
- (according to the instructions in the README.md file in the root of the repository)
- - [py2puml](https://github.com/lucsorel/py2puml) installed
-
-## Steps
-
-1. Autogenerate the diagram by running the following command from the root of the repository:
- `py2puml openhands openhands > docs/architecture/backend_architecture.puml`
-
-2. Open the generated file in a PlantUML editor, e.g. Visual Studio Code with the PlantUML extension or [PlantText](https://www.planttext.com/)
-
-3. Review the generated PUML and make all necessary adjustments to the diagram (add missing parts, fix mistakes, improve positioning).
- _py2puml creates the diagram based on the type hints in the code, so missing or incorrect type hints may result in an incomplete or incorrect diagram._
-
-4. Review the diff between the new and the previous diagram and manually check if the changes are correct.
- _Make sure not to remove parts that were manually added to the diagram in the past and are still relevant._
-
-5. Add the commit hash of the commit that was used to generate the diagram to the diagram footer.
-
-6. Export the diagram as PNG and SVG files and replace the existing diagrams in the `docs/architecture` directory. This can be done with (e.g. [PlantText](https://www.planttext.com/))
-
-
-
diff --git a/docs/usage/architecture/runtime.mdx b/docs/usage/architecture/runtime.mdx
deleted file mode 100644
index 5ebcf3bafc21348c88b86abb9b5088d5d2d3021c..0000000000000000000000000000000000000000
--- a/docs/usage/architecture/runtime.mdx
+++ /dev/null
@@ -1,136 +0,0 @@
----
-title: Runtime Architecture
----
-
-The OpenHands Docker Runtime is the core component that enables secure and flexible execution of AI agent's action.
-It creates a sandboxed environment using Docker, where arbitrary code can be run safely without risking the host system.
-
-## Why do we need a sandboxed runtime?
-
-OpenHands needs to execute arbitrary code in a secure, isolated environment for several reasons:
-
-1. Security: Executing untrusted code can pose significant risks to the host system. A sandboxed environment prevents malicious code from accessing or modifying the host system's resources
-2. Consistency: A sandboxed environment ensures that code execution is consistent across different machines and setups, eliminating "it works on my machine" issues
-3. Resource Control: Sandboxing allows for better control over resource allocation and usage, preventing runaway processes from affecting the host system
-4. Isolation: Different projects or users can work in isolated environments without interfering with each other or the host system
-5. Reproducibility: Sandboxed environments make it easier to reproduce bugs and issues, as the execution environment is consistent and controllable
-
-## How does the Runtime work?
-
-The OpenHands Runtime system uses a client-server architecture implemented with Docker containers. Here's an overview of how it works:
-
-```mermaid
-graph TD
- A[User-provided Custom Docker Image] --> B[OpenHands Backend]
- B -->|Builds| C[OH Runtime Image]
- C -->|Launches| D[Action Executor]
- D -->|Initializes| E[Browser]
- D -->|Initializes| F[Bash Shell]
- D -->|Initializes| G[Plugins]
- G -->|Initializes| L[Jupyter Server]
-
- B -->|Spawn| H[Agent]
- B -->|Spawn| I[EventStream]
- I <--->|Execute Action to
- Get Observation
- via REST API
- | D
-
- H -->|Generate Action| I
- I -->|Obtain Observation| H
-
- subgraph "Docker Container"
- D
- E
- F
- G
- L
- end
-```
-
-1. User Input: The user provides a custom base Docker image
-2. Image Building: OpenHands builds a new Docker image (the "OH runtime image") based on the user-provided image. This new image includes OpenHands-specific code, primarily the "runtime client"
-3. Container Launch: When OpenHands starts, it launches a Docker container using the OH runtime image
-4. Action Execution Server Initialization: The action execution server initializes an `ActionExecutor` inside the container, setting up necessary components like a bash shell and loading any specified plugins
-5. Communication: The OpenHands backend (`openhands/runtime/impl/eventstream/eventstream_runtime.py`) communicates with the action execution server over RESTful API, sending actions and receiving observations
-6. Action Execution: The runtime client receives actions from the backend, executes them in the sandboxed environment, and sends back observations
-7. Observation Return: The action execution server sends execution results back to the OpenHands backend as observations
-
-The role of the client:
-
-- It acts as an intermediary between the OpenHands backend and the sandboxed environment
-- It executes various types of actions (shell commands, file operations, Python code, etc.) safely within the container
-- It manages the state of the sandboxed environment, including the current working directory and loaded plugins
-- It formats and returns observations to the backend, ensuring a consistent interface for processing results
-
-## How OpenHands builds and maintains OH Runtime images
-
-OpenHands' approach to building and managing runtime images ensures efficiency, consistency, and flexibility in creating and maintaining Docker images for both production and development environments.
-
-Check out the [relevant code](https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/runtime/utils/runtime_build.py) if you are interested in more details.
-
-### Image Tagging System
-
-OpenHands uses a three-tag system for its runtime images to balance reproducibility with flexibility.
-Tags may be in one of 2 formats:
-
-- **Versioned Tag**: `oh_v{openhands_version}_{base_image}` (e.g.: `oh_v0.9.9_nikolaik_s_python-nodejs_t_python3.12-nodejs22`)
-- **Lock Tag**: `oh_v{openhands_version}_{16_digit_lock_hash}` (e.g.: `oh_v0.9.9_1234567890abcdef`)
-- **Source Tag**: `oh_v{openhands_version}_{16_digit_lock_hash}_{16_digit_source_hash}`
- (e.g.: `oh_v0.9.9_1234567890abcdef_1234567890abcdef`)
-
-#### Source Tag - Most Specific
-
-This is the first 16 digits of the MD5 of the directory hash for the source directory. This gives a hash
-for only the openhands source
-
-#### Lock Tag
-
-This hash is built from the first 16 digits of the MD5 of:
-
-- The name of the base image upon which the image was built (e.g.: `nikolaik/python-nodejs:python3.12-nodejs22`)
-- The content of the `pyproject.toml` included in the image.
-- The content of the `poetry.lock` included in the image.
-
-This effectively gives a hash for the dependencies of Openhands independent of the source code.
-
-#### Versioned Tag - Most Generic
-
-This tag is a concatenation of openhands version and the base image name (transformed to fit in tag standard).
-
-#### Build Process
-
-When generating an image...
-
-- **No re-build**: OpenHands first checks whether an image with the same **most specific source tag** exists. If there is such an image,
- no build is performed - the existing image is used.
-- **Fastest re-build**: OpenHands next checks whether an image with the **generic lock tag** exists. If there is such an image,
- OpenHands builds a new image based upon it, bypassing all installation steps (like `poetry install` and
- `apt-get`) except a final operation to copy the current source code. The new image is tagged with a
- **source** tag only.
-- **Ok-ish re-build**: If neither a **source** nor **lock** tag exists, an image will be built based upon the **versioned** tag image.
- In versioned tag image, most dependencies should already been installed hence saving time.
-- **Slowest re-build**: If all of the three tags don't exists, a brand new image is built based upon the base
- image (Which is a slower operation). This new image is tagged with all the **source**, **lock**, and **versioned** tags.
-
-This tagging approach allows OpenHands to efficiently manage both development and production environments.
-
-1. Identical source code and Dockerfile always produce the same image (via hash-based tags)
-2. The system can quickly rebuild images when minor changes occur (by leveraging recent compatible images)
-3. The **lock** tag (e.g., `runtime:oh_v0.9.3_1234567890abcdef`) always points to the latest build for a particular base image, dependency, and OpenHands version combination
-
-## Runtime Plugin System
-
-The OpenHands Runtime supports a plugin system that allows for extending functionality and customizing the runtime environment. Plugins are initialized when the runtime client starts up.
-
-Check [an example of Jupyter plugin here](https://github.com/All-Hands-AI/OpenHands/blob/ecf4aed28b0cf7c18d4d8ff554883ba182fc6bdd/openhands/runtime/plugins/jupyter/__init__.py#L21-L55) if you want to implement your own plugin.
-
-*More details about the Plugin system are still under construction - contributions are welcomed!*
-
-Key aspects of the plugin system:
-
-1. Plugin Definition: Plugins are defined as Python classes that inherit from a base `Plugin` class
-2. Plugin Registration: Available plugins are registered in an `ALL_PLUGINS` dictionary
-3. Plugin Specification: Plugins are associated with `Agent.sandbox_plugins: list[PluginRequirement]`. Users can specify which plugins to load when initializing the runtime
-4. Initialization: Plugins are initialized asynchronously when the runtime client starts
-5. Usage: The runtime client can use initialized plugins to extend its capabilities (e.g., the JupyterPlugin for running IPython cells)
diff --git a/docs/usage/cloud/cloud-api.mdx b/docs/usage/cloud/cloud-api.mdx
deleted file mode 100644
index c4565cccaae831088deb9c6ae04a8c8b2096ffc9..0000000000000000000000000000000000000000
--- a/docs/usage/cloud/cloud-api.mdx
+++ /dev/null
@@ -1,176 +0,0 @@
----
-title: Cloud API
-description: OpenHands Cloud provides a REST API that allows you to programmatically interact with the service. This guide explains how to obtain an API key and use the API to start conversations.
----
-
-For more detailed information about the API, refer to the [OpenHands API Reference](https://docs.all-hands.dev/swagger-ui/).
-
-## Obtaining an API Key
-
-To use the OpenHands Cloud API, you'll need to generate an API key:
-
-1. Log in to your [OpenHands Cloud](https://app.all-hands.dev) account.
-2. Navigate to the [Settings page](https://app.all-hands.dev/settings).
-3. Select the `API Keys` tab.
-4. Click `Create API Key`.
-5. Give your key a descriptive name (Example: "Development" or "Production") and select `Create`.
-6. Copy the generated API key and store it securely. It will only be shown once.
-
-
-
-## API Usage
-
-### Starting a New Conversation
-
-To start a new conversation with OpenHands to perform a task, you'll need to make a POST request to the conversation endpoint.
-
-#### Request Parameters
-
-| Parameter | Type | Required | Description |
-|--------------------|----------|----------|------------------------------------------------------------------------------------------------------|
-| `initial_user_msg` | string | Yes | The initial message to start the conversation. |
-| `repository` | string | No | Git repository name to provide context in the format `owner/repo`. You must have access to the repo. |
-
-#### Examples
-
-
-cURL
-
-```bash
-curl -X POST "https://app.all-hands.dev/api/conversations" \
- -H "Authorization: Bearer YOUR_API_KEY" \
- -H "Content-Type: application/json" \
- -d '{
- "initial_user_msg": "Check whether there is any incorrect information in the README.md file and send a PR to fix it if so.",
- "repository": "yourusername/your-repo"
- }'
-```
-
-
-
-Python (with requests)
-
-```python
-import requests
-
-api_key = "YOUR_API_KEY"
-url = "https://app.all-hands.dev/api/conversations"
-
-headers = {
- "Authorization": f"Bearer {api_key}",
- "Content-Type": "application/json"
-}
-
-data = {
- "initial_user_msg": "Check whether there is any incorrect information in the README.md file and send a PR to fix it if so.",
- "repository": "yourusername/your-repo"
-}
-
-response = requests.post(url, headers=headers, json=data)
-conversation = response.json()
-
-print(f"Conversation Link: https://app.all-hands.dev/conversations/{conversation['conversation_id']}")
-print(f"Status: {conversation['status']}")
-```
-
-
-
-TypeScript/JavaScript (with fetch)
-
-```typescript
-const apiKey = "YOUR_API_KEY";
-const url = "https://app.all-hands.dev/api/conversations";
-
-const headers = {
- "Authorization": `Bearer ${apiKey}`,
- "Content-Type": "application/json"
-};
-
-const data = {
- initial_user_msg: "Check whether there is any incorrect information in the README.md file and send a PR to fix it if so.",
- repository: "yourusername/your-repo"
-};
-
-async function startConversation() {
- try {
- const response = await fetch(url, {
- method: "POST",
- headers: headers,
- body: JSON.stringify(data)
- });
-
- const conversation = await response.json();
-
- console.log(`Conversation Link: https://app.all-hands.dev/conversations/${conversation.id}`);
- console.log(`Status: ${conversation.status}`);
-
- return conversation;
- } catch (error) {
- console.error("Error starting conversation:", error);
- }
-}
-
-startConversation();
-```
-
-
-
-#### Response
-
-The API will return a JSON object with details about the created conversation:
-
-```json
-{
- "status": "ok",
- "conversation_id": "abc1234",
-}
-```
-
-You may receive an `AuthenticationError` if:
-
-- You provided an invalid API key.
-- You provided the wrong repository name.
-- You don't have access to the repository.
-
-
-### Retrieving Conversation Status
-
-You can check the status of a conversation by making a GET request to the conversation endpoint.
-
-#### Endpoint
-
-```
-GET https://app.all-hands.dev/api/conversations/{conversation_id}
-```
-
-#### Example
-
-
-cURL
-
-```bash
-curl -X GET "https://app.all-hands.dev/api/conversations/{conversation_id}" \
- -H "Authorization: Bearer YOUR_API_KEY"
-```
-
-
-#### Response
-
-The response is formatted as follows:
-
-```json
-{
- "conversation_id":"abc1234",
- "title":"Update README.md",
- "created_at":"2025-04-29T15:13:51.370706Z",
- "last_updated_at":"2025-04-29T15:13:57.199210Z",
- "status":"RUNNING",
- "selected_repository":"yourusername/your-repo",
- "trigger":"gui"
-}
-```
-
-## Rate Limits
-
-If you have too many conversations running at once, older conversations will be paused to limit the number of concurrent conversations.
-If you're running into issues and need a higher limit for your use case, please contact us at [contact@all-hands.dev](mailto:contact@all-hands.dev).
diff --git a/docs/usage/cloud/cloud-ui.mdx b/docs/usage/cloud/cloud-ui.mdx
deleted file mode 100644
index 8b6d7bcf9733d3b038f9670dc7595e5cb4288597..0000000000000000000000000000000000000000
--- a/docs/usage/cloud/cloud-ui.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
----
-title: Cloud UI
-description: The Cloud UI provides a web interface for interacting with OpenHands. This page explains how to use the
- OpenHands Cloud UI.
----
-
-## Landing Page
-
-The landing page is where you can:
-
-- [Add GitHub repository access](/usage/cloud/github-installation#adding-github-repository-access) to OpenHands.
-- [Select a GitHub repo](/usage/cloud/github-installation#working-with-github-repos-in-openhands-cloud) or
- [a GitLab repo](/usage/cloud/gitlab-installation#working-with-gitlab-repos-in-openhands-cloud) to start working on.
-- See `Suggested Tasks` for repositories that OpenHands has access to.
-- Launch an empty conversation using `Launch from Scratch`.
-
-## Settings
-
-The Settings page allows you to:
-
-- [Configure GitHub repository access](/usage/cloud/github-installation#modifying-repository-access) for OpenHands.
-- Set application settings like your preferred language, notifications and other preferences.
-- Add credits to your account.
-- Generate custom secrets.
-- Create API keys to work with OpenHands programmatically.
-
-## Key Features
-
-For an overview of the key features available inside a conversation, please refer to the [Key Features](../key-features)
-section of the documentation.
-
-## Next Steps
-
-- [Install GitHub Integration](/usage/cloud/github-installation) to use OpenHands with your GitHub repositories.
-- [Install GitLab Integration](/usage/cloud/gitlab-installation) to use OpenHands with your GitLab repositories.
-- [Use the Cloud API](/usage/cloud/cloud-api) to programmatically interact with OpenHands.
diff --git a/docs/usage/cloud/github-installation.mdx b/docs/usage/cloud/github-installation.mdx
deleted file mode 100644
index eb25cab10e4f9745e858696afd3f864f5f266287..0000000000000000000000000000000000000000
--- a/docs/usage/cloud/github-installation.mdx
+++ /dev/null
@@ -1,71 +0,0 @@
----
-title: GitHub Integration
-description: This guide walks you through the process of installing OpenHands Cloud for your GitHub repositories. Once
- set up, it will allow OpenHands to work with your GitHub repository through the Cloud UI or straight from GitHub!
----
-
-## Prerequisites
-
-- Signed in to [OpenHands Cloud](https://app.all-hands.dev) with [a GitHub account](/usage/cloud/openhands-cloud).
-
-## Adding GitHub Repository Access
-
-You can grant OpenHands access to specific GitHub repositories:
-
-1. Click on `Add GitHub repos` on the landing page.
-2. Select your organization and choose the specific repositories to grant OpenHands access to.
-
- - OpenHands requests short-lived tokens (8-hour expiration) with these permissions:
- - Actions: Read and write
- - Commit statuses: Read and write
- - Contents: Read and write
- - Issues: Read and write
- - Metadata: Read-only
- - Pull requests: Read and write
- - Webhooks: Read and write
- - Workflows: Read and write
- - Repository access for a user is granted based on:
- - Permission granted for the repository
- - User's GitHub permissions (owner/collaborator)
-
-
-3. Click `Install & Authorize`.
-
-## Modifying Repository Access
-
-You can modify GitHub repository access at any time by:
-- Selecting `Add GitHub repos` on the landing page or
-- Visiting the Settings page and selecting `Configure GitHub Repositories` under the `Git` tab
-
-## Working With GitHub Repos in Openhands Cloud
-
-Once you've granted GitHub repository access, you can start working with your GitHub repository. Use the `select a repo`
-and `select a branch` dropdowns to select the appropriate repository and branch you'd like OpenHands to work on. Then
-click on `Launch` to start the conversation!
-
-
-
-## Working on Github Issues and Pull Requests Using Openhands
-
-Giving GitHub repository access to OpenHands also allows you to work on GitHub issues and pull requests directly.
-
-### Working with Issues
-
-On your repository, label an issue with `openhands` or add a message starting with
-`@openhands`. OpenHands will:
-1. Comment on the issue to let you know it is working on it.
- - You can click on the link to track the progress on OpenHands Cloud.
-2. Open a pull request if it determines that the issue has been successfully resolved.
-3. Comment on the issue with a summary of the performed tasks and a link to the PR.
-
-### Working with Pull Requests
-
-To get OpenHands to work on pull requests, mention `@openhands` in the comments to:
-- Ask questions
-- Request updates
-- Get code explanations
-
-## Next Steps
-
-- [Learn about the Cloud UI](/usage/cloud/cloud-ui).
-- [Use the Cloud API](/usage/cloud/cloud-api) to programmatically interact with OpenHands.
diff --git a/docs/usage/cloud/gitlab-installation.mdx b/docs/usage/cloud/gitlab-installation.mdx
deleted file mode 100644
index 028b984facc23e10bf2902fc0380c62ec5ec0338..0000000000000000000000000000000000000000
--- a/docs/usage/cloud/gitlab-installation.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
----
-title: GitLab Integration
-description: This guide walks you through the process of installing OpenHands Cloud for your GitLab repositories. Once
- set up, it will allow OpenHands to work with your GitLab repository.
----
-
-## Prerequisites
-
-- Signed in to [OpenHands Cloud](https://app.all-hands.dev) with [a GitLab account](/usage/cloud/openhands-cloud).
-
-## Adding GitLab Repository Access
-
-Upon signing into OpenHands Cloud with a GitLab account, OpenHands will have access to your repositories.
-
-## Working With GitLab Repos in Openhands Cloud
-
-After signing in with a Gitlab account, use the `select a repo` and `select a branch` dropdowns to select the
-appropriate repository and branch you'd like OpenHands to work on. Then click on `Launch` to start the conversation!
-
-
-
-## Next Steps
-
-- [Learn about the Cloud UI](/usage/cloud/cloud-ui).
-- [Use the Cloud API](/usage/cloud/cloud-api) to programmatically interact with OpenHands.
diff --git a/docs/usage/cloud/openhands-cloud.mdx b/docs/usage/cloud/openhands-cloud.mdx
deleted file mode 100644
index 053453afb2b6a4b6c52c07d764132f507b3f6d43..0000000000000000000000000000000000000000
--- a/docs/usage/cloud/openhands-cloud.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: Getting Started
-description: Getting started with OpenHands Cloud.
----
-
-## Accessing OpenHands Cloud
-
-OpenHands Cloud is the hosted cloud version of All Hands AI's OpenHands. To get started with OpenHands Cloud,
-visit [app.all-hands.dev](https://app.all-hands.dev).
-
-You'll be prompted to connect with your GitHub or GitLab account:
-
-1. Click `Log in with GitHub` or `Log in with GitLab`.
-2. Review the permissions requested by OpenHands and authorize the application.
- - OpenHands will require certain permissions from your account. To read more about these permissions,
- you can click the `Learn more` link on the authorization page.
-3. Review and accept the `terms of service` and select `Continue`.
-
-## Next Steps
-
-Once you've connected your account, you can:
-
-- [Install GitHub Integration](/usage/cloud/github-installation) to use OpenHands with your GitHub repositories.
-- [Install GitLab Integration](/usage/cloud/gitlab-installation) to use OpenHands with your GitLab repositories.
-- [Learn about the Cloud UI](/usage/cloud/cloud-ui).
-- [Use the Cloud API](/usage/cloud/cloud-api) to programmatically interact with OpenHands.
diff --git a/docs/usage/configuration-options.mdx b/docs/usage/configuration-options.mdx
deleted file mode 100644
index e39c5eca958e03f8b986b47abd4fa3de6bd8f4a1..0000000000000000000000000000000000000000
--- a/docs/usage/configuration-options.mdx
+++ /dev/null
@@ -1,413 +0,0 @@
----
-title: Configuration Options
-description: This page outlines all available configuration options for OpenHands, allowing you to customize its behavior and integrate it with other services. In GUI Mode, any settings applied through the Settings UI will take precedence.
----
-
-## Core Configuration
-
-The core configuration options are defined in the `[core]` section of the `config.toml` file.
-
-### API Keys
-- `e2b_api_key`
- - Type: `str`
- - Default: `""`
- - Description: API key for E2B
-
-- `modal_api_token_id`
- - Type: `str`
- - Default: `""`
- - Description: API token ID for Modal
-
-- `modal_api_token_secret`
- - Type: `str`
- - Default: `""`
- - Description: API token secret for Modal
-
-### Workspace
-- `workspace_base` **(Deprecated)**
- - Type: `str`
- - Default: `"./workspace"`
- - Description: Base path for the workspace. **Deprecated: Use `SANDBOX_VOLUMES` instead.**
-
-- `cache_dir`
- - Type: `str`
- - Default: `"/tmp/cache"`
- - Description: Cache directory path
-
-### Debugging and Logging
-- `debug`
- - Type: `bool`
- - Default: `false`
- - Description: Enable debugging
-
-- `disable_color`
- - Type: `bool`
- - Default: `false`
- - Description: Disable color in terminal output
-
-### Trajectories
-- `save_trajectory_path`
- - Type: `str`
- - Default: `"./trajectories"`
- - Description: Path to store trajectories (can be a folder or a file). If it's a folder, the trajectories will be saved in a file named with the session id name and .json extension, in that folder.
-
-- `replay_trajectory_path`
- - Type: `str`
- - Default: `""`
- - Description: Path to load a trajectory and replay. If given, must be a path to the trajectory file in JSON format. The actions in the trajectory file would be replayed first before any user instruction is executed.
-
-### File Store
-- `file_store_path`
- - Type: `str`
- - Default: `"/tmp/file_store"`
- - Description: File store path
-
-- `file_store`
- - Type: `str`
- - Default: `"memory"`
- - Description: File store type
-
-- `file_uploads_allowed_extensions`
- - Type: `list of str`
- - Default: `[".*"]`
- - Description: List of allowed file extensions for uploads
-
-- `file_uploads_max_file_size_mb`
- - Type: `int`
- - Default: `0`
- - Description: Maximum file size for uploads, in megabytes
-
-- `file_uploads_restrict_file_types`
- - Type: `bool`
- - Default: `false`
- - Description: Restrict file types for file uploads
-
-- `file_uploads_allowed_extensions`
- - Type: `list of str`
- - Default: `[".*"]`
- - Description: List of allowed file extensions for uploads
-
-### Task Management
-- `max_budget_per_task`
- - Type: `float`
- - Default: `0.0`
- - Description: Maximum budget per task (0.0 means no limit)
-
-- `max_iterations`
- - Type: `int`
- - Default: `100`
- - Description: Maximum number of iterations
-
-### Sandbox Configuration
-- `volumes`
- - Type: `str`
- - Default: `None`
- - Description: Volume mounts in the format 'host_path:container_path[:mode]', e.g. '/my/host/dir:/workspace:rw'. Multiple mounts can be specified using commas, e.g. '/path1:/workspace/path1,/path2:/workspace/path2:ro'
-
-- `workspace_mount_path_in_sandbox` **(Deprecated)**
- - Type: `str`
- - Default: `"/workspace"`
- - Description: Path to mount the workspace in the sandbox. **Deprecated: Use `SANDBOX_VOLUMES` instead.**
-
-- `workspace_mount_path` **(Deprecated)**
- - Type: `str`
- - Default: `""`
- - Description: Path to mount the workspace. **Deprecated: Use `SANDBOX_VOLUMES` instead.**
-
-- `workspace_mount_rewrite` **(Deprecated)**
- - Type: `str`
- - Default: `""`
- - Description: Path to rewrite the workspace mount path to. You can usually ignore this, it refers to special cases of running inside another container. **Deprecated: Use `SANDBOX_VOLUMES` instead.**
-
-### Miscellaneous
-- `run_as_openhands`
- - Type: `bool`
- - Default: `true`
- - Description: Run as OpenHands
-
-- `runtime`
- - Type: `str`
- - Default: `"docker"`
- - Description: Runtime environment
-
-- `default_agent`
- - Type: `str`
- - Default: `"CodeActAgent"`
- - Description: Name of the default agent
-
-- `jwt_secret`
- - Type: `str`
- - Default: `uuid.uuid4().hex`
- - Description: JWT secret for authentication. Please set it to your own value.
-
-## LLM Configuration
-
-The LLM (Large Language Model) configuration options are defined in the `[llm]` section of the `config.toml` file.
-
-To use these with the docker command, pass in `-e LLM_`. Example: `-e LLM_NUM_RETRIES`.
-
-
-For development setups, you can also define custom named LLM configurations. See [Custom LLM Configurations](./llms/custom-llm-configs) for details.
-
-
-**AWS Credentials**
-- `aws_access_key_id`
- - Type: `str`
- - Default: `""`
- - Description: AWS access key ID
-
-- `aws_region_name`
- - Type: `str`
- - Default: `""`
- - Description: AWS region name
-
-- `aws_secret_access_key`
- - Type: `str`
- - Default: `""`
- - Description: AWS secret access key
-
-### API Configuration
-- `api_key`
- - Type: `str`
- - Default: `None`
- - Description: API key to use
-
-- `base_url`
- - Type: `str`
- - Default: `""`
- - Description: API base URL
-
-- `api_version`
- - Type: `str`
- - Default: `""`
- - Description: API version
-
-- `input_cost_per_token`
- - Type: `float`
- - Default: `0.0`
- - Description: Cost per input token
-
-- `output_cost_per_token`
- - Type: `float`
- - Default: `0.0`
- - Description: Cost per output token
-
-### Custom LLM Provider
-- `custom_llm_provider`
- - Type: `str`
- - Default: `""`
- - Description: Custom LLM provider
-
-
-### Message Handling
-- `max_message_chars`
- - Type: `int`
- - Default: `30000`
- - Description: The approximate maximum number of characters in the content of an event included in the prompt to the LLM. Larger observations are truncated.
-
-- `max_input_tokens`
- - Type: `int`
- - Default: `0`
- - Description: Maximum number of input tokens
-
-- `max_output_tokens`
- - Type: `int`
- - Default: `0`
- - Description: Maximum number of output tokens
-
-### Model Selection
-- `model`
- - Type: `str`
- - Default: `"claude-3-5-sonnet-20241022"`
- - Description: Model to use
-
-### Retrying
-- `num_retries`
- - Type: `int`
- - Default: `8`
- - Description: Number of retries to attempt
-
-- `retry_max_wait`
- - Type: `int`
- - Default: `120`
- - Description: Maximum wait time (in seconds) between retry attempts
-
-- `retry_min_wait`
- - Type: `int`
- - Default: `15`
- - Description: Minimum wait time (in seconds) between retry attempts
-
-- `retry_multiplier`
- - Type: `float`
- - Default: `2.0`
- - Description: Multiplier for exponential backoff calculation
-
-### Advanced Options
-- `drop_params`
- - Type: `bool`
- - Default: `false`
- - Description: Drop any unmapped (unsupported) params without causing an exception
-
-- `caching_prompt`
- - Type: `bool`
- - Default: `true`
- - Description: Using the prompt caching feature if provided by the LLM and supported
-
-- `ollama_base_url`
- - Type: `str`
- - Default: `""`
- - Description: Base URL for the OLLAMA API
-
-- `temperature`
- - Type: `float`
- - Default: `0.0`
- - Description: Temperature for the API
-
-- `timeout`
- - Type: `int`
- - Default: `0`
- - Description: Timeout for the API
-
-- `top_p`
- - Type: `float`
- - Default: `1.0`
- - Description: Top p for the API
-
-- `disable_vision`
- - Type: `bool`
- - Default: `None`
- - Description: If model is vision capable, this option allows to disable image processing (useful for cost reduction)
-
-## Agent Configuration
-
-The agent configuration options are defined in the `[agent]` and `[agent.]` sections of the `config.toml` file.
-
-### LLM Configuration
-- `llm_config`
- - Type: `str`
- - Default: `'your-llm-config-group'`
- - Description: The name of the LLM config to use
-
-### ActionSpace Configuration
-- `function_calling`
- - Type: `bool`
- - Default: `true`
- - Description: Whether function calling is enabled
-
-- `enable_browsing`
- - Type: `bool`
- - Default: `false`
- - Description: Whether browsing delegate is enabled in the action space (only works with function calling)
-
-- `enable_llm_editor`
- - Type: `bool`
- - Default: `false`
- - Description: Whether LLM editor is enabled in the action space (only works with function calling)
-
-- `enable_jupyter`
- - Type: `bool`
- - Default: `false`
- - Description: Whether Jupyter is enabled in the action space
-
-- `enable_history_truncation`
- - Type: `bool`
- - Default: `true`
- - Description: Whether history should be truncated to continue the session when hitting LLM context length limit
-
-### Microagent Usage
-- `enable_prompt_extensions`
- - Type: `bool`
- - Default: `true`
- - Description: Whether to use microagents at all
-
-- `disabled_microagents`
- - Type: `list of str`
- - Default: `None`
- - Description: A list of microagents to disable
-
-## Sandbox Configuration
-
-The sandbox configuration options are defined in the `[sandbox]` section of the `config.toml` file.
-
-
-
-To use these with the docker command, pass in `-e SANDBOX_`. Example: `-e SANDBOX_TIMEOUT`.
-
-### Execution
-- `timeout`
- - Type: `int`
- - Default: `120`
- - Description: Sandbox timeout in seconds
-
-- `user_id`
- - Type: `int`
- - Default: `1000`
- - Description: Sandbox user ID
-
-### Container Image
-- `base_container_image`
- - Type: `str`
- - Default: `"nikolaik/python-nodejs:python3.12-nodejs22"`
- - Description: Container image to use for the sandbox
-
-### Networking
-- `use_host_network`
- - Type: `bool`
- - Default: `false`
- - Description: Use host network
-
-- `runtime_binding_address`
- - Type: `str`
- - Default: `0.0.0.0`
- - Description: The binding address for the runtime ports. It specifies which network interface on the host machine Docker should bind the runtime ports to.
-
-### Linting and Plugins
-- `enable_auto_lint`
- - Type: `bool`
- - Default: `false`
- - Description: Enable auto linting after editing
-
-- `initialize_plugins`
- - Type: `bool`
- - Default: `true`
- - Description: Whether to initialize plugins
-
-### Dependencies and Environment
-- `runtime_extra_deps`
- - Type: `str`
- - Default: `""`
- - Description: Extra dependencies to install in the runtime image
-
-- `runtime_startup_env_vars`
- - Type: `dict`
- - Default: `{}`
- - Description: Environment variables to set at the launch of the runtime
-
-### Evaluation
-- `browsergym_eval_env`
- - Type: `str`
- - Default: `""`
- - Description: BrowserGym environment to use for evaluation
-
-## Security Configuration
-
-The security configuration options are defined in the `[security]` section of the `config.toml` file.
-
-To use these with the docker command, pass in `-e SECURITY_ `. Example: `-e SECURITY_CONFIRMATION_MODE`.
-
-### Confirmation Mode
-- `confirmation_mode`
- - Type: `bool`
- - Default: `false`
- - Description: Enable confirmation mode
-
-### Security Analyzer
-- `security_analyzer`
- - Type: `str`
- - Default: `""`
- - Description: The security analyzer to use
-
----
-
-> **Note**: Adjust configurations carefully, especially for memory, security, and network-related settings to ensure optimal performance and security.
-Please note that the configuration options may be subject to change in future versions of OpenHands. It's recommended to refer to the official documentation for the most up-to-date information.
diff --git a/docs/usage/feedback.mdx b/docs/usage/feedback.mdx
deleted file mode 100644
index e47ebdc34702633adc2fb6b557b769eda86ce603..0000000000000000000000000000000000000000
--- a/docs/usage/feedback.mdx
+++ /dev/null
@@ -1,50 +0,0 @@
-# ✅ Providing Feedback
-
-When using OpenHands, you will encounter cases where things work well, and others where they don't. We encourage you to
-provide feedback when you use OpenHands to help give feedback to the development team, and perhaps more importantly,
-create an open corpus of coding agent training examples -- Share-OpenHands!
-
-## 📝 How to Provide Feedback
-
-Providing feedback is easy! When you are using OpenHands, you can press the thumbs-up or thumbs-down button at any point
-during your interaction. You will be prompted to provide your email address
-(e.g. so we can contact you if we want to ask any follow-up questions), and you can choose whether you want to provide feedback publicly or privately.
-
-VIDEO
-
-## 📜 Data Use and Privacy
-
-### Data sharing settings
-
-When you submit data, you can submit it either publicly or privately.
-
-- **Public** data will be distributed under the MIT License, like OpenHands itself, and can be used by the community to
-train and test models. Obviously, feedback that you can make public will be more valuable for the community as a whole,
-so when you are not dealing with sensitive information, we would encourage you to choose this option!
-- **Private** data will be made available to the OpenHands team for the purpose of improving OpenHands.
-However, a link with a unique ID will still be created that you can share publicly with others.
-
-### Who collects and stores the data?
-
-The data is collected and stored by [All Hands AI](https://all-hands.dev), a company founded by OpenHands maintainers to support and improve OpenHands.
-
-### How will public data be released?
-
-The public data will be released when we hit fixed milestones, such as 1,000 public examples, 10,000 public examples, etc.
-At this time, we will follow the following release process:
-
-1. All people who contributed public feedback will receive an email describing the data release and being given an opportunity to opt out.
-2. The person or people in charge of the data release will perform quality control of the data, removing low-quality feedback,
-removing email submitter email addresses, and attempting to remove any sensitive information.
-3. The data will be released publicly under the MIT license through commonly used sites such as GitHub or Hugging Face.
-
-### What if I want my data deleted?
-
-For data on the All Hands AI servers, we are happy to delete it at request:
-
-**One Piece of Data:** If you want one piece of data deleted, we will shortly be adding a mechanism to delete pieces of
-data using the link and password that is displayed on the interface when you submit data.
-
-**All Data:** If you would like all pieces of your data deleted, or you do not have the ID and password that you
-received when submitting the data, please contact `contact@all-hands.dev` from the email address that you registered
-when you originally submitted the data.
diff --git a/docs/usage/getting-started.mdx b/docs/usage/getting-started.mdx
deleted file mode 100644
index 6cdf766938eaf97a69711075ff2ba28bafc97625..0000000000000000000000000000000000000000
--- a/docs/usage/getting-started.mdx
+++ /dev/null
@@ -1,100 +0,0 @@
----
-title: Start Building
-description: So you've [run OpenHands](./installation) and have [set up your LLM](./installation#setup). Now what?
-icon: code
----
-
-OpenHands can assist with a range of engineering tasks. However, the technology is still new, and we’re far from having
-agents that can handle complex tasks independently. It’s important to understand what the agent does well and where it
-needs support.
-
-## Hello World
-
-Start with a simple "hello world" example. It might be trickier than it seems!
-
-Prompt the agent with:
-> Write a bash script hello.sh that prints "hello world!"
-
-The agent will write the script, set the correct permissions, and run it to check the output.
-
-You can continue prompting the agent to refine your code. This is a great way to
-work with agents. Start simple, and iterate.
-
-> Modify hello.sh so that it accepts a name as the first argument, but defaults to "world"
-
-You can also use any language you need. The agent may need time to set up the environment.
-
-> Please convert hello.sh to a Ruby script, and run it
-
-## Building From Scratch
-
-Agents excel at "greenfield" tasks, where they don’t need context about existing code and
-they can start from scratch.
-Begin with a simple task and iterate from there. Be specific about what you want and the tech stack.
-
-For example, we might build a TODO app:
-
-> Build a frontend-only TODO app in React. All state should be stored in localStorage.
-
-Once the basic structure is in place, continue refining:
-
-> Allow adding an optional due date to each task.
-
-Just like normal development, commit and push your code often.
-This way you can always revert back to an old state if the agent goes off track.
-You can ask the agent to commit and push for you:
-
-> Commit the changes and push them to a new branch called "feature/due-dates"
-
-## Adding New Code
-
-OpenHands is great at adding new code to an existing codebase.
-
-For instance, you can ask OpenHands to add a GitHub action that lints your code. It might check your codebase to
-determine the language, then create a new file in `.github/workflows/lint.yml`.
-
-> Add a GitHub action that lints the code in this repository.
-
-Some tasks need more context. While OpenHands can use commands like ls and grep to search, providing context upfront
-speeds things up and reduces token usage.
-
-> Modify ./backend/api/routes.js to add a new route that returns a list of all tasks.
-
-> Add a new React component to the ./frontend/components directory to display a list of Widgets.
-> It should use the existing Widget component.
-
-## Refactoring
-
-OpenHands does great at refactoring code in small chunks. Rather than rearchitecting the entire codebase,
-it's more effective to break up long files and functions or rename variables.
-
-> Rename all the single-letter variables in ./app.go.
-
-> Split the `build_and_deploy_widgets` function into two functions, `build_widgets` and `deploy_widgets` in widget.php.
-
-> Break ./api/routes.js into separate files for each route.
-
-## Bug Fixes
-
-OpenHands can help track down and fix bugs, but bug fixing can be tricky and often requires more context.
-It’s helpful if you’ve already diagnosed the issue and just need OpenHands to handle the logic.
-
-> The email field in the `/subscribe` endpoint is rejecting .io domains. Fix this.
-
-> The `search_widgets` function in ./app.py is doing a case-sensitive search. Make it case-insensitive.
-
-For bug fixing, test-driven development can be really useful. You can ask the agent to write a new test and iterate
-until the bug is fixed:
-
-> The `hello` function crashes on the empty string. Write a test that reproduces this bug, then fix the code so it passes.
-
-## More
-
-OpenHands can assist with nearly any coding task, but it takes some practice to get the best results.
-Keep these tips in mind:
-* Keep your tasks small.
-* Be specific.
-* Provide plenty of context.
-* Commit and push frequently.
-
-See [Prompting Best Practices](./prompting/prompting-best-practices) for more tips on how to get the most out of OpenHands.
diff --git a/docs/usage/how-to/cli-mode.mdx b/docs/usage/how-to/cli-mode.mdx
deleted file mode 100644
index f70e4fce22ea9a371168fa5696342fa23dcfbd40..0000000000000000000000000000000000000000
--- a/docs/usage/how-to/cli-mode.mdx
+++ /dev/null
@@ -1,105 +0,0 @@
----
-title: CLI Mode
-description: CLI mode provides a powerful interactive Command-Line Interface (CLI) that lets you engage with OpenHands directly from your terminal.
----
-
-This mode is different from the [headless mode](./headless-mode), which is non-interactive and better for scripting.
-
-## Getting Started
-
-### Running with Python
-
-1. Ensure you have followed the [Development setup instructions](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md).
-2. Set your model, API key, and other preferences using environment variables or with the [`config.toml`](https://github.com/All-Hands-AI/OpenHands/blob/main/config.template.toml) file.
-3. Launch an interactive OpenHands conversation from the command line:
-
-```bash
-poetry run python -m openhands.cli.main
-```
-
-This command opens an interactive prompt where you can type tasks or commands and get responses from OpenHands.
-
-### Running with Docker
-
-1. Set the following environment variables in your terminal:
- - `SANDBOX_VOLUMES` to specify the directory you want OpenHands to access ([See using SANDBOX_VOLUMES for more info](../runtimes/docker#using-sandbox_volumes))
- - `LLM_MODEL` - the LLM model to use (e.g. `export LLM_MODEL="anthropic/claude-sonnet-4-20250514"`)
- - `LLM_API_KEY` - your API key (e.g. `export LLM_API_KEY="sk_test_12345"`)
-
-2. Run the following command:
-
-```bash
-docker run -it \
- --pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.41-nikolaik \
- -e SANDBOX_USER_ID=$(id -u) \
- -e SANDBOX_VOLUMES=$SANDBOX_VOLUMES \
- -e LLM_API_KEY=$LLM_API_KEY \
- -e LLM_MODEL=$LLM_MODEL \
- -v /var/run/docker.sock:/var/run/docker.sock \
- -v ~/.openhands-state:/.openhands-state \
- --add-host host.docker.internal:host-gateway \
- --name openhands-app-$(date +%Y%m%d%H%M%S) \
- docker.all-hands.dev/all-hands-ai/openhands:0.41 \
- python -m openhands.cli.main --override-cli-mode true
-```
-
-This launches the CLI in Docker, allowing you to interact with OpenHands as described above.
-
-The `-e SANDBOX_USER_ID=$(id -u)` ensures files created by the agent in your workspace have the correct permissions.
-
-## Interactive CLI Overview
-
-### What is CLI Mode?
-
-CLI mode enables real-time interaction with OpenHands agents. You can type natural language tasks, use interactive
-commands, and receive instant feedback—all inside your terminal.
-
-### Starting a Conversation
-
-When you start the CLI, you'll see a welcome message and a prompt (`>`). Enter your first task or type a command to
-begin your conversation.
-
-### Available Commands
-
-You can use the following commands whenever the prompt (`>`) is displayed:
-
-| Command | Description |
-|--------------|----------------------------------------------------------------|
-| `/help` | Show all available interactive commands and their descriptions |
-| `/exit` | Exit the application |
-| `/init` | Initialize a new repository for agent exploration |
-| `/status` | Show conversation details and usage metrics |
-| `/new` | Start a new conversation |
-| `/settings` | View and modify current LLM/agent settings |
-| `/resume` | Resume the agent if paused |
-
-#### Settings and Configuration
-
-You can update your model, API key, agent, and other preferences interactively using the `/settings` command. Just
-follow the prompts:
-
-- **Basic settings**: Choose a model/provider and enter your API key.
-- **Advanced settings**: Set custom endpoints, enable or disable confirmation mode, and configure memory condensation.
-
-Settings can also be managed via the `config.toml` file.
-
-#### Repository Initialization
-
-The `/init` command helps the agent understand your project by creating a `.openhands/microagents/repo.md` file with
-project details and structure. Use this when onboarding the agent to a new codebase.
-
-#### Agent Pause/Resume Feature
-
-You can pause the agent while it is running by pressing `Ctrl-P`. To continue the conversation after pausing, simply
-type `/resume` at the prompt.
-
-## Tips and Troubleshooting
-
-- Use `/help` at any time to see the list of available commands.
-- If you encounter permission issues, make sure your workspace directory is trusted and all required environment variables are set correctly.
-- For advanced LLM configuration, use the advanced options in `/settings`.
-- When confirmation mode is enabled, the CLI will prompt before sensitive operations. You can type `a` or `always` at the first confirmation prompt to automatically confirm subsequent actions for the current conversation.
-- If you want to start over, use `/new` to begin a fresh conversation without restarting the CLI.
-
----
diff --git a/docs/usage/how-to/custom-sandbox-guide.mdx b/docs/usage/how-to/custom-sandbox-guide.mdx
deleted file mode 100644
index dc32910f31cd06b6627c71b5e12d0a0a1a7340d9..0000000000000000000000000000000000000000
--- a/docs/usage/how-to/custom-sandbox-guide.mdx
+++ /dev/null
@@ -1,100 +0,0 @@
----
-title: Custom Sandbox
-description: This guide is for users that would like to use their own custom Docker image for the runtime. For example, with certain tools or programming languages pre-installed.
----
-
-The sandbox is where the agent performs its tasks. Instead of running commands directly on your computer
-(which could be risky), the agent runs them inside a Docker container.
-
-The default OpenHands sandbox (`python-nodejs:python3.12-nodejs22`
-from [nikolaik/python-nodejs](https://hub.docker.com/r/nikolaik/python-nodejs)) comes with some packages installed such
-as python and Node.js, but you may need other software that is not installed by default.
-
-You have two options for customization:
-
-- Use an existing image with the required software.
-- Create your own custom Docker image.
-
-If you choose the first option, you can skip the `Create Your Docker Image` section.
-
-## Create Your Docker Image
-
-To create a custom Docker image, it must be Debian based.
-
-For example, if you want OpenHands to have `ruby` installed, you could create a `Dockerfile` with the following content:
-
-```dockerfile
-FROM nikolaik/python-nodejs:python3.12-nodejs22
-
-# Install required packages
-RUN apt-get update && apt-get install -y ruby
-```
-
-Or you could use a Ruby-specific base image:
-
-```dockerfile
-FROM ruby:latest
-```
-
-Save this file in a folder. Then, build your Docker image (e.g., named custom-image) by navigating to the folder in
-the terminal and running:
-```bash
-docker build -t custom-image .
-```
-
-This will produce a new image called `custom-image`, which will be available in Docker.
-
-## Using the Docker Command
-
-When running OpenHands using [the docker command](/usage/installation#start-the-app), replace
-`-e SANDBOX_RUNTIME_CONTAINER_IMAGE=...` with `-e SANDBOX_BASE_CONTAINER_IMAGE=<custom image name>`:
-
-```commandline
-docker run -it --rm --pull=always \
- -e SANDBOX_BASE_CONTAINER_IMAGE=custom-image \
- ...
-```
-
-## Using the Development Workflow
-
-### Setup
-
-First, ensure you can run OpenHands by following the instructions in [Development.md](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md).
-
-### Specify the Base Sandbox Image
-
-In the `config.toml` file within the OpenHands directory, set the `base_container_image` to the image you want to use.
-This can be an image you’ve already pulled or one you’ve built:
-
-```toml
-[core]
-...
-[sandbox]
-base_container_image="custom-image"
-```
-
-### Additional Configuration Options
-
-The `config.toml` file supports several other options for customizing your sandbox:
-
-```toml
-[core]
-# Install additional dependencies when the runtime is built
-# Can contain any valid shell commands
-# If you need the path to the Python interpreter in any of these commands, you can use the $OH_INTERPRETER_PATH variable
-runtime_extra_deps = """
-pip install numpy pandas
-apt-get update && apt-get install -y ffmpeg
-"""
-
-# Set environment variables for the runtime
-# Useful for configuration that needs to be available at runtime
-runtime_startup_env_vars = { DATABASE_URL = "postgresql://user:pass@localhost/db" }
-
-# Specify platform for multi-architecture builds (e.g., "linux/amd64" or "linux/arm64")
-platform = "linux/amd64"
-```
-
-### Run
-
-Run OpenHands by running `make run` in the top-level directory.
diff --git a/docs/usage/how-to/debugging.mdx b/docs/usage/how-to/debugging.mdx
deleted file mode 100644
index b8a4ead3df618b56e7c74e90316a5acc7d524d37..0000000000000000000000000000000000000000
--- a/docs/usage/how-to/debugging.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: Debugging
----
-
-The following is intended as a primer on debugging OpenHands for Development purposes.
-
-## Server / VSCode
-
-The following `launch.json` will allow debugging the agent, controller and server elements, but not the sandbox (Which runs inside docker). It will ignore any changes inside the `workspace/` directory:
-
-```
-{
- "version": "0.2.0",
- "configurations": [
- {
- "name": "OpenHands CLI",
- "type": "debugpy",
- "request": "launch",
- "module": "openhands.cli.main",
- "justMyCode": false
- },
- {
- "name": "OpenHands WebApp",
- "type": "debugpy",
- "request": "launch",
- "module": "uvicorn",
- "args": [
- "openhands.server.listen:app",
- "--reload",
- "--reload-exclude",
- "${workspaceFolder}/workspace",
- "--port",
- "3000"
- ],
- "justMyCode": false
- }
- ]
-}
-```
-
-More specific debugging configurations which include more parameters may be specified:
-
-```
- ...
- {
- "name": "Debug CodeAct",
- "type": "debugpy",
- "request": "launch",
- "module": "openhands.core.main",
- "args": [
- "-t",
- "Ask me what your task is.",
- "-d",
- "${workspaceFolder}/workspace",
- "-c",
- "CodeActAgent",
- "-l",
- "llm.o1",
- "-n",
- "prompts"
- ],
- "justMyCode": false
- }
- ...
-```
-
-Values in the snippet above can be updated such that:
-
- * *t*: the task
- * *d*: the openhands workspace directory
- * *c*: the agent
- * *l*: the LLM config (pre-defined in config.toml)
- * *n*: session name (e.g. eventstream name)
diff --git a/docs/usage/how-to/development-overview.mdx b/docs/usage/how-to/development-overview.mdx
deleted file mode 100644
index dbd7a5a5bae83ea8ff77a23ee584716c6e1bb530..0000000000000000000000000000000000000000
--- a/docs/usage/how-to/development-overview.mdx
+++ /dev/null
@@ -1,71 +0,0 @@
----
-title: Development Overview
-description: This guide provides an overview of the key documentation resources available in the OpenHands repository. Whether you're looking to contribute, understand the architecture, or work on specific components, these resources will help you navigate the codebase effectively.
----
-
-## Core Documentation
-
-### Project Fundamentals
-- **Main Project Overview** (`/README.md`)
- The primary entry point for understanding OpenHands, including features and basic setup instructions.
-
-- **Development Guide** (`/Development.md`)
- Comprehensive guide for developers working on OpenHands, including setup, requirements, and development workflows.
-
-- **Contributing Guidelines** (`/CONTRIBUTING.md`)
- Essential information for contributors, covering code style, PR process, and contribution workflows.
-
-### Component Documentation
-
-#### Frontend
-- **Frontend Application** (`/frontend/README.md`)
- Complete guide for setting up and developing the React-based frontend application.
-
-#### Backend
-- **Backend Implementation** (`/openhands/README.md`)
- Detailed documentation of the Python backend implementation and architecture.
-
-- **Server Documentation** (`/openhands/server/README.md`)
- Server implementation details, API documentation, and service architecture.
-
-- **Runtime Environment** (`/openhands/runtime/README.md`)
- Documentation covering the runtime environment, execution model, and runtime configurations.
-
-#### Infrastructure
-- **Container Documentation** (`/containers/README.md`)
- Comprehensive information about Docker containers, deployment strategies, and container management.
-
-### Testing and Evaluation
-- **Unit Testing Guide** (`/tests/unit/README.md`)
- Instructions for writing, running, and maintaining unit tests.
-
-- **Evaluation Framework** (`/evaluation/README.md`)
- Documentation for the evaluation framework, benchmarks, and performance testing.
-
-### Advanced Features
-- **Microagents Architecture** (`/microagents/README.md`)
- Detailed information about the microagents architecture, implementation, and usage.
-
-### Documentation Standards
-- **Documentation Style Guide** (`/docs/DOC_STYLE_GUIDE.md`)
- Standards and guidelines for writing and maintaining project documentation.
-
-## Getting Started with Development
-
-If you're new to developing with OpenHands, we recommend following this sequence:
-
-1. Start with the main `README.md` to understand the project's purpose and features
-2. Review the `CONTRIBUTING.md` guidelines if you plan to contribute
-3. Follow the setup instructions in `Development.md`
-4. Dive into specific component documentation based on your area of interest:
- - Frontend developers should focus on `/frontend/README.md`
- - Backend developers should start with `/openhands/README.md`
- - Infrastructure work should begin with `/containers/README.md`
-
-## Documentation Updates
-
-When making changes to the codebase, please ensure that:
-1. Relevant documentation is updated to reflect your changes
-2. New features are documented in the appropriate README files
-3. Any API changes are reflected in the server documentation
-4. Documentation follows the style guide in `/docs/DOC_STYLE_GUIDE.md`
diff --git a/docs/usage/how-to/evaluation-harness.mdx b/docs/usage/how-to/evaluation-harness.mdx
deleted file mode 100644
index 474a2b573b2d0804942b104c951960de0dd4af88..0000000000000000000000000000000000000000
--- a/docs/usage/how-to/evaluation-harness.mdx
+++ /dev/null
@@ -1,280 +0,0 @@
----
-title: Evaluation Harness
----
-
-This guide provides an overview of how to integrate your own evaluation benchmark into the OpenHands framework.
-
-## Setup Environment and LLM Configuration
-
-Please follow instructions [here](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md) to setup your local development environment.
-OpenHands in development mode uses `config.toml` to keep track of most configurations.
-
-Here's an example configuration file you can use to define and use multiple LLMs:
-
-```toml
-[llm]
-# IMPORTANT: add your API key here, and set the model to the one you want to evaluate
-model = "claude-3-5-sonnet-20241022"
-api_key = "sk-XXX"
-
-[llm.eval_gpt4_1106_preview_llm]
-model = "gpt-4-1106-preview"
-api_key = "XXX"
-temperature = 0.0
-
-[llm.eval_some_openai_compatible_model_llm]
-model = "openai/MODEL_NAME"
-base_url = "https://OPENAI_COMPATIBLE_URL/v1"
-api_key = "XXX"
-temperature = 0.0
-```
-
-
-## How to use OpenHands in the command line
-
-OpenHands can be run from the command line using the following format:
-
-```bash
-poetry run python ./openhands/core/main.py \
- -i \
- -t "" \
- -c \
- -l
-```
-
-For example:
-
-```bash
-poetry run python ./openhands/core/main.py \
- -i 10 \
- -t "Write me a bash script that prints hello world." \
- -c CodeActAgent \
- -l llm
-```
-
-This command runs OpenHands with:
-- A maximum of 10 iterations
-- The specified task description
-- Using the CodeActAgent
-- With the LLM configuration defined in the `llm` section of your `config.toml` file
-
-## How does OpenHands work
-
-The main entry point for OpenHands is in `openhands/core/main.py`. Here's a simplified flow of how it works:
-
-1. Parse command-line arguments and load the configuration
-2. Create a runtime environment using `create_runtime()`
-3. Initialize the specified agent
-4. Run the controller using `run_controller()`, which:
- - Attaches the runtime to the agent
- - Executes the agent's task
- - Returns a final state when complete
-
-The `run_controller()` function is the core of OpenHands's execution. It manages the interaction between the agent, the runtime, and the task, handling things like user input simulation and event processing.
-
-
-## Easiest way to get started: Exploring Existing Benchmarks
-
-We encourage you to review the various evaluation benchmarks available in the [`evaluation/benchmarks/` directory](https://github.com/All-Hands-AI/OpenHands/blob/main/evaluation/benchmarks) of our repository.
-
-To integrate your own benchmark, we suggest starting with the one that most closely resembles your needs. This approach can significantly streamline your integration process, allowing you to build upon existing structures and adapt them to your specific requirements.
-
-## How to create an evaluation workflow
-
-
-To create an evaluation workflow for your benchmark, follow these steps:
-
-1. Import relevant OpenHands utilities:
- ```python
- import openhands.agenthub
- from evaluation.utils.shared import (
- EvalMetadata,
- EvalOutput,
- make_metadata,
- prepare_dataset,
- reset_logger_for_multiprocessing,
- run_evaluation,
- )
- from openhands.controller.state.state import State
- from openhands.core.config import (
- AppConfig,
- SandboxConfig,
- get_llm_config_arg,
- parse_arguments,
- )
- from openhands.core.logger import openhands_logger as logger
- from openhands.core.main import create_runtime, run_controller
- from openhands.events.action import CmdRunAction
- from openhands.events.observation import CmdOutputObservation, ErrorObservation
- from openhands.runtime.runtime import Runtime
- ```
-
-2. Create a configuration:
- ```python
- def get_config(instance: pd.Series, metadata: EvalMetadata) -> AppConfig:
- config = AppConfig(
- default_agent=metadata.agent_class,
- runtime='docker',
- max_iterations=metadata.max_iterations,
- sandbox=SandboxConfig(
- base_container_image='your_container_image',
- enable_auto_lint=True,
- timeout=300,
- ),
- )
- config.set_llm_config(metadata.llm_config)
- return config
- ```
-
-3. Initialize the runtime and set up the evaluation environment:
- ```python
- def initialize_runtime(runtime: Runtime, instance: pd.Series):
- # Set up your evaluation environment here
- # For example, setting environment variables, preparing files, etc.
- pass
- ```
-
-4. Create a function to process each instance:
- ```python
- from openhands.utils.async_utils import call_async_from_sync
- def process_instance(instance: pd.Series, metadata: EvalMetadata) -> EvalOutput:
- config = get_config(instance, metadata)
- runtime = create_runtime(config)
- call_async_from_sync(runtime.connect)
- initialize_runtime(runtime, instance)
-
- instruction = get_instruction(instance, metadata)
-
- state = run_controller(
- config=config,
- task_str=instruction,
- runtime=runtime,
- fake_user_response_fn=your_user_response_function,
- )
-
- # Evaluate the agent's actions
- evaluation_result = await evaluate_agent_actions(runtime, instance)
-
- return EvalOutput(
- instance_id=instance.instance_id,
- instruction=instruction,
- test_result=evaluation_result,
- metadata=metadata,
- history=compatibility_for_eval_history_pairs(state.history),
- metrics=state.metrics.get() if state.metrics else None,
- error=state.last_error if state and state.last_error else None,
- )
- ```
-
-5. Run the evaluation:
- ```python
- metadata = make_metadata(llm_config, dataset_name, agent_class, max_iterations, eval_note, eval_output_dir)
- output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
- instances = prepare_dataset(your_dataset, output_file, eval_n_limit)
-
- await run_evaluation(
- instances,
- metadata,
- output_file,
- num_workers,
- process_instance
- )
- ```
-
-This workflow sets up the configuration, initializes the runtime environment, processes each instance by running the agent and evaluating its actions, and then collects the results into an `EvalOutput` object. The `run_evaluation` function handles parallelization and progress tracking.
-
-Remember to customize the `get_instruction`, `your_user_response_function`, and `evaluate_agent_actions` functions according to your specific benchmark requirements.
-
-By following this structure, you can create a robust evaluation workflow for your benchmark within the OpenHands framework.
-
-
-## Understanding the `user_response_fn`
-
-The `user_response_fn` is a crucial component in OpenHands's evaluation workflow. It simulates user interaction with the agent, allowing for automated responses during the evaluation process. This function is particularly useful when you want to provide consistent, predefined responses to the agent's queries or actions.
-
-
-### Workflow and Interaction
-
-The correct workflow for handling actions and the `user_response_fn` is as follows:
-
-1. Agent receives a task and starts processing
-2. Agent emits an Action
-3. If the Action is executable (e.g., CmdRunAction, IPythonRunCellAction):
- - The Runtime processes the Action
- - Runtime returns an Observation
-4. If the Action is not executable (typically a MessageAction):
- - The `user_response_fn` is called
- - It returns a simulated user response
-5. The agent receives either the Observation or the simulated response
-6. Steps 2-5 repeat until the task is completed or max iterations are reached
-
-Here's a more accurate visual representation:
-
-```
- [Agent]
- |
- v
- [Emit Action]
- |
- v
- [Is Action Executable?]
- / \
- Yes No
- | |
- v v
- [Runtime] [user_response_fn]
- | |
- v v
- [Return Observation] [Simulated Response]
- \ /
- \ /
- v v
- [Agent receives feedback]
- |
- v
- [Continue or Complete Task]
-```
-
-In this workflow:
-
-- Executable actions (like running commands or executing code) are handled directly by the Runtime
-- Non-executable actions (typically when the agent wants to communicate or ask for clarification) are handled by the `user_response_fn`
-- The agent then processes the feedback, whether it's an Observation from the Runtime or a simulated response from the `user_response_fn`
-
-This approach allows for automated handling of both concrete actions and simulated user interactions, making it suitable for evaluation scenarios where you want to test the agent's ability to complete tasks with minimal human intervention.
-
-### Example Implementation
-
-Here's an example of a `user_response_fn` used in the SWE-Bench evaluation:
-
-```python
-def codeact_user_response(state: State | None) -> str:
- msg = (
- 'Please continue working on the task on whatever approach you think is suitable.\n'
- 'If you think you have solved the task, please first send your answer to user through message and then exit .\n'
- 'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP.\n'
- )
-
- if state and state.history:
- # check if the agent has tried to talk to the user 3 times, if so, let the agent know it can give up
- user_msgs = [
- event
- for event in state.history
- if isinstance(event, MessageAction) and event.source == 'user'
- ]
- if len(user_msgs) >= 2:
- # let the agent know that it can give up when it has tried 3 times
- return (
- msg
- + 'If you want to give up, run: exit .\n'
- )
- return msg
-```
-
-This function does the following:
-
-1. Provides a standard message encouraging the agent to continue working
-2. Checks how many times the agent has attempted to communicate with the user
-3. If the agent has made multiple attempts, it provides an option to give up
-
-By using this function, you can ensure consistent behavior across multiple evaluation runs and prevent the agent from getting stuck waiting for human input.
diff --git a/docs/usage/how-to/github-action.mdx b/docs/usage/how-to/github-action.mdx
deleted file mode 100644
index 95cc12515e8e8910ed8ff796d76757be7a71e995..0000000000000000000000000000000000000000
--- a/docs/usage/how-to/github-action.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: OpenHands GitHub Action
-description: This guide explains how to use the OpenHands GitHub Action in your own projects.
----
-
-## Using the Action in the OpenHands Repository
-
-To use the OpenHands GitHub Action in a repository, you can:
-
-1. Create an issue in the repository.
-2. Add the `fix-me` label to the issue or leave a comment on the issue starting with `@openhands-agent`.
-
-The action will automatically trigger and attempt to resolve the issue.
-
-## Installing the Action in a New Repository
-
-To install the OpenHands GitHub Action in your own repository, follow
-the [README for the OpenHands Resolver](https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/resolver/README.md).
-
-## Usage Tips
-
-### Iterative resolution
-
-1. Create an issue in the repository.
-2. Add the `fix-me` label to the issue, or leave a comment starting with `@openhands-agent`.
-3. Review the attempt to resolve the issue by checking the pull request.
-4. Follow up with feedback through general comments, review comments, or inline thread comments.
-5. Add the `fix-me` label to the pull request, or address a specific comment by starting with `@openhands-agent`.
-
-### Label versus Macro
-
-- Label (`fix-me`): Requests OpenHands to address the **entire** issue or pull request.
-- Macro (`@openhands-agent`): Requests OpenHands to consider only the issue/pull request description and **the specific comment**.
-
-## Advanced Settings
-
-### Add custom repository settings
-
-You can provide custom directions for OpenHands by following the [README for the resolver](https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/resolver/README.md#providing-custom-instructions).
-
-### Custom configurations
-
-GitHub resolver will automatically check for valid [repository secrets](https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions?tool=webui#creating-secrets-for-a-repository) or [repository variables](https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/store-information-in-variables#creating-configuration-variables-for-a-repository) to customize its behavior.
-The customization options you can set are:
-
-| **Attribute name** | **Type** | **Purpose** | **Example** |
-| -------------------------------- | -------- | --------------------------------------------------------------------------------------------------- | -------------------------------------------------- |
-| `LLM_MODEL` | Variable | Set the LLM to use with OpenHands | `LLM_MODEL="anthropic/claude-3-5-sonnet-20241022"` |
-| `OPENHANDS_MAX_ITER` | Variable | Set max limit for agent iterations | `OPENHANDS_MAX_ITER=10` |
-| `OPENHANDS_MACRO` | Variable | Customize default macro for invoking the resolver | `OPENHANDS_MACRO=@resolveit` |
-| `OPENHANDS_BASE_CONTAINER_IMAGE` | Variable | Custom Sandbox ([learn more](https://docs.all-hands.dev/modules/usage/how-to/custom-sandbox-guide)) | `OPENHANDS_BASE_CONTAINER_IMAGE="custom_image"` |
-| `TARGET_BRANCH` | Variable | Merge to branch other than `main` | `TARGET_BRANCH="dev"` |
-| `TARGET_RUNNER` | Variable | Target runner to execute the agent workflow (default ubuntu-latest) | `TARGET_RUNNER="custom-runner"` |
diff --git a/docs/usage/how-to/gui-mode.mdx b/docs/usage/how-to/gui-mode.mdx
deleted file mode 100644
index e7a6bfb4f8dd58678a307dc0c9ba35f2fe87e246..0000000000000000000000000000000000000000
--- a/docs/usage/how-to/gui-mode.mdx
+++ /dev/null
@@ -1,143 +0,0 @@
----
-title: GUI Mode
-description: OpenHands provides a Graphical User Interface (GUI) mode for interacting with the AI assistant.
----
-
-## Installation and Setup
-
-1. Follow the installation instructions to install OpenHands.
-2. After running the command, access OpenHands at [http://localhost:3000](http://localhost:3000).
-
-## Interacting with the GUI
-
-### Initial Setup
-
-1. Upon first launch, you'll see a settings popup.
-2. Select an `LLM Provider` and `LLM Model` from the dropdown menus. If the required model does not exist in the list,
- select `see advanced settings`. Then toggle `Advanced` options and enter it with the correct prefix in the
- `Custom Model` text box.
-3. Enter the corresponding `API Key` for your chosen provider.
-4. Click `Save Changes` to apply the settings.
-
-### Version Control Tokens
-
-OpenHands supports multiple version control providers. You can configure tokens for multiple providers simultaneously.
-
-#### GitHub Token Setup
-
-OpenHands automatically exports a `GITHUB_TOKEN` to the shell environment if provided:
-
-
- Setting Up a GitHub Token
-
- 1. **Generate a Personal Access Token (PAT)**:
- - On GitHub, go to Settings > Developer Settings > Personal Access Tokens > Tokens (classic).
- - **New token (classic)**
- - Required scopes:
- - `repo` (Full control of private repositories)
- - **Fine-Grained Tokens**
- - All Repositories (You can select specific repositories, but this will impact what returns in repo search)
-      - Minimal Permissions (Select `Meta Data = Read-only` for search, `Pull Requests = Read and Write` and `Content = Read and Write` for branch creation)
- 2. **Enter Token in OpenHands**:
- - Click the Settings button (gear icon).
- - Navigate to the `Git` tab.
- - Paste your token in the `GitHub Token` field.
- - Click `Save Changes` to apply the changes.
-
-
-
- Organizational Token Policies
-
- If you're working with organizational repositories, additional setup may be required:
-
- 1. **Check Organization Requirements**:
- - Organization admins may enforce specific token policies.
- - Some organizations require tokens to be created with SSO enabled.
- - Review your organization's [token policy settings](https://docs.github.com/en/organizations/managing-programmatic-access-to-your-organization/setting-a-personal-access-token-policy-for-your-organization).
- 2. **Verify Organization Access**:
- - Go to your token settings on GitHub.
- - Look for the organization under `Organization access`.
- - If required, click `Enable SSO` next to your organization.
- - Complete the SSO authorization process.
-
-
-
- Troubleshooting
-
- Common issues and solutions:
-
- - **Token Not Recognized**:
- - Ensure the token is properly saved in settings.
- - Check that the token hasn't expired.
- - Verify the token has the required scopes.
- - Try regenerating the token.
-
- - **Organization Access Denied**:
- - Check if SSO is required but not enabled.
- - Verify organization membership.
- - Contact organization admin if token policies are blocking access.
-
- - **Verifying Token Works**:
- - The app will show a green checkmark if the token is valid.
- - Try accessing a repository to confirm permissions.
- - Check the browser console for any error messages.
-
-
-#### GitLab Token Setup
-
-OpenHands automatically exports a `GITLAB_TOKEN` to the shell environment if provided:
-
-
- Setting Up a GitLab Token
-
- 1. **Generate a Personal Access Token (PAT)**:
- - On GitLab, go to User Settings > Access Tokens.
- - Create a new token with the following scopes:
- - `api` (API access)
- - `read_user` (Read user information)
- - `read_repository` (Read repository)
- - `write_repository` (Write repository)
- - Set an expiration date or leave it blank for a non-expiring token.
- 2. **Enter Token in OpenHands**:
- - Click the Settings button (gear icon).
- - Navigate to the `Git` tab.
- - Paste your token in the `GitLab Token` field.
- - Click `Save Changes` to apply the changes.
-
-
-
- Troubleshooting
-
- Common issues and solutions:
-
- - **Token Not Recognized**:
- - Ensure the token is properly saved in settings.
- - Check that the token hasn't expired.
- - Verify the token has the required scopes.
-
- - **Access Denied**:
- - Verify project access permissions.
- - Check if the token has the necessary scopes.
- - For group/organization repositories, ensure you have proper access.
-
-
-### Advanced Settings
-
-1. Inside the Settings page, under the `LLM` tab, toggle `Advanced` options to access additional settings.
-2. Use the `Custom Model` text box to manually enter a model if it's not in the list.
-3. Specify a `Base URL` if required by your LLM provider.
-
-### Interacting with the AI
-
-1. Type your prompt in the input box.
-2. Click the send button or press Enter to submit your message.
-3. The AI will process your input and provide a response in the chat window.
-4. You can continue the conversation by asking follow-up questions or providing additional information.
-
-## Tips for Effective Use
-
-- Be specific in your requests to get the most accurate and helpful responses, as described in the [prompting best practices](../prompting/prompting-best-practices).
-- Use one of the recommended models, as described in the [LLMs section](usage/llms/llms.md).
-
-Remember, the GUI mode of OpenHands is designed to make your interaction with the AI assistant as smooth and intuitive
-as possible. Don't hesitate to explore its features to maximize your productivity.
diff --git a/docs/usage/how-to/headless-mode.mdx b/docs/usage/how-to/headless-mode.mdx
deleted file mode 100644
index 1de9a165839052ad3d23616de16bbbf10aaeee15..0000000000000000000000000000000000000000
--- a/docs/usage/how-to/headless-mode.mdx
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: Headless Mode
-description: You can run OpenHands with a single command, without starting the web application. This makes it easy to write scripts and automate tasks with OpenHands.
----
-
-This is different from [CLI Mode](./cli-mode), which is interactive, and better for active development.
-
-## With Python
-
-To run OpenHands in headless mode with Python:
-1. Ensure you have followed the [Development setup instructions](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md).
-2. Run the following command:
-```bash
-poetry run python -m openhands.core.main -t "write a bash script that prints hi"
-```
-
-You'll need to be sure to set your model, API key, and other settings via environment variables
-[or the `config.toml` file](https://github.com/All-Hands-AI/OpenHands/blob/main/config.template.toml).
-
-## With Docker
-
-To run OpenHands in Headless mode with Docker:
-
-1. Set the following environment variables in your terminal:
- - `SANDBOX_VOLUMES` to specify the directory you want OpenHands to access ([See using SANDBOX_VOLUMES for more info](../runtimes/docker#using-sandbox_volumes))
- - `LLM_MODEL` - the LLM model to use (e.g. `export LLM_MODEL="anthropic/claude-sonnet-4-20250514"`)
- - `LLM_API_KEY` - your API key (e.g. `export LLM_API_KEY="sk_test_12345"`)
-
-2. Run the following Docker command:
-
-```bash
-docker run -it \
- --pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.41-nikolaik \
- -e SANDBOX_USER_ID=$(id -u) \
- -e SANDBOX_VOLUMES=$SANDBOX_VOLUMES \
- -e LLM_API_KEY=$LLM_API_KEY \
- -e LLM_MODEL=$LLM_MODEL \
- -e LOG_ALL_EVENTS=true \
- -v /var/run/docker.sock:/var/run/docker.sock \
- -v ~/.openhands-state:/.openhands-state \
- --add-host host.docker.internal:host-gateway \
- --name openhands-app-$(date +%Y%m%d%H%M%S) \
- docker.all-hands.dev/all-hands-ai/openhands:0.41 \
- python -m openhands.core.main -t "write a bash script that prints hi"
-```
-
-The `-e SANDBOX_USER_ID=$(id -u)` is passed to the Docker command to ensure the sandbox user matches the host user’s
-permissions. This prevents the agent from creating root-owned files in the mounted workspace.
-
-## Advanced Headless Configurations
-
-To view all available configuration options for headless mode, run the Python command with the `--help` flag.
-
-### Additional Logs
-
-For the headless mode to log all the agent actions, in the terminal run: `export LOG_ALL_EVENTS=true`
diff --git a/docs/usage/how-to/websocket-connection.mdx b/docs/usage/how-to/websocket-connection.mdx
deleted file mode 100644
index 38c877e6b2e9765dc487f7f79a9c4a310b4239de..0000000000000000000000000000000000000000
--- a/docs/usage/how-to/websocket-connection.mdx
+++ /dev/null
@@ -1,179 +0,0 @@
----
-title: WebSocket Connection
----
-
-This guide explains how to connect to the OpenHands WebSocket API to receive real-time events and send actions to the agent.
-
-## Overview
-
-OpenHands uses [Socket.IO](https://socket.io/) for WebSocket communication between the client and server. The WebSocket connection allows you to:
-
-1. Receive real-time events from the agent
-2. Send user actions to the agent
-3. Maintain a persistent connection for ongoing conversations
-
-## Connecting to the WebSocket
-
-### Connection Parameters
-
-When connecting to the WebSocket, you need to provide the following query parameters:
-
-- `conversation_id`: The ID of the conversation you want to join
-- `latest_event_id`: The ID of the latest event you've received (use `-1` for a new connection)
-- `providers_set`: (Optional) A comma-separated list of provider types
-
-### Connection Example
-
-Here's a basic example of connecting to the WebSocket using JavaScript:
-
-```javascript
-import { io } from "socket.io-client";
-
-const socket = io("http://localhost:3000", {
- transports: ["websocket"],
- query: {
- conversation_id: "your-conversation-id",
- latest_event_id: -1,
- providers_set: "github,gitlab" // Optional
- }
-});
-
-socket.on("connect", () => {
- console.log("Connected to OpenHands WebSocket");
-});
-
-socket.on("oh_event", (event) => {
- console.log("Received event:", event);
-});
-
-socket.on("connect_error", (error) => {
- console.error("Connection error:", error);
-});
-
-socket.on("disconnect", (reason) => {
- console.log("Disconnected:", reason);
-});
-```
-
-## Sending Actions to the Agent
-
-To send an action to the agent, use the `oh_user_action` event:
-
-```javascript
-// Send a user message to the agent
-socket.emit("oh_user_action", {
- type: "message",
- source: "user",
- message: "Hello, can you help me with my project?"
-});
-```
-
-## Receiving Events from the Agent
-
-The server emits events using the `oh_event` event type. Here are some common event types you might receive:
-
-- User messages (`source: "user", type: "message"`)
-- Agent messages (`source: "agent", type: "message"`)
-- File edits (`action: "edit"`)
-- File writes (`action: "write"`)
-- Command executions (`action: "run"`)
-
-Example event handler:
-
-```javascript
-socket.on("oh_event", (event) => {
- if (event.source === "agent" && event.type === "message") {
- console.log("Agent says:", event.message);
- } else if (event.action === "run") {
- console.log("Command executed:", event.args.command);
- console.log("Result:", event.result);
- }
-});
-```
-
-## Using Websocat for Testing
-
-[Websocat](https://github.com/vi/websocat) is a command-line tool for interacting with WebSockets. It's useful for testing your WebSocket connection without writing a full client application.
-
-### Installation
-
-```bash
-# On macOS
-brew install websocat
-
-# On Linux
-curl -L https://github.com/vi/websocat/releases/download/v1.11.0/websocat.x86_64-unknown-linux-musl > websocat
-chmod +x websocat
-sudo mv websocat /usr/local/bin/
-```
-
-### Connecting to the WebSocket
-
-```bash
-# Connect to the WebSocket and print all received messages
-echo "40{}" | \
-websocat "ws://localhost:3000/socket.io/?EIO=4&transport=websocket&conversation_id=your-conversation-id&latest_event_id=-1"
-```
-
-### Sending a Message
-
-```bash
-# Send a message to the agent
-echo '42["oh_user_action",{"type":"message","source":"user","message":"Hello, agent!"}]' | \
-websocat "ws://localhost:3000/socket.io/?EIO=4&transport=websocket&conversation_id=your-conversation-id&latest_event_id=-1"
-```
-
-### Complete Example with Websocat
-
-Here's a complete example of connecting to the WebSocket, sending a message, and receiving events:
-
-```bash
-# Start a persistent connection
-websocat -v "ws://localhost:3000/socket.io/?EIO=4&transport=websocket&conversation_id=your-conversation-id&latest_event_id=-1"
-
-# In another terminal, send a message
-echo '42["oh_user_action",{"type":"message","source":"user","message":"Can you help me with my project?"}]' | \
-websocat "ws://localhost:3000/socket.io/?EIO=4&transport=websocket&conversation_id=your-conversation-id&latest_event_id=-1"
-```
-
-## Event Structure
-
-Events sent and received through the WebSocket follow a specific structure:
-
-```typescript
-interface OpenHandsEvent {
- id: string; // Unique event ID
- source: string; // "user" or "agent"
- timestamp: string; // ISO timestamp
- message?: string; // For message events
- type?: string; // Event type (e.g., "message")
- action?: string; // Action type (e.g., "run", "edit", "write")
- args?: any; // Action arguments
- result?: any; // Action result
-}
-```
-
-## Best Practices
-
-1. **Handle Reconnection**: Implement reconnection logic in your client to handle network interruptions.
-2. **Track Event IDs**: Store the latest event ID you've received and use it when reconnecting to avoid duplicate events.
-3. **Error Handling**: Implement proper error handling for connection errors and failed actions.
-4. **Rate Limiting**: Avoid sending too many actions in a short period to prevent overloading the server.
-
-## Troubleshooting
-
-### Connection Issues
-
-- Verify that the OpenHands server is running and accessible
-- Check that you're providing the correct conversation ID
-- Ensure your WebSocket URL is correctly formatted
-
-### Authentication Issues
-
-- Make sure you have the necessary authentication cookies if required
-- Verify that you have permission to access the specified conversation
-
-### Event Handling Issues
-
-- Check that you're correctly parsing the event data
-- Verify that your event handlers are properly registered
diff --git a/docs/usage/installation.mdx b/docs/usage/installation.mdx
deleted file mode 100644
index 6579025782c039c28cadb6f4d5cddebd95ab6813..0000000000000000000000000000000000000000
--- a/docs/usage/installation.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: Quick Start
-description: Running OpenHands Cloud or running on your local system.
-icon: rocket
----
-
-## OpenHands Cloud
-
-The easiest way to get started with OpenHands is on OpenHands Cloud, which comes with $50 in free credits for new users.
-
-To get started with OpenHands Cloud, visit [app.all-hands.dev](https://app.all-hands.dev).
-
-For more information see [getting started with OpenHands Cloud.](/usage/cloud/openhands-cloud)
-
-## Running OpenHands Locally
-
-Run OpenHands on your local system and bring your own LLM and API key.
-
-For more information see [running OpenHands locally.](/usage/local-setup)
diff --git a/docs/usage/key-features.mdx b/docs/usage/key-features.mdx
deleted file mode 100644
index 488a3930c0094902806b0746c7d3ebc9081cf3fd..0000000000000000000000000000000000000000
--- a/docs/usage/key-features.mdx
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: Key Features
-icon: bars
----
-
-
-
-### Chat Panel
-- Displays the conversation between the user and OpenHands.
-- OpenHands explains its actions in this panel.
-
-### Changes
-- Shows the file changes performed by OpenHands.
-
-### VS Code
-- Embedded VS Code for browsing and modifying files.
-- Can also be used to upload and download files.
-
-### Terminal
-- A space for OpenHands and users to run terminal commands.
-
-### Jupyter
-- Shows all Python commands that were executed by OpenHands.
-- Particularly handy when using OpenHands to perform data visualization tasks.
-
-### App
-- Displays the web server when OpenHands runs an application.
-- Users can interact with the running application.
-
-### Browser
-- Used by OpenHands to browse websites.
-- The browser is non-interactive.
diff --git a/docs/usage/llms/azure-llms.mdx b/docs/usage/llms/azure-llms.mdx
deleted file mode 100644
index d1dbb3104133b50aac2f8740a2236e7b1e002801..0000000000000000000000000000000000000000
--- a/docs/usage/llms/azure-llms.mdx
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: Azure
-description: OpenHands uses LiteLLM to make calls to Azure's chat models. You can find their documentation on using Azure as a provider [here](https://docs.litellm.ai/docs/providers/azure).
----
-
-## Azure OpenAI Configuration
-
-When running OpenHands, you'll need to set the following environment variable using `-e` in the
-[docker run command](../installation#running-openhands):
-
-```
-LLM_API_VERSION="" # e.g. "2023-05-15"
-```
-
-Example:
-```bash
-docker run -it --pull=always \
- -e LLM_API_VERSION="2023-05-15"
- ...
-```
-
-Then in the OpenHands UI Settings under the `LLM` tab:
-
-
-You will need your ChatGPT deployment name which can be found on the deployments page in Azure. This is referenced as
-<deployment-name> below.
-
-
-1. Enable `Advanced` options.
-2. Set the following:
- - `Custom Model` to azure/<deployment-name>
- - `Base URL` to your Azure API Base URL (e.g. `https://example-endpoint.openai.azure.com`)
- - `API Key` to your Azure API key
-
-### Azure OpenAI Configuration
-
-When running OpenHands, set the following environment variable using `-e` in the
-[docker run command](../installation#running-openhands):
-
-```
-LLM_API_VERSION="" # e.g. "2024-02-15-preview"
-```
diff --git a/docs/usage/llms/custom-llm-configs.mdx b/docs/usage/llms/custom-llm-configs.mdx
deleted file mode 100644
index 2a7af46817ddd69ffd7a5facebd1b7da6eaecc62..0000000000000000000000000000000000000000
--- a/docs/usage/llms/custom-llm-configs.mdx
+++ /dev/null
@@ -1,137 +0,0 @@
----
-title: Custom LLM Configurations
-description: OpenHands supports defining multiple named LLM configurations in your `config.toml` file. This feature allows you to use different LLM configurations for different purposes, such as using a cheaper model for tasks that don't require high-quality responses, or using different models with different parameters for specific agents.
----
-
-## How It Works
-
-Named LLM configurations are defined in the `config.toml` file using sections that start with `llm.`. For example:
-
-```toml
-# Default LLM configuration
-[llm]
-model = "gpt-4"
-api_key = "your-api-key"
-temperature = 0.0
-
-# Custom LLM configuration for a cheaper model
-[llm.gpt3]
-model = "gpt-3.5-turbo"
-api_key = "your-api-key"
-temperature = 0.2
-
-# Another custom configuration with different parameters
-[llm.high-creativity]
-model = "gpt-4"
-api_key = "your-api-key"
-temperature = 0.8
-top_p = 0.9
-```
-
-Each named configuration inherits all settings from the default `[llm]` section and can override any of those settings. You can define as many custom configurations as needed.
-
-## Using Custom Configurations
-
-### With Agents
-
-You can specify which LLM configuration an agent should use by setting the `llm_config` parameter in the agent's configuration section:
-
-```toml
-[agent.RepoExplorerAgent]
-# Use the cheaper GPT-3 configuration for this agent
-llm_config = 'gpt3'
-
-[agent.CodeWriterAgent]
-# Use the high creativity configuration for this agent
-llm_config = 'high-creativity'
-```
-
-### Configuration Options
-
-Each named LLM configuration supports all the same options as the default LLM configuration. These include:
-
-- Model selection (`model`)
-- API configuration (`api_key`, `base_url`, etc.)
-- Model parameters (`temperature`, `top_p`, etc.)
-- Retry settings (`num_retries`, `retry_multiplier`, etc.)
-- Token limits (`max_input_tokens`, `max_output_tokens`)
-- And all other LLM configuration options
-
-For a complete list of available options, see the LLM Configuration section in the [Configuration Options](../configuration-options) documentation.
-
-## Use Cases
-
-Custom LLM configurations are particularly useful in several scenarios:
-
-- **Cost Optimization**: Use cheaper models for tasks that don't require high-quality responses, like repository exploration or simple file operations.
-- **Task-Specific Tuning**: Configure different temperature and top_p values for tasks that require different levels of creativity or determinism.
-- **Different Providers**: Use different LLM providers or API endpoints for different tasks.
-- **Testing and Development**: Easily switch between different model configurations during development and testing.
-
-## Example: Cost Optimization
-
-A practical example of using custom LLM configurations to optimize costs:
-
-```toml
-# Default configuration using GPT-4 for high-quality responses
-[llm]
-model = "gpt-4"
-api_key = "your-api-key"
-temperature = 0.0
-
-# Cheaper configuration for repository exploration
-[llm.repo-explorer]
-model = "gpt-3.5-turbo"
-temperature = 0.2
-
-# Configuration for code generation
-[llm.code-gen]
-model = "gpt-4"
-temperature = 0.0
-max_output_tokens = 2000
-
-[agent.RepoExplorerAgent]
-llm_config = 'repo-explorer'
-
-[agent.CodeWriterAgent]
-llm_config = 'code-gen'
-```
-
-In this example:
-- Repository exploration uses a cheaper model since it mainly involves understanding and navigating code
-- Code generation uses GPT-4 with a higher token limit for generating larger code blocks
-- The default configuration remains available for other tasks
-
-# Custom Configurations with Reserved Names
-
-OpenHands can use custom LLM configurations named with reserved names, for specific use cases. If you specify the model and other settings under the reserved names, then OpenHands will load and use them for a specific purpose. As of now, one such configuration is implemented: draft editor.
-
-## Draft Editor Configuration
-
-The `draft_editor` configuration is a group of settings you can provide, to specify the model to use for preliminary drafting of code edits, for any tasks that involve editing and refining code. You need to provide it under the section `[llm.draft_editor]`.
-
-For example, you can define in `config.toml` a draft editor like this:
-
-```toml
-[llm.draft_editor]
-model = "gpt-4"
-temperature = 0.2
-top_p = 0.95
-presence_penalty = 0.0
-frequency_penalty = 0.0
-```
-
-This configuration:
-- Uses GPT-4 for high-quality edits and suggestions
-- Sets a low temperature (0.2) to maintain consistency while allowing some flexibility
-- Uses a high top_p value (0.95) to consider a wide range of token options
-- Disables presence and frequency penalties to maintain focus on the specific edits needed
-
-Use this configuration when you want to let an LLM draft edits before making them. In general, it may be useful to:
-- Review and suggest code improvements
-- Refine existing content while maintaining its core meaning
-- Make precise, focused changes to code or text
-
-
-Custom LLM configurations are only available when using OpenHands in development mode, via `main.py` or `cli.py`. When running via `docker run`, please use the standard configuration options.
-
diff --git a/docs/usage/llms/google-llms.mdx b/docs/usage/llms/google-llms.mdx
deleted file mode 100644
index 999e32f8c19d86cac3ea0abd069f7cbcfd72bac9..0000000000000000000000000000000000000000
--- a/docs/usage/llms/google-llms.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: Google Gemini/Vertex
-description: OpenHands uses LiteLLM to make calls to Google's chat models. You can find their documentation on using Google as a provider -> [Gemini - Google AI Studio](https://docs.litellm.ai/docs/providers/gemini), [VertexAI - Google Cloud Platform](https://docs.litellm.ai/docs/providers/vertex)
----
-
-## Gemini - Google AI Studio Configs
-
-When running OpenHands, you'll need to set the following in the OpenHands UI through the Settings under the `LLM` tab:
-- `LLM Provider` to `Gemini`
-- `LLM Model` to the model you will be using.
-If the model is not in the list, enable `Advanced` options, and enter it in `Custom Model`
-(e.g. gemini/<model-name> like `gemini/gemini-2.0-flash`).
-- `API Key` to your Gemini API key
-
-## VertexAI - Google Cloud Platform Configs
-
-To use Vertex AI through Google Cloud Platform when running OpenHands, you'll need to set the following environment
-variables using `-e` in the [docker run command](../installation#running-openhands):
-
-```
-GOOGLE_APPLICATION_CREDENTIALS=""
-VERTEXAI_PROJECT=""
-VERTEXAI_LOCATION=""
-```
-
-Then set the following in the OpenHands UI through the Settings under the `LLM` tab:
-- `LLM Provider` to `VertexAI`
-- `LLM Model` to the model you will be using.
-If the model is not in the list, enable `Advanced` options, and enter it in `Custom Model`
-(e.g. vertex_ai/<model-name>).
diff --git a/docs/usage/llms/groq.mdx b/docs/usage/llms/groq.mdx
deleted file mode 100644
index eb971c83a07aa46c0486e7498386acc3d5d074a3..0000000000000000000000000000000000000000
--- a/docs/usage/llms/groq.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
----
-title: Groq
-description: OpenHands uses LiteLLM to make calls to chat models on Groq. You can find their documentation on using Groq as a provider [here](https://docs.litellm.ai/docs/providers/groq).
----
-
-## Configuration
-
-When running OpenHands, you'll need to set the following in the OpenHands UI through the Settings under the `LLM` tab:
-- `LLM Provider` to `Groq`
-- `LLM Model` to the model you will be using. [Visit here to see the list of
-models that Groq hosts](https://console.groq.com/docs/models). If the model is not in the list,
-enable `Advanced` options, and enter it in `Custom Model` (e.g. groq/<model-name> like `groq/llama3-70b-8192`).
-- `API key` to your Groq API key. To find or create your Groq API Key, [see here](https://console.groq.com/keys).
-
-## Using Groq as an OpenAI-Compatible Endpoint
-
-The Groq endpoint for chat completion is [mostly OpenAI-compatible](https://console.groq.com/docs/openai). Therefore, you can access Groq models as you
-would access any OpenAI-compatible endpoint. In the OpenHands UI through the Settings under the `LLM` tab:
-1. Enable `Advanced` options
-2. Set the following:
- - `Custom Model` to the prefix `openai/` + the model you will be using (e.g. `openai/llama3-70b-8192`)
- - `Base URL` to `https://api.groq.com/openai/v1`
- - `API Key` to your Groq API key
diff --git a/docs/usage/llms/litellm-proxy.mdx b/docs/usage/llms/litellm-proxy.mdx
deleted file mode 100644
index 13fb4c8666507a6573c0ea86d2b61aa3a185fed4..0000000000000000000000000000000000000000
--- a/docs/usage/llms/litellm-proxy.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: LiteLLM Proxy
-description: OpenHands supports using the [LiteLLM proxy](https://docs.litellm.ai/docs/proxy/quick_start) to access various LLM providers.
----
-
-## Configuration
-
-To use LiteLLM proxy with OpenHands, you need to:
-
-1. Set up a LiteLLM proxy server (see [LiteLLM documentation](https://docs.litellm.ai/docs/proxy/quick_start))
-2. When running OpenHands, you'll need to set the following in the OpenHands UI through the Settings under the `LLM` tab:
- * Enable `Advanced` options
- * `Custom Model` to the prefix `litellm_proxy/` + the model you will be using (e.g. `litellm_proxy/anthropic.claude-3-5-sonnet-20241022-v2:0`)
- * `Base URL` to your LiteLLM proxy URL (e.g. `https://your-litellm-proxy.com`)
- * `API Key` to your LiteLLM proxy API key
-
-## Supported Models
-
-The supported models depend on your LiteLLM proxy configuration. OpenHands supports any model that your LiteLLM proxy
-is configured to handle.
-
-Refer to your LiteLLM proxy configuration for the list of available models and their names.
diff --git a/docs/usage/llms/llms.mdx b/docs/usage/llms/llms.mdx
deleted file mode 100644
index ccc2662f3a84eb431e6fed67b99299f87f9679fa..0000000000000000000000000000000000000000
--- a/docs/usage/llms/llms.mdx
+++ /dev/null
@@ -1,92 +0,0 @@
----
-title: Overview
-description: OpenHands can connect to any LLM supported by LiteLLM. However, it requires a powerful model to work.
----
-
-
-This section is for users who want to connect OpenHands to different LLMs.
-
-
-## Model Recommendations
-
-Based on our evaluations of language models for coding tasks (using the SWE-bench dataset), we can provide some
-recommendations for model selection. Our latest benchmarking results can be found in [this spreadsheet](https://docs.google.com/spreadsheets/d/1wOUdFCMyY6Nt0AIqF705KN4JKOWgeI4wUGUP60krXXs/edit?gid=0).
-
-Based on these findings and community feedback, these are the latest models that have been verified to work reasonably well with OpenHands:
-
-- [anthropic/claude-sonnet-4-20250514](https://www.anthropic.com/api) (recommended)
-- [openai/o4-mini](https://openai.com/index/introducing-o3-and-o4-mini/)
-- [gemini/gemini-2.5-pro](https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/)
-- [deepseek/deepseek-chat](https://api-docs.deepseek.com/)
-- [all-hands/openhands-lm-32b-v0.1](https://www.all-hands.dev/blog/introducing-openhands-lm-32b----a-strong-open-coding-agent-model) -- available through [OpenRouter](https://openrouter.ai/all-hands/openhands-lm-32b-v0.1)
-
-
-
-OpenHands will issue many prompts to the LLM you configure. Most of these LLMs cost money, so be sure to set spending
-limits and monitor usage.
-
-
-If you have successfully run OpenHands with specific providers, we encourage you to open a PR to share your setup process
-to help others using the same provider!
-
-For a full list of the providers and models available, please consult the
-[litellm documentation](https://docs.litellm.ai/docs/providers).
-
-
-Most current local and open source models are not as powerful. When using such models, you may see long
-wait times between messages, poor responses, or errors about malformed JSON. OpenHands can only be as powerful as the
-models driving it. However, if you do find ones that work, please add them to the verified list above.
-
-
-## LLM Configuration
-
-The following can be set in the OpenHands UI through the Settings:
-
-- `LLM Provider`
-- `LLM Model`
-- `API Key`
-- `Base URL` (through `Advanced` settings)
-
-There are some settings that may be necessary for some LLMs/providers that cannot be set through the UI. Instead, these
-can be set through environment variables passed to the docker run command when starting the app
-using `-e`:
-
-- `LLM_API_VERSION`
-- `LLM_EMBEDDING_MODEL`
-- `LLM_EMBEDDING_DEPLOYMENT_NAME`
-- `LLM_DROP_PARAMS`
-- `LLM_DISABLE_VISION`
-- `LLM_CACHING_PROMPT`
-
-We have a few guides for running OpenHands with specific model providers:
-
-- [Azure](/usage/llms/azure-llms)
-- [Google](/usage/llms/google-llms)
-- [Groq](/usage/llms/groq)
-- [Local LLMs with SGLang or vLLM](/usage/llms/local-llms)
-- [LiteLLM Proxy](/usage/llms/litellm-proxy)
-- [OpenAI](/usage/llms/openai-llms)
-- [OpenRouter](/usage/llms/openrouter)
-
-### API retries and rate limits
-
-LLM providers typically have rate limits, sometimes very low, and may require retries. OpenHands will automatically
-retry requests if it receives a Rate Limit Error (429 error code).
-
-You can customize these options as you need for the provider you're using. Check their documentation, and set the
-following environment variables to control the number of retries and the time between retries:
-
-- `LLM_NUM_RETRIES` (Default of 4 times)
-- `LLM_RETRY_MIN_WAIT` (Default of 5 seconds)
-- `LLM_RETRY_MAX_WAIT` (Default of 30 seconds)
-- `LLM_RETRY_MULTIPLIER` (Default of 2)
-
-If you are running OpenHands in development mode, you can also set these options in the `config.toml` file:
-
-```toml
-[llm]
-num_retries = 4
-retry_min_wait = 5
-retry_max_wait = 30
-retry_multiplier = 2
-```
diff --git a/docs/usage/llms/local-llms.mdx b/docs/usage/llms/local-llms.mdx
deleted file mode 100644
index 7735f4f01dba8ebbdd4a0fb9c0d7ea422aba074d..0000000000000000000000000000000000000000
--- a/docs/usage/llms/local-llms.mdx
+++ /dev/null
@@ -1,160 +0,0 @@
----
-title: Local LLMs
-description: When using a Local LLM, OpenHands may have limited functionality. It is highly recommended that you use GPUs to serve local models for optimal experience.
----
-
-## News
-
-- 2025/05/21: We collaborated with Mistral AI and released [Devstral Small](https://mistral.ai/news/devstral) that achieves [46.8% on SWE-Bench Verified](https://github.com/SWE-bench/experiments/pull/228)!
-- 2025/03/31: We released an open model OpenHands LM v0.1 32B that achieves 37.1% on SWE-Bench Verified
-([blog](https://www.all-hands.dev/blog/introducing-openhands-lm-32b----a-strong-open-coding-agent-model), [model](https://huggingface.co/all-hands/openhands-lm-32b-v0.1)).
-
-
-## Quickstart: Running OpenHands on Your Macbook
-
-### Serve the model on your Macbook
-
-We recommend using [LMStudio](https://lmstudio.ai/) for serving these models locally.
-
-1. Download [LM Studio](https://lmstudio.ai/) and install it
-
-2. Download the model:
- - Option 1: Directly download the LLM from [this link](https://lmstudio.ai/model/devstral-small-2505-mlx) or by searching for the name `Devstral-Small-2505` in LM Studio
- - Option 2: Download a LLM in GGUF format. For example, to download [Devstral Small 2505 GGUF](https://huggingface.co/mistralai/Devstral-Small-2505_gguf), using `huggingface-cli download mistralai/Devstral-Small-2505_gguf --local-dir mistralai/Devstral-Small-2505_gguf`. Then in bash terminal, run `lms import {model_name}` in the directory where you've downloaded the model checkpoint (e.g. run `lms import devstralQ4_K_M.gguf` in `mistralai/Devstral-Small-2505_gguf`)
-
-3. Open LM Studio application, you should first switch to `power user` mode, and then open the developer tab:
-
-
-
-4. Then click `Select a model to load` on top of the application:
-
-
-
-5. And choose the model you want to use, holding `option` on mac to enable advanced loading options:
-
-
-
-6. You should then pick an appropriate context window for OpenHands based on your hardware configuration (larger than 32768 is recommended for using OpenHands, but too large may cause you to run out of memory); Flash attention is also recommended if it works on your machine.
-
-
-
-7. And you should start the server (if it is not already in `Running` status), un-toggle `Serve on Local Network` and remember the port number of the LMStudio URL (`1234` is the port number for `http://127.0.0.1:1234` in this example):
-
-
-
-8. Finally, you can click the `copy` button near model name to copy the model name (`imported-models/uncategorized/devstralq4_k_m.gguf` in this example):
-
-
-
-### Start OpenHands with locally served model
-
-Check [the installation guide](https://docs.all-hands.dev/modules/usage/installation) to make sure you have all the prerequisites for running OpenHands.
-
-```bash
-export LMSTUDIO_MODEL_NAME="imported-models/uncategorized/devstralq4_k_m.gguf" # <- Replace this with the model name you copied from LMStudio
-export LMSTUDIO_URL="http://host.docker.internal:1234" # <- Replace this with the port from LMStudio
-
-docker pull docker.all-hands.dev/all-hands-ai/runtime:0.41-nikolaik
-
-mkdir -p ~/.openhands-state && echo '{"language":"en","agent":"CodeActAgent","max_iterations":null,"security_analyzer":null,"confirmation_mode":false,"llm_model":"lm_studio/'$LMSTUDIO_MODEL_NAME'","llm_api_key":"dummy","llm_base_url":"'$LMSTUDIO_URL/v1'","remote_runtime_resource_factor":null,"github_token":null,"enable_default_condenser":true,"user_consents_to_analytics":true}' > ~/.openhands-state/settings.json
-
-docker run -it --rm --pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.41-nikolaik \
- -e LOG_ALL_EVENTS=true \
- -v /var/run/docker.sock:/var/run/docker.sock \
- -v ~/.openhands-state:/.openhands-state \
- -p 3000:3000 \
- --add-host host.docker.internal:host-gateway \
- --name openhands-app \
- docker.all-hands.dev/all-hands-ai/openhands:0.41
-```
-
-Once your server is running -- you can visit `http://localhost:3000` in your browser to use OpenHands with local Devstral model:
-```
-Digest: sha256:e72f9baecb458aedb9afc2cd5bc935118d1868719e55d50da73190d3a85c674f
-Status: Image is up to date for docker.all-hands.dev/all-hands-ai/openhands:0.41
-Starting OpenHands...
-Running OpenHands as root
-14:22:13 - openhands:INFO: server_config.py:50 - Using config class None
-INFO: Started server process [8]
-INFO: Waiting for application startup.
-INFO: Application startup complete.
-INFO: Uvicorn running on http://0.0.0.0:3000 (Press CTRL+C to quit)
-```
-
-
-## Advanced: Serving LLM on GPUs
-
-### Download model checkpoints
-
-
-The model checkpoints downloaded here should NOT be in GGUF format.
-
-
-For example, to download [OpenHands LM 32B v0.1](https://huggingface.co/all-hands/openhands-lm-32b-v0.1):
-
-```bash
-huggingface-cli download all-hands/openhands-lm-32b-v0.1 --local-dir all-hands/openhands-lm-32b-v0.1
-```
-
-### Create an OpenAI-Compatible Endpoint With SGLang
-
-- Install SGLang following [the official documentation](https://docs.sglang.ai/start/install.html).
-- Example launch command for OpenHands LM 32B (with at least 2 GPUs):
-
-```bash
-SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1 python3 -m sglang.launch_server \
- --model all-hands/openhands-lm-32b-v0.1 \
- --served-model-name openhands-lm-32b-v0.1 \
- --port 8000 \
- --tp 2 --dp 1 \
- --host 0.0.0.0 \
- --api-key mykey --context-length 131072
-```
-
-### Create an OpenAI-Compatible Endpoint with vLLM
-
-- Install vLLM following [the official documentation](https://docs.vllm.ai/en/latest/getting_started/installation.html).
-- Example launch command for OpenHands LM 32B (with at least 2 GPUs):
-
-```bash
-vllm serve all-hands/openhands-lm-32b-v0.1 \
- --host 0.0.0.0 --port 8000 \
- --api-key mykey \
- --tensor-parallel-size 2 \
- --served-model-name openhands-lm-32b-v0.1
- --enable-prefix-caching
-```
-
-## Advanced: Run and Configure OpenHands
-
-### Run OpenHands
-
-#### Using Docker
-
-Run OpenHands using [the official docker run command](../installation#start-the-app).
-
-#### Using Development Mode
-
-Use the instructions in [Development.md](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md) to build OpenHands.
-Ensure `config.toml` exists by running `make setup-config` which will create one for you. In the `config.toml`, enter the following:
-
-```
-[core]
-workspace_base="/path/to/your/workspace"
-
-[llm]
-model="openhands-lm-32b-v0.1"
-ollama_base_url="http://localhost:8000"
-```
-
-Start OpenHands using `make run`.
-
-### Configure OpenHands
-
-Once OpenHands is running, you'll need to set the following in the OpenHands UI through the Settings under the `LLM` tab:
-1. Enable `Advanced` options.
-2. Set the following:
-- `Custom Model` to `openai/` (e.g. `openai/openhands-lm-32b-v0.1`)
-- `Base URL` to `http://host.docker.internal:8000`
-- `API key` to the same string you set when serving the model (e.g. `mykey`)
diff --git a/docs/usage/llms/openai-llms.mdx b/docs/usage/llms/openai-llms.mdx
deleted file mode 100644
index d97a573f2df1de19f54d41e049e2a6f751e8cf94..0000000000000000000000000000000000000000
--- a/docs/usage/llms/openai-llms.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: OpenAI
-description: OpenHands uses LiteLLM to make calls to OpenAI's chat models. You can find their documentation on using OpenAI as a provider [here](https://docs.litellm.ai/docs/providers/openai).
----
-
-## Configuration
-
-When running OpenHands, you'll need to set the following in the OpenHands UI through the Settings under the `LLM` tab:
-* `LLM Provider` to `OpenAI`
-* `LLM Model` to the model you will be using.
-[Visit here to see a full list of OpenAI models that LiteLLM supports.](https://docs.litellm.ai/docs/providers/openai#openai-chat-completion-models)
-If the model is not in the list, enable `Advanced` options, and enter it in `Custom Model` (e.g. openai/<model-name> like `openai/gpt-4o`).
-* `API Key` to your OpenAI API key. To find or create your OpenAI Project API Key, [see here](https://platform.openai.com/api-keys).
-
-## Using OpenAI-Compatible Endpoints
-
-Just as for OpenAI Chat completions, we use LiteLLM for OpenAI-compatible endpoints. You can find their full documentation on this topic [here](https://docs.litellm.ai/docs/providers/openai_compatible).
-
-## Using an OpenAI Proxy
-
-If you're using an OpenAI proxy, in the OpenHands UI through the Settings under the `LLM` tab:
-1. Enable `Advanced` options
-2. Set the following:
- - `Custom Model` to openai/<model-name> (e.g. `openai/gpt-4o` or openai/<proxy-prefix>/<model-name>)
- - `Base URL` to the URL of your OpenAI proxy
- - `API Key` to your OpenAI API key
diff --git a/docs/usage/llms/openrouter.mdx b/docs/usage/llms/openrouter.mdx
deleted file mode 100644
index b465eebbbfc47b7420e6dcd0156f4e9b751e027b..0000000000000000000000000000000000000000
--- a/docs/usage/llms/openrouter.mdx
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: OpenRouter
-description: OpenHands uses LiteLLM to make calls to chat models on OpenRouter. You can find their documentation on using OpenRouter as a provider [here](https://docs.litellm.ai/docs/providers/openrouter).
----
-
-## Configuration
-
-When running OpenHands, you'll need to set the following in the OpenHands UI through the Settings under the `LLM` tab:
-* `LLM Provider` to `OpenRouter`
-* `LLM Model` to the model you will be using.
-[Visit here to see a full list of OpenRouter models](https://openrouter.ai/models).
-If the model is not in the list, enable `Advanced` options, and enter it in
-`Custom Model` (e.g. openrouter/<model-name> like `openrouter/anthropic/claude-3.5-sonnet`).
-* `API Key` to your OpenRouter API key.
diff --git a/docs/usage/llms/screenshots/1_select_power_user.png b/docs/usage/llms/screenshots/1_select_power_user.png
deleted file mode 100644
index cc1c9680457b558ca446bf91b94ed10aee7163ed..0000000000000000000000000000000000000000
--- a/docs/usage/llms/screenshots/1_select_power_user.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3224bad5d97fadf42c637548a2a35f8adaf139adf4a899cc49a4a68b78e57632
-size 232921
diff --git a/docs/usage/llms/screenshots/2_select_model.png b/docs/usage/llms/screenshots/2_select_model.png
deleted file mode 100644
index 9f26dd1f71d9ae9831bfaa230d9b03044e353c43..0000000000000000000000000000000000000000
--- a/docs/usage/llms/screenshots/2_select_model.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c71f16d252809e117e83bf56639f1ed93c3a4661c44c540d2668fc82bf919743
-size 430035
diff --git a/docs/usage/llms/screenshots/3_select_devstral.png b/docs/usage/llms/screenshots/3_select_devstral.png
deleted file mode 100644
index 61ee34c065ef8365224bda7c927dd76c958fb3b0..0000000000000000000000000000000000000000
Binary files a/docs/usage/llms/screenshots/3_select_devstral.png and /dev/null differ
diff --git a/docs/usage/llms/screenshots/4_set_context_window.png b/docs/usage/llms/screenshots/4_set_context_window.png
deleted file mode 100644
index 34585d2e3798656dd92ea4bff251d6cefbcb40f1..0000000000000000000000000000000000000000
--- a/docs/usage/llms/screenshots/4_set_context_window.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f3b36c5a1951ca062984675f9e8e315366600bf33a5b1a4a3d1a599adb6e8594
-size 571585
diff --git a/docs/usage/llms/screenshots/5_copy_url.png b/docs/usage/llms/screenshots/5_copy_url.png
deleted file mode 100644
index 95dd9563f273876d14202ed8ffe6861e1e0f89c7..0000000000000000000000000000000000000000
--- a/docs/usage/llms/screenshots/5_copy_url.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:94979479b2e60498d3efba02c207e8dc06e03d9e4824316d259df198067068f6
-size 661554
diff --git a/docs/usage/llms/screenshots/6_copy_to_get_model_name.png b/docs/usage/llms/screenshots/6_copy_to_get_model_name.png
deleted file mode 100644
index 7078f80b540805b2562ef8bd431048a27e8e308d..0000000000000000000000000000000000000000
Binary files a/docs/usage/llms/screenshots/6_copy_to_get_model_name.png and /dev/null differ
diff --git a/docs/usage/local-setup.mdx b/docs/usage/local-setup.mdx
deleted file mode 100644
index f059273cac64ebb70e30a5b13ae16ea83030b58d..0000000000000000000000000000000000000000
--- a/docs/usage/local-setup.mdx
+++ /dev/null
@@ -1,151 +0,0 @@
----
-title: Getting Started
-description: Getting started with running OpenHands locally.
----
-
-## Recommended Methods for Running Openhands on Your Local System
-
-### System Requirements
-
-- MacOS with [Docker Desktop support](https://docs.docker.com/desktop/setup/install/mac-install/#system-requirements)
-- Linux
-- Windows with [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) and [Docker Desktop support](https://docs.docker.com/desktop/setup/install/windows-install/#system-requirements)
-
-A system with a modern processor and a minimum of **4GB RAM** is recommended to run OpenHands.
-
-### Prerequisites
-
-
-
-
-
- **Docker Desktop**
-
- 1. [Install Docker Desktop on Mac](https://docs.docker.com/desktop/setup/install/mac-install).
- 2. Open Docker Desktop, go to `Settings > Advanced` and ensure `Allow the default Docker socket to be used` is enabled.
-
-
-
-
-
- Tested with Ubuntu 22.04.
-
-
- **Docker Desktop**
-
- 1. [Install Docker Desktop on Linux](https://docs.docker.com/desktop/setup/install/linux/).
-
-
-
-
-
- **WSL**
-
- 1. [Install WSL](https://learn.microsoft.com/en-us/windows/wsl/install).
- 2. Run `wsl --version` in powershell and confirm `Default Version: 2`.
-
- **Docker Desktop**
-
- 1. [Install Docker Desktop on Windows](https://docs.docker.com/desktop/setup/install/windows-install).
- 2. Open Docker Desktop, go to `Settings` and confirm the following:
- - General: `Use the WSL 2 based engine` is enabled.
- - Resources > WSL Integration: `Enable integration with my default WSL distro` is enabled.
-
-
- The docker command below to start the app must be run inside the WSL terminal.
-
-
-
-
-
-
-### Start the App
-
-```bash
-docker pull docker.all-hands.dev/all-hands-ai/runtime:0.40-nikolaik
-
-docker run -it --rm --pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.40-nikolaik \
- -e LOG_ALL_EVENTS=true \
- -v /var/run/docker.sock:/var/run/docker.sock \
- -v ~/.openhands-state:/.openhands-state \
- -p 3000:3000 \
- --add-host host.docker.internal:host-gateway \
- --name openhands-app \
- docker.all-hands.dev/all-hands-ai/openhands:0.40
-```
-
-You'll find OpenHands running at http://localhost:3000!
-
-### Setup
-
-After launching OpenHands, you **must** select an `LLM Provider` and `LLM Model` and enter a corresponding `API Key`.
-This can be done during the initial settings popup or by selecting the `Settings`
-button (gear icon) in the UI.
-
-If the required model does not exist in the list, in `Settings` under the `LLM` tab, you can toggle `Advanced` options
-and manually enter it with the correct prefix in the `Custom Model` text box.
-The `Advanced` options also allow you to specify a `Base URL` if required.
-
-#### Getting an API Key
-
-OpenHands requires an API key to access most language models. Here's how to get an API key from the recommended providers:
-
-
-
-
-
-1. [Create an Anthropic account](https://console.anthropic.com/).
-2. [Generate an API key](https://console.anthropic.com/settings/keys).
-3. [Set up billing](https://console.anthropic.com/settings/billing).
-
-
-
-
-
-1. [Create an OpenAI account](https://platform.openai.com/).
-2. [Generate an API key](https://platform.openai.com/api-keys).
-3. [Set up billing](https://platform.openai.com/account/billing/overview).
-
-
-
-
-
-1. Create a Google account if you don't already have one.
-2. [Generate an API key](https://aistudio.google.com/apikey).
-3. [Set up billing](https://aistudio.google.com/usage?tab=billing).
-
-
-
-
-
-Consider setting usage limits to control costs.
-
-#### Setting Up Search Engine
-
-OpenHands can be configured to use a search engine to allow the agent to search the web for information when needed.
-
-To enable search functionality in OpenHands:
-
-1. Get a Tavily API key from [tavily.com](https://tavily.com/).
-2. Enter the Tavily API key in the Settings page under `LLM` tab > `Search API Key (Tavily)`
-
-For more details, see the [Search Engine Setup](/usage/search-engine-setup) guide.
-
-
-Now you're ready to [get started with OpenHands](/usage/getting-started).
-
-### Versions
-
-The [docker command above](/usage/local-setup#start-the-app) pulls the most recent stable release of OpenHands. You have other options as well:
-- For a specific release, replace `$VERSION` in `openhands:$VERSION` and `runtime:$VERSION`, with the version number.
-For example, `0.9` will automatically point to the latest `0.9.x` release, and `0` will point to the latest `0.x.x` release.
-- For the most up-to-date development version, replace `$VERSION` in `openhands:$VERSION` and `runtime:$VERSION`, with `main`.
-This version is unstable and is recommended for testing or development purposes only.
-
-## Next Steps
-
-- [Connect OpenHands to your local filesystem.](/usage/runtimes/docker#connecting-to-your-filesystem) to use OpenHands with your GitHub repositories
-- [Run OpenHands in a scriptable headless mode.](/usage/how-to/headless-mode)
-- [Run OpenHands with a friendly CLI.](/usage/how-to/cli-mode)
-- [Run OpenHands on tagged issues with a GitHub action.](/usage/how-to/github-action)
diff --git a/docs/usage/mcp.mdx b/docs/usage/mcp.mdx
deleted file mode 100644
index bfce6ef749c5318b263ba4a1889809e2e5aaf799..0000000000000000000000000000000000000000
--- a/docs/usage/mcp.mdx
+++ /dev/null
@@ -1,94 +0,0 @@
----
-title: Model Context Protocol (MCP)
-description: This page outlines how to configure and use the Model Context Protocol (MCP) in OpenHands, allowing you to extend the agent's capabilities with custom tools.
----
-
-## Overview
-
-Model Context Protocol (MCP) is a mechanism that allows OpenHands to communicate with external tool servers. These
-servers can provide additional functionality to the agent, such as specialized data processing, external API access,
-or custom tools. MCP is based on the open standard defined at [modelcontextprotocol.io](https://modelcontextprotocol.io).
-
-## Configuration
-
-MCP configuration can be defined in:
-* The OpenHands UI through the Settings under the `MCP` tab.
-* The `config.toml` file under the `[mcp]` section if not using the UI.
-
-### Configuration Example via config.toml
-
-```toml
-[mcp]
-# SSE Servers - External servers that communicate via Server-Sent Events
-sse_servers = [
- # Basic SSE server with just a URL
- "http://example.com:8080/mcp",
-
- # SSE server with API key authentication
- {url="https://secure-example.com/mcp", api_key="your-api-key"}
-]
-
-# Stdio Servers - Local processes that communicate via standard input/output
-stdio_servers = [
- # Basic stdio server
- {name="fetch", command="uvx", args=["mcp-server-fetch"]},
-
- # Stdio server with environment variables
- {
- name="data-processor",
- command="python",
- args=["-m", "my_mcp_server"],
- env={
- "DEBUG": "true",
- "PORT": "8080"
- }
- }
-]
-```
-
-## Configuration Options
-
-### SSE Servers
-
-SSE servers are configured using either a string URL or an object with the following properties:
-
-- `url` (required)
- - Type: `str`
- - Description: The URL of the SSE server
-
-### Stdio Servers
-
-Stdio servers are configured using an object with the following properties:
-
-- `name` (required)
- - Type: `str`
- - Description: A unique name for the server
-
-- `command` (required)
- - Type: `str`
- - Description: The command to run the server
-
-- `args` (optional)
- - Type: `list of str`
- - Default: `[]`
- - Description: Command-line arguments to pass to the server
-
-- `env` (optional)
- - Type: `dict of str to str`
- - Default: `{}`
- - Description: Environment variables to set for the server process
-
-## How MCP Works
-
-When OpenHands starts, it:
-
-1. Reads the MCP configuration.
-2. Connects to any configured SSE servers.
-3. Starts any configured stdio servers.
-4. Registers the tools provided by these servers with the agent.
-
-The agent can then use these tools just like any built-in tool. When the agent calls an MCP tool:
-
-1. OpenHands routes the call to the appropriate MCP server.
-2. The server processes the request and returns a response.
-3. OpenHands converts the response to an observation and presents it to the agent.
diff --git a/docs/usage/prompting/microagents-keyword.mdx b/docs/usage/prompting/microagents-keyword.mdx
deleted file mode 100644
index 8dfa51d20382a9151de0b70dedb53017aaac1722..0000000000000000000000000000000000000000
--- a/docs/usage/prompting/microagents-keyword.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
----
-title: Keyword-Triggered Microagents
-description: Keyword-triggered microagents provide OpenHands with specific instructions that are activated when certain keywords appear in the prompt. This is useful for tailoring behavior based on particular tools, languages, or frameworks.
----
-
-## Usage
-
-These microagents are only loaded when a prompt includes one of the trigger words.
-
-## Frontmatter Syntax
-
-Frontmatter is required for keyword-triggered microagents. It must be placed at the top of the file,
-above the guidelines.
-
-Enclose the frontmatter in triple dashes (---) and include the following fields:
-
-| Field | Description | Required | Default |
-|------------|--------------------------------------------------|----------|------------------|
-| `triggers` | A list of keywords that activate the microagent. | Yes | None |
-| `agent` | The agent this microagent applies to. | No | 'CodeActAgent' |
-
-
-## Example
-
-Keyword-triggered microagent file example located at `.openhands/microagents/yummy.md`:
-```
----
-triggers:
-- yummyhappy
-- happyyummy
----
-
-The user has said the magic word. Respond with "That was delicious!"
-```
-
-[See examples of microagents triggered by keywords in the official OpenHands repository](https://github.com/All-Hands-AI/OpenHands/tree/main/microagents)
diff --git a/docs/usage/prompting/microagents-org.mdx b/docs/usage/prompting/microagents-org.mdx
deleted file mode 100644
index 46b20281341a1c2e06b8065ccafa4d0730ba8c84..0000000000000000000000000000000000000000
--- a/docs/usage/prompting/microagents-org.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: Organization and User Microagents
-description: Organizations and users can define microagents that apply to all repositories belonging to the organization or user.
----
-
-## Usage
-
-These microagents can be [any type of microagent](./microagents-overview#microagent-types) and will be loaded
-accordingly. However, they are applied to all repositories belonging to the organization or user.
-
-Add a `.openhands` repository under the organization or user and create a `microagents` directory and place the
-microagents in that directory.
-
-## Example
-
-General microagent file example for organization `Great-Co` located inside the `.openhands` repository:
-`microagents/org-microagent.md`:
-```
-* Use type hints and error boundaries; validate inputs at system boundaries and fail with meaningful error messages.
-* Document interfaces and public APIs; use implementation comments only for non-obvious logic.
-* Follow the same naming convention for variables, classes, constants, etc. already used in each repository.
-```
diff --git a/docs/usage/prompting/microagents-overview.mdx b/docs/usage/prompting/microagents-overview.mdx
deleted file mode 100644
index abc13c2b8dc915d39c40d9692c3f72419b81df49..0000000000000000000000000000000000000000
--- a/docs/usage/prompting/microagents-overview.mdx
+++ /dev/null
@@ -1,40 +0,0 @@
----
-title: Microagents Overview
-description: Microagents are specialized prompts that enhance OpenHands with domain-specific knowledge. They provide expert guidance, automate common tasks, and ensure consistent practices across projects.
----
-
-## Microagent Types
-
-Currently OpenHands supports the following types of microagents:
-
-- [General Microagents](./microagents-repo): General guidelines for OpenHands about the repository.
-- [Keyword-Triggered Microagents](./microagents-keyword): Guidelines activated by specific keywords in prompts.
-
-To customize OpenHands' behavior, create a .openhands/microagents/ directory in the root of your repository and
-add `.md` files inside. For repository-specific guidelines, you can ask OpenHands to analyze your repository and create a comprehensive `repo.md` file (see [General Microagents](./microagents-repo) for details).
-
-
-Loaded microagents take up space in the context window.
-These microagents, alongside user messages, inform OpenHands about the task and the environment.
-
-
-Example repository structure:
-
-```
-some-repository/
-└── .openhands/
- └── microagents/
- └── repo.md # General guidelines
- └── trigger_this.md # Microagent triggered by specific keywords
- └── trigger_that.md # Microagent triggered by specific keywords
-```
-
-## Microagents Frontmatter Requirements
-
-Each microagent file may include frontmatter that provides additional information. In some cases, this frontmatter
-is required:
-
-| Microagent Type | Required |
-|---------------------------------|----------|
-| `General Microagents` | No |
-| `Keyword-Triggered Microagents` | Yes |
diff --git a/docs/usage/prompting/microagents-public.mdx b/docs/usage/prompting/microagents-public.mdx
deleted file mode 100644
index c1e2ca7a4bda73ff73f069cfc99c672755f25ec5..0000000000000000000000000000000000000000
--- a/docs/usage/prompting/microagents-public.mdx
+++ /dev/null
@@ -1,49 +0,0 @@
----
-title: Global Microagents
-description: Global microagents are [keyword-triggered microagents](./microagents-keyword) that apply to all OpenHands users. A list of the current global microagents can be found [in the OpenHands repository](https://github.com/All-Hands-AI/OpenHands/tree/main/microagents).
----
-
-## Contributing a Global Microagent
-
-You can create global microagents and share with the community by opening a pull request to the official repository.
-
-See the [CONTRIBUTING.md](https://github.com/All-Hands-AI/OpenHands/blob/main/CONTRIBUTING.md) for specific instructions on how to contribute to OpenHands.
-
-### Global Microagents Best Practices
-
-- **Clear Scope**: Keep the microagent focused on a specific domain or task.
-- **Explicit Instructions**: Provide clear, unambiguous guidelines.
-- **Useful Examples**: Include practical examples of common use cases.
-- **Safety First**: Include necessary warnings and constraints.
-- **Integration Awareness**: Consider how the microagent interacts with other components.
-
-### Steps to Contribute a Global Microagent
-
-#### 1. Plan the Global Microagent
-
-Before creating a global microagent, consider:
-
-- What specific problem or use case will it address?
-- What unique capabilities or knowledge should it have?
-- What trigger words make sense for activating it?
-- What constraints or guidelines should it follow?
-
-#### 2. Create File
-
-Create a new Markdown file with a descriptive name in the appropriate directory:
-[`microagents/`](https://github.com/All-Hands-AI/OpenHands/tree/main/microagents)
-
-#### 3. Testing the Global Microagent
-
-- Test the agent with various prompts.
-- Verify trigger words activate the agent correctly.
-- Ensure instructions are clear and comprehensive.
-- Check for potential conflicts and overlaps with existing agents.
-
-#### 4. Submission Process
-
-Submit a pull request with:
-
-- The new microagent file.
-- Updated documentation if needed.
-- Description of the agent's purpose and capabilities.
diff --git a/docs/usage/prompting/microagents-repo.mdx b/docs/usage/prompting/microagents-repo.mdx
deleted file mode 100644
index e5c3943d5bccace73306ea4ce56aee5c4367cecc..0000000000000000000000000000000000000000
--- a/docs/usage/prompting/microagents-repo.mdx
+++ /dev/null
@@ -1,62 +0,0 @@
----
-title: General Microagents
-description: General guidelines for OpenHands to work more effectively with the repository.
----
-
-## Usage
-
-These microagents are always loaded as part of the context.
-
-## Frontmatter Syntax
-
-The frontmatter for this type of microagent is optional.
-
-Frontmatter should be enclosed in triple dashes (---) and may include the following fields:
-
-| Field | Description | Required | Default |
-|-----------|-----------------------------------------|----------|----------------|
-| `agent` | The agent this microagent applies to | No | 'CodeActAgent' |
-
-## Creating a Comprehensive Repository Agent
-
-To create an effective repository agent, you can ask OpenHands to analyze your repository with a prompt like:
-
-```
-Please browse the repository, look at the documentation and relevant code, and understand the purpose of this repository.
-
-Specifically, I want you to create a `.openhands/microagents/repo.md` file. This file should contain succinct information that summarizes:
-1. The purpose of this repository
-2. The general setup of this repo
-3. A brief description of the structure of this repo
-
-Read all the GitHub workflows under .github/ of the repository (if this folder exists) to understand the CI checks (e.g., linter, pre-commit), and include those in the repo.md file.
-```
-
-This approach helps OpenHands capture repository context efficiently, reducing the need for repeated searches during conversations and ensuring more accurate solutions.
-
-## Example Content
-
-A comprehensive repository agent file (`.openhands/microagents/repo.md`) should include:
-
-```
-# Repository Purpose
-This project is a TODO application that allows users to track TODO items.
-
-# Setup Instructions
-To set it up, you can run `npm run build`.
-
-# Repository Structure
-- `/src`: Core application code
-- `/tests`: Test suite
-- `/docs`: Documentation
-- `/.github`: CI/CD workflows
-
-# CI/CD Workflows
-- `lint.yml`: Runs ESLint on all JavaScript files
-- `test.yml`: Runs the test suite on pull requests
-
-# Development Guidelines
-Always make sure the tests are passing before committing changes. You can run the tests by running `npm run test`.
-```
-
-[See more examples of general microagents here.](https://github.com/All-Hands-AI/OpenHands/tree/main/.openhands/microagents)
diff --git a/docs/usage/prompting/prompting-best-practices.mdx b/docs/usage/prompting/prompting-best-practices.mdx
deleted file mode 100644
index 4802e85e76bf3fb4c5055f9c87c9e1a0b5267671..0000000000000000000000000000000000000000
--- a/docs/usage/prompting/prompting-best-practices.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: Prompting Best Practices
-description: When working with OpenHands AI software developer, providing clear and effective prompts is key to getting accurate and useful responses. This guide outlines best practices for crafting effective prompts.
----
-
-## Characteristics of Good Prompts
-
-Good prompts are:
-
-- **Concrete**: Clearly describe what functionality should be added or what error needs fixing.
-- **Location-specific**: Specify the locations in the codebase that should be modified, if known.
-- **Appropriately scoped**: Focus on a single feature, typically not exceeding 100 lines of code.
-
-## Examples
-
-### Good Prompt Examples
-
-- Add a function `calculate_average` in `utils/math_operations.py` that takes a list of numbers as input and returns their average.
-- Fix the TypeError in `frontend/src/components/UserProfile.tsx` occurring on line 42. The error suggests we're trying to access a property of undefined.
-- Implement input validation for the email field in the registration form. Update `frontend/src/components/RegistrationForm.tsx` to check if the email is in a valid format before submission.
-
-### Bad Prompt Examples
-
-- Make the code better. (Too vague, not concrete)
-- Rewrite the entire backend to use a different framework. (Not appropriately scoped)
-- There's a bug somewhere in the user authentication. Can you find and fix it? (Lacks specificity and location information)
-
-## Tips for Effective Prompting
-
-- Be as specific as possible about the desired outcome or the problem to be solved.
-- Provide context, including relevant file paths and line numbers if available.
-- Break large tasks into smaller, manageable prompts.
-- Include relevant error messages or logs.
-- Specify the programming language or framework, if not obvious.
-
-The more precise and informative your prompt, the better OpenHands can assist you.
-
-See [Getting Started with OpenHands](../getting-started) for more examples of helpful prompts.
diff --git a/docs/usage/prompting/repository.mdx b/docs/usage/prompting/repository.mdx
deleted file mode 100644
index 0bb64ee47b170e23bc5cd05b756853aeb3f99f90..0000000000000000000000000000000000000000
--- a/docs/usage/prompting/repository.mdx
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: Repository Customization
-description: You can customize how OpenHands interacts with your repository by creating a `.openhands` directory at the root level.
----
-
-## Microagents
-
-Microagents allow you to extend OpenHands prompts with information specific to your project and define how OpenHands
-should function. See [Microagents Overview](../prompting/microagents-overview) for more information.
-
-
-## Setup Script
-You can add a `.openhands/setup.sh` file, which will run every time OpenHands begins working with your repository.
-This is an ideal location for installing dependencies, setting environment variables, and performing other setup tasks.
-
-For example:
-```bash
-#!/bin/bash
-export MY_ENV_VAR="my value"
-sudo apt-get update
-sudo apt-get install -y lsof
-cd frontend && npm install ; cd ..
-```
-
-## Pre-commit Script
-You can add a `.openhands/pre-commit.sh` file to create a custom git pre-commit hook that runs before each commit.
-This can be used to enforce code quality standards, run tests, or perform other checks before allowing commits.
-
-For example:
-```bash
-#!/bin/bash
-# Run linting checks
-cd frontend && npm run lint
-if [ $? -ne 0 ]; then
- echo "Frontend linting failed. Please fix the issues before committing."
- exit 1
-fi
-
-# Run tests
-cd backend && pytest tests/unit
-if [ $? -ne 0 ]; then
- echo "Backend tests failed. Please fix the issues before committing."
- exit 1
-fi
-
-exit 0
-```
diff --git a/docs/usage/runtimes/daytona.mdx b/docs/usage/runtimes/daytona.mdx
deleted file mode 100644
index 6a4b921a5407440245a362c96afbae6bd7b7278c..0000000000000000000000000000000000000000
--- a/docs/usage/runtimes/daytona.mdx
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: Daytona Runtime
-description: You can use [Daytona](https://www.daytona.io/) as a runtime provider.
----
-
-
-## Step 1: Retrieve Your Daytona API Key
-1. Visit the [Daytona Dashboard](https://app.daytona.io/dashboard/keys).
-2. Click **"Create Key"**.
-3. Enter a name for your key and confirm the creation.
-4. Once the key is generated, copy it.
-
-## Step 2: Set Your API Key as an Environment Variable
-Run the following command in your terminal, replacing `<your-api-key>` with the actual key you copied:
-
-### Mac/Linux:
-```bash
-export DAYTONA_API_KEY="<your-api-key>"
-```
-
-### Windows PowerShell:
-```powershell
-$env:DAYTONA_API_KEY="<your-api-key>"
-```
-
-This step ensures that OpenHands can authenticate with the Daytona platform when it runs.
-
-## Step 3: Run OpenHands Locally Using Docker
-To start the latest version of OpenHands on your machine, execute the following command in your terminal:
-
-### Mac/Linux:
-```bash
-bash -i <(curl -sL https://get.daytona.io/openhands)
-```
-
-### Windows:
-```powershell
-powershell -Command "irm https://get.daytona.io/openhands-windows | iex"
-```
-
-### What This Command Does:
-- Downloads the latest OpenHands release script.
-- Runs the script in an interactive Bash session.
-- Automatically pulls and runs the OpenHands container using Docker.
-
-Once executed, OpenHands should be running locally and ready for use.
-
-For more details and manual initialization, view the entire [README.md](https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/runtime/impl/daytona/README.md)
diff --git a/docs/usage/runtimes/docker.mdx b/docs/usage/runtimes/docker.mdx
deleted file mode 100644
index b007eff29ed3b38f6fa5acec07eb4138bb834425..0000000000000000000000000000000000000000
--- a/docs/usage/runtimes/docker.mdx
+++ /dev/null
@@ -1,130 +0,0 @@
----
-title: Docker Runtime
-description: This is the default Runtime that's used when you start OpenHands.
----
-
-This is the default Runtime that's used when you start OpenHands.
-
-## Image
-The `SANDBOX_RUNTIME_CONTAINER_IMAGE` from nikolaik is a pre-built runtime image
-that contains our Runtime server, as well as some basic utilities for Python and NodeJS.
-You can also [build your own runtime image](../how-to/custom-sandbox-guide).
-
-## Connecting to Your filesystem
-A useful feature is the ability to connect to your local filesystem. To mount your filesystem into the runtime:
-
-### Using SANDBOX_VOLUMES
-
-The simplest way to mount your local filesystem is to use the `SANDBOX_VOLUMES` environment variable:
-
-```bash
-export SANDBOX_VOLUMES=/path/to/your/code:/workspace:rw
-
-docker run # ...
- -e SANDBOX_USER_ID=$(id -u) \
- -e SANDBOX_VOLUMES=$SANDBOX_VOLUMES \
- # ...
-```
-
-The `SANDBOX_VOLUMES` format is `host_path:container_path[:mode]` where:
-
-- `host_path`: The path on your host machine that you want to mount.
-- `container_path`: The path inside the container where the host path will be mounted.
- - Use `/workspace` for files you want the agent to modify. The agent works in `/workspace` by default.
- - Use a different path (e.g., `/data`) for read-only reference materials or large datasets.
-- `mode`: Optional mount mode, either `rw` (read-write, default) or `ro` (read-only).
-
-You can also specify multiple mounts by separating them with commas (`,`):
-
-```bash
-export SANDBOX_VOLUMES=/path1:/workspace/path1,/path2:/workspace/path2:ro
-```
-
-Examples:
-
-```bash
-# Linux and Mac Example - Writable workspace
-export SANDBOX_VOLUMES=$HOME/OpenHands:/workspace:rw
-
-# WSL on Windows Example - Writable workspace
-export SANDBOX_VOLUMES=/mnt/c/dev/OpenHands:/workspace:rw
-
-# Read-only reference code example
-export SANDBOX_VOLUMES=/path/to/reference/code:/data:ro
-
-# Multiple mounts example - Writable workspace with read-only reference data
-export SANDBOX_VOLUMES=$HOME/projects:/workspace:rw,/path/to/large/dataset:/data:ro
-```
-
-### Using WORKSPACE_* variables (Deprecated)
-
-> **Note:** This method is deprecated and will be removed in a future version. Please use `SANDBOX_VOLUMES` instead.
-
-1. Set `WORKSPACE_BASE`:
-
- ```bash
- export WORKSPACE_BASE=/path/to/your/code
- ```
-
-2. Add the following options to the `docker run` command:
-
- ```bash
- docker run # ...
- -e SANDBOX_USER_ID=$(id -u) \
- -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \
- -v $WORKSPACE_BASE:/opt/workspace_base \
- # ...
- ```
-
-Be careful! There's nothing stopping the OpenHands agent from deleting or modifying
-any files that are mounted into its workspace.
-
-The `-e SANDBOX_USER_ID=$(id -u)` is passed to the Docker command to ensure the sandbox user matches the host user’s
-permissions. This prevents the agent from creating root-owned files in the mounted workspace.
-
-## Hardened Docker Installation
-
-When deploying OpenHands in environments where security is a priority, you should consider implementing a hardened
-Docker configuration. This section provides recommendations for securing your OpenHands Docker deployment beyond the default configuration.
-
-### Security Considerations
-
-The default Docker configuration in the README is designed for ease of use on a local development machine. If you're
-running on a public network (e.g. airport WiFi), you should implement additional security measures.
-
-### Network Binding Security
-
-By default, OpenHands binds to all network interfaces (`0.0.0.0`), which can expose your instance to all networks the
-host is connected to. For a more secure setup:
-
-1. **Restrict Network Binding**: Use the `runtime_binding_address` configuration to restrict which network interfaces OpenHands listens on:
-
- ```bash
- docker run # ...
- -e SANDBOX_RUNTIME_BINDING_ADDRESS=127.0.0.1 \
- # ...
- ```
-
- This configuration ensures OpenHands only listens on the loopback interface (`127.0.0.1`), making it accessible only from the local machine.
-
-2. **Secure Port Binding**: Modify the `-p` flag to bind only to localhost instead of all interfaces:
-
- ```bash
- docker run # ... \
- -p 127.0.0.1:3000:3000 \
- ```
-
- This ensures that the OpenHands web interface is only accessible from the local machine, not from other machines on the network.
-
-### Network Isolation
-
-Use Docker's network features to isolate OpenHands:
-
-```bash
-# Create an isolated network
-docker network create openhands-network
-
-# Run OpenHands in the isolated network
-docker run # ... \
- --network openhands-network \
-```
diff --git a/docs/usage/runtimes/e2b.mdx b/docs/usage/runtimes/e2b.mdx
deleted file mode 100644
index 1149a65afd3ccac77f7e4ee6edcd5df398c594fb..0000000000000000000000000000000000000000
--- a/docs/usage/runtimes/e2b.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: E2B Runtime
-description: E2B is an open-source secure cloud environment (sandbox) made for running AI-generated code and agents.
----
-
-[E2B](https://e2b.dev) offers [Python](https://pypi.org/project/e2b/) and [JS/TS](https://www.npmjs.com/package/e2b) SDK to spawn and control these sandboxes.
-
-## Getting started
-
-1. [Get your API key](https://e2b.dev/docs/getting-started/api-key)
-
-1. Set your E2B API key to the `E2B_API_KEY` env var when starting the Docker container
-
-1. **Optional** - Install the CLI with NPM.
- ```sh
- npm install -g @e2b/cli@latest
- ```
- Full CLI API is [here](https://e2b.dev/docs/cli/installation).
-
-## OpenHands sandbox
-You can use the E2B CLI to create a custom sandbox with a Dockerfile. Read the full guide [here](https://e2b.dev/docs/guide/custom-sandbox). The premade OpenHands sandbox for E2B is set up in the `containers` directory, and it's called `openhands`.
-
-## Debugging
-You can connect to a running E2B sandbox with E2B CLI in your terminal.
-
-- List all running sandboxes (based on your API key)
- ```sh
- e2b sandbox list
- ```
-
-- Connect to a running sandbox
- ```sh
- e2b sandbox connect
- ```
-
-## Links
-- [E2B Docs](https://e2b.dev/docs)
-- [E2B GitHub](https://github.com/e2b-dev/e2b)
diff --git a/docs/usage/runtimes/local.mdx b/docs/usage/runtimes/local.mdx
deleted file mode 100644
index c538c897a7749dcdeb7a1bcfd69a727c7ff5f68d..0000000000000000000000000000000000000000
--- a/docs/usage/runtimes/local.mdx
+++ /dev/null
@@ -1,70 +0,0 @@
----
-title: Local Runtime
-description: The Local Runtime allows the OpenHands agent to execute actions directly on your local machine without using Docker. This runtime is primarily intended for controlled environments like CI pipelines or testing scenarios where Docker is not available.
----
-
-
-**Security Warning**: The Local Runtime runs without any sandbox isolation. The agent can directly access and modify
-files on your machine. Only use this runtime in controlled environments or when you fully understand the security implications.
-
-
-## Prerequisites
-
-Before using the Local Runtime, ensure that:
-
-1. You can run OpenHands using the [Development workflow](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md).
-2. For Linux and Mac, tmux is available on your system.
-3. For Windows, PowerShell is available on your system.
- - Only [CLI mode](../how-to/cli-mode) and [headless mode](../how-to/headless-mode) are supported in Windows with Local Runtime.
-
-## Configuration
-
-To use the Local Runtime, besides required configurations like the LLM provider, model and API key, you'll need to set
-the following options via environment variables or the [config.toml file](https://github.com/All-Hands-AI/OpenHands/blob/main/config.template.toml) when starting OpenHands:
-
-Via environment variables (please use PowerShell syntax for Windows PowerShell):
-
-```bash
-# Required
-export RUNTIME=local
-
-# Optional but recommended
-# The agent works in /workspace by default, so mount your project directory there
-export SANDBOX_VOLUMES=/path/to/your/workspace:/workspace:rw
-# For read-only data, use a different mount path
-# export SANDBOX_VOLUMES=/path/to/your/workspace:/workspace:rw,/path/to/large/dataset:/data:ro
-```
-
-Via `config.toml`:
-
-```toml
-[core]
-runtime = "local"
-
-[sandbox]
-# The agent works in /workspace by default, so mount your project directory there
-volumes = "/path/to/your/workspace:/workspace:rw"
-# For read-only data, use a different mount path
-# volumes = "/path/to/your/workspace:/workspace:rw,/path/to/large/dataset:/data:ro"
-```
-
-If `SANDBOX_VOLUMES` is not set, the runtime will create a temporary directory for the agent to work in.
-
-## Example Usage
-
-Here's an example of how to start OpenHands with the Local Runtime in Headless Mode:
-
-```bash
-export RUNTIME=local
-export SANDBOX_VOLUMES=/my_folder/myproject:/workspace:rw
-
-poetry run python -m openhands.core.main -t "write a bash script that prints hi"
-```
-
-## Use Cases
-
-The Local Runtime is particularly useful for:
-
-- CI/CD pipelines where Docker is not available.
-- Testing and development of OpenHands itself.
-- Environments where container usage is restricted (e.g. native Windows).
diff --git a/docs/usage/runtimes/modal.mdx b/docs/usage/runtimes/modal.mdx
deleted file mode 100644
index c5702ec2472d45d76cdbd48f82945241074f0a43..0000000000000000000000000000000000000000
--- a/docs/usage/runtimes/modal.mdx
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Modal Runtime
----
-
-Our partners at [Modal](https://modal.com/) have provided a runtime for OpenHands.
-To use the Modal Runtime, create an account, and then [create an API key.](https://modal.com/settings)
-
-You'll then need to set the following environment variables when starting OpenHands:
-```bash
-docker run # ...
- -e RUNTIME=modal \
- -e MODAL_API_TOKEN_ID="your-id" \
- -e MODAL_API_TOKEN_SECRET="modal-api-key" \
-```
diff --git a/docs/usage/runtimes/overview.mdx b/docs/usage/runtimes/overview.mdx
deleted file mode 100644
index 4513aa5ea0f8ff0d022e70b091ae2177ffefcda2..0000000000000000000000000000000000000000
--- a/docs/usage/runtimes/overview.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: Overview
-description: This section is for users that would like to use a runtime other than Docker for OpenHands.
----
-
-A Runtime is an environment where the OpenHands agent can edit files and run
-commands.
-
-By default, OpenHands uses a [Docker-based runtime](/usage/runtimes/docker), running on your local computer.
-This means you only have to pay for the LLM you're using, and your code is only ever sent to the LLM.
-
-We also support other runtimes, which are typically managed by third-parties.
-
-Additionally, we provide a [Local Runtime](/usage/runtimes/local) that runs directly on your machine without Docker,
-which can be useful in controlled environments like CI pipelines.
-
-## Available Runtimes
-
-OpenHands supports several different runtime environments:
-
-- [Docker Runtime](/usage/runtimes/docker) - The default runtime that uses Docker containers for isolation (recommended for most users).
-- [OpenHands Remote Runtime](/usage/runtimes/remote) - Cloud-based runtime for parallel execution (beta).
-- [Local Runtime](/usage/runtimes/local) - Direct execution on your local machine without Docker.
-- And more third-party runtimes:
- - [Modal Runtime](/usage/runtimes/modal) - Runtime provided by our partners at Modal.
- - [Daytona Runtime](/usage/runtimes/daytona) - Runtime provided by Daytona.
diff --git a/docs/usage/runtimes/remote.mdx b/docs/usage/runtimes/remote.mdx
deleted file mode 100644
index 7aaa62c5bcfde6085d16c963f37a6634d9430a9d..0000000000000000000000000000000000000000
--- a/docs/usage/runtimes/remote.mdx
+++ /dev/null
@@ -1,7 +0,0 @@
----
-title: Remote Runtime
-description: This runtime is specifically designed for agent evaluation purposes only through the [OpenHands evaluation harness](https://github.com/All-Hands-AI/OpenHands/tree/main/evaluation). It should not be used to launch production OpenHands applications.
----
-
-OpenHands Remote Runtime is currently in beta (read [here](https://runtime.all-hands.dev/) for more details). It allows you to launch runtimes
-in parallel in the cloud. Fill out [this form](https://docs.google.com/forms/d/e/1FAIpQLSckVz_JFwg2_mOxNZjCtr7aoBFI2Mwdan3f75J_TrdMS1JV2g/viewform) to apply if you want to try this out!
diff --git a/docs/usage/runtimes/runloop.mdx b/docs/usage/runtimes/runloop.mdx
deleted file mode 100644
index faaf54c9bb618be47c68b20d32b403fd85a69641..0000000000000000000000000000000000000000
--- a/docs/usage/runtimes/runloop.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: Runloop Runtime
-description: Runloop provides a fast, secure and scalable AI sandbox (Devbox). Check out the [runloop docs](https://docs.runloop.ai/overview/what-is-runloop) for more detail.
----
-
-## Access
-Runloop is currently available in a closed beta. For early access, or
-just to say hello, sign up at https://www.runloop.ai/hello
-
-## Set up
-With your Runloop API key,
-```bash
-export RUNLOOP_API_KEY="<your-api-key>"
-```
-
-Configure the runtime
-```bash
-export RUNTIME="runloop"
-```
-
-## Interact with your devbox
-Runloop provides additional tools to interact with your Devbox based
-runtime environment. See the [docs](https://docs.runloop.ai/tools) for an up
-to date list of tools.
-
-### Dashboard
-View logs, ssh into, or view your Devbox status from the [dashboard](https://platform.runloop.ai)
-
-### CLI
-Use the Runloop CLI to view logs, execute commands, and more.
-See the setup instructions [here](https://docs.runloop.ai/tools/cli)
diff --git a/docs/usage/search-engine-setup.mdx b/docs/usage/search-engine-setup.mdx
deleted file mode 100644
index 830372f95ab9dc20bbd8e64d756d644237853835..0000000000000000000000000000000000000000
--- a/docs/usage/search-engine-setup.mdx
+++ /dev/null
@@ -1,66 +0,0 @@
----
-title: Search Engine Setup
-description: Configure OpenHands to use Tavily as a search engine
----
-
-## Setting Up Search Engine in OpenHands
-
-OpenHands can be configured to use [Tavily](https://tavily.com/) as a search engine, which allows the agent to search the web for information when needed. This capability enhances the agent's ability to provide up-to-date information and solve problems that require external knowledge.
-
-### Getting a Tavily API Key
-
-To use the search functionality in OpenHands, you'll need to obtain a Tavily API key:
-
-1. Visit [Tavily's website](https://tavily.com/) and sign up for an account
-2. Navigate to the API section in your dashboard
-3. Generate a new API key
-4. Copy the API key (it should start with `tvly-`)
-
-### Configuring Search in OpenHands
-
-Once you have your Tavily API key, you can configure OpenHands to use it:
-
-#### In the OpenHands UI
-
-1. Open OpenHands and navigate to the Settings page by clicking the gear icon
-2. In the LLM settings tab, locate the "Search API Key (Tavily)" field
-3. Enter your Tavily API key (starting with `tvly-`)
-4. Click "Save" to apply the changes
-
-
-The search API key field is optional. If you don't provide a key, the search functionality will not be available to the agent.
-
-
-#### Using Configuration Files
-
-If you're running OpenHands in headless mode or via CLI, you can configure the search API key in your configuration file:
-
-```toml
-# In your OpenHands config file
-[core]
-search_api_key = "tvly-your-api-key-here"
-```
-
-### How Search Works in OpenHands
-
-When the search engine is configured:
-
-1. The agent can decide to search the web when it needs external information
-2. Search queries are sent to Tavily's API via [Tavily's MCP server](https://github.com/tavily-ai/tavily-mcp) which includes a variety of [tools](https://docs.tavily.com/documentation/api-reference/introduction) (search, extract, crawl, map).
-3. Results are returned and incorporated into the agent's context
-4. The agent can use this information to provide more accurate and up-to-date responses
-
-### Limitations
-
-- Search results depend on Tavily's coverage and freshness
-- Usage may be subject to Tavily's rate limits and pricing tiers
-- The agent will only search when it determines that external information is needed
-
-### Troubleshooting
-
-If you encounter issues with the search functionality:
-
-- Verify that your API key is correct and active
-- Check that your API key starts with `tvly-`
-- Ensure you have an active internet connection
-- Check Tavily's status page for any service disruptions
diff --git a/docs/usage/troubleshooting/troubleshooting.mdx b/docs/usage/troubleshooting/troubleshooting.mdx
deleted file mode 100644
index dcffd7da3589d8d1d5f3225fa2702f6d0ae7d2bc..0000000000000000000000000000000000000000
--- a/docs/usage/troubleshooting/troubleshooting.mdx
+++ /dev/null
@@ -1,71 +0,0 @@
----
-title: Troubleshooting
----
-
-
-OpenHands only supports Windows via WSL. Please be sure to run all commands inside your WSL terminal.
-
-
-### Launch docker client failed
-
-**Description**
-
-When running OpenHands, the following error is seen:
-```
-Launch docker client failed. Please make sure you have installed docker and started docker desktop/daemon.
-```
-
-**Resolution**
-
-Try these in order:
-* Confirm `docker` is running on your system. You should be able to run `docker ps` in the terminal successfully.
-* If using Docker Desktop, ensure `Settings > Advanced > Allow the default Docker socket to be used` is enabled.
-* Depending on your configuration you may need `Settings > Resources > Network > Enable host networking` enabled in Docker Desktop.
-* Reinstall Docker Desktop.
-
-### Permission Error
-
-**Description**
-
-On initial prompt, an error is seen with `Permission Denied` or `PermissionError`.
-
-**Resolution**
-
-* Check if the `~/.openhands-state` is owned by `root`. If so, you can:
-  * Change the directory's ownership: `sudo chown <user>:<group> ~/.openhands-state`.
- * or update permissions on the directory: `sudo chmod 777 ~/.openhands-state`
- * or delete it if you don’t need previous data. OpenHands will recreate it. You'll need to re-enter LLM settings.
-* If mounting a local directory, ensure your `WORKSPACE_BASE` has the necessary permissions for the user running
- OpenHands.
-
-### Unable to access VS Code tab via local IP
-
-**Description**
-
-When accessing OpenHands through a non-localhost URL (such as a LAN IP address), the VS Code tab shows a "Forbidden"
-error, while other parts of the UI work fine.
-
-**Resolution**
-
-This happens because VS Code runs on a random high port that may not be exposed or accessible from other machines.
-To fix this:
-
-1. Set a specific port for VS Code using the `SANDBOX_VSCODE_PORT` environment variable:
- ```bash
- docker run -it --rm \
- -e SANDBOX_VSCODE_PORT=41234 \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:latest \
- -v /var/run/docker.sock:/var/run/docker.sock \
- -v ~/.openhands-state:/.openhands-state \
- -p 3000:3000 \
- -p 41234:41234 \
- --add-host host.docker.internal:host-gateway \
- --name openhands-app \
- docker.all-hands.dev/all-hands-ai/openhands:latest
- ```
-2. Make sure to expose the same port with `-p 41234:41234` in your Docker command.
-3. If running with the development workflow, you can set this in your `config.toml` file:
- ```toml
- [sandbox]
- vscode_port = 41234
- ```