diff --git a/.env.example b/.env.example new file mode 100644 index 0000000000000000000000000000000000000000..766252b488ecd95977322b39f2b26e944473cf1b --- /dev/null +++ b/.env.example @@ -0,0 +1,2 @@ +PORT=3000 +NODE_ENV=development diff --git a/.eslintrc.json b/.eslintrc.json new file mode 100644 index 0000000000000000000000000000000000000000..f7424951d3a54825ac7f8172cd10656f667e036b --- /dev/null +++ b/.eslintrc.json @@ -0,0 +1,25 @@ +{ + "parser": "@typescript-eslint/parser", + "extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended"], + "parserOptions": { + "ecmaVersion": 2020, + "sourceType": "module" + }, + "env": { + "node": true, + "es6": true, + "jest": true + }, + "rules": { + "no-console": "off", + "@typescript-eslint/no-unused-vars": [ + "error", + { + "argsIgnorePattern": "^_", + "varsIgnorePattern": "^_" + } + ], + "@typescript-eslint/no-explicit-any": "off", + "no-undef": "off", + } +} diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..ef3bf669d65b2f43f53ca5fade50a9cc5b797701 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +docs/images/checks-passed.png filter=lfs diff=lfs merge=lfs -text +docs/images/hero-dark.png filter=lfs diff=lfs merge=lfs -text +docs/images/hero-light.png filter=lfs diff=lfs merge=lfs -text diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000000000000000000000000000000000000..79eb368464cb13d55a2b3c38a97e38f81108896c --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,15 @@ +# These are supported funding model platforms + +github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: 
samanhappy +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +polar: # Replace with a single Polar username +buy_me_a_coffee: # Replace with a single Buy Me a Coffee username +thanks_dev: # Replace with a single thanks.dev username +custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git "a/.github/ISSUE_TEMPLATE/bug-report---bug-\346\212\245\345\221\212.md" "b/.github/ISSUE_TEMPLATE/bug-report---bug-\346\212\245\345\221\212.md" new file mode 100644 index 0000000000000000000000000000000000000000..5928e2b7bb90bfcfcb7a023a73de6614b6736872 --- /dev/null +++ "b/.github/ISSUE_TEMPLATE/bug-report---bug-\346\212\245\345\221\212.md" @@ -0,0 +1,29 @@ +--- +name: Bug Report / Bug 报告 +about: Create a report to help us improve / 报告问题以帮助改进 +title: '' +labels: bug +assignees: '' + +--- + +**Bug Description / 问题描述** +What happened? / 发生了什么? + +**Steps to Reproduce / 复现步骤** +1. +2. +3. + +**Expected Behavior / 预期行为** +What should happen? / 应该发生什么? + +**Environment / 运行环境** +- Running on / 运行方式: [docker/npx/local / docker/npx/本地] +- Version / 版本: [e.g. 1.0.0] + +**Screenshots / 截图** +If relevant, add screenshots / 如果有帮助的话,请添加截图 + +**Additional Info / 补充信息** +Any other details? / 还有其他信息吗? 
diff --git "a/.github/ISSUE_TEMPLATE/feature-request---\345\212\237\350\203\275\350\257\267\346\261\202.md" "b/.github/ISSUE_TEMPLATE/feature-request---\345\212\237\350\203\275\350\257\267\346\261\202.md" new file mode 100644 index 0000000000000000000000000000000000000000..bd5ef34124f4731cb52f236d07f48f67785ffc8e --- /dev/null +++ "b/.github/ISSUE_TEMPLATE/feature-request---\345\212\237\350\203\275\350\257\267\346\261\202.md" @@ -0,0 +1,20 @@ +--- +name: Feature request / 功能请求 +about: Suggest an idea for this project / 为项目提出新想法 +title: '' +labels: enhancement +assignees: '' + +--- + +**Current Problem / 当前问题** +What problem are you trying to solve? / 您想要解决什么问题? + +**Proposed Solution / 建议方案** +How would you like this to work? / 您期望的解决方案是什么? + +**Alternatives / 替代方案** +Have you considered any alternatives? / 您是否考虑过其他解决方案? + +**Additional Context / 补充说明** +Any screenshots, mockups, or relevant information? / 有任何截图、设计图或相关信息吗? diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000000000000000000000000000000000..f71d469b239d6820ba5cd560cb539e441c4ffec2 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. 
+# Please see the documentation for all configuration options: +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + +version: 2 +updates: + - package-ecosystem: "npm" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "monthly" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000000000000000000000000000000000000..829b70afb60c824ee999bf8e7a714e0bf73ebe25 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,61 @@ +name: Build + +on: + push: + tags: ['v*.*.*'] + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + variant: ${{ startsWith(github.ref, 'refs/tags/') && fromJSON('["base", "full"]') || fromJSON('["base"]') }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Update version from tag + if: startsWith(github.ref, 'refs/tags/') + run: | + VERSION=${GITHUB_REF#refs/tags/v} + echo "Updating package.json version to $VERSION" + jq ".version = \"$VERSION\"" package.json > package.json.tmp + mv package.json.tmp package.json + echo "Updated version in package.json:" + grep -m 1 "version" package.json + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Docker metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: samanhappy/mcphub + tags: | + type=raw,value=edge${{ matrix.variant == 'full' && '-full' || '' }},enable=${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} + type=semver,pattern={{version}}${{ matrix.variant == 'full' && '-full' || '' }},enable=${{ startsWith(github.ref, 'refs/tags/') }} + type=raw,value=latest${{ matrix.variant == 'full' && '-full' || '' }},enable=${{ 
startsWith(github.ref, 'refs/tags/') }} + flavor: | + latest=false + + - name: Build and Push Docker Image + uses: docker/build-push-action@v5 + with: + context: . + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max,scope=${{ matrix.variant }} + platforms: linux/amd64,linux/arm64 + build-args: | + INSTALL_EXT=${{ matrix.variant == 'full' && 'true' || 'false' }} diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml new file mode 100644 index 0000000000000000000000000000000000000000..e53caba9d77671bb50dba89eef05fb40a71c0fd2 --- /dev/null +++ b/.github/workflows/npm-publish.yml @@ -0,0 +1,58 @@ +name: Publish to NPM + +on: + push: + tags: ['v*.*.*'] + +jobs: + publish-npm: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + registry-url: 'https://registry.npmjs.org' + + - name: Setup pnpm + uses: pnpm/action-setup@v2 + with: + version: 10 + run_install: false + + - name: Get pnpm store directory + id: pnpm-cache + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT + + - name: Setup pnpm cache + uses: actions/cache@v4 + with: + path: ${{ steps.pnpm-cache.outputs.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Update version from tag + run: | + VERSION=${GITHUB_REF#refs/tags/v} + echo "Updating package.json version to $VERSION" + jq ".version = \"$VERSION\"" package.json > package.json.tmp + mv package.json.tmp package.json + echo "Updated version in package.json:" + grep -m 1 "version" package.json + + - name: Build package + run: pnpm build + + - name: Publish to NPM + run: pnpm publish 
--no-git-checks --access public + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000000000000000000000000000000000..7021fe4dfd91a5281d0b6c17284c66f913b39d83 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,20 @@ +name: GitHub Release + +on: + push: + tags: ['v*.*.*'] + +permissions: + contents: write + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Release + uses: softprops/action-gh-release@v2 + with: + generate_release_notes: true diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..1bb3bc760cc9f4ef4f0b8b0826ff7ce2b7a2c907 --- /dev/null +++ b/.gitignore @@ -0,0 +1,26 @@ +# dependencies +/node_modules +/.pnp +.pnp.js + +# production +dist + +# env files +.env +.env.local +.env.development.local +.env.test.local +.env.production.local + +# logs +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# misc +.DS_Store +.idea/ +.vscode/ +*.log +coverage/ diff --git a/.prettierrc b/.prettierrc new file mode 100644 index 0000000000000000000000000000000000000000..ca8527e0ddebfc43ad3bfab4cfd6437e7169f789 --- /dev/null +++ b/.prettierrc @@ -0,0 +1,7 @@ +{ + "semi": true, + "trailingComma": "all", + "singleQuote": true, + "printWidth": 100, + "tabWidth": 2 +} diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..7eff4a5801f48cde495b53102c4d68138664f322 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,59 @@ +FROM python:3.13-slim-bookworm AS base + +COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/ + +# 添加 HTTP_PROXY 和 HTTPS_PROXY 环境变量 +ARG HTTP_PROXY="" +ARG HTTPS_PROXY="" +ENV HTTP_PROXY=$HTTP_PROXY +ENV HTTPS_PROXY=$HTTPS_PROXY + +RUN apt-get update && apt-get install -y curl gnupg git \ + && curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \ + && apt-get install -y nodejs \ + && 
apt-get clean && rm -rf /var/lib/apt/lists/* + +RUN npm install -g pnpm + +ARG REQUEST_TIMEOUT=60000 +ENV REQUEST_TIMEOUT=$REQUEST_TIMEOUT + +ARG BASE_PATH="" +ENV BASE_PATH=$BASE_PATH + +ENV PNPM_HOME=/usr/local/share/pnpm +ENV PATH=$PNPM_HOME:$PATH +RUN mkdir -p $PNPM_HOME && \ + pnpm add -g @amap/amap-maps-mcp-server @playwright/mcp@latest tavily-mcp@latest @modelcontextprotocol/server-github @modelcontextprotocol/server-slack + +ARG INSTALL_EXT=false +RUN if [ "$INSTALL_EXT" = "true" ]; then \ + ARCH=$(uname -m); \ + if [ "$ARCH" = "x86_64" ]; then \ + npx -y playwright install --with-deps chrome; \ + else \ + echo "Skipping Chrome installation on non-amd64 architecture: $ARCH"; \ + fi; \ + fi + +RUN uv tool install mcp-server-fetch + +WORKDIR /app + +COPY package.json pnpm-lock.yaml ./ +RUN pnpm install + +COPY . . + +# Download the latest servers.json from mcpm.sh and replace the existing file +RUN curl -s -f --connect-timeout 10 https://mcpm.sh/api/servers.json -o servers.json || echo "Failed to download servers.json, using bundled version" + +RUN pnpm frontend:build && pnpm build + +COPY entrypoint.sh /usr/local/bin/entrypoint.sh +RUN chmod +x /usr/local/bin/entrypoint.sh + +EXPOSE 3000 + +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] +CMD ["pnpm", "start"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/README.md b/README.md index de33b022e0e3a4309d69578b6770e5438096e172..f7b4d8155f16d3ad489627d21fc7bb27d8a5b7e2 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,15 @@ --- -title: Mcphub -emoji: 💻 -colorFrom: purple -colorTo: pink +title: "mcphub" +emoji: "🚀" +colorFrom: blue +colorTo: green sdk: docker -pinned: false +app_port: 3000 --- -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +### 🚀 一键部署 +[![Deploy with HFSpaceDeploy](https://img.shields.io/badge/Deploy_with-HFSpaceDeploy-green?style=social&logo=rocket)](https://github.com/kfcx/HFSpaceDeploy) + +本项目由[HFSpaceDeploy](https://github.com/kfcx/HFSpaceDeploy)一键部署 + + diff --git a/README.zh.md b/README.zh.md new file mode 100644 index 0000000000000000000000000000000000000000..8971274d20a024d410ef43df109070dde123f888 --- /dev/null +++ b/README.zh.md @@ -0,0 +1,234 @@ +# MCPHub:一站式 MCP 服务器聚合平台 + +[English Version](README.md) | 中文版 + +MCPHub 通过将多个 MCP(Model Context Protocol)服务器组织为灵活的流式 HTTP(SSE)端点,简化了管理与扩展工作。系统支持按需访问全部服务器、单个服务器或按场景分组的服务器集合。 + +![控制面板预览](assets/dashboard.zh.png) + +## 🚀 功能亮点 + +- **广泛的 MCP 服务器支持**:无缝集成任何 MCP 服务器,配置简单。 +- **集中式管理控制台**:在一个简洁的 Web UI 中实时监控所有服务器的状态和性能指标。 +- **灵活的协议兼容**:完全支持 stdio 和 SSE 两种 MCP 协议。 +- **热插拔式配置**:在运行时动态添加、移除或更新服务器配置,无需停机。 +- **基于分组的访问控制**:自定义分组并管理服务器访问权限。 +- **安全认证机制**:内置用户管理,基于 JWT 和 bcrypt,实现角色权限控制。 +- **Docker 就绪**:提供容器化镜像,快速部署。 + +## 🔧 快速开始 + +### 配置 + +通过创建 `mcp_settings.json` 自定义服务器设置: + +```json +{ + "mcpServers": { + "amap": { + "command": "npx", + "args": ["-y", "@amap/amap-maps-mcp-server"], + "env": { + "AMAP_MAPS_API_KEY": "your-api-key" + } + }, + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"] + }, + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"] + }, + "slack": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-slack"], + "env": { + "SLACK_BOT_TOKEN": "your-bot-token", + "SLACK_TEAM_ID": "your-team-id" + } + } + } +} +``` + 
+### Docker 部署 + +**推荐**:挂载自定义配置: + +```bash +docker run -p 3000:3000 -v $(pwd)/mcp_settings.json:/app/mcp_settings.json samanhappy/mcphub +``` + +或使用默认配置运行: + +```bash +docker run -p 3000:3000 samanhappy/mcphub +``` + +### 访问控制台 + +打开 `http://localhost:3000`,使用您的账号登录。 + +> **提示**:默认用户名/密码为 `admin` / `admin123`。 + +**控制台功能**: + +- 实时监控所有 MCP 服务器状态 +- 启用/禁用或重新配置服务器 +- 分组管理,组织服务器访问 +- 用户管理,设定权限 + +### 支持流式的 HTTP 端点 + +> 截至目前,各家 AI 客户端对流式的 HTTP 端点支持不一,如果遇到问题,可以使用 SSE 端点或者等待更新。 + +通过以下地址连接 AI 客户端(如 Claude Desktop、Cursor、DeepChat 等): + +``` +http://localhost:3000/mcp +``` + +这个端点为所有 MCP 服务器提供统一的流式 HTTP 接口。它允许您: + +- 向任何配置的 MCP 服务器发送请求 +- 实时接收响应 +- 轻松与各种 AI 客户端和工具集成 +- 对所有服务器使用相同的端点,简化集成过程 + +**智能路由(实验性功能)**: + +智能路由是 MCPHub 的智能工具发现系统,使用向量语义搜索自动为任何给定任务找到最相关的工具。 + +``` +http://localhost:3000/mcp/$smart +``` + +**工作原理:** + +1. **工具索引**:所有 MCP 工具自动转换为向量嵌入并存储在 PostgreSQL 与 pgvector 中 +2. **语义搜索**:用户查询转换为向量并使用余弦相似度与工具嵌入匹配 +3. **智能筛选**:动态阈值确保相关结果且无噪声 +4. **精确执行**:找到的工具可以直接执行并进行适当的参数验证 + +**设置要求:** + +![智能路由](assets/smart-routing.zh.png) + +为了启用智能路由,您需要: + +- 支持 pgvector 扩展的 PostgreSQL +- OpenAI API 密钥(或兼容的嵌入服务) +- 在 MCPHub 设置中启用智能路由 + +**基于分组的 HTTP 端点(推荐)**: +![分组](assets/group.zh.png) +要针对特定服务器分组进行访问,请使用基于分组的 HTTP 端点: + +``` +http://localhost:3000/mcp/{group} +``` + +其中 `{group}` 是您在控制面板中创建的分组 ID 或名称。这样做可以: + +- 连接到按用例组织的特定 MCP 服务器子集 +- 隔离不同的 AI 工具,使其只能访问相关服务器 +- 为不同环境或团队实现更精细的访问控制 +- 通过分组名称轻松识别和管理服务器 +- 允许不同的 AI 客户端使用相同的端点,简化集成过程 + +**针对特定服务器的 HTTP 端点**: +要针对特定服务器进行访问,请使用以下格式: + +``` +http://localhost:3000/mcp/{server} +``` + +其中 `{server}` 是您要连接的服务器名称。这样做可以直接访问特定的 MCP 服务器。 + +> **提示**:如果服务器名称和分组名称相同,则分组名称优先。 + +### SSE 端点集成 (未来可能废弃) + +通过以下地址连接 AI 客户端(如 Claude Desktop、Cursor、DeepChat 等): + +``` +http://localhost:3000/sse +``` + +要启用智能路由,请使用: + +``` +http://localhost:3000/sse/$smart +``` + +要针对特定服务器分组进行访问,请使用基于分组的 SSE 端点: + +``` +http://localhost:3000/sse/{group} +``` + +要针对特定服务器进行访问,请使用以下格式: + +``` +http://localhost:3000/sse/{server} +``` + +## 🧑‍💻 本地开发 + +```bash +git 
clone https://github.com/samanhappy/mcphub.git +cd mcphub +pnpm install +pnpm dev +``` + +此命令将在开发模式下启动前后端,并启用热重载。 + +> 针对 Windows 用户,可能需要分别启动后端服务器和前端:`pnpm backend:dev`,`pnpm frontend:dev`。 + +## 🛠️ 常见问题 + +### 使用 nginx 反向代理 + +如果您在使用 nginx 反向代理 MCPHub,请确保在 nginx 配置中添加以下内容: + +```nginx +proxy_buffering off +``` + +## 🔍 技术栈 + +- **后端**:Node.js、Express、TypeScript +- **前端**:React、Vite、Tailwind CSS +- **认证**:JWT & bcrypt +- **协议**:Model Context Protocol SDK + +## 👥 贡献指南 + +期待您的贡献! + +- 新功能与优化 +- 文档完善 +- Bug 报告与修复 +- 翻译与建议 + +欢迎加入企微交流共建群,由于群人数限制,有兴趣的同学可以扫码添加管理员为好友后拉入群聊。 + + + +如果觉得项目有帮助,不妨请我喝杯咖啡 ☕️ + + + +## 致谢 + +感谢以下人员的赞赏:小白、琛。你们的支持是我继续前进的动力! + +## 🌟 Star 历史趋势 + +[![Star History Chart](https://api.star-history.com/svg?repos=samanhappy/mcphub&type=Date)](https://www.star-history.com/#samanhappy/mcphub&Date) + +## 📄 许可证 + +本项目采用 [Apache 2.0 许可证](LICENSE)。 diff --git a/articals/assets/sr-conf.png b/articals/assets/sr-conf.png new file mode 100644 index 0000000000000000000000000000000000000000..421006dcb9ae2119dabfb0ff0272528bdcd662ad Binary files /dev/null and b/articals/assets/sr-conf.png differ diff --git a/articals/assets/sr-dc.png b/articals/assets/sr-dc.png new file mode 100644 index 0000000000000000000000000000000000000000..bc970b7d0d1efd74f1bf92f9cc506df8091821fc Binary files /dev/null and b/articals/assets/sr-dc.png differ diff --git a/articals/assets/sr-map-call.png b/articals/assets/sr-map-call.png new file mode 100644 index 0000000000000000000000000000000000000000..1e5ed849c9001449916b3509066966d62f1275d8 Binary files /dev/null and b/articals/assets/sr-map-call.png differ diff --git a/articals/assets/sr-map-result.png b/articals/assets/sr-map-result.png new file mode 100644 index 0000000000000000000000000000000000000000..fd08e4544658ad74c8fda763df998c8fb73e86c2 Binary files /dev/null and b/articals/assets/sr-map-result.png differ diff --git a/articals/assets/sr-map-search.png b/articals/assets/sr-map-search.png new file mode 100644 index 
0000000000000000000000000000000000000000..96929bd308fa06e744da9278c6ddf1907406263e Binary files /dev/null and b/articals/assets/sr-map-search.png differ diff --git a/articals/assets/sr-servers.png b/articals/assets/sr-servers.png new file mode 100644 index 0000000000000000000000000000000000000000..66830ad6fca69e5744cdf586abb658d4cc685db3 Binary files /dev/null and b/articals/assets/sr-servers.png differ diff --git a/articals/assets/sr-time.png b/articals/assets/sr-time.png new file mode 100644 index 0000000000000000000000000000000000000000..d744b45662c67d2eb6e15b5be8dbc7051878a764 Binary files /dev/null and b/articals/assets/sr-time.png differ diff --git a/articals/assets/sr-tools.png b/articals/assets/sr-tools.png new file mode 100644 index 0000000000000000000000000000000000000000..06619ed56c25f0270c17ae64a12348c9afdca530 Binary files /dev/null and b/articals/assets/sr-tools.png differ diff --git a/articals/assets/sr-web.png b/articals/assets/sr-web.png new file mode 100644 index 0000000000000000000000000000000000000000..bd2bb159b0792cdf7d3ab9f33a58be4eb24f0168 Binary files /dev/null and b/articals/assets/sr-web.png differ diff --git a/articals/intro.md b/articals/intro.md new file mode 100644 index 0000000000000000000000000000000000000000..f5c6e236175d29ffa786a4e8c5af207f5a8fad18 --- /dev/null +++ b/articals/intro.md @@ -0,0 +1,73 @@ +# 如何一键部署你的专属 MCP 服务 + +随着 MCP 逐渐成为行业事实标准,如何高效搭建和管理多个 MCP 服务已成为个人开发者面临的主要挑战。本文将介绍一种简便的解决方案,帮助您快速构建自己的 MCP 服务。 + +## 什么是 MCP? 
+ +模型上下文协议(Model Context Protocol,MCP)是由 Anthropic 推出的开放标准,旨在为大型语言模型(LLMs)提供标准化接口,使其能够直接连接外部数据源和工具。简言之,MCP 如同 AI 应用的 USB-C 接口,统一解决了数据孤岛和定制化集成的问题。 + +通过 MCP,AI 模型不仅可以实时获取最新信息,还能调用外部工具完成各类任务,实现跨平台、跨数据源的无缝交互,大幅提升 AI 应用的实用性和灵活性。 + +## 当下的 MCP 生态 + +尽管 MCP 的标准化接口为 AI 应用开发提供了便利,但在实际应用中,如何快速搭建和高效管理多个 MCP 服务仍然是一个不小的挑战。MCPHub 正是为解决这一痛点而诞生,它提供了集中管理和动态配置的解决方案,让个人开发者能够轻松应对多样化的需求,无需深入了解每个服务的具体实现细节。 + +## 一键部署,轻松满足个人需求 + +对于个人开发者而言,繁琐的部署流程常常成为创新的绊脚石。MCPHub 的最大亮点在于其"一键部署"功能: + +- **极简部署**:只需一条 Docker 命令,即可在几分钟内启动完整的 MCPHub 服务,快速搭建专属 MCP 服务平台,满足个人项目或实验室环境的各种需求。 + +- **动态扩展**:在使用过程中,您可以随时通过 Web 仪表盘添加、移除或调整 MCP 服务器配置,无需重启整个系统。这种灵活性不仅适用于个人开发测试,也为未来功能扩展提供了无限可能。 + +- **标准化接口**:基于 MCP 标准,您的服务可以无缝对接各种 AI 工具,无论是 Claude Desktop、Cursor 还是其他定制化应用,都能通过统一接口调用外部数据或执行工具操作,实现真正的多源协同工作流程。 + +## 快速上手指南 + +下面,我们将以一个实例演示如何使用 MCPHub 快速搭建基于高德地图 MCP 服务的行程规划助手。 + +### 使用 Docker 部署 + +执行以下命令,即可在本地快速启动 MCPHub 服务: + +```bash +docker run -p 3000:3000 samanhappy/mcphub +``` + +### 访问仪表盘 + +MCPHub 已内置多个常用 MCP 服务,如高德地图、GitHub、Slack、Fetch、Tavily、Playwright 等,开箱即可使用。在浏览器中打开 `http://localhost:3000`,直观的仪表盘将实时显示各个 MCP 服务器的状态,让您轻松管理和监控服务运行情况。 + +![仪表盘预览](../assets/dashboard.png) + +可以看到这些 MCP 服务都已成功连接并正常运行。 + +### 配置高德地图 + +由于高德地图的 MCP 服务需要 API Key,我们需要在仪表盘中进行配置。点击 amap-maps 右侧的 Edit 按钮,在弹出窗口的环境变量部分配置高德地图的 API Key。 + +![配置高德地图](../assets/amap-edit.png) + +点击保存后,MCP Hub 将自动重启高德地图的 MCP 服务,使新配置生效。 + +### 配置 MCP Hub SSE + +MCP Hub 提供了单一聚合的 MCP Server SSE 端点:`http://localhost:3000/sse`,可在任意支持 MCP 的客户端中配置使用。这里我们选择开源的 Cherry Studio 进行演示。 + +![配置 Cherry Studio](../assets/cherry-mcp.png) + +配置成功后,可用工具列表中将显示所有高德 MCP 服务支持的工具功能。 + +### 使用高德地图 MCP 服务 + +现在,我们可以在 Cherry Studio 中使用高德地图的 MCP 服务了。选择智源的 Qwen2.5-7B-Instruct 模型,并确保启用 MCP Server 开关,然后输入:"我明天要从南京去上海旅游,晚上想住在外滩附近,帮我规划一下交通和酒店行程",点击发送按钮。 + +![高德地图行程规划](../assets/amap-result.png) + +可以看到,Cherry Studio 在回答过程中调用了高德地图 MCP 服务的多个工具,包括坐标解析、路线规划、周边搜索等,从而实现了一个更强大的行程规划助手。 + +## 结语 + +MCPHub 的一键部署和动态配置功能,使个人开发者能够轻松搭建和管理多个 MCP 服务,极大地提升了开发效率和应用灵活性。无论是个人项目还是实验室环境,MCPHub 都能提供高效、便捷的解决方案。 + 
+随着 MCP 生态的不断扩展,我们将持续增加更多服务和功能,为开发者提供更加丰富的工具集。MCPHub 完全开源,采用 MIT 许可证,项目地址 [https://github.com/samanhappy/mcphub](https://github.com/samanhappy/mcphub),期待您的体验与反馈,共同推动 MCP 生态的繁荣发展! \ No newline at end of file diff --git a/articals/intro2.md b/articals/intro2.md new file mode 100644 index 0000000000000000000000000000000000000000..8d30db8c114f7b984b21b8abaad8f6f9dd7088a7 --- /dev/null +++ b/articals/intro2.md @@ -0,0 +1,232 @@ +# 本地部署、一键安装、分组路由:MCPHub 重塑 MCP 服务器体验 + +## 概述 + +现代 AI 应用场景中,将大模型(LLM)与各种数据源和工具无缝对接,往往需要手动编写大量胶水代码,并且无法快速复用​。MCP(Model Context Protocol)协议由 Anthropic 在 2024 年开源,旨在提供类似“USB‑C”接口般的标准化通信方式,简化 AI 助手与内容仓库、业务系统等的集成流程​。然而,MCP 服务器部署常常需要大量环境依赖、手动配置及持续运行,开发者常因安装和配置耗费大量时间和精力​。MCPHub 作为一款开源的一站式聚合平台,通过直观的 Web UI、Docker 镜像和热插拔配置,实现本地或容器里的“一键安装”与“分组路由”,大幅降低 MCP 服务器的使用门槛和运维成本​。 + +## MCPHub 是什么 + +### MCP 协议简介 + +Model Context Protocol(MCP)是一种开放标准,类似“USB‑C”接口,为 AI 助手与内容仓库、业务系统和第三方服务之间提供统一通信协议。它支持 stdio 与 SSE(最新协议中被 Streamable HTTP 取代)两种通信方式,既能满足实时流式数据交换,也可用于批量任务。2024 年由 Anthropic 团队开源发布后,MCP 已在各类 AI 客户端(如 Claude Desktop)中得到应用,成功实现与 GitHub、Slack、网页自动化工具等的无缝对接。 + +### MCPHub 项目概览 + +MCPHub 是一个统一的 MCP 服务器聚合平台,内置 MCP 服务器市场实现一键安装。前端基于 React、Vite 和 Tailwind CSS 构建,后端兼容任意使用 npx 或 uvx 命令启动的 MCP 服务器。它通过一个集中式 Dashboard 实时展示各服务器的运行状态,并支持在运行时热插拔增删改服务器配置,无需停机维护。支持分组式访问控制,可以通过独立的 SSE 端点访问不同的 MCP 服务器组合,管理员可灵活定义不同团队或环境的权限策略。官方提供 Docker 镜像,仅需一条命令即可快速启动本地或云端服务。 + +![MCPHub 控制面板](../assets/dashboard.zh.png) + +## 为什么要使用 MCPHub + +### 1. 复杂的环境依赖与配置 + +- MCP 服务器常依赖 Node.js、Python 等多种运行时,需手动维护大量命令、参数和环境变量。 +- MCPHub 内置 MCP 服务器市场,包含众多常用 MCP 服务器,支持一键安装和自动配置,简化了环境搭建过程。 +- 通过 Docker 部署,MCPHub 可在任何支持 Docker 的平台上运行,避免了环境不一致的问题。 + +![MCPHub 市场](../assets/market.zh.png) + +### 2. 持续运行的服务压力 + +- MCP 要求长连接服务常驻内存,重启或升级时需要人工干预,缺乏弹性。 +- 借助 Docker 容器化部署,MCPHub 可快速重建环境,享受容器带来的弹性与隔离优势。 + +### 3. 
路由与分组管理缺乏统一视图 + +- 传统方式下,很难可视化地将不同 MCP 服务按场景分类,容易造成 token 浪费和工具选择精度下降。 +- MCPHub 支持动态创建分组(如“地图检索”、“网页自动化”、“聊天”等),为每个分组生成独立的 SSE 端点,实现各类用例的隔离与优化。 + +![MCPHub 分组](../assets/group.zh.png) + +## 如何使用 MCPHub + +### 快速部署 + +```bash +docker run -p 3000:3000 samanhappy/mcphub +``` + +一条命令就可以在本地快速启动 MCPHub,默认监听 3000 端口。 + +MCPHub 使用`mcp_settings.json`保存所有服务器、分组和用户的配置。你可以创建一个 `mcp_settings.json` 文件,并将其挂载到 Docker 容器中,以便在重启时保留配置。 + +```json +{ + "mcpServers": { + "amap": { + "command": "npx", + "args": ["-y", "@amap/amap-maps-mcp-server"], + "env": { + "AMAP_MAPS_API_KEY": "your-api-key" + } + }, + "time-mcp": { + "command": "npx", + "args": [ + "-y", + "time-mcp" + ] + }, + "sequential-thinking": { + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-sequential-thinking" + ] + } + } +} +``` + +然后挂载配置文件启动: + +```bash +docker run -p 3000:3000 -v $(pwd)/mcp_settings.json:/app/mcp_settings.json samanhappy/mcphub +``` + +> 注意:首次运行时,MCPHub 会自动下载并安装所需的依赖包,可能需要一些时间。 + +### 访问控制台 + +启动后访问 `http://localhost:3000` 即可进入控制台。 + +> 默认登录用户名和密码为 `admin`/`admin123`,登录后可以修改密码以确保安全。 + +控制台提供了服务器管理、分组管理和市场管理等功能,你可以在这里查看所有已安装的 MCP 服务器、创建新的分组、添加或删除服务器等。 + +### 分组路由 & SSE 端点 + +#### 全局 SSE 端点 + +``` +http://localhost:3000/sse +``` + +通过全局 SSE 端点可以访问所有已启用的 MCP 服务器。 + +#### 基于分组的 SSE 端点 + +除了全局 SSE 端点,MCPHub 还支持基于分组的 SSE 端点。你可以为每个分组创建独立的 SSE 端点,以便更好地管理和路由请求。 +分组的 SSE 端点格式如下: + +``` +http://localhost:3000/sse/{groupId} +``` + +其中 `{groupId}` 是分组的唯一标识符,可以从控制台获取。比如我在上面的截图中创建了一个名为 `map` 的分组,选择了 `amap` 和 `sequential-thinking` 两个服务器,那么可以通过以下 URL 访问这个分组的 SSE 端点: + +``` +http://localhost:3000/sse/a800bef7-c4c1-4460-9557-5f4404cdd0bd +``` + +#### 使用分组路由 + +创建了路由分组后,我们就可以在任意支持 MCP 的客户端中使用这个分组的 SSE 端点了。比如在 `Cursor` 中,我们可以在配置文件中指定分组的 SSE 端点: + +```json +{ + "mcpServers": { + "mcphub": { + "url": "http://localhost:3000/sse/a800bef7-c4c1-4460-9557-5f4404cdd0bd" + } + } +} +``` + +配置完成后,可以从 `Cursor` 中看到所有可用的 MCP 服务器工具列表。 + +![Cursor 中的 MCP 配置](../assets/cursor-mcp.png) + 
+然后,我们可以测试一下,比如输入:深度思考一下,帮我制定一个五一假期从南京出发的自驾行出游计划,要求避开拥堵路线,结合天气情况,并且可以体验到不同的自然风光。 + +![Cursor 中的测试输入](../assets/cursor-query.png) + +接着可以看到,`Cursor` 在运行过程中调用了多个工具。 + +![Cursor 中的工具调用](../assets/cursor-tools.png) + +最终生成结果如下: + +```markdown +基于对路线和天气的全面分析,我为你制定了一个四天的五一自驾游行程方案: + +### 第一天(4月29日):南京→黄山 +- 出发时间:建议早上8点出发 +- 行驶路线:G42沪蓉高速 → S11巢黄高速 +- 行驶时间:约4.5小时(282公里) +- 住宿地点:黄山风景区附近 +- 特别提醒: + * 路线多隧道,注意安全 + * 天气较好,适合出行 + * 建议提前订好黄山景区附近酒店 + +### 第二天(4月30日):黄山→婺源 +- 游览时间:上午游览黄山景区 +- 下午出发前往婺源 +- 行驶路线:G3京台高速 → G56杭瑞高速 +- 行驶时间:约2.5小时(139公里) +- 住宿地点:婺源县城或特色古村落 +- 特别提醒: + * 注意天气可能有阵雨 + * 建议游览篁岭、李坑等著名古村落 + +### 第三天(5月1日):婺源→景德镇 +- 上午游览婺源景点 +- 中午后出发前往景德镇 +- 行驶路线:G56杭瑞高速 +- 行驶时间:约1.5小时(77公里) +- 住宿地点:景德镇市区 +- 特别提醒: + * 可能遇到阵雨天气 + * 建议参观陶瓷博物馆和古窑址 + +### 第四天(5月2日):景德镇→南京 +- 建议上午游览景德镇景点 +- 中午后出发返回南京 +- 行驶路线:G35济广高速 → G4211宁芜高速 +- 行驶时间:约5小时(429公里) +- 特别提醒: + * 路程较长,建议做好休息安排 + * 注意途中加油站分布 + * 建议错峰返程,避免拥堵 + +### 整体建议: +1. 避堵建议: + - 选择错峰出行时间 + - 提前查看实时路况 + - 准备备选绕行路线 + +2. 安全建议: + - 全程多雨,注意雨天行车安全 + - 保持车距,谨慎驾驶 + - 途经隧道较多,注意灯光使用 + +3. 行程准备: + - 提前预订住宿 + - 准备雨具和应急用品 + - 建议下载离线地图 + - 检查车况,特别是雨刮和轮胎 + +4. 
景点安排: + - 黄山:建议游览迎客松、光明顶等 + - 婺源:篁岭、李坑、江岭等 + - 景德镇:陶瓷博物馆、古窑民俗博览区 + +这条路线可以让你体验到: +- 黄山的壮丽山景 +- 婺源的田园风光 +- 景德镇的人文历史 +既避开了主要拥堵路段,又能欣赏到不同的自然风光。 +``` + +可以看到,`Cursor` 通过调用 `amap` 和 `sequential-thinking` 两个服务器,成功生成了一个五一假期的自驾游行程方案,并且避开了拥堵路线,结合了天气情况。但是细心的同学可能发现,计划中的开始时间是 4 月 29 日,而今年的五一假期是 5 月 1 日开始的,产生偏差的原因是 `sequential-thinking` 使用了错误的假期时间。如何解决这个问题呢?我们可以尝试在分组中添加支持搜索的 MCP 服务器,这样就可以在查询时自动纠正错误的假期时间了,具体就不在这里展开了。 + +## 结语 + +MCPHub 将本地部署、一键安装、分组路由和可视化管理融为一体,以简洁而强大的设计,彻底解决了 MCP 服务器的部署、配置与运维难题。无论是追求快速验证的开发者,还是需要稳定可靠 AI 工具链的企业用户,都能通过 MCPHub 专注于核心业务与创新,而无需被底层细节所困扰。 + +尽管目前各家平台都在陆续推出各类 MCP 云服务,但在数据隐私、合规性和定制化需求日益增长的背景下,MCPHub 仍然是一个值得关注的本地部署解决方案​。 + +MCPHub 只是我一时兴起开发的小项目,没想到竟收获了这么多关注,非常感谢大家的支持!目前 MCPHub 还有不少地方需要优化和完善,我也专门建了个交流群,方便大家交流反馈。如果你也对这个项目感兴趣,欢迎一起参与建设!项目地址为:https://github.com/samanhappy/mcphub。 + +![企业微信交流群](../assets/wegroup.jpg) diff --git a/articals/smart-routing.md b/articals/smart-routing.md new file mode 100644 index 0000000000000000000000000000000000000000..9613fa1e1824fe2425741b61e8e95d215c7cdc8e --- /dev/null +++ b/articals/smart-routing.md @@ -0,0 +1,177 @@ +# 无限工具,智能路由:MCPHub 引领 AI 工具使用新范式 + +## 概述 + +在现代 AI 应用中,随着 MCP 服务器数量的快速增长和工具种类的不断丰富,如何从数百个可用工具中快速定位最适合当前任务的工具,成为开发者和 AI 助手面临的一项重要挑战。 + +传统做法要么将所有工具暴露给 AI 助手处理,导致 token 消耗巨大、响应延迟严重;要么依赖开发者手动分组配置,灵活性和智能性不足。 + +MCPHub 推出的智能路由功能,基于向量语义搜索技术,实现了自然语言驱动的工具发现与精准推荐。它让 AI 助手能够像人类专家一样,根据任务描述自动选择最优工具组合,大幅提升了系统效率和用户体验。 + +## 什么是智能路由 + +### 技术原理 + +智能路由是 MCPHub 的核心功能之一。它将每个 MCP 工具的名称和描述嵌入为高维语义向量。当用户发起自然语言任务请求时,系统会将该请求也转换为向量,通过计算相似度,快速返回最相关的工具列表。 + +这一过程摒弃了传统的关键词匹配,具备更强的语义理解能力,能够处理自然语言的模糊性和多样性。 + +### 核心组件 + +- **向量嵌入引擎**:支持如 `text-embedding-3-small`、`bge-m3` 等主流模型,将文本描述转为语义向量。 +- **PostgreSQL + pgvector**:使用开源向量数据库方案,支持高效的向量索引和搜索。 +- **两步工作流分离**: + - `search_tools`:负责语义工具发现 + - `call_tool`:执行实际工具调用逻辑 + +## 为什么需要智能路由 + +### 1. 减少认知负荷 + +- 当工具数量超过 100 个,AI 模型难以处理所有工具上下文。 +- 智能路由通过语义压缩,将候选工具缩小至 5~10 个,提高决策效率。 + +### 2. 显著降低 token 消耗 + +- 传统做法传入全量工具描述,可能消耗上千 token。 +- 使用智能路由,通常可将 token 使用降低 70~90%。 + +### 3. 
提升调用准确率
+
+- 理解任务语义:如“图片增强”→选择图像处理工具,而不是依赖命名关键词。
+- 上下文感知:考虑输入/输出格式和工具组合能力,匹配更合理的执行链路。
+
+## 智能路由配置指南
+
+### 1. 启动支持 `pgvector` 的 PostgreSQL 数据库
+
+```bash
+docker run --name mcphub-postgres \
+  -e POSTGRES_DB=mcphub \
+  -e POSTGRES_USER=mcphub \
+  -e POSTGRES_PASSWORD=your_password \
+  -p 5432:5432 \
+  -d pgvector/pgvector:pg17
+```
+
+如已部署 PostgreSQL,可直接创建数据库并启用 `pgvector` 扩展:
+
+```sql
+CREATE DATABASE mcphub;
+CREATE EXTENSION vector;
+```
+
+### 2. 获取 embedding 模型的 API Key
+
+前往 OpenAI 或其他提供商获取嵌入模型的 API Key。国内用户推荐使用硅基流动 `bge-m3` 免费模型,没有注册过的用户可以使用我的邀请链接:[https://cloud.siliconflow.cn/i/TQhVYBvA](https://cloud.siliconflow.cn/i/TQhVYBvA)。
+
+### 3. 控制台配置
+
+![配置](./assets/sr-conf.png)
+
+在 MCPHub 控制台中,进入「智能路由」配置页面,填写以下信息:
+
+- **数据库 URL**:`postgresql://mcphub:your_password@localhost:5432/mcphub`
+- **OpenAI API Key**:填写你获取的嵌入模型 API Key
+- **OpenAI 基础 URL**:`https://api.siliconflow.cn/v1`
+- **嵌入模型**:推荐使用 `BAAI/bge-m3`
+
+开启「启用智能路由」后系统会自动:
+
+- 对所有工具生成向量嵌入
+- 构建向量索引
+- 自动监听新增工具,更新索引
+
+## 工具定义
+
+### search_tools - 工具搜索
+
+```ts
+{
+  "name": "search_tools",
+  "arguments": {
+    "query": "help me resize and convert images",
+    "limit": 10
+  }
+}
+```
+
+### call_tool - 工具执行
+
+```ts
+{
+  "name": "call_tool",
+  "arguments": {
+    "toolName": "image_resizer",
+    "arguments": {
+      "input_path": "/path/to/image.png",
+      "width": 800,
+      "height": 600
+    }
+  }
+}
+```
+
+## 演示
+
+下面我将通过几个示例来展示如何使用智能路由。
+
+首先,我们需要在 mcphub 添加几个不同类型的 MCP 服务器:`amap`、`time-mcp`、`fetch`。
+
+![添加服务器](./assets/sr-servers.png)
+
+然后我们需要选择一个支持 MCP 的客户端,这里选择国产的 DeepChat,聊天模型选择 `Qwen3-14B`。
+
+接着,在 DeepChat 中添加 mcphub 的智能路由端点:
+
+![添加智能路由](./assets/sr-dc.png)
+
+添加成功后,就可以在工具中看到 `search_tools` 和 `call_tool` 两个工具了:
+
+![工具列表](./assets/sr-tools.png)
+
+### 示例 1:地图导航
+
+输入:从北京如何导航到上海。
+
+结果:
+
+![地图导航](./assets/sr-map-result.png)
+
+可以看到,DeepChat 先调用了 `search_tools` 工具:
+
+![搜索工具](./assets/sr-map-search.png)
+
+然后再调用 `call_tool` 查询具体的导航信息:
+
+![调用工具](./assets/sr-map-call.png)
+
+### 示例 2:查询时间
+
+输入:使用工具查询美国现在的时间是几点 + +结果: + +![查询时间](./assets/sr-time.png) + +需要说明的是,由于不同的模型对工具调用的支持程度不同,可能会出现一些差异。比如在这个例子中,为了提高准确性,我在输入中明确提到了“使用工具”。 + +### 示例 3:查看网页 + +输入:打开 baidu.com 看看有什么 + +结果: + +![查看网页](./assets/sr-web.png) + +可以看到,DeepChat 成功调用了工具,不过由于百度的 robots.txt 限制,无法获取到具体内容。 + +## 结语 + +借助 MCPHub 的智能路由功能,AI 助手能够更高效地处理复杂任务,显著减少不必要的 token 消耗,同时提升工具调用的准确性与灵活性。作为面向未来的 AI 工具发现与调用基础设施,智能路由不仅使 AI 更聪明地选择和组合工具,还为多 Agent 协同提供了清晰、可控且可扩展的底层能力支撑。 + +> MCPHub 只是我一时兴起开发的小项目,没想到收获了这么多关注,非常感谢大家的支持!目前 MCPHub 还有不少地方需要优化和完善,我也专门建了个交流群,感兴趣的可以添加下面的微信。 + +![微信](../assets/wexin.png) + +> 同时,欢迎大家一起参与建设!项目地址为:[https://github.com/samanhappy/mcphub](https://github.com/samanhappy/mcphub)。 diff --git a/assets/amap-edit.png b/assets/amap-edit.png new file mode 100644 index 0000000000000000000000000000000000000000..c09afcd7512e67902141bdcc4609166fcf2329c8 Binary files /dev/null and b/assets/amap-edit.png differ diff --git a/assets/amap-result.png b/assets/amap-result.png new file mode 100644 index 0000000000000000000000000000000000000000..3aeb11cd3ee1479df58fc7e0e588926cef0e046d Binary files /dev/null and b/assets/amap-result.png differ diff --git a/assets/cherry-mcp.png b/assets/cherry-mcp.png new file mode 100644 index 0000000000000000000000000000000000000000..7bbba909d552a69a96a4bcd439959aba27bf7d84 Binary files /dev/null and b/assets/cherry-mcp.png differ diff --git a/assets/cursor-mcp.png b/assets/cursor-mcp.png new file mode 100644 index 0000000000000000000000000000000000000000..ce6c8ec9ebca25d9f47853dd775e73774d900e5d Binary files /dev/null and b/assets/cursor-mcp.png differ diff --git a/assets/cursor-query.png b/assets/cursor-query.png new file mode 100644 index 0000000000000000000000000000000000000000..dc36bdf5457d05e7b41eead5c38298dfb86ef3e1 Binary files /dev/null and b/assets/cursor-query.png differ diff --git a/assets/cursor-tools.png b/assets/cursor-tools.png new file mode 100644 index 0000000000000000000000000000000000000000..8ca4d8044a506d933259676f9c4a899fd18db9af Binary files 
/dev/null and b/assets/cursor-tools.png differ diff --git a/assets/dashboard.png b/assets/dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..ecc71f39b0d7c4b2ce6c90af0d6585c35989b121 Binary files /dev/null and b/assets/dashboard.png differ diff --git a/assets/dashboard.zh.png b/assets/dashboard.zh.png new file mode 100644 index 0000000000000000000000000000000000000000..9b7824196435be8443055a6bba18a9e2fc813b11 Binary files /dev/null and b/assets/dashboard.zh.png differ diff --git a/assets/group.png b/assets/group.png new file mode 100644 index 0000000000000000000000000000000000000000..f2e3c0e9db34208cf2aa4c6df1fb5507c495843a Binary files /dev/null and b/assets/group.png differ diff --git a/assets/group.zh.png b/assets/group.zh.png new file mode 100644 index 0000000000000000000000000000000000000000..5720ede9060f0e69414dff4a8a67476716ff7b42 Binary files /dev/null and b/assets/group.zh.png differ diff --git a/assets/market.zh.png b/assets/market.zh.png new file mode 100644 index 0000000000000000000000000000000000000000..3f3137e10b169b37a277c03577cdfee5ab6eecb8 Binary files /dev/null and b/assets/market.zh.png differ diff --git a/assets/reward.png b/assets/reward.png new file mode 100644 index 0000000000000000000000000000000000000000..dafae0267b16b00758af018ae68f13e92dcfcac2 Binary files /dev/null and b/assets/reward.png differ diff --git a/assets/smart-routing.png b/assets/smart-routing.png new file mode 100644 index 0000000000000000000000000000000000000000..561c05f63ef66dcf603219a696844b4d56fa3221 Binary files /dev/null and b/assets/smart-routing.png differ diff --git a/assets/smart-routing.zh.png b/assets/smart-routing.zh.png new file mode 100644 index 0000000000000000000000000000000000000000..95b4a5f3557c40e8a6ce9a6d5a347d103f0b1ac2 Binary files /dev/null and b/assets/smart-routing.zh.png differ diff --git a/assets/wegroup.jpg b/assets/wegroup.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..40dd587b1e39624d2a278918a57866f8d56f2dee Binary files /dev/null and b/assets/wegroup.jpg differ diff --git a/assets/wegroup.png b/assets/wegroup.png new file mode 100644 index 0000000000000000000000000000000000000000..35be46018dbef3ea6b85eaefd6965e5ed85859af Binary files /dev/null and b/assets/wegroup.png differ diff --git a/assets/wexin.png b/assets/wexin.png new file mode 100644 index 0000000000000000000000000000000000000000..b72f4c59e10ae7d2809af2cd780cee3069c4917d Binary files /dev/null and b/assets/wexin.png differ diff --git a/bin/cli.js b/bin/cli.js new file mode 100644 index 0000000000000000000000000000000000000000..cf704d927702279dc5f8e87908c4d965bb7213bc --- /dev/null +++ b/bin/cli.js @@ -0,0 +1,96 @@ +#!/usr/bin/env node + +import path from 'path'; +import { fileURLToPath } from 'url'; +import { execSync } from 'child_process'; +import fs from 'fs'; + +// Enable debug logging if needed +// process.env.DEBUG = 'true'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +// Start with more debug information +console.log('📋 MCPHub CLI'); +console.log(`📁 CLI script location: ${__dirname}`); + +// The npm package directory structure when installed is: +// node_modules/@samanhappy/mcphub/ +// - dist/ +// - bin/ +// - frontend/dist/ + +// Get the package root - this is where package.json is located +function findPackageRoot() { + const isDebug = process.env.DEBUG === 'true'; + + // Possible locations for package.json + const possibleRoots = [ + // Standard npm package location + path.resolve(__dirname, '..'), + // When installed via npx + path.resolve(__dirname, '..', '..', '..') + ]; + + // Special handling for npx + if (process.argv[1] && process.argv[1].includes('_npx')) { + const npxDir = path.dirname(process.argv[1]); + possibleRoots.unshift(path.resolve(npxDir, '..')); + } + + if (isDebug) { + console.log('DEBUG: Checking for package.json in:', possibleRoots); + } + 
+ for (const root of possibleRoots) { + const packageJsonPath = path.join(root, 'package.json'); + if (fs.existsSync(packageJsonPath)) { + try { + const pkg = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8')); + if (pkg.name === 'mcphub' || pkg.name === '@samanhappy/mcphub') { + if (isDebug) { + console.log(`DEBUG: Found package.json at ${packageJsonPath}`); + } + return root; + } + } catch (e) { + // Continue to the next potential root + } + } + } + + console.log('⚠️ Could not find package.json, using default path'); + return path.resolve(__dirname, '..'); +} + +// Locate and check the frontend distribution +function checkFrontend(packageRoot) { + const isDebug = process.env.DEBUG === 'true'; + const frontendDistPath = path.join(packageRoot, 'frontend', 'dist'); + + if (isDebug) { + console.log(`DEBUG: Checking frontend at: ${frontendDistPath}`); + } + + if (fs.existsSync(frontendDistPath) && fs.existsSync(path.join(frontendDistPath, 'index.html'))) { + console.log('✅ Frontend distribution found'); + return true; + } else { + console.log('⚠️ Frontend distribution not found at', frontendDistPath); + return false; + } +} + +const projectRoot = findPackageRoot(); +console.log(`📦 Using package root: ${projectRoot}`); + +// Check if frontend exists +checkFrontend(projectRoot); + +// Start the server +console.log('🚀 Starting MCPHub server...'); +import(path.join(projectRoot, 'dist', 'index.js')).catch(err => { + console.error('Failed to start MCPHub:', err); + process.exit(1); +}); \ No newline at end of file diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a0e7716975ac7261d39a74e88dfc34eb6a3ce838 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,32 @@ +# Mintlify Starter Kit + +Click on `Use this template` to copy the Mintlify starter kit. 
The starter kit contains examples including + +- Guide pages +- Navigation +- Customizations +- API Reference pages +- Use of popular components + +### Development + +Install the [Mintlify CLI](https://www.npmjs.com/package/mintlify) to preview the documentation changes locally. To install, use the following command + +``` +npm i -g mintlify +``` + +Run the following command at the root of your documentation (where docs.json is) + +``` +mintlify dev +``` + +### Publishing Changes + +Install our Github App to auto propagate changes from your repo to your deployment. Changes will be deployed to production automatically after pushing to the default branch. Find the link to install on your dashboard. + +#### Troubleshooting + +- Mintlify dev isn't running - Run `mintlify install` it'll re-install dependencies. +- Page loads as a 404 - Make sure you are running in a folder with `docs.json` diff --git a/docs/api-reference/endpoint/create.mdx b/docs/api-reference/endpoint/create.mdx new file mode 100644 index 0000000000000000000000000000000000000000..5689f1b659555c57cd7832b52c94acf4695bb180 --- /dev/null +++ b/docs/api-reference/endpoint/create.mdx @@ -0,0 +1,4 @@ +--- +title: 'Create Plant' +openapi: 'POST /plants' +--- diff --git a/docs/api-reference/endpoint/delete.mdx b/docs/api-reference/endpoint/delete.mdx new file mode 100644 index 0000000000000000000000000000000000000000..657dfc871c8195fca167dde0e5b6cb1af1f551cd --- /dev/null +++ b/docs/api-reference/endpoint/delete.mdx @@ -0,0 +1,4 @@ +--- +title: 'Delete Plant' +openapi: 'DELETE /plants/{id}' +--- diff --git a/docs/api-reference/endpoint/get.mdx b/docs/api-reference/endpoint/get.mdx new file mode 100644 index 0000000000000000000000000000000000000000..56aa09ec1f6ee49424c640574b948d37da653e85 --- /dev/null +++ b/docs/api-reference/endpoint/get.mdx @@ -0,0 +1,4 @@ +--- +title: 'Get Plants' +openapi: 'GET /plants' +--- diff --git a/docs/api-reference/endpoint/webhook.mdx b/docs/api-reference/endpoint/webhook.mdx new 
file mode 100644 index 0000000000000000000000000000000000000000..329134020e1e9c3e0b135d9d8df87bc7b0df42e0 --- /dev/null +++ b/docs/api-reference/endpoint/webhook.mdx @@ -0,0 +1,4 @@ +--- +title: 'New Plant' +openapi: 'WEBHOOK /plant/webhook' +--- diff --git a/docs/api-reference/introduction.mdx b/docs/api-reference/introduction.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c835b78b549235aaae94fccc2f3a06db7ae74091 --- /dev/null +++ b/docs/api-reference/introduction.mdx @@ -0,0 +1,33 @@ +--- +title: 'Introduction' +description: 'Example section for showcasing API endpoints' +--- + + + If you're not looking to build API reference documentation, you can delete + this section by removing the api-reference folder. + + +## Welcome + +There are two ways to build API documentation: [OpenAPI](https://mintlify.com/docs/api-playground/openapi/setup) and [MDX components](https://mintlify.com/docs/api-playground/mdx/configuration). For the starter kit, we are using the following OpenAPI specification. + + + View the OpenAPI specification file + + +## Authentication + +All API endpoints are authenticated using Bearer tokens and picked up from the specification file. 
+ +```json +"security": [ + { + "bearerAuth": [] + } +] +``` diff --git a/docs/api-reference/openapi.json b/docs/api-reference/openapi.json new file mode 100644 index 0000000000000000000000000000000000000000..da5326efc4f97428f74312d3fc94dbcebbed8416 --- /dev/null +++ b/docs/api-reference/openapi.json @@ -0,0 +1,217 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "OpenAPI Plant Store", + "description": "A sample API that uses a plant store as an example to demonstrate features in the OpenAPI specification", + "license": { + "name": "MIT" + }, + "version": "1.0.0" + }, + "servers": [ + { + "url": "http://sandbox.mintlify.com" + } + ], + "security": [ + { + "bearerAuth": [] + } + ], + "paths": { + "/plants": { + "get": { + "description": "Returns all plants from the system that the user has access to", + "parameters": [ + { + "name": "limit", + "in": "query", + "description": "The maximum number of results to return", + "schema": { + "type": "integer", + "format": "int32" + } + } + ], + "responses": { + "200": { + "description": "Plant response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Plant" + } + } + } + } + }, + "400": { + "description": "Unexpected error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + } + } + } + } + } + }, + "post": { + "description": "Creates a new plant in the store", + "requestBody": { + "description": "Plant to add to the store", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NewPlant" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "plant response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Plant" + } + } + } + }, + "400": { + "description": "unexpected error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + } + } + } + } + } + } + }, + "/plants/{id}": { + 
"delete": { + "description": "Deletes a single plant based on the ID supplied", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of plant to delete", + "required": true, + "schema": { + "type": "integer", + "format": "int64" + } + } + ], + "responses": { + "204": { + "description": "Plant deleted", + "content": {} + }, + "400": { + "description": "unexpected error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + } + } + } + } + } + } + } + }, + "webhooks": { + "/plant/webhook": { + "post": { + "description": "Information about a new plant added to the store", + "requestBody": { + "description": "Plant added to the store", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NewPlant" + } + } + } + }, + "responses": { + "200": { + "description": "Return a 200 status to indicate that the data was received successfully" + } + } + } + } + }, + "components": { + "schemas": { + "Plant": { + "required": [ + "name" + ], + "type": "object", + "properties": { + "name": { + "description": "The name of the plant", + "type": "string" + }, + "tag": { + "description": "Tag to specify the type", + "type": "string" + } + } + }, + "NewPlant": { + "allOf": [ + { + "$ref": "#/components/schemas/Plant" + }, + { + "required": [ + "id" + ], + "type": "object", + "properties": { + "id": { + "description": "Identification number of the plant", + "type": "integer", + "format": "int64" + } + } + } + ] + }, + "Error": { + "required": [ + "error", + "message" + ], + "type": "object", + "properties": { + "error": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + }, + "securitySchemes": { + "bearerAuth": { + "type": "http", + "scheme": "bearer" + } + } + } +} \ No newline at end of file diff --git a/docs/configuration/docker-setup.mdx b/docs/configuration/docker-setup.mdx new file mode 100644 index 
0000000000000000000000000000000000000000..f4c51e6714b94a8cb51002ac96c23dbc853b6a22 --- /dev/null +++ b/docs/configuration/docker-setup.mdx @@ -0,0 +1,539 @@ +--- +title: 'Docker Setup' +description: 'Deploy MCPHub using Docker and Docker Compose' +--- + +# Docker Setup + +This guide covers deploying MCPHub using Docker, including development and production configurations. + +## Quick Start with Docker + +### Using Pre-built Image + +```bash +# Pull the latest image +docker pull mcphub/mcphub:latest + +# Run with default configuration +docker run -d \ + --name mcphub \ + -p 3000:3000 \ + -v $(pwd)/mcp_settings.json:/app/mcp_settings.json \ + mcphub/mcphub:latest +``` + +### Building from Source + +```bash +# Clone the repository +git clone https://github.com/your-username/mcphub.git +cd mcphub + +# Build the Docker image +docker build -t mcphub:local . + +# Run the container +docker run -d \ + --name mcphub \ + -p 3000:3000 \ + -v $(pwd)/mcp_settings.json:/app/mcp_settings.json \ + mcphub:local +``` + +## Docker Compose Setup + +### Basic Configuration + +Create a `docker-compose.yml` file: + +```yaml +version: '3.8' + +services: + mcphub: + image: mcphub/mcphub:latest + # For local development, use: + # build: . 
+ container_name: mcphub + ports: + - '3000:3000' + environment: + - NODE_ENV=production + - PORT=3000 + - JWT_SECRET=${JWT_SECRET:-your-jwt-secret} + - DATABASE_URL=postgresql://mcphub:password@postgres:5432/mcphub + volumes: + - ./mcp_settings.json:/app/mcp_settings.json:ro + - ./servers.json:/app/servers.json:ro + - mcphub_data:/app/data + depends_on: + postgres: + condition: service_healthy + restart: unless-stopped + networks: + - mcphub-network + + postgres: + image: postgres:15-alpine + container_name: mcphub-postgres + environment: + - POSTGRES_DB=mcphub + - POSTGRES_USER=mcphub + - POSTGRES_PASSWORD=password + volumes: + - postgres_data:/var/lib/postgresql/data + - ./scripts/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql:ro + ports: + - '5432:5432' + healthcheck: + test: ['CMD-SHELL', 'pg_isready -U mcphub -d mcphub'] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + networks: + - mcphub-network + +volumes: + postgres_data: + mcphub_data: + +networks: + mcphub-network: + driver: bridge +``` + +### Production Configuration with Nginx + +```yaml +version: '3.8' + +services: + nginx: + image: nginx:alpine + container_name: mcphub-nginx + ports: + - '80:80' + - '443:443' + volumes: + - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro + - ./ssl:/etc/nginx/ssl:ro + - nginx_logs:/var/log/nginx + depends_on: + - mcphub + restart: unless-stopped + networks: + - mcphub-network + + mcphub: + image: mcphub/mcphub:latest + container_name: mcphub-app + expose: + - '3000' + environment: + - NODE_ENV=production + - PORT=3000 + - JWT_SECRET=${JWT_SECRET} + - JWT_EXPIRES_IN=${JWT_EXPIRES_IN:-24h} + - DATABASE_URL=postgresql://mcphub:${POSTGRES_PASSWORD}@postgres:5432/mcphub + - OPENAI_API_KEY=${OPENAI_API_KEY} + - REDIS_URL=redis://redis:6379 + volumes: + - ./mcp_settings.json:/app/mcp_settings.json:ro + - ./servers.json:/app/servers.json:ro + - mcphub_data:/app/data + - mcphub_logs:/app/logs + depends_on: + postgres: + condition: service_healthy + 
redis: + condition: service_healthy + restart: unless-stopped + networks: + - mcphub-network + healthcheck: + test: ['CMD', 'wget', '--quiet', '--tries=1', '--spider', 'http://localhost:3000/health'] + interval: 30s + timeout: 10s + retries: 3 + + postgres: + image: postgres:15-alpine + container_name: mcphub-postgres + environment: + - POSTGRES_DB=mcphub + - POSTGRES_USER=mcphub + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + volumes: + - postgres_data:/var/lib/postgresql/data + - ./backups:/backups + healthcheck: + test: ['CMD-SHELL', 'pg_isready -U mcphub -d mcphub'] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + networks: + - mcphub-network + + redis: + image: redis:7-alpine + container_name: mcphub-redis + command: redis-server --appendonly yes --requirepass ${REDIS_PASSWORD} + volumes: + - redis_data:/data + healthcheck: + test: ['CMD', 'redis-cli', 'ping'] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + networks: + - mcphub-network + +volumes: + postgres_data: + redis_data: + mcphub_data: + mcphub_logs: + nginx_logs: + +networks: + mcphub-network: + driver: bridge +``` + +### Environment Variables + +Create a `.env` file for Docker Compose: + +```env +# Application +NODE_ENV=production +JWT_SECRET=your-super-secret-jwt-key-change-this +JWT_EXPIRES_IN=24h + +# Database +POSTGRES_PASSWORD=your-secure-database-password + +# Redis +REDIS_PASSWORD=your-secure-redis-password + +# External APIs +OPENAI_API_KEY=your-openai-api-key + +# Optional: Custom port +# PORT=3000 +``` + +## Development Setup + +### Development Docker Compose + +Create `docker-compose.dev.yml`: + +```yaml +version: '3.8' + +services: + mcphub-dev: + build: + context: . 
+ dockerfile: Dockerfile.dev + container_name: mcphub-dev + ports: + - '3000:3000' + - '5173:5173' # Frontend dev server + - '9229:9229' # Debug port + environment: + - NODE_ENV=development + - PORT=3000 + - DATABASE_URL=postgresql://mcphub:password@postgres:5432/mcphub + volumes: + - .:/app + - /app/node_modules + - /app/frontend/node_modules + depends_on: + - postgres + command: pnpm dev + networks: + - mcphub-dev + + postgres: + image: postgres:15-alpine + container_name: mcphub-postgres-dev + environment: + - POSTGRES_DB=mcphub + - POSTGRES_USER=mcphub + - POSTGRES_PASSWORD=password + ports: + - '5432:5432' + volumes: + - postgres_dev_data:/var/lib/postgresql/data + networks: + - mcphub-dev + +volumes: + postgres_dev_data: + +networks: + mcphub-dev: + driver: bridge +``` + +### Development Dockerfile + +Create `Dockerfile.dev`: + +```dockerfile +FROM node:20-alpine + +# Install pnpm +RUN npm install -g pnpm + +# Set working directory +WORKDIR /app + +# Copy package files +COPY package.json pnpm-lock.yaml ./ +COPY frontend/package.json ./frontend/ + +# Install dependencies +RUN pnpm install + +# Copy source code +COPY . . 
+ +# Expose ports +EXPOSE 3000 5173 9229 + +# Start development server +CMD ["pnpm", "dev"] +``` + +## Running the Application + +### Development Mode + +```bash +# Start development environment +docker-compose -f docker-compose.dev.yml up -d + +# View logs +docker-compose -f docker-compose.dev.yml logs -f mcphub-dev + +# Stop development environment +docker-compose -f docker-compose.dev.yml down +``` + +### Production Mode + +```bash +# Start production environment +docker-compose up -d + +# View logs +docker-compose logs -f mcphub + +# Stop production environment +docker-compose down +``` + +## Configuration Management + +### MCP Settings Volume Mount + +Create your `mcp_settings.json`: + +```json +{ + "mcpServers": { + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"] + }, + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"] + }, + "amap": { + "command": "npx", + "args": ["-y", "@amap/amap-maps-mcp-server"], + "env": { + "AMAP_MAPS_API_KEY": "your-api-key" + } + } + } +} +``` + +### Secrets Management + +For production, use Docker secrets: + +```yaml +version: '3.8' + +services: + mcphub: + image: mcphub/mcphub:latest + environment: + - JWT_SECRET_FILE=/run/secrets/jwt_secret + - DATABASE_PASSWORD_FILE=/run/secrets/db_password + secrets: + - jwt_secret + - db_password + +secrets: + jwt_secret: + file: ./secrets/jwt_secret.txt + db_password: + file: ./secrets/db_password.txt +``` + +## Data Persistence + +### Database Backups + +Add backup service to your `docker-compose.yml`: + +```yaml +services: + backup: + image: postgres:15-alpine + container_name: mcphub-backup + environment: + - PGPASSWORD=${POSTGRES_PASSWORD} + volumes: + - ./backups:/backups + - ./scripts/backup.sh:/backup.sh:ro + command: /bin/sh -c "chmod +x /backup.sh && /backup.sh" + depends_on: + - postgres + profiles: + - backup + networks: + - mcphub-network +``` + +Create `scripts/backup.sh`: + +```bash +#!/bin/sh 
+BACKUP_FILE="/backups/mcphub_$(date +%Y%m%d_%H%M%S).sql" +pg_dump -h postgres -U mcphub -d mcphub > "$BACKUP_FILE" +echo "Backup created: $BACKUP_FILE" + +# Keep only last 7 days of backups +find /backups -name "mcphub_*.sql" -mtime +7 -delete +``` + +Run backup: + +```bash +docker-compose --profile backup run --rm backup +``` + +## Monitoring and Health Checks + +### Health Check Endpoint + +Add to your application: + +```javascript +// In your Express app +app.get('/health', (req, res) => { + res.json({ + status: 'healthy', + timestamp: new Date().toISOString(), + uptime: process.uptime(), + memory: process.memoryUsage(), + version: process.env.npm_package_version, + }); +}); +``` + +### Docker Health Checks + +```yaml +services: + mcphub: + # ... other config + healthcheck: + test: ['CMD', 'wget', '--quiet', '--tries=1', '--spider', 'http://localhost:3000/health'] + interval: 30s + timeout: 10s + retries: 3 + start_period: 60s +``` + +### Monitoring with Watchtower + +Add automatic updates: + +```yaml +services: + watchtower: + image: containrrr/watchtower + container_name: mcphub-watchtower + volumes: + - /var/run/docker.sock:/var/run/docker.sock + environment: + - WATCHTOWER_CLEANUP=true + - WATCHTOWER_POLL_INTERVAL=3600 + - WATCHTOWER_INCLUDE_STOPPED=true + restart: unless-stopped +``` + +## Troubleshooting + +### Common Issues + +**Container fails to start**: Check logs with `docker-compose logs mcphub` + +**Database connection errors**: Ensure PostgreSQL is healthy and accessible + +**Port conflicts**: Check if ports 3000/5432 are already in use + +**Volume mount issues**: Verify file paths and permissions + +### Debug Commands + +```bash +# Check container status +docker-compose ps + +# View logs +docker-compose logs -f [service_name] + +# Execute commands in container +docker-compose exec mcphub sh + +# Check database connection +docker-compose exec postgres psql -U mcphub -d mcphub + +# Restart specific service +docker-compose restart mcphub + +# 
Rebuild and restart +docker-compose up --build -d +``` + +### Performance Optimization + +```yaml +services: + mcphub: + # ... other config + deploy: + resources: + limits: + memory: 512M + cpus: '0.5' + reservations: + memory: 256M + cpus: '0.25' +``` + +This Docker setup provides a complete containerized environment for MCPHub with development and production configurations. diff --git a/docs/configuration/environment-variables.mdx b/docs/configuration/environment-variables.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7235c4d3e8b9120381d1158ba98a01a269d99a27 --- /dev/null +++ b/docs/configuration/environment-variables.mdx @@ -0,0 +1,389 @@ +--- +title: 'Environment Variables' +description: 'Configure MCPHub using environment variables' +--- + +# Environment Variables + +MCPHub uses environment variables for configuration. This guide covers all available variables and their usage. + +## Core Application Settings + +### Server Configuration + +| Variable | Default | Description | +| ----------- | ------------- | ------------------------------------------------------------- | +| `PORT` | `3000` | Port number for the HTTP server | +| `HOST` | `0.0.0.0` | Host address to bind the server | +| `NODE_ENV` | `development` | Application environment (`development`, `production`, `test`) | +| `LOG_LEVEL` | `info` | Logging level (`error`, `warn`, `info`, `debug`) | + +```env +PORT=3000 +HOST=0.0.0.0 +NODE_ENV=production +LOG_LEVEL=info +``` + +### Database Configuration + +| Variable | Default | Description | +| -------------- | ----------- | ---------------------------------- | +| `DATABASE_URL` | - | PostgreSQL connection string | +| `DB_HOST` | `localhost` | Database host | +| `DB_PORT` | `5432` | Database port | +| `DB_NAME` | `mcphub` | Database name | +| `DB_USER` | `mcphub` | Database username | +| `DB_PASSWORD` | - | Database password | +| `DB_SSL` | `false` | Enable SSL for database connection | +| `DB_POOL_MIN` | `2` | Minimum database 
pool size | +| `DB_POOL_MAX` | `10` | Maximum database pool size | + +```env +# Option 1: Full connection string +DATABASE_URL=postgresql://username:password@localhost:5432/mcphub + +# Option 2: Individual components +DB_HOST=localhost +DB_PORT=5432 +DB_NAME=mcphub +DB_USER=mcphub +DB_PASSWORD=your-password +DB_SSL=false +``` + +## Authentication & Security + +### JWT Configuration + +| Variable | Default | Description | +| ------------------------ | ------- | ------------------------------------------- | +| `JWT_SECRET` | - | Secret key for JWT token signing (required) | +| `JWT_EXPIRES_IN` | `24h` | JWT token expiration time | +| `JWT_REFRESH_EXPIRES_IN` | `7d` | Refresh token expiration time | +| `JWT_ALGORITHM` | `HS256` | JWT signing algorithm | + +```env +JWT_SECRET=your-super-secret-key-change-this-in-production +JWT_EXPIRES_IN=24h +JWT_REFRESH_EXPIRES_IN=7d +``` + +### Session & Security + +| Variable | Default | Description | +| ------------------- | ------- | ------------------------------- | +| `SESSION_SECRET` | - | Session encryption secret | +| `BCRYPT_ROUNDS` | `12` | bcrypt hashing rounds | +| `RATE_LIMIT_WINDOW` | `15` | Rate limiting window in minutes | +| `RATE_LIMIT_MAX` | `100` | Maximum requests per window | +| `CORS_ORIGIN` | `*` | Allowed CORS origins | + +```env +SESSION_SECRET=your-session-secret +BCRYPT_ROUNDS=12 +RATE_LIMIT_WINDOW=15 +RATE_LIMIT_MAX=100 +CORS_ORIGIN=https://your-domain.com,https://admin.your-domain.com +``` + +## External Services + +### OpenAI Configuration + +| Variable | Default | Description | +| ------------------------ | ------------------------ | -------------------------------- | +| `OPENAI_API_KEY` | - | OpenAI API key for smart routing | +| `OPENAI_MODEL` | `gpt-3.5-turbo` | OpenAI model for embeddings | +| `OPENAI_EMBEDDING_MODEL` | `text-embedding-ada-002` | Model for vector embeddings | +| `OPENAI_MAX_TOKENS` | `1000` | Maximum tokens per request | +| `OPENAI_TEMPERATURE` | `0.1` | Temperature for AI 
responses | + +```env +OPENAI_API_KEY=sk-your-openai-api-key +OPENAI_MODEL=gpt-3.5-turbo +OPENAI_EMBEDDING_MODEL=text-embedding-ada-002 +OPENAI_MAX_TOKENS=1000 +OPENAI_TEMPERATURE=0.1 +``` + +### Redis Configuration (Optional) + +| Variable | Default | Description | +| ---------------- | ----------- | ----------------------- | +| `REDIS_URL` | - | Redis connection string | +| `REDIS_HOST` | `localhost` | Redis host | +| `REDIS_PORT` | `6379` | Redis port | +| `REDIS_PASSWORD` | - | Redis password | +| `REDIS_DB` | `0` | Redis database number | +| `REDIS_PREFIX` | `mcphub:` | Key prefix for Redis | + +```env +# Option 1: Full connection string +REDIS_URL=redis://username:password@localhost:6379/0 + +# Option 2: Individual components +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_PASSWORD=your-redis-password +REDIS_DB=0 +REDIS_PREFIX=mcphub: +``` + +## MCP Server Configuration + +### Default Settings + +| Variable | Default | Description | +| ------------------- | ------------------- | -------------------------------------------- | +| `MCP_SETTINGS_FILE` | `mcp_settings.json` | Path to MCP settings file | +| `MCP_SERVERS_FILE` | `servers.json` | Path to servers configuration | +| `MCP_TIMEOUT` | `30000` | Default timeout for MCP operations (ms) | +| `MCP_MAX_RETRIES` | `3` | Maximum retry attempts for failed operations | +| `MCP_RESTART_DELAY` | `5000` | Delay before restarting failed servers (ms) | + +```env +MCP_SETTINGS_FILE=./config/mcp_settings.json +MCP_SERVERS_FILE=./config/servers.json +MCP_TIMEOUT=30000 +MCP_MAX_RETRIES=3 +MCP_RESTART_DELAY=5000 +``` + +### Smart Routing + +| Variable | Default | Description | +| --------------------------- | ------- | -------------------------------- | +| `SMART_ROUTING_ENABLED` | `true` | Enable AI-powered smart routing | +| `SMART_ROUTING_THRESHOLD` | `0.7` | Similarity threshold for routing | +| `SMART_ROUTING_MAX_RESULTS` | `5` | Maximum tools to return | +| `VECTOR_CACHE_TTL` | `3600` | Vector cache TTL in seconds | + 
+```env +SMART_ROUTING_ENABLED=true +SMART_ROUTING_THRESHOLD=0.7 +SMART_ROUTING_MAX_RESULTS=5 +VECTOR_CACHE_TTL=3600 +``` + +## File Storage & Uploads + +| Variable | Default | Description | +| -------------------- | ---------------- | ----------------------------------- | +| `UPLOAD_DIR` | `./uploads` | Directory for file uploads | +| `MAX_FILE_SIZE` | `10485760` | Maximum file size in bytes (10MB) | +| `ALLOWED_FILE_TYPES` | `image/*,text/*` | Allowed MIME types | +| `STORAGE_TYPE` | `local` | Storage type (`local`, `s3`, `gcs`) | + +```env +UPLOAD_DIR=./data/uploads +MAX_FILE_SIZE=10485760 +ALLOWED_FILE_TYPES=image/*,text/*,application/json +STORAGE_TYPE=local +``` + +### S3 Storage (Optional) + +| Variable | Default | Description | +| ---------------------- | ----------- | ------------------ | +| `S3_BUCKET` | - | S3 bucket name | +| `S3_REGION` | `us-east-1` | S3 region | +| `S3_ACCESS_KEY_ID` | - | S3 access key | +| `S3_SECRET_ACCESS_KEY` | - | S3 secret key | +| `S3_ENDPOINT` | - | Custom S3 endpoint | + +```env +S3_BUCKET=mcphub-uploads +S3_REGION=us-east-1 +S3_ACCESS_KEY_ID=your-access-key +S3_SECRET_ACCESS_KEY=your-secret-key +``` + +## Monitoring & Logging + +### Application Monitoring + +| Variable | Default | Description | +| ------------------------ | ------- | ----------------------------- | +| `METRICS_ENABLED` | `true` | Enable metrics collection | +| `METRICS_PORT` | `9090` | Port for metrics endpoint | +| `HEALTH_CHECK_INTERVAL` | `30000` | Health check interval (ms) | +| `PERFORMANCE_MONITORING` | `false` | Enable performance monitoring | + +```env +METRICS_ENABLED=true +METRICS_PORT=9090 +HEALTH_CHECK_INTERVAL=30000 +PERFORMANCE_MONITORING=true +``` + +### Logging Configuration + +| Variable | Default | Description | +| ------------------ | ------------ | --------------------------------------- | +| `LOG_FORMAT` | `json` | Log format (`json`, `text`) | +| `LOG_FILE` | - | Log file path (if file logging enabled) | +| `LOG_MAX_SIZE` | `10m` | 
Maximum log file size | +| `LOG_MAX_FILES` | `5` | Maximum number of log files | +| `LOG_DATE_PATTERN` | `YYYY-MM-DD` | Date pattern for log rotation | + +```env +LOG_FORMAT=json +LOG_FILE=./logs/mcphub.log +LOG_MAX_SIZE=10m +LOG_MAX_FILES=5 +LOG_DATE_PATTERN=YYYY-MM-DD +``` + +## Development & Debug + +| Variable | Default | Description | +| ------------------------ | ------- | ----------------------------------- | +| `DEBUG` | - | Debug namespaces (e.g., `mcphub:*`) | +| `DEV_TOOLS_ENABLED` | `false` | Enable development tools | +| `HOT_RELOAD` | `true` | Enable hot reload in development | +| `MOCK_EXTERNAL_SERVICES` | `false` | Mock external API calls | + +```env +DEBUG=mcphub:* +DEV_TOOLS_ENABLED=true +HOT_RELOAD=true +MOCK_EXTERNAL_SERVICES=false +``` + +## Production Optimization + +| Variable | Default | Description | +| ------------------ | ------- | -------------------------------------- | +| `CLUSTER_MODE` | `false` | Enable cluster mode | +| `WORKER_PROCESSES` | `0` | Number of worker processes (0 = auto) | +| `MEMORY_LIMIT` | - | Memory limit per process | +| `CPU_LIMIT` | - | CPU limit per process | +| `GC_OPTIMIZE` | `false` | Enable garbage collection optimization | + +```env +CLUSTER_MODE=true +WORKER_PROCESSES=4 +MEMORY_LIMIT=512M +GC_OPTIMIZE=true +``` + +## Configuration Examples + +### Development Environment + +```env +# .env.development +NODE_ENV=development +PORT=3000 +LOG_LEVEL=debug + +# Database +DATABASE_URL=postgresql://mcphub:password@localhost:5432/mcphub_dev + +# Auth +JWT_SECRET=dev-secret-key +JWT_EXPIRES_IN=24h + +# OpenAI (optional for development) +# OPENAI_API_KEY=your-dev-key + +# Debug +DEBUG=mcphub:* +DEV_TOOLS_ENABLED=true +HOT_RELOAD=true +``` + +### Production Environment + +```env +# .env.production +NODE_ENV=production +PORT=3000 +LOG_LEVEL=info +LOG_FORMAT=json + +# Database +DATABASE_URL=postgresql://mcphub:secure-password@db.example.com:5432/mcphub +DB_SSL=true +DB_POOL_MAX=20 + +# Security 
+JWT_SECRET=your-super-secure-production-secret +SESSION_SECRET=your-session-secret +BCRYPT_ROUNDS=14 + +# External Services +OPENAI_API_KEY=your-production-openai-key +REDIS_URL=redis://redis.example.com:6379 + +# Monitoring +METRICS_ENABLED=true +PERFORMANCE_MONITORING=true + +# Optimization +CLUSTER_MODE=true +GC_OPTIMIZE=true +``` + +### Docker Environment + +```env +# .env.docker +NODE_ENV=production +HOST=0.0.0.0 +PORT=3000 + +# Use service names for Docker networking +DATABASE_URL=postgresql://mcphub:password@postgres:5432/mcphub +REDIS_URL=redis://redis:6379 + +# Security +JWT_SECRET_FILE=/run/secrets/jwt_secret +DB_PASSWORD_FILE=/run/secrets/db_password + +# File paths in container +MCP_SETTINGS_FILE=/app/mcp_settings.json +UPLOAD_DIR=/app/data/uploads +LOG_FILE=/app/logs/mcphub.log +``` + +## Environment Variable Loading + +MCPHub loads environment variables in the following order: + +1. System environment variables +2. `.env.local` (ignored by git) +3. `.env.{NODE_ENV}` (e.g., `.env.production`) +4. `.env` + +### Using dotenv-expand + +MCPHub supports variable expansion: + +```env +BASE_URL=https://api.example.com +API_ENDPOINT=${BASE_URL}/v1 +DATABASE_URL=postgresql://${DB_USER}:${DB_PASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME} +``` + +## Security Best Practices + +1. **Never commit secrets** to version control +2. **Use strong, unique secrets** for production +3. **Rotate secrets regularly** +4. **Use environment-specific files** +5. **Validate all environment variables** at startup +6. **Use Docker secrets** for container deployments + +## Validation + +MCPHub validates environment variables at startup. Invalid configurations will prevent the application from starting with helpful error messages. 
+ +Required variables for production: + +- `JWT_SECRET` +- `DATABASE_URL` or individual DB components +- `OPENAI_API_KEY` (if smart routing is enabled) + +This comprehensive environment configuration ensures MCPHub can be properly configured for any deployment scenario. diff --git a/docs/configuration/mcp-settings.mdx b/docs/configuration/mcp-settings.mdx new file mode 100644 index 0000000000000000000000000000000000000000..f85e6947042d7090ce92a0471d94b087bcd4c3cc --- /dev/null +++ b/docs/configuration/mcp-settings.mdx @@ -0,0 +1,564 @@ +--- +title: 'MCP Settings Configuration' +description: 'Configure MCP servers and their settings for MCPHub' +--- + +# MCP Settings Configuration + +This guide explains how to configure MCP servers in MCPHub using the `mcp_settings.json` file and related configurations. + +## Configuration Files Overview + +MCPHub uses several configuration files: + +- **`mcp_settings.json`**: Main MCP server configurations +- **`servers.json`**: Server metadata and grouping +- **`.env`**: Environment variables and secrets + +## Basic MCP Settings Structure + +### mcp_settings.json + +```json +{ + "mcpServers": { + "server-name": { + "command": "command-to-run", + "args": ["arg1", "arg2"], + "env": { + "ENV_VAR": "value" + }, + "cwd": "/working/directory", + "timeout": 30000, + "restart": true + } + } +} +``` + +### Example Configuration + +```json +{ + "mcpServers": { + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"], + "env": { + "USER_AGENT": "MCPHub/1.0" + } + }, + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"], + "timeout": 60000 + }, + "slack": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-slack"], + "env": { + "SLACK_BOT_TOKEN": "${SLACK_BOT_TOKEN}", + "SLACK_TEAM_ID": "${SLACK_TEAM_ID}" + } + } + } +} +``` + +## Server Configuration Options + +### Required Fields + +| Field | Type | Description | +| --------- | ------ | -------------------------- | +| `command` | 
string | Executable command or path | +| `args` | array | Command-line arguments | + +### Optional Fields + +| Field | Type | Default | Description | +| -------------- | ------- | --------------- | --------------------------- | +| `env` | object | `{}` | Environment variables | +| `cwd` | string | `process.cwd()` | Working directory | +| `timeout` | number | `30000` | Startup timeout (ms) | +| `restart` | boolean | `true` | Auto-restart on failure | +| `maxRestarts` | number | `5` | Maximum restart attempts | +| `restartDelay` | number | `5000` | Delay between restarts (ms) | +| `stdio` | string | `pipe` | stdio configuration | + +## Common MCP Server Examples + +### Web and API Servers + +#### Fetch Server + +```json +{ + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"], + "env": { + "USER_AGENT": "MCPHub/1.0", + "MAX_REDIRECTS": "10" + } + } +} +``` + +#### Web Scraping with Playwright + +```json +{ + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"], + "timeout": 60000, + "env": { + "PLAYWRIGHT_BROWSERS_PATH": "/tmp/browsers" + } + } +} +``` + +### File and System Servers + +#### Filesystem Server + +```json +{ + "filesystem": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/allowed/path"], + "env": { + "ALLOWED_OPERATIONS": "read,write,list" + } + } +} +``` + +#### SQLite Server + +```json +{ + "sqlite": { + "command": "uvx", + "args": ["mcp-server-sqlite", "--db-path", "/path/to/database.db"], + "env": { + "SQLITE_READONLY": "false" + } + } +} +``` + +### Communication Servers + +#### Slack Server + +```json +{ + "slack": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-slack"], + "env": { + "SLACK_BOT_TOKEN": "${SLACK_BOT_TOKEN}", + "SLACK_TEAM_ID": "${SLACK_TEAM_ID}", + "SLACK_APP_TOKEN": "${SLACK_APP_TOKEN}" + } + } +} +``` + +#### Email Server + +```json +{ + "email": { + "command": "python", + "args": ["-m", "mcp_server_email"], + "env": { + 
"SMTP_HOST": "smtp.gmail.com", + "SMTP_PORT": "587", + "EMAIL_USER": "${EMAIL_USER}", + "EMAIL_PASSWORD": "${EMAIL_PASSWORD}" + } + } +} +``` + +### Development and API Servers + +#### GitHub Server + +```json +{ + "github": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-github"], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_TOKEN}" + } + } +} +``` + +#### Google Drive Server + +```json +{ + "gdrive": { + "command": "npx", + "args": ["-y", "@google/mcp-server-gdrive"], + "env": { + "GOOGLE_CLIENT_ID": "${GOOGLE_CLIENT_ID}", + "GOOGLE_CLIENT_SECRET": "${GOOGLE_CLIENT_SECRET}", + "GOOGLE_REFRESH_TOKEN": "${GOOGLE_REFRESH_TOKEN}" + } + } +} +``` + +### Map and Location Services + +#### Amap (高德地图) Server + +```json +{ + "amap": { + "command": "npx", + "args": ["-y", "@amap/amap-maps-mcp-server"], + "env": { + "AMAP_MAPS_API_KEY": "${AMAP_API_KEY}", + "AMAP_LANGUAGE": "zh-cn" + } + } +} +``` + +#### OpenStreetMap Server + +```json +{ + "osm": { + "command": "python", + "args": ["-m", "mcp_server_osm"], + "env": { + "OSM_USER_AGENT": "MCPHub/1.0" + } + } +} +``` + +## Advanced Configuration + +### Environment Variable Substitution + +MCPHub supports environment variable substitution using `${VAR_NAME}` syntax: + +```json +{ + "mcpServers": { + "api-server": { + "command": "python", + "args": ["-m", "api_server"], + "env": { + "API_KEY": "${API_KEY}", + "API_URL": "${API_BASE_URL}/v1", + "DEBUG": "${NODE_ENV:development}" + } + } + } +} +``` + +Default values can be specified with `${VAR_NAME:default}`: + +```json +{ + "timeout": "${MCP_TIMEOUT:30000}", + "maxRestarts": "${MCP_MAX_RESTARTS:5}" +} +``` + +### Conditional Configuration + +Use different configurations based on environment: + +```json +{ + "mcpServers": { + "database": { + "command": "python", + "args": ["-m", "db_server"], + "env": { + "DB_URL": "${NODE_ENV:development == 'production' ? 
DATABASE_URL : DEV_DATABASE_URL}" + } + } + } +} +``` + +### Custom Server Scripts + +#### Local Python Server + +```json +{ + "custom-python": { + "command": "python", + "args": ["./servers/custom_server.py"], + "cwd": "/app/custom-servers", + "env": { + "PYTHONPATH": "/app/custom-servers", + "CONFIG_FILE": "./config.json" + } + } +} +``` + +#### Local Node.js Server + +```json +{ + "custom-node": { + "command": "node", + "args": ["./servers/custom-server.js"], + "cwd": "/app/custom-servers", + "env": { + "NODE_ENV": "production" + } + } +} +``` + +## Server Metadata Configuration + +### servers.json + +Complement `mcp_settings.json` with server metadata: + +```json +{ + "servers": { + "fetch": { + "name": "Fetch Server", + "description": "HTTP client for web requests", + "category": "web", + "tags": ["http", "api", "web"], + "version": "1.0.0", + "author": "MCPHub Team", + "documentation": "https://docs.mcphub.com/servers/fetch", + "enabled": true + }, + "playwright": { + "name": "Playwright Browser", + "description": "Web automation and scraping", + "category": "automation", + "tags": ["browser", "scraping", "automation"], + "version": "2.0.0", + "enabled": true + } + }, + "groups": { + "web-tools": { + "name": "Web Tools", + "description": "Tools for web interaction", + "servers": ["fetch", "playwright"], + "access": "public" + }, + "admin-tools": { + "name": "Admin Tools", + "description": "Administrative utilities", + "servers": ["filesystem", "database"], + "access": "admin" + } + } +} +``` + +## Group Management + +### Group Configuration + +```json +{ + "groups": { + "production": { + "name": "Production Tools", + "description": "Stable production servers", + "servers": ["fetch", "slack", "github"], + "access": "authenticated", + "rateLimit": { + "requestsPerMinute": 100, + "burstLimit": 20 + } + }, + "experimental": { + "name": "Experimental Features", + "description": "Beta and experimental servers", + "servers": ["experimental-ai", "beta-search"], + 
"access": "admin", + "enabled": false + } + } +} +``` + +### Access Control + +| Access Level | Description | +| --------------- | -------------------------- | +| `public` | No authentication required | +| `authenticated` | Valid JWT token required | +| `admin` | Admin role required | +| `custom` | Custom permission logic | + +## Dynamic Configuration + +### Hot Reloading + +MCPHub supports hot reloading of configurations: + +```bash +# Reload configurations without restart +curl -X POST http://localhost:3000/api/admin/reload-config \ + -H "Authorization: Bearer your-admin-token" +``` + +### Configuration Validation + +MCPHub validates configurations on startup and reload: + +```json +{ + "validation": { + "strict": true, + "allowUnknownServers": false, + "requireDocumentation": true + } +} +``` + +## Best Practices + +### Security + +1. **Use environment variables** for sensitive data: + + ```json + { + "env": { + "API_KEY": "${API_KEY}", + "DATABASE_PASSWORD": "${DB_PASSWORD}" + } + } + ``` + +2. **Limit server permissions**: + ```json + { + "filesystem": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/restricted/path"], + "env": { + "READONLY": "true" + } + } + } + ``` + +### Performance + +1. **Set appropriate timeouts**: + + ```json + { + "timeout": 30000, + "maxRestarts": 3, + "restartDelay": 5000 + } + ``` + +2. **Resource limits**: + ```json + { + "env": { + "NODE_OPTIONS": "--max-old-space-size=512", + "MEMORY_LIMIT": "512MB" + } + } + ``` + +### Monitoring + +1. **Enable health checks**: + + ```json + { + "healthCheck": { + "enabled": true, + "interval": 30000, + "timeout": 5000 + } + } + ``` + +2. 
**Logging configuration**: + ```json + { + "env": { + "LOG_LEVEL": "info", + "LOG_FORMAT": "json" + } + } + ``` + +## Troubleshooting + +### Common Issues + +**Server won't start**: Check command and arguments + +```bash +# Test command manually +uvx mcp-server-fetch +``` + +**Environment variables not found**: Verify `.env` file + +```bash +# Check environment +printenv | grep API_KEY +``` + +**Permission errors**: Check file permissions and paths + +```bash +# Verify executable permissions +ls -la /path/to/server +``` + +### Debug Configuration + +Enable debug mode for detailed logging: + +```json +{ + "debug": { + "enabled": true, + "logLevel": "debug", + "includeEnv": false, + "logStartup": true + } +} +``` + +### Validation Errors + +Common validation errors and solutions: + +1. **Missing required fields**: Add `command` and `args` +2. **Invalid timeout**: Use number, not string +3. **Environment variable not found**: Check `.env` file +4. **Command not found**: Verify installation and PATH + +This comprehensive guide covers all aspects of configuring MCP servers in MCPHub for various use cases and environments. diff --git a/docs/configuration/nginx.mdx b/docs/configuration/nginx.mdx new file mode 100644 index 0000000000000000000000000000000000000000..599da1268a70fc15b20da2b4be64bc28121c210e --- /dev/null +++ b/docs/configuration/nginx.mdx @@ -0,0 +1,373 @@ +--- +title: 'Nginx Configuration' +description: 'Configure Nginx as a reverse proxy for MCPHub' +--- + +# Nginx Configuration + +This guide explains how to configure Nginx as a reverse proxy for MCPHub, including SSL termination, load balancing, and caching strategies. 
+ +## Basic Reverse Proxy Setup + +### Configuration File + +Create or update your Nginx configuration file (`/etc/nginx/sites-available/mcphub`): + +```nginx +server { + listen 80; + server_name your-domain.com; + + # Redirect HTTP to HTTPS + return 301 https://$server_name$request_uri; +} + +server { + listen 443 ssl http2; + server_name your-domain.com; + + # SSL Configuration + ssl_certificate /path/to/your/certificate.crt; + ssl_certificate_key /path/to/your/private.key; + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384; + ssl_prefer_server_ciphers off; + + # Security Headers + add_header X-Frame-Options DENY; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always; + + # Gzip Compression + gzip on; + gzip_vary on; + gzip_min_length 1024; + gzip_types + text/plain + text/css + text/xml + text/javascript + application/json + application/javascript + application/xml+rss + application/atom+xml + image/svg+xml; + + # Main application + location / { + proxy_pass http://127.0.0.1:3000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + proxy_read_timeout 86400; + } + + # API endpoints with longer timeout for MCP operations + location /api/ { + proxy_pass http://127.0.0.1:3000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + proxy_read_timeout 
300; + proxy_connect_timeout 60; + proxy_send_timeout 60; + } + + # Static assets caching + location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ { + proxy_pass http://127.0.0.1:3000; + proxy_cache_valid 200 1d; + proxy_cache_valid 404 1m; + add_header Cache-Control "public, immutable"; + expires 1y; + } +} +``` + +### Enable the Configuration + +```bash +# Create symbolic link to enable the site +sudo ln -s /etc/nginx/sites-available/mcphub /etc/nginx/sites-enabled/ + +# Test configuration +sudo nginx -t + +# Reload Nginx +sudo systemctl reload nginx +``` + +## Load Balancing Configuration + +For high-availability setups with multiple MCPHub instances: + +```nginx +upstream mcphub_backend { + least_conn; + server 127.0.0.1:3000 weight=1 max_fails=3 fail_timeout=30s; + server 127.0.0.1:3001 weight=1 max_fails=3 fail_timeout=30s; + server 127.0.0.1:3002 weight=1 max_fails=3 fail_timeout=30s; + + # Health check (Nginx Plus feature) + # health_check interval=5s fails=3 passes=2; +} + +server { + listen 443 ssl http2; + server_name your-domain.com; + + # SSL and other configurations... + + location / { + proxy_pass http://mcphub_backend; + # Other proxy settings... 
+ } +} +``` + +## Caching Configuration + +### Browser Caching + +```nginx +# Cache static assets +location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ { + proxy_pass http://127.0.0.1:3000; + expires 1y; + add_header Cache-Control "public, immutable"; +} + +# Cache API responses (be careful with dynamic content) +location /api/public/ { + proxy_pass http://127.0.0.1:3000; + proxy_cache mcphub_cache; + proxy_cache_valid 200 5m; + proxy_cache_key "$scheme$request_method$host$request_uri"; + add_header X-Cache-Status $upstream_cache_status; +} +``` + +### Nginx Proxy Cache + +Add to the `http` block in `nginx.conf`: + +```nginx +http { + # Proxy cache configuration + proxy_cache_path /var/cache/nginx/mcphub + levels=1:2 + keys_zone=mcphub_cache:10m + max_size=1g + inactive=60m + use_temp_path=off; + + # Other configurations... +} +``` + +## WebSocket Support + +For real-time features and SSE (Server-Sent Events): + +```nginx +location /api/stream { + proxy_pass http://127.0.0.1:3000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Disable buffering for real-time responses + proxy_buffering off; + proxy_cache off; + + # Timeouts for long-lived connections + proxy_read_timeout 24h; + proxy_send_timeout 24h; +} +``` + +## Security Configuration + +### Rate Limiting + +```nginx +http { + # Define rate limiting zones + limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s; + limit_req_zone $binary_remote_addr zone=login:10m rate=1r/s; + + server { + # Apply rate limiting to API endpoints + location /api/ { + limit_req zone=api burst=20 nodelay; + # Other configurations... 
+ } + + # Strict rate limiting for login endpoints + location /api/auth/login { + limit_req zone=login burst=5; + # Other configurations... + } + } +} +``` + +### IP Whitelisting + +```nginx +# Allow specific IPs for admin endpoints +location /api/admin/ { + allow 192.168.1.0/24; + allow 10.0.0.0/8; + deny all; + + proxy_pass http://127.0.0.1:3000; + # Other proxy settings... +} +``` + +## Monitoring and Logging + +### Access Logs + +```nginx +http { + # Custom log format + log_format mcphub_format '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent" ' + '$request_time $upstream_response_time'; + + server { + # Enable access logging + access_log /var/log/nginx/mcphub_access.log mcphub_format; + error_log /var/log/nginx/mcphub_error.log; + + # Other configurations... + } +} +``` + +### Status Page + +```nginx +location /nginx_status { + stub_status; + allow 127.0.0.1; + deny all; +} +``` + +## Docker Integration + +When running MCPHub in Docker, update the proxy configuration: + +```nginx +upstream mcphub_docker { + server mcphub:3000; # Docker service name +} + +server { + location / { + proxy_pass http://mcphub_docker; + # Other proxy settings... 
+ } +} +``` + +## Complete Example Configuration + +Here's a production-ready example using the provided `nginx.conf.example`: + +```bash +# Copy the example configuration +cp nginx.conf.example /etc/nginx/sites-available/mcphub + +# Update the configuration with your domain and paths +sudo nano /etc/nginx/sites-available/mcphub + +# Enable the site +sudo ln -s /etc/nginx/sites-available/mcphub /etc/nginx/sites-enabled/ + +# Test and reload +sudo nginx -t && sudo systemctl reload nginx +``` + +## Troubleshooting + +### Common Issues + +**502 Bad Gateway**: Check if MCPHub is running and accessible + +**504 Gateway Timeout**: Increase `proxy_read_timeout` for long-running operations + +**WebSocket connection failures**: Ensure proper `Upgrade` and `Connection` headers + +**Cache issues**: Clear proxy cache or disable for development + +### Debug Commands + +```bash +# Test Nginx configuration +sudo nginx -t + +# Check Nginx status +sudo systemctl status nginx + +# View error logs +sudo tail -f /var/log/nginx/error.log + +# Check if MCPHub is responding +curl -I http://localhost:3000 +``` + +## Performance Optimization + +### Worker Processes + +```nginx +# In nginx.conf +worker_processes auto; +worker_connections 1024; +``` + +### Buffer Sizes + +```nginx +proxy_buffering on; +proxy_buffer_size 128k; +proxy_buffers 4 256k; +proxy_busy_buffers_size 256k; +``` + +### Keep-Alive + +```nginx +upstream mcphub_backend { + server 127.0.0.1:3000; + keepalive 32; +} + +location / { + proxy_pass http://mcphub_backend; + proxy_http_version 1.1; + proxy_set_header Connection ""; +} +``` + +This configuration provides a solid foundation for running MCPHub behind Nginx with proper security, performance, and reliability features. 
diff --git a/docs/development.mdx b/docs/development.mdx new file mode 100644 index 0000000000000000000000000000000000000000..fc3fec155f188f0dba9fd45265439594918be8f6 --- /dev/null +++ b/docs/development.mdx @@ -0,0 +1,107 @@ +--- +title: 'Development' +description: 'Preview changes locally to update your docs' +--- + + + **Prerequisite**: Please install Node.js (version 19 or higher) before proceeding.
+ Please upgrade to `docs.json` before proceeding and delete the legacy `mint.json` file. +
+ +Follow these steps to install and run Mintlify on your operating system: + +**Step 1**: Install Mintlify: + + + + ```bash npm + npm i -g mintlify + ``` + +```bash yarn +yarn global add mintlify +``` + + + +**Step 2**: Navigate to the docs directory (where the `docs.json` file is located) and execute the following command: + +```bash +mintlify dev +``` + +A local preview of your documentation will be available at `http://localhost:3000`. + +### Custom Ports + +By default, Mintlify uses port 3000. You can customize the port Mintlify runs on by using the `--port` flag. To run Mintlify on port 3333, for instance, use this command: + +```bash +mintlify dev --port 3333 +``` + +If you attempt to run Mintlify on a port that's already in use, it will use the next available port: + +```md +Port 3000 is already in use. Trying 3001 instead. +``` + +## Mintlify Versions + +Please note that each CLI release is associated with a specific version of Mintlify. If your local website doesn't align with the production version, please update the CLI: + + + +```bash npm +npm i -g mintlify@latest +``` + +```bash yarn +yarn global upgrade mintlify +``` + + + +## Validating Links + +The CLI can assist with validating reference links made in your documentation. To identify any broken links, use the following command: + +```bash +mintlify broken-links +``` + +## Deployment + + + Unlimited editors available under the [Pro + Plan](https://mintlify.com/pricing) and above. + + +If the deployment is successful, you should see the following: + + + + + +## Code Formatting + +We suggest using extensions on your IDE to recognize and format MDX. If you're a VSCode user, consider the [MDX VSCode extension](https://marketplace.visualstudio.com/items?itemName=unifiedjs.vscode-mdx) for syntax highlighting, and [Prettier](https://marketplace.visualstudio.com/items?itemName=esbenp.prettier-vscode) for code formatting. + +## Troubleshooting + + + + + This may be due to an outdated version of node. 
Try the following: + 1. Remove the currently-installed version of mintlify: `npm remove -g mintlify` + 2. Upgrade to Node v19 or higher. + 3. Reinstall mintlify: `npm install -g mintlify` + + + + + Solution: Go to the root of your device and delete the \~/.mintlify folder. Afterwards, run `mintlify dev` again. + + + +Curious about what changed in the CLI version? [Check out the CLI changelog.](https://www.npmjs.com/package/mintlify?activeTab=versions) diff --git a/docs/development/architecture.mdx b/docs/development/architecture.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d588b91dad730d03d356fa8a9b10be76d4287cda --- /dev/null +++ b/docs/development/architecture.mdx @@ -0,0 +1,724 @@ +--- +title: 'Architecture Overview' +description: "Understand MCPHub's system architecture and design principles" +--- + +## System Overview + +MCPHub is designed as a scalable, modular platform for managing Model Context Protocol (MCP) servers. The architecture follows modern web application patterns with clear separation of concerns, microservices-ready design, and extensibility in mind. 
+ +## High-Level Architecture + +```mermaid +graph TB + subgraph "Client Layer" + WEB[Web Dashboard] + API[External APIs] + CLI[CLI Tools] + end + + subgraph "Application Layer" + LB[Load Balancer/Nginx] + APP[MCPHub Server] + WS[WebSocket Server] + end + + subgraph "Service Layer" + MCP[MCP Service] + AUTH[Auth Service] + ROUTE[Smart Routing] + MON[Monitoring Service] + end + + subgraph "Data Layer" + PG[(PostgreSQL)] + REDIS[(Redis)] + VECTOR[(Vector Store)] + end + + subgraph "MCP Servers" + GITHUB[GitHub MCP] + FS[Filesystem MCP] + DB[Database MCP] + CUSTOM[Custom MCP] + end + + WEB --> LB + API --> LB + CLI --> LB + LB --> APP + APP --> WS + APP --> MCP + APP --> AUTH + APP --> ROUTE + APP --> MON + + MCP --> GITHUB + MCP --> FS + MCP --> DB + MCP --> CUSTOM + + AUTH --> PG + AUTH --> REDIS + ROUTE --> VECTOR + MON --> PG + MON --> REDIS +``` + +## Core Components + +### 1. Application Server + +The main Node.js/Express application that handles all HTTP requests and coordinates between services. + +```typescript +// src/server.ts - Main application entry point +class MCPHubServer { + private app: Express; + private httpServer: Server; + private wsServer: WebSocketServer; + + async start(): Promise { + await this.initializeDatabase(); + await this.initializeServices(); + await this.setupRoutes(); + await this.startServer(); + } +} +``` + +**Key Responsibilities:** + +- HTTP request handling +- WebSocket connections for real-time features +- Service coordination +- Middleware chain management +- Error handling and logging + +### 2. MCP Service Layer + +Manages the lifecycle and communication with MCP servers. 
+ +```typescript +// src/services/mcpService.ts +class MCPService { + private servers: Map = new Map(); + private processManager: ProcessManager; + + async startServer(config: MCPServerConfig): Promise { + const instance = await this.processManager.spawn(config); + this.servers.set(config.name, instance); + await this.waitForHealthy(instance); + } + + async executeRequest(serverName: string, request: MCPRequest): Promise { + const server = this.servers.get(serverName); + return await server.sendRequest(request); + } +} +``` + +**Key Features:** + +- Process lifecycle management +- Health monitoring +- Request routing +- Error recovery +- Resource management + +### 3. Smart Routing Engine + +Provides AI-powered tool discovery and routing using vector embeddings. + +```typescript +// src/services/smartRouting.ts +class SmartRoutingService { + private vectorStore: VectorStore; + private embeddingService: EmbeddingService; + + async findRelevantTools(query: string): Promise { + const queryEmbedding = await this.embeddingService.embed(query); + const matches = await this.vectorStore.similaritySearch(queryEmbedding); + return this.rankResults(matches, query); + } + + async indexTool(tool: ToolDefinition): Promise { + const embedding = await this.embeddingService.embed(tool.description); + await this.vectorStore.upsert(tool.id, embedding, tool); + } +} +``` + +**Components:** + +- Vector embedding generation +- Similarity search +- Result ranking and filtering +- Tool metadata management + +### 4. Authentication & Authorization + +Handles user authentication, session management, and access control. 
+ +```typescript +// src/services/authService.ts +class AuthService { + async authenticate(credentials: Credentials): Promise { + const user = await this.validateCredentials(credentials); + const token = await this.generateJWT(user); + await this.createSession(user, token); + return user; + } + + async authorize(user: User, resource: string, action: string): Promise { + const permissions = await this.getUserPermissions(user); + return this.checkPermission(permissions, resource, action); + } +} +``` + +**Features:** + +- JWT-based authentication +- Role-based access control (RBAC) +- Session management +- API key authentication +- Group-based permissions + +### 5. Monitoring & Logging + +Provides comprehensive monitoring, metrics collection, and logging. + +```typescript +// src/services/monitoringService.ts +class MonitoringService { + private metricsCollector: MetricsCollector; + private alertManager: AlertManager; + + async collectMetrics(): Promise { + const systemMetrics = await this.getSystemMetrics(); + const serverMetrics = await this.getMCPServerMetrics(); + await this.metricsCollector.record(systemMetrics, serverMetrics); + await this.checkAlerts(systemMetrics, serverMetrics); + } +} +``` + +**Capabilities:** + +- Real-time metrics collection +- Performance monitoring +- Error tracking +- Alert management +- Audit logging + +## Data Architecture + +### Database Schema + +```sql +-- Core entities +CREATE TABLE users ( + id UUID PRIMARY KEY, + username VARCHAR UNIQUE NOT NULL, + email VARCHAR UNIQUE NOT NULL, + password_hash VARCHAR NOT NULL, + role VARCHAR NOT NULL DEFAULT 'user', + created_at TIMESTAMP DEFAULT NOW() +); + +CREATE TABLE servers ( + id UUID PRIMARY KEY, + name VARCHAR UNIQUE NOT NULL, + command VARCHAR NOT NULL, + args JSONB NOT NULL DEFAULT '[]', + env JSONB DEFAULT '{}', + group_name VARCHAR, + status VARCHAR DEFAULT 'stopped', + config JSONB DEFAULT '{}', + created_at TIMESTAMP DEFAULT NOW(), + updated_at TIMESTAMP DEFAULT NOW() +); + 
+CREATE TABLE groups ( + id UUID PRIMARY KEY, + name VARCHAR UNIQUE NOT NULL, + description TEXT, + config JSONB DEFAULT '{}', + created_at TIMESTAMP DEFAULT NOW() +); + +-- Vector search for smart routing +CREATE TABLE tool_embeddings ( + id UUID PRIMARY KEY, + server_name VARCHAR NOT NULL, + tool_name VARCHAR NOT NULL, + description TEXT, + embedding vector(1536), + metadata JSONB DEFAULT '{}', + created_at TIMESTAMP DEFAULT NOW(), + updated_at TIMESTAMP DEFAULT NOW() +); + +-- Monitoring and logging +CREATE TABLE request_logs ( + id UUID PRIMARY KEY, + user_id UUID REFERENCES users(id), + server_name VARCHAR NOT NULL, + tool_name VARCHAR, + request_data JSONB, + response_data JSONB, + status VARCHAR NOT NULL, + duration_ms INTEGER, + created_at TIMESTAMP DEFAULT NOW() +); +``` + +### Caching Strategy + +```typescript +// src/services/cacheService.ts +class CacheService { + // Multi-layer caching strategy + private memoryCache: Map = new Map(); + private redisCache: Redis; + + async get(key: string): Promise { + // L1: Memory cache + const memoryEntry = this.memoryCache.get(key); + if (memoryEntry && !this.isExpired(memoryEntry)) { + return memoryEntry.value; + } + + // L2: Redis cache + const redisValue = await this.redisCache.get(key); + if (redisValue) { + const value = JSON.parse(redisValue); + this.memoryCache.set(key, { value, expiry: Date.now() + 60000 }); + return value; + } + + return null; + } +} +``` + +**Cache Layers:** + +- **L1 (Memory)**: Fast access for frequently used data +- **L2 (Redis)**: Shared cache across instances +- **L3 (Database)**: Persistent storage with query optimization + +## Communication Patterns + +### Request Flow + +```mermaid +sequenceDiagram + participant Client + participant API + participant Auth + participant Router + participant MCP + participant Server + + Client->>API: HTTP Request + API->>Auth: Validate Token + Auth-->>API: User Context + API->>Router: Route Request + Router->>Router: Find Target Server + 
Router->>MCP: Execute Request + MCP->>Server: MCP Protocol + Server-->>MCP: MCP Response + MCP-->>Router: Formatted Response + Router-->>API: Response Data + API-->>Client: HTTP Response +``` + +### WebSocket Communication + +```typescript +// src/services/websocketService.ts +class WebSocketService { + private connections: Map = new Map(); + + handleConnection(ws: WebSocket, userId: string): void { + this.connections.set(userId, ws); + + ws.on('message', async (data) => { + const message = JSON.parse(data.toString()); + await this.handleMessage(userId, message); + }); + + ws.on('close', () => { + this.connections.delete(userId); + }); + } + + broadcast(event: string, data: any): void { + this.connections.forEach((ws) => { + ws.send(JSON.stringify({ event, data })); + }); + } +} +``` + +### Event-Driven Architecture + +```typescript +// src/events/eventBus.ts +class EventBus { + private listeners: Map = new Map(); + + emit(event: string, data: any): void { + const handlers = this.listeners.get(event) || []; + handlers.forEach((handler) => handler(data)); + } + + on(event: string, handler: EventListener): void { + const handlers = this.listeners.get(event) || []; + handlers.push(handler); + this.listeners.set(event, handlers); + } +} + +// Usage +eventBus.on('server.started', (data) => { + logger.info(`Server ${data.name} started`); + monitoringService.updateServerStatus(data.name, 'running'); +}); +``` + +## Security Architecture + +### Authentication Flow + +```mermaid +graph LR + A[Client] --> B[Login Request] + B --> C[Validate Credentials] + C --> D[Generate JWT] + D --> E[Create Session] + E --> F[Return Token] + F --> G[Store in Cookie/Header] + G --> H[Subsequent Requests] + H --> I[Validate Token] + I --> J[Check Permissions] + J --> K[Allow/Deny Access] +``` + +### Authorization Model + +```typescript +// Role-Based Access Control (RBAC) +interface Permission { + resource: string; // e.g., 'servers', 'groups', 'users' + action: string; // e.g., 'create', 
'read', 'update', 'delete' + scope?: string; // e.g., 'own', 'group', 'all' +} + +interface Role { + name: string; + permissions: Permission[]; +} + +const roles: Role[] = [ + { + name: 'admin', + permissions: [{ resource: '*', action: '*', scope: 'all' }], + }, + { + name: 'manager', + permissions: [ + { resource: 'servers', action: '*', scope: 'group' }, + { resource: 'groups', action: 'read', scope: 'all' }, + ], + }, + { + name: 'user', + permissions: [ + { resource: 'servers', action: 'read', scope: 'group' }, + { resource: 'tools', action: 'execute', scope: 'group' }, + ], + }, +]; +``` + +## Scalability Considerations + +### Horizontal Scaling + +```yaml +# Kubernetes deployment for scaling +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mcphub +spec: + replicas: 3 + selector: + matchLabels: + app: mcphub + template: + spec: + containers: + - name: mcphub + image: mcphub:latest + resources: + requests: + memory: '256Mi' + cpu: '200m' + limits: + memory: '512Mi' + cpu: '500m' +``` + +### Load Balancing Strategy + +```typescript +// src/services/loadBalancer.ts +class LoadBalancer { + private servers: ServerInstance[] = []; + private algorithm: 'round-robin' | 'least-connections' | 'weighted'; + + selectServer(): ServerInstance { + switch (this.algorithm) { + case 'round-robin': + return this.roundRobin(); + case 'least-connections': + return this.leastConnections(); + case 'weighted': + return this.weighted(); + } + } +} +``` + +### Database Scaling + +```typescript +// Database connection management +class DatabaseManager { + private readPool: Pool; // Read replicas + private writePool: Pool; // Primary database + + async query(sql: string, params: any[]): Promise { + if (this.isReadOperation(sql)) { + return this.readPool.query(sql, params); + } else { + return this.writePool.query(sql, params); + } + } +} +``` + +## Performance Optimization + +### Query Optimization + +```sql +-- Optimized queries with proper indexing +CREATE INDEX CONCURRENTLY 
idx_servers_status_group +ON servers(status, group_name) +WHERE status IN ('running', 'starting'); + +CREATE INDEX CONCURRENTLY idx_tool_embeddings_similarity +ON tool_embeddings USING ivfflat (embedding vector_cosine_ops) +WITH (lists = 100); + +CREATE INDEX CONCURRENTLY idx_request_logs_performance +ON request_logs(created_at, status, duration_ms) +WHERE created_at > NOW() - INTERVAL '30 days'; +``` + +### Caching Strategy + +```typescript +// Multi-level caching +class CacheManager { + // Cache server configurations + @Cache({ ttl: 300, key: 'server-config' }) + async getServerConfig(name: string): Promise { + return this.database.getServerConfig(name); + } + + // Cache tool metadata for smart routing + @Cache({ ttl: 3600, key: 'tool-metadata' }) + async getToolMetadata(): Promise { + return this.database.getToolMetadata(); + } + + // Cache user permissions + @Cache({ ttl: 600, key: 'user-permissions' }) + async getUserPermissions(userId: string): Promise { + return this.authService.getUserPermissions(userId); + } +} +``` + +## Monitoring & Observability + +### Metrics Collection + +```typescript +// src/services/metricsService.ts +class MetricsService { + private prometheus: PrometheusRegistry; + + constructor() { + this.initializeMetrics(); + } + + private initializeMetrics(): void { + // Request metrics + this.requestCount = new Counter({ + name: 'mcphub_requests_total', + help: 'Total number of requests', + labelNames: ['method', 'route', 'status'], + }); + + // Server metrics + this.serverStatus = new Gauge({ + name: 'mcphub_server_status', + help: 'Status of MCP servers', + labelNames: ['server_name', 'status'], + }); + + // Performance metrics + this.responseTime = new Histogram({ + name: 'mcphub_response_time_seconds', + help: 'Response time in seconds', + labelNames: ['route'], + }); + } +} +``` + +### Distributed Tracing + +```typescript +// OpenTelemetry integration +import { trace } from '@opentelemetry/api'; + +class MCPService { + async 
executeRequest(serverName: string, request: MCPRequest): Promise { + const span = trace.getActiveSpan(); + span?.setAttributes({ + 'mcp.server': serverName, + 'mcp.tool': request.tool, + 'mcp.request_id': request.id, + }); + + try { + const response = await this.sendRequest(serverName, request); + span?.setStatus({ code: SpanStatusCode.OK }); + return response; + } catch (error) { + span?.setStatus({ + code: SpanStatusCode.ERROR, + message: error.message, + }); + throw error; + } + } +} +``` + +## Extension Points + +### Plugin Architecture + +```typescript +// Plugin interface +interface MCPHubPlugin { + name: string; + version: string; + init(context: PluginContext): Promise; + destroy(): Promise; +} + +// Plugin manager +class PluginManager { + private plugins: Map = new Map(); + + async loadPlugin(plugin: MCPHubPlugin): Promise { + await plugin.init(this.createContext()); + this.plugins.set(plugin.name, plugin); + } + + private createContext(): PluginContext { + return { + eventBus: this.eventBus, + logger: this.logger, + database: this.database, + // ... other services + }; + } +} +``` + +### Custom Middleware + +```typescript +// Custom middleware registration +class MiddlewareManager { + register(middleware: Middleware): void { + this.app.use(middleware); + } + + registerRoute(path: string, middleware: Middleware): void { + this.app.use(path, middleware); + } +} + +// Example custom middleware +const customAuthMiddleware: Middleware = (req, res, next) => { + // Custom authentication logic + next(); +}; +``` + +## Deployment Architecture + +### Container Strategy + +```dockerfile +# Multi-stage build for optimized images +FROM node:18-alpine AS builder +WORKDIR /app +COPY package*.json ./ +RUN npm ci --only=production + +FROM node:18-alpine AS runtime +RUN addgroup -g 1001 -S nodejs +RUN adduser -S mcphub -u 1001 +WORKDIR /app +COPY --from=builder --chown=mcphub:nodejs /app . 
+USER mcphub +EXPOSE 3000 +CMD ["node", "dist/server.js"] +``` + +### Infrastructure as Code + +```terraform +# Terraform configuration for AWS deployment +resource "aws_ecs_cluster" "mcphub" { + name = "mcphub-cluster" +} + +resource "aws_ecs_service" "mcphub" { + name = "mcphub" + cluster = aws_ecs_cluster.mcphub.id + task_definition = aws_ecs_task_definition.mcphub.arn + desired_count = 3 + + load_balancer { + target_group_arn = aws_lb_target_group.mcphub.arn + container_name = "mcphub" + container_port = 3000 + } +} +``` + +This architecture provides a solid foundation for building a scalable, maintainable, and extensible MCP server management platform while following modern software development best practices. diff --git a/docs/development/contributing.mdx b/docs/development/contributing.mdx new file mode 100644 index 0000000000000000000000000000000000000000..63f665cb18c566007b2cdb045c9bb16035b42fbf --- /dev/null +++ b/docs/development/contributing.mdx @@ -0,0 +1,597 @@ +--- +title: 'Contributing to MCPHub' +description: 'Learn how to contribute to the MCPHub project' +--- + +## Welcome Contributors! 🎉 + +Thank you for your interest in contributing to MCPHub! This guide will help you get started with contributing to the project, whether you're fixing bugs, adding features, improving documentation, or helping with testing. + +## Quick Start + +1. **Fork the repository** on GitHub +2. **Clone your fork** locally +3. **Create a branch** for your changes +4. **Make your changes** following our guidelines +5. **Test your changes** thoroughly +6. **Submit a pull request** + +## Ways to Contribute + +### 🐛 Bug Reports + +Help us improve MCPHub by reporting bugs: + +- **Search existing issues** first to avoid duplicates +- **Use the bug report template** when creating issues +- **Provide detailed information** including steps to reproduce +- **Include system information** (OS, Node.js version, etc.) 
+ +```markdown +## Bug Report Template + +**Description** +A clear description of what the bug is. + +**Steps to Reproduce** + +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected Behavior** +What you expected to happen. + +**Actual Behavior** +What actually happened. + +**Environment** + +- OS: [e.g. macOS 12.0] +- Node.js: [e.g. 18.17.0] +- MCPHub Version: [e.g. 1.2.3] +- Browser: [e.g. Chrome 91.0] + +**Additional Context** +Any other context about the problem. +``` + +### ✨ Feature Requests + +We welcome feature suggestions: + +- **Check existing feature requests** to avoid duplicates +- **Use the feature request template** +- **Explain the use case** and why it would be valuable +- **Consider implementation complexity** + +### 🔧 Code Contributions + +Ready to write some code? Here's how: + +#### Setting Up Development Environment + +```bash +# 1. Fork and clone the repository +git clone https://github.com/YOUR_USERNAME/mcphub.git +cd mcphub + +# 2. Add upstream remote +git remote add upstream https://github.com/mcphub/mcphub.git + +# 3. Install dependencies +pnpm install + +# 4. Set up environment +cp .env.example .env.development + +# 5. Start development environment +docker-compose -f docker-compose.dev.yml up -d +pnpm run migrate +pnpm run seed + +# 6. 
Start development server +pnpm run dev +``` + +#### Branch Naming Convention + +Use descriptive branch names with prefixes: + +```bash +# Features +git checkout -b feature/smart-routing-improvements +git checkout -b feature/user-authentication + +# Bug fixes +git checkout -b fix/server-startup-error +git checkout -b fix/memory-leak-in-cache + +# Documentation +git checkout -b docs/api-reference-update +git checkout -b docs/deployment-guide + +# Refactoring +git checkout -b refactor/auth-service-cleanup +git checkout -b refactor/database-queries +``` + +### 📚 Documentation + +Help improve our documentation: + +- **Fix typos and grammar** +- **Improve existing guides** +- **Add missing documentation** +- **Create tutorials and examples** +- **Translate documentation** + +## Development Guidelines + +### Code Style + +We use ESLint and Prettier to maintain code quality: + +```bash +# Check code style +pnpm run lint + +# Fix automatically fixable issues +pnpm run lint:fix + +# Format code +pnpm run format + +# Type check +pnpm run type-check +``` + +#### TypeScript Best Practices + +```typescript +// ✅ Good: Use proper types +interface MCPServerConfig { + name: string; + command: string; + args: string[]; + env?: Record; +} + +// ✅ Good: Use async/await +async function startServer(config: MCPServerConfig): Promise { + try { + await mcpService.start(config); + } catch (error) { + logger.error('Failed to start server', { error, config }); + throw error; + } +} + +// ❌ Bad: Using any type +function processData(data: any): any { + return data.something(); +} + +// ❌ Bad: Not handling errors +async function riskyOperation(): Promise { + await dangerousFunction(); // Could throw +} +``` + +#### React/Frontend Guidelines + +```tsx +// ✅ Good: Functional components with proper typing +interface ServerCardProps { + server: MCPServer; + onStart: (serverId: string) => void; + onStop: (serverId: string) => void; +} + +const ServerCard: React.FC = ({ server, onStart, onStop }) => { 
+ const handleStart = useCallback(() => { + onStart(server.id); + }, [server.id, onStart]); + + return ( + + + {server.name} + + {server.status} + + + +

{server.description}

+
+ + {server.status === 'stopped' ? ( + + ) : ( + + )} + +
+ ); +}; +``` + +### Testing Requirements + +All contributions must include appropriate tests: + +#### Unit Tests + +```typescript +// src/services/__tests__/mcpService.test.ts +import { MCPService } from '../mcpService'; +import { mockLogger, mockDatabase } from '../../__mocks__'; + +describe('MCPService', () => { + let service: MCPService; + + beforeEach(() => { + service = new MCPService(mockLogger, mockDatabase); + }); + + describe('startServer', () => { + it('should start a server successfully', async () => { + const config = { + name: 'test-server', + command: 'node', + args: ['server.js'], + }; + + await service.startServer(config); + + expect(service.getServerStatus('test-server')).toBe('running'); + }); + + it('should handle server startup failures', async () => { + const invalidConfig = { + name: 'invalid-server', + command: 'invalid-command', + args: [], + }; + + await expect(service.startServer(invalidConfig)).rejects.toThrow( + 'Failed to start server: Command not found', + ); + }); + }); +}); +``` + +#### Integration Tests + +```typescript +// src/__tests__/integration/server-api.test.ts +import request from 'supertest'; +import { app } from '../../app'; +import { setupTestDatabase, teardownTestDatabase } from '../helpers/database'; + +describe('Server API Integration', () => { + beforeAll(async () => { + await setupTestDatabase(); + }); + + afterAll(async () => { + await teardownTestDatabase(); + }); + + describe('POST /api/servers', () => { + it('should create a new server', async () => { + const serverData = { + name: 'test-server', + command: 'node', + args: ['server.js'], + group: 'development', + }; + + const response = await request(app).post('/api/servers').send(serverData).expect(201); + + expect(response.body).toMatchObject({ + name: 'test-server', + status: 'stopped', + group: 'development', + }); + }); + }); +}); +``` + +#### End-to-End Tests + +```typescript +// tests/e2e/server-management.spec.ts +import { test, expect } from 
'@playwright/test'; + +test.describe('Server Management', () => { + test('should create and manage MCP servers', async ({ page }) => { + await page.goto('/dashboard'); + + // Create new server + await page.click('[data-testid="add-server-button"]'); + await page.fill('[data-testid="server-name-input"]', 'test-server'); + await page.fill('[data-testid="server-command-input"]', 'node server.js'); + await page.click('[data-testid="save-server-button"]'); + + // Verify server appears in list + await expect(page.locator('[data-testid="server-list"]')).toContainText('test-server'); + + // Start the server + await page.click('[data-testid="start-server-test-server"]'); + + // Verify server is running + await expect(page.locator('[data-testid="server-status-test-server"]')).toContainText( + 'running', + ); + }); +}); +``` + +### Commit Guidelines + +We follow [Conventional Commits](https://www.conventionalcommits.org/): + +```bash +# Format: [optional scope]: + +# Features +git commit -m "feat(auth): add JWT token refresh functionality" +git commit -m "feat(ui): implement server status dashboard" + +# Bug fixes +git commit -m "fix(api): resolve memory leak in server manager" +git commit -m "fix(db): handle connection timeout gracefully" + +# Documentation +git commit -m "docs(api): add examples for server endpoints" +git commit -m "docs(readme): update installation instructions" + +# Refactoring +git commit -m "refactor(services): extract auth logic into separate module" + +# Tests +git commit -m "test(api): add integration tests for server management" + +# Chores +git commit -m "chore(deps): update dependencies to latest versions" +``` + +#### Commit Types + +- **feat**: New feature +- **fix**: Bug fix +- **docs**: Documentation changes +- **style**: Code style changes (formatting, etc.) 
+- **refactor**: Code refactoring +- **test**: Adding or updating tests +- **chore**: Maintenance tasks +- **perf**: Performance improvements +- **ci**: CI/CD changes + +### Pull Request Process + +#### Before Submitting + +```bash +# 1. Sync with upstream +git fetch upstream +git checkout main +git merge upstream/main + +# 2. Rebase your feature branch +git checkout feature/your-feature +git rebase main + +# 3. Run all checks +pnpm run lint +pnpm run type-check +pnpm run test +pnpm run build + +# 4. Update documentation if needed +# 5. Add/update tests for your changes +``` + +#### Pull Request Template + +```markdown +## Description + +Brief description of the changes and motivation. + +## Type of Change + +- [ ] Bug fix (non-breaking change which fixes an issue) +- [ ] New feature (non-breaking change which adds functionality) +- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) +- [ ] Documentation update + +## Testing + +- [ ] Unit tests pass +- [ ] Integration tests pass +- [ ] E2E tests pass (if applicable) +- [ ] Manual testing completed + +## Documentation + +- [ ] Code is self-documenting +- [ ] API documentation updated +- [ ] User documentation updated +- [ ] README updated (if needed) + +## Checklist + +- [ ] My code follows the project's style guidelines +- [ ] I have performed a self-review of my code +- [ ] I have commented my code, particularly in hard-to-understand areas +- [ ] I have made corresponding changes to the documentation +- [ ] My changes generate no new warnings +- [ ] I have added tests that prove my fix is effective or that my feature works +- [ ] New and existing unit tests pass locally with my changes + +## Screenshots (if applicable) + +Add screenshots to help explain your changes. + +## Additional Notes + +Any additional information that reviewers should know. 
+``` + +### Code Review Process + +#### For Contributors + +- **Be patient**: Reviews take time, and reviewers may have questions +- **Be responsive**: Address feedback promptly and clearly +- **Be open**: Accept constructive criticism and suggestions +- **Ask questions**: If feedback is unclear, ask for clarification + +#### For Reviewers + +- **Be constructive**: Provide helpful suggestions, not just criticism +- **Be specific**: Point out exact issues and suggest solutions +- **Be timely**: Review PRs within a reasonable timeframe +- **Be encouraging**: Recognize good work and improvements + +## Community Guidelines + +### Code of Conduct + +We are committed to providing a welcoming and inspiring community for all: + +- **Be respectful**: Treat everyone with respect and kindness +- **Be inclusive**: Welcome people of all backgrounds and skill levels +- **Be collaborative**: Work together towards common goals +- **Be patient**: Help others learn and grow +- **Be professional**: Maintain professional communication + +### Communication Channels + +- **GitHub Issues**: Bug reports and feature requests +- **GitHub Discussions**: General questions and community chat +- **Discord**: Real-time community chat (link in README) +- **Email**: Security issues and private matters + +## Getting Help + +### Documentation + +- [Getting Started Guide](./getting-started.mdx) +- [Architecture Overview](./architecture.mdx) +- [API Reference](../api-reference/introduction.mdx) +- [Configuration Guide](../configuration/mcp-settings.mdx) + +### Common Issues + +**Build Failures** + +```bash +# Clear and reinstall dependencies +rm -rf node_modules pnpm-lock.yaml +pnpm install + +# Clear build cache +rm -rf dist/ +pnpm run build +``` + +**Test Failures** + +```bash +# Run tests with verbose output +pnpm run test -- --verbose + +# Run specific test file +pnpm test src/services/mcpService.test.ts + +# Debug tests +pnpm run test:debug +``` + +**Database Issues** + +```bash +# Reset database 
+pnpm run db:reset + +# Run migrations +pnpm run migrate + +# Seed development data +pnpm run seed +``` + +### Getting Support + +If you need help: + +1. **Check the documentation** first +2. **Search existing issues** on GitHub +3. **Ask in GitHub Discussions** for general questions +4. **Create an issue** if you found a bug +5. **Join our Discord** for real-time help + +## Recognition + +Contributors will be recognized in several ways: + +- **Contributors file**: All contributors are listed in CONTRIBUTORS.md +- **Release notes**: Significant contributions are mentioned in release notes +- **GitHub badges**: Active contributors receive special recognition +- **Community showcase**: Outstanding contributions are featured in our blog + +## Advanced Topics + +### Maintainer Guidelines + +For project maintainers: + +#### Release Process + +```bash +# 1. Create release branch +git checkout -b release/v1.2.0 + +# 2. Update version +npm version 1.2.0 --no-git-tag-version + +# 3. Update changelog +# Edit CHANGELOG.md + +# 4. Commit changes +git add . +git commit -m "chore(release): prepare v1.2.0" + +# 5. Create PR for review +# 6. After merge, tag release +git tag v1.2.0 +git push origin v1.2.0 +``` + +#### Security Handling + +For security issues: + +1. **Do not** create public issues +2. **Email** security@mcphub.dev +3. **Wait** for response before disclosure +4. **Coordinate** with maintainers on fixes + +### Architectural Decisions + +When making significant changes: + +1. **Create an RFC** (Request for Comments) issue +2. **Discuss** with the community +3. **Get approval** from maintainers +4. **Document** decisions in ADR (Architecture Decision Records) + +## Thank You! 🙏 + +Thank you for taking the time to contribute to MCPHub! Every contribution, no matter how small, helps make the project better for everyone. We look forward to collaborating with you! 
diff --git a/docs/development/getting-started.mdx b/docs/development/getting-started.mdx new file mode 100644 index 0000000000000000000000000000000000000000..315629bfe5c4b66663d1b0ded07ff00914ec652e --- /dev/null +++ b/docs/development/getting-started.mdx @@ -0,0 +1,244 @@ +--- +title: 'Getting Started with Development' +description: 'Learn how to set up your development environment for MCPHub' +--- + +# Getting Started with Development + +This guide will help you set up your development environment for contributing to MCPHub. + +## Prerequisites + +Before you begin, ensure you have the following installed: + +- **Node.js** (version 18 or higher) +- **pnpm** (recommended package manager) +- **Git** +- **Docker** (optional, for containerized development) + +## Setting Up the Development Environment + +### 1. Clone the Repository + +```bash +git clone https://github.com/your-username/mcphub.git +cd mcphub +``` + +### 2. Install Dependencies + +```bash +pnpm install +``` + +### 3. Environment Configuration + +Create a `.env` file in the root directory: + +```bash +cp .env.example .env +``` + +Configure the following environment variables: + +```env +# Server Configuration +PORT=3000 +NODE_ENV=development + +# Database Configuration +DATABASE_URL=postgresql://username:password@localhost:5432/mcphub + +# JWT Configuration +JWT_SECRET=your-secret-key +JWT_EXPIRES_IN=24h + +# OpenAI Configuration (for smart routing) +OPENAI_API_KEY=your-openai-api-key +``` + +### 4. Database Setup + +If using PostgreSQL, create a database: + +```bash +createdb mcphub +``` + +### 5. 
MCP Server Configuration + +Create or modify `mcp_settings.json`: + +```json +{ + "mcpServers": { + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"] + }, + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"] + } + } +} +``` + +## Development Workflow + +### Running the Development Server + +Start both backend and frontend in development mode: + +```bash +pnpm dev +``` + +This will start: + +- Backend server on `http://localhost:3000` +- Frontend development server on `http://localhost:5173` + +### Running Backend Only + +```bash +pnpm backend:dev +``` + +### Running Frontend Only + +```bash +pnpm frontend:dev +``` + +### Building the Project + +Build both backend and frontend: + +```bash +pnpm build +``` + +## Project Structure + +``` +mcphub/ +├── src/ # Backend source code +│ ├── controllers/ # Express controllers +│ ├── routes/ # API routes +│ ├── services/ # Business logic +│ ├── models/ # Database models +│ └── utils/ # Utility functions +├── frontend/ # Frontend React application +│ ├── src/ +│ │ ├── components/ # React components +│ │ ├── pages/ # Page components +│ │ ├── services/ # API services +│ │ └── utils/ # Frontend utilities +├── docs/ # Documentation +├── bin/ # CLI scripts +└── scripts/ # Build and utility scripts +``` + +## Development Tools + +### Linting and Formatting + +```bash +# Run ESLint +pnpm lint + +# Format code with Prettier +pnpm format +``` + +### Testing + +```bash +# Run tests +pnpm test + +# Run tests in watch mode +pnpm test --watch +``` + +### Debugging + +To debug the backend with Node.js inspector: + +```bash +pnpm backend:debug +``` + +Then attach your debugger to `http://localhost:9229`. + +## Making Changes + +### Backend Development + +1. **Controllers**: Handle HTTP requests and responses +2. **Services**: Implement business logic +3. **Models**: Define database schemas +4. **Routes**: Define API endpoints + +### Frontend Development + +1. 
**Components**: Reusable React components +2. **Pages**: Route-specific components +3. **Services**: API communication +4. **Hooks**: Custom React hooks + +### Adding New MCP Servers + +1. Update `mcp_settings.json` with the new server configuration +2. Test the server integration +3. Update documentation if needed + +## Common Development Tasks + +### Adding a New API Endpoint + +1. Create a controller in `src/controllers/` +2. Define the route in `src/routes/` +3. Add any necessary middleware +4. Write tests for the new endpoint + +### Adding a New Frontend Feature + +1. Create components in `frontend/src/components/` +2. Add routes if needed +3. Implement API integration +4. Style with Tailwind CSS + +### Database Migrations + +When modifying database schemas: + +1. Update models in `src/models/` +2. Create migration scripts if using TypeORM +3. Test migrations locally + +## Troubleshooting + +### Common Issues + +**Port conflicts**: Ensure ports 3000 and 5173 are available + +**Database connection**: Verify PostgreSQL is running and credentials are correct + +**MCP server startup**: Check server configurations in `mcp_settings.json` + +**Permission issues**: Ensure MCP servers have necessary permissions + +### Getting Help + +- Check the [Contributing Guide](/development/contributing) +- Review [Architecture Documentation](/development/architecture) +- Open an issue on GitHub for bugs +- Join our community discussions + +## Next Steps + +- Read the [Architecture Overview](/development/architecture) +- Learn about [Contributing Guidelines](/development/contributing) +- Explore [Configuration Options](/configuration/environment-variables) diff --git a/docs/docs.json b/docs/docs.json new file mode 100644 index 0000000000000000000000000000000000000000..a763e2526db569ccbf33ed73a62f2ee67c6b59ec --- /dev/null +++ b/docs/docs.json @@ -0,0 +1,162 @@ +{ + "$schema": "https://mintlify.com/docs.json", + "theme": "mint", + "name": "MCPHub Documentation", + "description": 
"The Unified Hub for Model Context Protocol (MCP) Servers", + "colors": { + "primary": "#16A34A", + "light": "#07C983", + "dark": "#15803D" + }, + "favicon": "/favicon.ico", + "navigation": { + "tabs": [ + { + "tab": "English", + "groups": [ + { + "group": "Get Started", + "pages": [ + "index", + "quickstart", + "installation" + ] + }, + { + "group": "Core Features", + "pages": [ + "features/server-management", + "features/group-management", + "features/smart-routing", + "features/authentication", + "features/monitoring" + ] + }, + { + "group": "Configuration", + "pages": [ + "configuration/mcp-settings", + "configuration/environment-variables", + "configuration/docker-setup", + "configuration/nginx" + ] + }, + { + "group": "Development", + "pages": [ + "development/getting-started", + "development/architecture", + "development/contributing" + ] + } + ] + }, + { + "tab": "中文", + "groups": [ + { + "group": "开始使用", + "pages": [ + "zh/index", + "zh/quickstart", + "zh/installation" + ] + }, + { + "group": "核心功能", + "pages": [ + "zh/features/server-management", + "zh/features/group-management", + "zh/features/smart-routing", + "zh/features/authentication", + "zh/features/monitoring" + ] + }, + { + "group": "配置指南", + "pages": [ + "zh/configuration/mcp-settings", + "zh/configuration/environment-variables", + "zh/configuration/docker-setup", + "zh/configuration/nginx" + ] + }, + { + "group": "开发指南", + "pages": [ + "zh/development/getting-started", + "zh/development/architecture", + "zh/development/contributing" + ] + } + ] + }, + { + "tab": "API Reference", + "groups": [ + { + "group": "MCP Endpoints", + "pages": [ + "api-reference/introduction", + "api-reference/mcp-http", + "api-reference/mcp-sse", + "api-reference/smart-routing" + ] + }, + { + "group": "Management API", + "pages": [ + "api-reference/servers", + "api-reference/groups", + "api-reference/auth", + "api-reference/logs", + "api-reference/config" + ] + } + ] + } + ], + "global": { + "anchors": [ + { + 
"anchor": "GitHub", + "href": "https://github.com/samanhappy/mcphub", + "icon": "github" + }, + { + "anchor": "Discord", + "href": "https://discord.gg/qMKNsn5Q", + "icon": "discord" + }, + { + "anchor": "Sponsor", + "href": "https://ko-fi.com/samanhappy", + "icon": "heart" + } + ] + } + }, + "logo": { + "light": "/logo/light.svg", + "dark": "/logo/dark.svg" + }, + "navbar": { + "links": [ + { + "label": "Demo", + "href": "http://localhost:3000" + } + ], + "primary": { + "type": "button", + "label": "Get Started", + "href": "https://docs.hubmcp.dev/quickstart" + } + }, + "footer": { + "socials": { + "github": "https://github.com/samanhappy/mcphub", + "discord": "https://discord.gg/qMKNsn5Q" + } + } +} \ No newline at end of file diff --git a/docs/essentials/code.mdx b/docs/essentials/code.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d2a462a7a286341c85fccb4760e72051f66b62b1 --- /dev/null +++ b/docs/essentials/code.mdx @@ -0,0 +1,37 @@ +--- +title: 'Code Blocks' +description: 'Display inline code and code blocks' +icon: 'code' +--- + +## Basic + +### Inline Code + +To denote a `word` or `phrase` as code, enclose it in backticks (`). + +``` +To denote a `word` or `phrase` as code, enclose it in backticks (`). +``` + +### Code Block + +Use [fenced code blocks](https://www.markdownguide.org/extended-syntax/#fenced-code-blocks) by enclosing code in three backticks and follow the leading ticks with the programming language of your snippet to get syntax highlighting. Optionally, you can also write the name of your code after the programming language. 
+ +```java HelloWorld.java +class HelloWorld { + public static void main(String[] args) { + System.out.println("Hello, World!"); + } +} +``` + +````md +```java HelloWorld.java +class HelloWorld { + public static void main(String[] args) { + System.out.println("Hello, World!"); + } +} +``` +```` diff --git a/docs/essentials/images.mdx b/docs/essentials/images.mdx new file mode 100644 index 0000000000000000000000000000000000000000..60ad42d38b8e30a2bcb615861c8111e94091dae1 --- /dev/null +++ b/docs/essentials/images.mdx @@ -0,0 +1,59 @@ +--- +title: 'Images and Embeds' +description: 'Add image, video, and other HTML elements' +icon: 'image' +--- + + + +## Image + +### Using Markdown + +The [markdown syntax](https://www.markdownguide.org/basic-syntax/#images) lets you add images using the following code + +```md +![title](/path/image.jpg) +``` + +Note that the image file size must be less than 5MB. Otherwise, we recommend hosting on a service like [Cloudinary](https://cloudinary.com/) or [S3](https://aws.amazon.com/s3/). You can then use that URL and embed. + +### Using Embeds + +To get more customizability with images, you can also use [embeds](/writing-content/embed) to add images + +```html + +``` + +## Embeds and HTML elements + + + +
+ + + +Mintlify supports [HTML tags in Markdown](https://www.markdownguide.org/basic-syntax/#html). This is helpful if you prefer HTML tags to Markdown syntax, and lets you create documentation with infinite flexibility. + + + +### iFrames + +Loads another HTML page within the document. Most commonly used for embedding videos. + +```html + +``` diff --git a/docs/essentials/markdown.mdx b/docs/essentials/markdown.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c8ad9c1f3dd190f2760fa4db9e786dd6a6498753 --- /dev/null +++ b/docs/essentials/markdown.mdx @@ -0,0 +1,88 @@ +--- +title: 'Markdown Syntax' +description: 'Text, title, and styling in standard markdown' +icon: 'text-size' +--- + +## Titles + +Best used for section headers. + +```md +## Titles +``` + +### Subtitles + +Best use to subsection headers. + +```md +### Subtitles +``` + + + +Each **title** and **subtitle** creates an anchor and also shows up on the table of contents on the right. + + + +## Text Formatting + +We support most markdown formatting. Simply add `**`, `_`, or `~` around text to format it. + +| Style | How to write it | Result | +| ------------- | ----------------- | --------------- | +| Bold | `**bold**` | **bold** | +| Italic | `_italic_` | _italic_ | +| Strikethrough | `~strikethrough~` | ~strikethrough~ | + +You can combine these. For example, write `**_bold and italic_**` to get **_bold and italic_** text. + +You need to use HTML to write superscript and subscript text. That is, add `` or `` around your text. + +| Text Size | How to write it | Result | +| ----------- | ------------------------ | ---------------------- | +| Superscript | `superscript` | superscript | +| Subscript | `subscript` | subscript | + +## Linking to Pages + +You can add a link by wrapping text in `[]()`. You would write `[link to google](https://google.com)` to [link to google](https://google.com). + +Links to pages in your docs need to be root-relative. 
Basically, you should include the entire folder path. For example, `[link to text](/writing-content/text)` links to the page "Text" in our components section. + +Relative links like `[link to text](../text)` will open slower because we cannot optimize them as easily. + +## Blockquotes + +### Singleline + +To create a blockquote, add a `>` in front of a paragraph. + +> Dorothy followed her through many of the beautiful rooms in her castle. + +```md +> Dorothy followed her through many of the beautiful rooms in her castle. +``` + +### Multiline + +> Dorothy followed her through many of the beautiful rooms in her castle. +> +> The Witch bade her clean the pots and kettles and sweep the floor and keep the fire fed with wood. + +```md +> Dorothy followed her through many of the beautiful rooms in her castle. +> +> The Witch bade her clean the pots and kettles and sweep the floor and keep the fire fed with wood. +``` + +### LaTeX + +Mintlify supports [LaTeX](https://www.latex-project.org) through the Latex component. + +8 x (vk x H1 - H2) = (0,1) + +```md +8 x (vk x H1 - H2) = (0,1) +``` diff --git a/docs/essentials/navigation.mdx b/docs/essentials/navigation.mdx new file mode 100644 index 0000000000000000000000000000000000000000..f21213c06f6566d75600015dab06e570ccc87a6d --- /dev/null +++ b/docs/essentials/navigation.mdx @@ -0,0 +1,87 @@ +--- +title: 'Navigation' +description: 'The navigation field in docs.json defines the pages that go in the navigation menu' +icon: 'map' +--- + +The navigation menu is the list of links on every website. + +You will likely update `docs.json` every time you add a new page. Pages do not show up automatically. + +## Navigation syntax + +Our navigation syntax is recursive which means you can make nested navigation groups. You don't need to include `.mdx` in page names. 
+ + + +```json Regular Navigation +"navigation": { + "tabs": [ + { + "tab": "Docs", + "groups": [ + { + "group": "Getting Started", + "pages": ["quickstart"] + } + ] + } + ] +} +``` + +```json Nested Navigation +"navigation": { + "tabs": [ + { + "tab": "Docs", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "quickstart", + { + "group": "Nested Reference Pages", + "pages": ["nested-reference-page"] + } + ] + } + ] + } + ] +} +``` + + + +## Folders + +Simply put your MDX files in folders and update the paths in `docs.json`. + +For example, to have a page at `https://yoursite.com/your-folder/your-page` you would make a folder called `your-folder` containing an MDX file called `your-page.mdx`. + + + +You cannot use `api` for the name of a folder unless you nest it inside another folder. Mintlify uses Next.js which reserves the top-level `api` folder for internal server calls. A folder name such as `api-reference` would be accepted. + + + +```json Navigation With Folder +"navigation": { + "tabs": [ + { + "tab": "Docs", + "groups": [ + { + "group": "Group Name", + "pages": ["your-folder/your-page"] + } + ] + } + ] +} +``` + +## Hidden Pages + +MDX files not included in `docs.json` will not show up in the sidebar but are accessible through the search bar and by linking directly to them. diff --git a/docs/essentials/reusable-snippets.mdx b/docs/essentials/reusable-snippets.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a0a55297ab56d27c02b6b2a961024d29f39228ef --- /dev/null +++ b/docs/essentials/reusable-snippets.mdx @@ -0,0 +1,110 @@ +--- +title: Reusable Snippets +description: Reusable, custom snippets to keep content in sync +icon: 'recycle' +--- + +import SnippetIntro from '/snippets/snippet-intro.mdx'; + + + +## Creating a custom snippet + +**Pre-condition**: You must create your snippet file in the `snippets` directory. 
+ + + Any page in the `snippets` directory will be treated as a snippet and will not + be rendered into a standalone page. If you want to create a standalone page + from the snippet, import the snippet into another file and call it as a + component. + + +### Default export + +1. Add content to your snippet file that you want to re-use across multiple + locations. Optionally, you can add variables that can be filled in via props + when you import the snippet. + +```mdx snippets/my-snippet.mdx +Hello world! This is my content I want to reuse across pages. My keyword of the +day is {word}. +``` + + + The content that you want to reuse must be inside the `snippets` directory in + order for the import to work. + + +2. Import the snippet into your destination file. + +```mdx destination-file.mdx +--- +title: My title +description: My Description +--- + +import MySnippet from '/snippets/path/to/my-snippet.mdx'; + +## Header + +Lorem impsum dolor sit amet. + + +``` + +### Reusable variables + +1. Export a variable from your snippet file: + +```mdx snippets/path/to/custom-variables.mdx +export const myName = 'my name'; + +export const myObject = { fruit: 'strawberries' }; +``` + +2. Import the snippet from your destination file and use the variable: + +```mdx destination-file.mdx +--- +title: My title +description: My Description +--- + +import { myName, myObject } from '/snippets/path/to/custom-variables.mdx'; + +Hello, my name is {myName} and I like {myObject.fruit}. +``` + +### Reusable components + +1. Inside your snippet file, create a component that takes in props by exporting + your component in the form of an arrow function. + +```mdx snippets/custom-component.mdx +export const MyComponent = ({ title }) => ( +
+  <div>
+    <h1>{title}</h1>
+    <p>... snippet content ...</p>
+  </div>
+); +``` + + + MDX does not compile inside the body of an arrow function. Stick to HTML + syntax when you can or use a default export if you need to use MDX. + + +2. Import the snippet into your destination file and pass in the props + +```mdx destination-file.mdx +--- +title: My title +description: My Description +--- + +import { MyComponent } from '/snippets/custom-component.mdx'; + +Lorem ipsum dolor sit amet. + + +``` diff --git a/docs/essentials/settings.mdx b/docs/essentials/settings.mdx new file mode 100644 index 0000000000000000000000000000000000000000..884de13a43dde0c4e989976b041ca03e2260d96d --- /dev/null +++ b/docs/essentials/settings.mdx @@ -0,0 +1,318 @@ +--- +title: 'Global Settings' +description: 'Mintlify gives you complete control over the look and feel of your documentation using the docs.json file' +icon: 'gear' +--- + +Every Mintlify site needs a `docs.json` file with the core configuration settings. Learn more about the [properties](#properties) below. + +## Properties + + +Name of your project. Used for the global title. + +Example: `mintlify` + + + + + An array of groups with all the pages within that group + + + The name of the group. + + Example: `Settings` + + + + The relative paths to the markdown files that will serve as pages. + + Example: `["customization", "page"]` + + + + + + + + Path to logo image or object with path to "light" and "dark" mode logo images + + + Path to the logo in light mode + + + Path to the logo in dark mode + + + Where clicking on the logo links you to + + + + + + Path to the favicon image + + + + Hex color codes for your global theme + + + The primary color. Used for most often for highlighted content, section + headers, accents, in light mode + + + The primary color for dark mode. 
Used for most often for highlighted + content, section headers, accents, in dark mode + + + The primary color for important buttons + + + The color of the background in both light and dark mode + + + The hex color code of the background in light mode + + + The hex color code of the background in dark mode + + + + + + + + Array of `name`s and `url`s of links you want to include in the topbar + + + The name of the button. + + Example: `Contact us` + + + The url once you click on the button. Example: `https://mintlify.com/docs` + + + + + + + + + Link shows a button. GitHub shows the repo information at the url provided including the number of GitHub stars. + + + If `link`: What the button links to. + + If `github`: Link to the repository to load GitHub information from. + + + Text inside the button. Only required if `type` is a `link`. + + + + + + + Array of version names. Only use this if you want to show different versions + of docs with a dropdown in the navigation bar. + + + + An array of the anchors, includes the `icon`, `color`, and `url`. + + + The [Font Awesome](https://fontawesome.com/search?q=heart) icon used to feature the anchor. + + Example: `comments` + + + The name of the anchor label. + + Example: `Community` + + + The start of the URL that marks what pages go in the anchor. Generally, this is the name of the folder you put your pages in. + + + The hex color of the anchor icon background. Can also be a gradient if you pass an object with the properties `from` and `to` that are each a hex color. + + + Used if you want to hide an anchor until the correct docs version is selected. + + + Pass `true` if you want to hide the anchor until you directly link someone to docs inside it. + + + One of: "brands", "duotone", "light", "sharp-solid", "solid", or "thin" + + + + + + + Override the default configurations for the top-most anchor. + + + The name of the top-most anchor + + + Font Awesome icon. 
+ + + One of: "brands", "duotone", "light", "sharp-solid", "solid", or "thin" + + + + + + An array of navigational tabs. + + + The name of the tab label. + + + The start of the URL that marks what pages go in the tab. Generally, this + is the name of the folder you put your pages in. + + + + + + Configuration for API settings. Learn more about API pages at [API Components](/api-playground/demo). + + + The base url for all API endpoints. If `baseUrl` is an array, it will enable for multiple base url + options that the user can toggle. + + + + + + The authentication strategy used for all API endpoints. + + + The name of the authentication parameter used in the API playground. + + If method is `basic`, the format should be `[usernameName]:[passwordName]` + + + The default value that's designed to be a prefix for the authentication input field. + + E.g. If an `inputPrefix` of `AuthKey` would inherit the default input result of the authentication field as `AuthKey`. + + + + + + Configurations for the API playground + + + + Whether the playground is showing, hidden, or only displaying the endpoint with no added user interactivity `simple` + + Learn more at the [playground guides](/api-playground/demo) + + + + + + Enabling this flag ensures that key ordering in OpenAPI pages matches the key ordering defined in the OpenAPI file. + + This behavior will soon be enabled by default, at which point this field will be deprecated. + + + + + + + A string or an array of strings of URL(s) or relative path(s) pointing to your + OpenAPI file. + + Examples: + + ```json Absolute + "openapi": "https://example.com/openapi.json" + ``` + ```json Relative + "openapi": "/openapi.json" + ``` + ```json Multiple + "openapi": ["https://example.com/openapi1.json", "/openapi2.json", "/openapi3.json"] + ``` + + + + + + An object of social media accounts where the key:property pair represents the social media platform and the account url. 
+ + Example: + ```json + { + "x": "https://x.com/mintlify", + "website": "https://mintlify.com" + } + ``` + + + One of the following values `website`, `facebook`, `x`, `discord`, `slack`, `github`, `linkedin`, `instagram`, `hacker-news` + + Example: `x` + + + The URL to the social platform. + + Example: `https://x.com/mintlify` + + + + + + Configurations to enable feedback buttons + + + + Enables a button to allow users to suggest edits via pull requests + + + Enables a button to allow users to raise an issue about the documentation + + + + + + Customize the dark mode toggle. + + + Set if you always want to show light or dark mode for new users. When not + set, we default to the same mode as the user's operating system. + + + Set to true to hide the dark/light mode toggle. You can combine `isHidden` with `default` to force your docs to only use light or dark mode. For example: + + + ```json Only Dark Mode + "modeToggle": { + "default": "dark", + "isHidden": true + } + ``` + + ```json Only Light Mode + "modeToggle": { + "default": "light", + "isHidden": true + } + ``` + + + + + + + + + A background image to be displayed behind every page. See example with + [Infisical](https://infisical.com/docs) and [FRPC](https://frpc.io). 
+ diff --git a/docs/favicon.ico b/docs/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..c8150a9c4e1ed7bd4b4612bd6c27bc321e5fd810 Binary files /dev/null and b/docs/favicon.ico differ diff --git a/docs/favicon.svg b/docs/favicon.svg new file mode 100644 index 0000000000000000000000000000000000000000..b785c738bf178e7072e15ad6770e13ad1032d54b --- /dev/null +++ b/docs/favicon.svg @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/docs/features/authentication.mdx b/docs/features/authentication.mdx new file mode 100644 index 0000000000000000000000000000000000000000..36c7ad526578e25cb56c5354b330b030ec65c790 --- /dev/null +++ b/docs/features/authentication.mdx @@ -0,0 +1,338 @@ +--- +title: 'Authentication & Security' +description: 'Configure authentication and security settings for MCPHub' +--- + +## Overview + +MCPHub provides flexible authentication mechanisms to secure your MCP server management platform. The system supports multiple authentication methods and role-based access control. 
+ +## Authentication Methods + +### Environment-based Authentication + +Configure basic authentication using environment variables: + +```bash +# Basic auth credentials +AUTH_USERNAME=admin +AUTH_PASSWORD=your-secure-password + +# JWT settings +JWT_SECRET=your-jwt-secret-key +JWT_EXPIRES_IN=24h +``` + +### Database Authentication + +For production deployments, enable database-backed user management: + +```json +{ + "auth": { + "provider": "database", + "database": { + "url": "postgresql://user:pass@localhost:5432/mcphub", + "userTable": "users" + } + } +} +``` + +## User Management + +### Creating Users + +Create users via the admin interface or API: + +```bash +# Via API +curl -X POST http://localhost:3000/api/auth/users \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $ADMIN_TOKEN" \ + -d '{ + "username": "newuser", + "email": "user@example.com", + "password": "securepassword", + "role": "user" + }' +``` + +### User Roles + +MCPHub supports role-based access control: + +- **Admin**: Full system access, user management, server configuration +- **Manager**: Server management, group management, monitoring +- **User**: Basic server access within assigned groups +- **Viewer**: Read-only access to assigned resources + +## Group-based Access Control + +### Assigning Users to Groups + +```bash +# Add user to group +curl -X POST http://localhost:3000/api/groups/{groupId}/users \ + -H "Authorization: Bearer $TOKEN" \ + -d '{"userId": "user123"}' +``` + +### Group Permissions + +Configure group-level permissions: + +```json +{ + "groupId": "dev-team", + "permissions": { + "servers": ["read", "write", "execute"], + "tools": ["read", "execute"], + "logs": ["read"], + "config": ["read"] + } +} +``` + +## API Authentication + +### JWT Token Authentication + +```javascript +// Login to get token +const response = await fetch('/api/auth/login', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + username: 
'your-username', + password: 'your-password', + }), +}); + +const { token } = await response.json(); + +// Use token for authenticated requests +const serversResponse = await fetch('/api/servers', { + headers: { Authorization: `Bearer ${token}` }, +}); +``` + +### API Key Authentication + +For service-to-service communication: + +```bash +# Generate API key +curl -X POST http://localhost:3000/api/auth/api-keys \ + -H "Authorization: Bearer $ADMIN_TOKEN" \ + -d '{ + "name": "my-service", + "permissions": ["servers:read", "tools:execute"] + }' + +# Use API key +curl -H "X-API-Key: your-api-key" \ + http://localhost:3000/api/servers +``` + +## Security Configuration + +### HTTPS Setup + +Configure HTTPS for production: + +```yaml +# docker-compose.yml +services: + mcphub: + environment: + - HTTPS_ENABLED=true + - SSL_CERT_PATH=/certs/cert.pem + - SSL_KEY_PATH=/certs/key.pem + volumes: + - ./certs:/certs:ro +``` + +### CORS Configuration + +Configure CORS for web applications: + +```json +{ + "cors": { + "origin": ["https://your-frontend.com"], + "credentials": true, + "methods": ["GET", "POST", "PUT", "DELETE"] + } +} +``` + +### Rate Limiting + +Protect against abuse with rate limiting: + +```json +{ + "rateLimit": { + "windowMs": 900000, + "max": 100, + "message": "Too many requests from this IP" + } +} +``` + +## Session Management + +### Session Configuration + +```json +{ + "session": { + "secret": "your-session-secret", + "cookie": { + "secure": true, + "httpOnly": true, + "maxAge": 86400000 + }, + "store": "redis", + "redis": { + "host": "localhost", + "port": 6379 + } + } +} +``` + +### Logout and Session Cleanup + +```javascript +// Logout endpoint +app.post('/api/auth/logout', (req, res) => { + req.session.destroy(); + res.json({ message: 'Logged out successfully' }); +}); +``` + +## Security Best Practices + +### Password Security + +- Use strong password requirements +- Implement password hashing with bcrypt +- Support password reset functionality +- 
Enable two-factor authentication (2FA) + +### Token Security + +- Use secure JWT secrets +- Implement token rotation +- Set appropriate expiration times +- Store tokens securely in httpOnly cookies + +### Network Security + +- Use HTTPS in production +- Implement proper CORS policies +- Enable request validation +- Use security headers (helmet.js) + +### Monitoring Security Events + +```javascript +// Log security events +const auditLog = { + event: 'login_attempt', + user: username, + ip: req.ip, + userAgent: req.headers['user-agent'], + success: true, + timestamp: new Date(), +}; +``` + +## Troubleshooting + +### Common Authentication Issues + +**Invalid Credentials** + +```bash +# Check user exists and password is correct +curl -X POST http://localhost:3000/api/auth/verify \ + -d '{"username": "user", "password": "pass"}' +``` + +**Token Expiration** + +```javascript +// Handle token refresh +if (response.status === 401) { + const newToken = await refreshToken(); + // Retry request with new token +} +``` + +**Permission Denied** + +```bash +# Check user permissions +curl -H "Authorization: Bearer $TOKEN" \ + http://localhost:3000/api/auth/permissions +``` + +### Debug Authentication + +Enable authentication debugging: + +```bash +DEBUG=mcphub:auth npm start +``` + +## Integration Examples + +### Frontend Integration + +```javascript +// React authentication hook +const useAuth = () => { + const [user, setUser] = useState(null); + + const login = async (credentials) => { + const response = await fetch('/api/auth/login', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(credentials), + }); + + if (response.ok) { + const userData = await response.json(); + setUser(userData.user); + return true; + } + return false; + }; + + return { user, login }; +}; +``` + +### Middleware Integration + +```javascript +// Express middleware +const authMiddleware = (req, res, next) => { + const token = req.headers.authorization?.split(' 
')[1]; + + if (!token) { + return res.status(401).json({ error: 'No token provided' }); + } + + try { + const decoded = jwt.verify(token, process.env.JWT_SECRET); + req.user = decoded; + next(); + } catch (error) { + res.status(401).json({ error: 'Invalid token' }); + } +}; +``` diff --git a/docs/features/group-management.mdx b/docs/features/group-management.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d3e60c9eb3f3664055a46220abd89e6bc04dd09a --- /dev/null +++ b/docs/features/group-management.mdx @@ -0,0 +1,588 @@ +--- +title: 'Group Management' +description: 'Organize servers into logical groups for streamlined access control' +--- + +## Overview + +Group Management in MCPHub allows you to organize your MCP servers into logical collections based on functionality, use case, or access requirements. This enables fine-grained control over which tools are available to different AI clients and users. + +## Core Concepts + +### What are Groups? + +Groups are named collections of MCP servers that can be accessed through dedicated endpoints. Instead of connecting to all servers at once, AI clients can connect to specific groups to access only relevant tools. + +### Benefits of Groups + +- **Focused Tool Access**: AI clients see only relevant tools for their use case +- **Better Performance**: Reduced tool discovery overhead +- **Enhanced Security**: Limit access to sensitive tools +- **Improved Organization**: Logical separation of functionality +- **Simplified Management**: Easier to manage related servers together + +## Creating Groups + +### Via Dashboard + +1. **Navigate to Groups Section**: Click "Groups" in the main navigation +2. **Click "Create Group"**: Start the group creation process +3. **Fill Group Details**: + + - **Name**: Unique identifier for the group + - **Display Name**: Human-readable name + - **Description**: Purpose and contents of the group + - **Access Level**: Public, Private, or Restricted + +4. 
**Add Servers**: Select servers to include in the group + +### Via API + +Create groups programmatically: + +```bash +curl -X POST http://localhost:3000/api/groups \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -d '{ + "name": "web-automation", + "displayName": "Web Automation Tools", + "description": "Browser automation and web scraping tools", + "servers": ["playwright", "fetch"], + "accessLevel": "public" + }' +``` + +### Via Configuration File + +Define groups in your `mcp_settings.json`: + +```json +{ + "mcpServers": { + "fetch": { "command": "uvx", "args": ["mcp-server-fetch"] }, + "playwright": { "command": "npx", "args": ["@playwright/mcp@latest"] }, + "slack": { "command": "npx", "args": ["@modelcontextprotocol/server-slack"] } + }, + "groups": { + "web-tools": { + "displayName": "Web Tools", + "description": "Web scraping and browser automation", + "servers": ["fetch", "playwright"], + "accessLevel": "public" + }, + "communication": { + "displayName": "Communication Tools", + "description": "Messaging and collaboration tools", + "servers": ["slack"], + "accessLevel": "private" + } + } +} +``` + +## Group Types and Use Cases + + + + **Purpose**: Browser automation and web scraping + + **Servers**: + - `playwright`: Browser automation + - `fetch`: HTTP requests and web scraping + - `selenium`: Alternative browser automation + + **Use Cases**: + - Automated testing + - Data collection + - Web monitoring + - Content analysis + + **Endpoint**: `http://localhost:3000/mcp/web-automation` + + + + + **Purpose**: Data manipulation and analysis + + **Servers**: + - `sqlite`: Database operations + - `filesystem`: File operations + - `spreadsheet`: Excel/CSV processing + + **Use Cases**: + - Data analysis + - Report generation + - File processing + - Database queries + + **Endpoint**: `http://localhost:3000/mcp/data-processing` + + + + + **Purpose**: Messaging and collaboration + + **Servers**: + - `slack`: Slack integration 
+ - `discord`: Discord bot + - `email`: Email sending + - `sms`: SMS notifications + + **Use Cases**: + - Team notifications + - Customer communication + - Alert systems + - Social media management + + **Endpoint**: `http://localhost:3000/mcp/communication` + + + + + **Purpose**: Software development tools + + **Servers**: + - `github`: GitHub operations + - `gitlab`: GitLab integration + - `docker`: Container management + - `kubernetes`: K8s operations + + **Use Cases**: + - Code deployment + - Repository management + - CI/CD operations + - Infrastructure management + + **Endpoint**: `http://localhost:3000/mcp/development` + + + + + **Purpose**: Machine learning and AI tools + + **Servers**: + - `openai`: OpenAI API integration + - `huggingface`: Hugging Face models + - `vector-db`: Vector database operations + + **Use Cases**: + - Model inference + - Data embeddings + - Natural language processing + - Computer vision + + **Endpoint**: `http://localhost:3000/mcp/ai-ml` + + + + +## Group Access Control + +### Access Levels + + + + **Public Groups**: + - Accessible to all authenticated users + - No additional permissions required + - Visible in group listings + - Default access level + + ```json + { + "name": "public-tools", + "accessLevel": "public", + "servers": ["fetch", "calculator"] + } + ``` + + + + + **Private Groups**: + - Only visible to group members + - Requires explicit user assignment + - Hidden from public listings + - Admin-controlled membership + + ```json + { + "name": "internal-tools", + "accessLevel": "private", + "members": ["user1", "user2"], + "servers": ["internal-api", "database"] + } + ``` + + + + + **Restricted Groups**: + - Role-based access control + - Requires specific permissions + - Audit logging enabled + - Time-limited access + + ```json + { + "name": "admin-tools", + "accessLevel": "restricted", + "requiredRoles": ["admin", "operator"], + "servers": ["system-control", "user-management"] + } + ``` + + + + +### User Management + 
+Assign users to groups: + +```bash +# Add user to group +curl -X POST http://localhost:3000/api/groups/web-tools/members \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -d '{"userId": "user123"}' + +# Remove user from group +curl -X DELETE http://localhost:3000/api/groups/web-tools/members/user123 \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" + +# List group members +curl http://localhost:3000/api/groups/web-tools/members \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" +``` + +## Group Endpoints + +### Accessing Groups + +Each group gets its own MCP endpoint: + + + + ``` + http://localhost:3000/mcp/{group-name} + ``` + + Examples: + - `http://localhost:3000/mcp/web-tools` + - `http://localhost:3000/mcp/data-processing` + - `http://localhost:3000/mcp/communication` + + + + + ``` + http://localhost:3000/sse/{group-name} + ``` + + Examples: + - `http://localhost:3000/sse/web-tools` + - `http://localhost:3000/sse/data-processing` + - `http://localhost:3000/sse/communication` + + + + +### Group Tool Discovery + +When connecting to a group endpoint, AI clients will only see tools from servers within that group: + +```bash +# List tools in web-tools group +curl -X POST http://localhost:3000/mcp/web-tools \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/list", + "params": {} + }' +``` + +Response will only include tools from `fetch` and `playwright` servers. + +## Dynamic Group Management + +### Adding Servers to Groups + + + + 1. Navigate to the group in the dashboard + 2. Click "Manage Servers" + 3. Select additional servers to add + 4. Click "Save Changes" + + + + ```bash + curl -X POST http://localhost:3000/api/groups/web-tools/servers \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -d '{"serverId": "new-server"}' + ``` + + + +### Removing Servers from Groups + + + + 1. Navigate to the group in the dashboard + 2. 
Click "Manage Servers" + 3. Unselect servers to remove + 4. Click "Save Changes" + + + + ```bash + curl -X DELETE http://localhost:3000/api/groups/web-tools/servers/server-name \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" + ``` + + + +### Batch Server Updates + +Update multiple servers at once: + +```bash +curl -X PUT http://localhost:3000/api/groups/web-tools/servers \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -d '{ + "servers": ["fetch", "playwright", "selenium"] + }' +``` + +## Group Monitoring + +### Group Status + +Monitor group health and activity: + +```bash +# Get group status +curl http://localhost:3000/api/groups/web-tools/status \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" +``` + +Response includes: + +- Number of active servers +- Tool count +- Active connections +- Recent activity + +### Group Analytics + +Track group usage: + +```bash +# Get group analytics +curl http://localhost:3000/api/groups/web-tools/analytics \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" +``` + +Metrics include: + +- Request count by tool +- Response times +- Error rates +- User activity + +## Advanced Group Features + +### Nested Groups + +Create hierarchical group structures: + +```json +{ + "groups": { + "development": { + "displayName": "Development Tools", + "subGroups": ["frontend-dev", "backend-dev", "devops"] + }, + "frontend-dev": { + "displayName": "Frontend Development", + "servers": ["playwright", "webpack-server"], + "parent": "development" + }, + "backend-dev": { + "displayName": "Backend Development", + "servers": ["database", "api-server"], + "parent": "development" + } + } +} +``` + +### Group Templates + +Use templates for quick group creation: + +```json +{ + "groupTemplates": { + "web-project": { + "description": "Standard web project toolset", + "servers": ["fetch", "playwright", "filesystem"], + "accessLevel": "public" + }, + "data-science": { + "description": "Data science and ML tools", + "servers": 
["python-tools", "jupyter", "vector-db"], + "accessLevel": "private" + } + } +} +``` + +Apply template: + +```bash +curl -X POST http://localhost:3000/api/groups/from-template \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -d '{ + "name": "my-web-project", + "template": "web-project", + "displayName": "My Web Project Tools" + }' +``` + +### Group Policies + +Define policies for group behavior: + +```json +{ + "groupPolicies": { + "web-tools": { + "maxConcurrentConnections": 10, + "requestTimeout": 30000, + "rateLimiting": { + "requestsPerMinute": 100, + "burstLimit": 20 + }, + "allowedOrigins": ["localhost", "myapp.com"] + } + } +} +``` + +## Best Practices + +### Group Organization + + + **Organize by Use Case**: Group servers based on what users want to accomplish, not just technical + similarity. + + + + **Keep Groups Focused**: Avoid creating groups with too many diverse tools. Smaller, focused + groups are more useful. + + + + **Use Descriptive Names**: Choose names that clearly indicate the group's purpose and contents. + + +### Security Considerations + + + **Principle of Least Privilege**: Only give users access to groups they actually need. + + + + **Sensitive Tool Isolation**: Keep sensitive tools in restricted groups with proper access + controls. + + + + **Regular Access Reviews**: Periodically review group memberships and remove unnecessary access. + + +### Performance Optimization + + + **Balance Group Size**: Very large groups may have slower tool discovery. Consider splitting into + smaller groups. + + + + **Monitor Usage**: Use analytics to identify which groups are heavily used and optimize + accordingly. + + +## Troubleshooting + + + + **Check:** + - User has proper permissions + - Group exists and is active + - Servers in group are running + - Network connectivity + + **Solutions:** + 1. Verify user group membership + 2. Check group configuration + 3. Test individual server connections + 4. 
Review access logs + + + + + **Possible causes:** + - Server not properly added to group + - Server is not running + - Tool discovery failed + - Caching issues + + **Debug steps:** + 1. Verify server is in group configuration + 2. Check server status + 3. Force refresh tool discovery + 4. Clear group cache + + + + + **Common issues:** + - Too many servers in group + - Slow server responses + - Network latency + - Resource constraints + + **Optimizations:** + 1. Split large groups + 2. Monitor server performance + 3. Implement request caching + 4. Use connection pooling + + + + +## Next Steps + + + + AI-powered tool discovery across groups + + + User management and access control + + + Complete group management API + + + Advanced group configuration options + + diff --git a/docs/features/monitoring.mdx b/docs/features/monitoring.mdx new file mode 100644 index 0000000000000000000000000000000000000000..9d8772948dc72457d015bea707c4f7bc82ef45b4 --- /dev/null +++ b/docs/features/monitoring.mdx @@ -0,0 +1,526 @@ +--- +title: 'Monitoring & Logging' +description: 'Monitor your MCP servers and analyze system logs with MCPHub' +--- + +## Overview + +MCPHub provides comprehensive monitoring and logging capabilities to help you track server performance, debug issues, and maintain system health. 
+ +## Real-time Monitoring + +### Server Status Dashboard + +The MCPHub dashboard provides real-time monitoring of all registered MCP servers: + +- **Server Health**: Online/offline status with automatic health checks +- **Response Times**: Average, min, max response times per server +- **Request Volume**: Requests per second/minute/hour +- **Error Rates**: Success/failure ratios and error trends +- **Resource Usage**: Memory and CPU utilization (when available) + +### Health Check Configuration + +Configure health checks for your MCP servers: + +```json +{ + "healthCheck": { + "enabled": true, + "interval": 30000, + "timeout": 5000, + "retries": 3, + "endpoints": { + "ping": "/health", + "tools": "/tools/list" + } + } +} +``` + +### Monitoring API + +Get monitoring data programmatically: + +```bash +# Get server health status +curl http://localhost:3000/api/monitoring/health + +# Get performance metrics +curl http://localhost:3000/api/monitoring/metrics?server=my-server&range=1h + +# Get system overview +curl http://localhost:3000/api/monitoring/overview +``` + +## Logging System + +### Log Levels + +MCPHub supports standard log levels: + +- **ERROR**: Critical errors requiring immediate attention +- **WARN**: Warning conditions that should be monitored +- **INFO**: General operational messages +- **DEBUG**: Detailed debugging information +- **TRACE**: Very detailed trace information + +### Log Configuration + +Configure logging in your environment: + +```bash +# Set log level +LOG_LEVEL=info + +# Enable structured logging +LOG_FORMAT=json + +# Log file location +LOG_FILE=/var/log/mcphub/app.log + +# Enable request logging +ENABLE_REQUEST_LOGS=true +``` + +### Structured Logging + +MCPHub uses structured logging for better analysis: + +```json +{ + "timestamp": "2024-01-20T10:30:00Z", + "level": "info", + "message": "MCP server request completed", + "server": "github-mcp", + "tool": "search_repositories", + "duration": 245, + "status": "success", + "requestId": 
"req_123456", + "userId": "user_789" +} +``` + +## Log Management + +### Log Storage Options + +#### File-based Logging + +```yaml +# docker-compose.yml +services: + mcphub: + volumes: + - ./logs:/app/logs + environment: + - LOG_FILE=/app/logs/mcphub.log + - LOG_ROTATION=daily + - LOG_MAX_SIZE=100MB + - LOG_MAX_FILES=7 +``` + +#### Database Logging + +```json +{ + "logging": { + "database": { + "enabled": true, + "table": "logs", + "retention": "30d", + "indexes": ["timestamp", "level", "server"] + } + } +} +``` + +#### External Log Services + +```bash +# Syslog integration +SYSLOG_ENABLED=true +SYSLOG_HOST=localhost +SYSLOG_PORT=514 +SYSLOG_FACILITY=local0 + +# ELK Stack integration +ELASTICSEARCH_URL=http://localhost:9200 +ELASTICSEARCH_INDEX=mcphub-logs +``` + +### Log Rotation + +Automatic log rotation configuration: + +```json +{ + "logRotation": { + "enabled": true, + "maxSize": "100MB", + "maxFiles": 10, + "compress": true, + "interval": "daily" + } +} +``` + +## Metrics Collection + +### System Metrics + +MCPHub collects various system metrics: + +```javascript +// Example metrics collected +{ + "timestamp": "2024-01-20T10:30:00Z", + "metrics": { + "requests": { + "total": 1547, + "success": 1523, + "errors": 24, + "rate": 12.5 + }, + "servers": { + "online": 8, + "offline": 2, + "total": 10 + }, + "performance": { + "avgResponseTime": 156, + "p95ResponseTime": 324, + "p99ResponseTime": 567 + }, + "system": { + "memoryUsage": "245MB", + "cpuUsage": "15%", + "uptime": "72h 35m" + } + } +} +``` + +### Custom Metrics + +Add custom metrics for your use case: + +```javascript +// Custom metric example +const customMetric = { + name: 'tool_usage', + type: 'counter', + tags: { + server: 'github-mcp', + tool: 'search_repositories', + result: 'success', + }, + value: 1, +}; + +// Send to metrics endpoint +await fetch('/api/monitoring/metrics', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(customMetric), +}); +``` + +## 
Alerting + +### Alert Configuration + +Set up alerts for critical conditions: + +```json +{ + "alerts": { + "serverDown": { + "condition": "server.status == 'offline'", + "duration": "5m", + "severity": "critical", + "channels": ["email", "slack"] + }, + "highErrorRate": { + "condition": "errors.rate > 0.1", + "duration": "2m", + "severity": "warning", + "channels": ["slack"] + }, + "slowResponse": { + "condition": "response.p95 > 1000", + "duration": "5m", + "severity": "warning", + "channels": ["email"] + } + } +} +``` + +### Notification Channels + +#### Email Notifications + +```bash +# Email configuration +SMTP_HOST=smtp.gmail.com +SMTP_PORT=587 +SMTP_USER=alerts@yourcompany.com +SMTP_PASS=your-app-password +ALERT_EMAIL_TO=admin@yourcompany.com +``` + +#### Slack Integration + +```bash +# Slack webhook +SLACK_WEBHOOK_URL=https://hooks.slack.com/services/... +SLACK_CHANNEL=#mcphub-alerts +``` + +#### Webhook Notifications + +```json +{ + "webhooks": [ + { + "url": "https://your-service.com/webhooks/mcphub", + "events": ["server.down", "error.rate.high"], + "headers": { + "Authorization": "Bearer your-token" + } + } + ] +} +``` + +## Log Analysis + +### Query Logs + +Use the logs API to query and analyze logs: + +```bash +# Get recent errors +curl "http://localhost:3000/api/logs?level=error&since=1h" + +# Search logs by server +curl "http://localhost:3000/api/logs?server=github-mcp&limit=100" + +# Get logs for specific request +curl "http://localhost:3000/api/logs?requestId=req_123456" + +# Filter by time range +curl "http://localhost:3000/api/logs?from=2024-01-20T00:00:00Z&to=2024-01-20T23:59:59Z" +``` + +### Log Aggregation + +Aggregate logs for insights: + +```bash +# Error summary by server +curl "http://localhost:3000/api/logs/aggregate?groupBy=server&level=error&since=24h" + +# Request volume over time +curl "http://localhost:3000/api/logs/aggregate?groupBy=hour&type=request&since=7d" +``` + +## Performance Monitoring + +### Response Time Tracking + 
+Monitor MCP server response times: + +```javascript +// Response time metrics +{ + "server": "github-mcp", + "tool": "search_repositories", + "metrics": { + "calls": 156, + "avgTime": 234, + "minTime": 89, + "maxTime": 1205, + "p50": 201, + "p95": 567, + "p99": 892 + } +} +``` + +### Error Rate Monitoring + +Track error rates and patterns: + +```bash +# Get error rates by server +curl "http://localhost:3000/api/monitoring/errors?groupBy=server&since=1h" + +# Get error details +curl "http://localhost:3000/api/monitoring/errors?server=github-mcp&details=true" +``` + +## Integration with External Tools + +### Prometheus Integration + +Export metrics to Prometheus: + +```yaml +# prometheus.yml +scrape_configs: + - job_name: 'mcphub' + static_configs: + - targets: ['localhost:3000'] + metrics_path: '/api/monitoring/prometheus' + scrape_interval: 30s +``` + +### Grafana Dashboards + +Import MCPHub Grafana dashboard: + +```json +{ + "dashboard": { + "title": "MCPHub Monitoring", + "panels": [ + { + "title": "Server Status", + "type": "stat", + "targets": [ + { + "expr": "mcphub_servers_online", + "legendFormat": "Online" + } + ] + }, + { + "title": "Request Rate", + "type": "graph", + "targets": [ + { + "expr": "rate(mcphub_requests_total[5m])", + "legendFormat": "Requests/sec" + } + ] + } + ] + } +} +``` + +### ELK Stack Integration + +Configure Logstash for log processing: + +```ruby +# logstash.conf +input { + beats { + port => 5044 + } +} + +filter { + if [fields][service] == "mcphub" { + json { + source => "message" + } + + date { + match => [ "timestamp", "ISO8601" ] + } + } +} + +output { + elasticsearch { + hosts => ["localhost:9200"] + index => "mcphub-logs-%{+YYYY.MM.dd}" + } +} +``` + +## Troubleshooting + +### Common Monitoring Issues + +**Missing Metrics** + +```bash +# Check metrics endpoint +curl http://localhost:3000/api/monitoring/health + +# Verify configuration +grep -r "monitoring" /path/to/config/ +``` + +**Log File Issues** + +```bash +# Check log 
file permissions +ls -la /var/log/mcphub/ + +# Verify disk space +df -h /var/log/ + +# Check log rotation +logrotate -d /etc/logrotate.d/mcphub +``` + +**Performance Issues** + +```bash +# Monitor system resources +top -p $(pgrep -f mcphub) + +# Check database connections +curl http://localhost:3000/api/monitoring/database + +# Analyze slow queries +curl http://localhost:3000/api/monitoring/slow-queries +``` + +### Debug Mode + +Enable debug logging for troubleshooting: + +```bash +# Enable debug mode +DEBUG=mcphub:* npm start + +# Or set environment variable +export DEBUG=mcphub:monitoring,mcphub:logging +``` + +## Best Practices + +### Log Management + +- Use structured logging with consistent formats +- Implement proper log levels and filtering +- Set up log rotation and retention policies +- Monitor log file sizes and disk usage + +### Monitoring Setup + +- Configure appropriate health check intervals +- Set up alerts for critical conditions +- Monitor both system and application metrics +- Use dashboards for visual monitoring + +### Performance Optimization + +- Index log database tables appropriately +- Use log sampling for high-volume scenarios +- Implement proper caching for metrics +- Regular cleanup of old logs and metrics + +### Security Considerations + +- Sanitize sensitive data in logs +- Secure access to monitoring endpoints +- Use authentication for external integrations +- Encrypt log transmission when using external services diff --git a/docs/features/server-management.mdx b/docs/features/server-management.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c1fd98d5f3c26dd2e1a2a48d78a6a836a9fb3402 --- /dev/null +++ b/docs/features/server-management.mdx @@ -0,0 +1,509 @@ +--- +title: 'Server Management' +description: 'Centrally manage multiple MCP servers with hot-swappable configuration' +--- + +## Overview + +MCPHub's server management system allows you to centrally configure, monitor, and control multiple MCP (Model Context 
Protocol) servers from a single dashboard. All changes are applied in real-time without requiring server restarts. + +## Adding MCP Servers + +### Via Dashboard + +1. **Access the Dashboard**: Navigate to `http://localhost:3000` and log in +2. **Click "Add Server"**: Located in the servers section +3. **Fill Server Details**: + - **Name**: Unique identifier for the server + - **Command**: Executable command (e.g., `npx`, `uvx`, `python`) + - **Arguments**: Array of command arguments + - **Environment Variables**: Key-value pairs for environment setup + - **Working Directory**: Optional working directory for the command + +### Via Configuration File + +Edit your `mcp_settings.json` file: + +```json +{ + "mcpServers": { + "server-name": { + "command": "command-to-run", + "args": ["arg1", "arg2"], + "env": { + "API_KEY": "your-api-key", + "CONFIG_VALUE": "some-value" + }, + "cwd": "/optional/working/directory" + } + } +} +``` + +### Via API + +Use the REST API to add servers programmatically: + +```bash +curl -X POST http://localhost:3000/api/servers \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -d '{ + "name": "fetch-server", + "command": "uvx", + "args": ["mcp-server-fetch"], + "env": {} + }' +``` + +## Popular MCP Server Examples + + + + Provides web scraping and HTTP request capabilities: + + ```json + { + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"] + } + } + ``` + + **Available Tools:** + - `fetch`: Make HTTP requests + - `fetch_html`: Scrape web pages + - `fetch_json`: Get JSON data from APIs + + + + + Browser automation for web interactions: + + ```json + { + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"] + } + } + ``` + + **Available Tools:** + - `playwright_navigate`: Navigate to web pages + - `playwright_screenshot`: Take screenshots + - `playwright_click`: Click elements + - `playwright_fill`: Fill forms + + + + + File and directory management: + + ```json 
+ { + "filesystem": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/allowed/path"] + } + } + ``` + + **Available Tools:** + - `read_file`: Read file contents + - `write_file`: Write to files + - `create_directory`: Create directories + - `list_directory`: List directory contents + + + + + Database operations: + + ```json + { + "sqlite": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-sqlite", "/path/to/database.db"] + } + } + ``` + + **Available Tools:** + - `execute_query`: Execute SQL queries + - `describe_tables`: Get table schemas + - `create_table`: Create new tables + + + + + Slack workspace integration: + + ```json + { + "slack": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-slack"], + "env": { + "SLACK_BOT_TOKEN": "xoxb-your-bot-token", + "SLACK_TEAM_ID": "T1234567890" + } + } + } + ``` + + **Available Tools:** + - `send_slack_message`: Send messages to channels + - `list_slack_channels`: List available channels + - `get_slack_thread`: Get thread messages + + + + + GitHub repository operations: + + ```json + { + "github": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-github"], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "ghp_your_token" + } + } + } + ``` + + **Available Tools:** + - `create_or_update_file`: Create/update repository files + - `search_repositories`: Search GitHub repositories + - `create_issue`: Create issues + - `create_pull_request`: Create pull requests + + + + + Google Drive file operations: + + ```json + { + "gdrive": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-gdrive"], + "env": { + "GDRIVE_CLIENT_ID": "your-client-id", + "GDRIVE_CLIENT_SECRET": "your-client-secret", + "GDRIVE_REDIRECT_URI": "your-redirect-uri" + } + } + } + ``` + + **Available Tools:** + - `gdrive_search`: Search files and folders + - `gdrive_read`: Read file contents + - `gdrive_create`: Create new files + + + + + Chinese mapping and 
location services: + + ```json + { + "amap": { + "command": "npx", + "args": ["-y", "@amap/amap-maps-mcp-server"], + "env": { + "AMAP_MAPS_API_KEY": "your-api-key" + } + } + } + ``` + + **Available Tools:** + - `search_location`: Search for locations + - `get_directions`: Get route directions + - `reverse_geocode`: Convert coordinates to addresses + + + + +## Server Lifecycle Management + +### Starting Servers + +Servers are automatically started when: + +- MCPHub boots up +- A server is added via the dashboard or API +- A server configuration is updated +- A stopped server is manually restarted + +### Stopping Servers + +You can stop servers: + +- **Via Dashboard**: Toggle the server status switch +- **Via API**: Send a POST request to `/api/servers/{name}/toggle` +- **Automatically**: Servers stop if they crash or encounter errors + +### Restarting Servers + +Servers are automatically restarted: + +- When configuration changes are made +- After environment variable updates +- When manually triggered via dashboard or API + +## Server Status Monitoring + +### Status Indicators + +Each server displays a status indicator: + +- 🟢 **Running**: Server is active and responding +- 🟡 **Starting**: Server is initializing +- 🔴 **Stopped**: Server is not running +- ⚠️ **Error**: Server encountered an error + +### Real-time Logs + +View server logs in real-time: + +1. **Dashboard Logs**: Click on a server to view its logs +2. **API Logs**: Access logs via `/api/logs` endpoint +3. 
**Streaming Logs**: Subscribe to log streams via WebSocket + +### Health Checks + +MCPHub automatically performs health checks: + +- **Initialization Check**: Verifies server starts successfully +- **Tool Discovery**: Confirms available tools are detected +- **Response Check**: Tests server responsiveness +- **Resource Monitoring**: Tracks CPU and memory usage + +## Configuration Management + +### Environment Variables + +Servers can use environment variables for configuration: + +```json +{ + "server-name": { + "command": "python", + "args": ["server.py"], + "env": { + "API_KEY": "${YOUR_API_KEY}", + "DEBUG": "true", + "MAX_CONNECTIONS": "10" + } + } +} +``` + +**Environment Variable Expansion:** + +- `${VAR_NAME}`: Expands to environment variable value +- `${VAR_NAME:-default}`: Uses default if variable not set +- `${VAR_NAME:+value}`: Uses value if variable is set + +### Working Directory + +Set the working directory for server execution: + +```json +{ + "server-name": { + "command": "./local-script.sh", + "args": [], + "cwd": "/path/to/server/directory" + } +} +``` + +### Command Variations + +Different ways to specify server commands: + + + + ```json + { + "npm-server": { + "command": "npx", + "args": ["-y", "package-name", "--option", "value"] + } + } + ``` + + + + ```json + { + "python-server": { + "command": "uvx", + "args": ["package-name", "--config", "config.json"] + } + } + ``` + + + + ```json + { + "direct-python": { + "command": "python", + "args": ["-m", "module_name", "--arg", "value"] + } + } + ``` + + + + ```json + { + "local-script": { + "command": "./server.sh", + "args": ["--port", "8080"], + "cwd": "/path/to/script" + } + } + ``` + + + +## Advanced Features + +### Hot Reloading + +MCPHub supports hot reloading of server configurations: + +1. **Config File Changes**: Automatically detects changes to `mcp_settings.json` +2. **Dashboard Updates**: Immediately applies changes made through the web interface +3. 
**API Updates**: Real-time updates via REST API calls +4. **Zero Downtime**: Graceful server restarts without affecting other servers + +### Resource Limits + +Control server resource usage: + +```json +{ + "resource-limited-server": { + "command": "memory-intensive-server", + "args": [], + "limits": { + "memory": "512MB", + "cpu": "50%", + "timeout": "30s" + } + } +} +``` + +### Dependency Management + +Handle server dependencies: + + + + MCPHub can automatically install missing packages: + + ```json + { + "auto-install-server": { + "command": "npx", + "args": ["-y", "package-that-might-not-exist"], + "autoInstall": true + } + } + ``` + + + + + Run setup scripts before starting servers: + + ```json + { + "setup-server": { + "preStart": ["npm install", "pip install -r requirements.txt"], + "command": "python", + "args": ["server.py"] + } + } + ``` + + + + +## Troubleshooting + + + + **Check the following:** + - Command is available in PATH + - All required environment variables are set + - Working directory exists and is accessible + - Network ports are not blocked + - Dependencies are installed + + **Debug steps:** + 1. Check server logs in the dashboard + 2. Test command manually in terminal + 3. Verify environment variable expansion + 4. Check file permissions + + + + + **Common causes:** + - Invalid configuration parameters + - Missing API keys or credentials + - Resource limits exceeded + - Dependency conflicts + + **Solutions:** + 1. Review server logs for error messages + 2. Test with minimal configuration + 3. Verify all credentials and API keys + 4. Check system resource availability + + + + + **Possible issues:** + - Server not fully initialized + - Tool discovery timeout + - Communication protocol mismatch + - Server reporting errors + + **Debug steps:** + 1. Wait for server initialization to complete + 2. Check server logs for tool registration messages + 3. Test direct communication with server + 4. 
Verify MCP protocol compatibility + + + + +## Next Steps + + + + Organize servers into logical groups + + + Set up AI-powered tool discovery + + + Server management API documentation + + + Detailed configuration options + + diff --git a/docs/features/smart-routing.mdx b/docs/features/smart-routing.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d7aa9358d93ac0cffab6a19bf6c8de6c27771402 --- /dev/null +++ b/docs/features/smart-routing.mdx @@ -0,0 +1,720 @@ +--- +title: 'Smart Routing' +description: 'AI-powered tool discovery using vector semantic search' +--- + +## Overview + +Smart Routing is MCPHub's intelligent tool discovery system that uses vector semantic search to automatically find the most relevant tools for any given task. Instead of manually specifying which tools to use, AI clients can describe what they want to accomplish, and Smart Routing will identify and provide access to the most appropriate tools. + +## How Smart Routing Works + +### 1. Tool Indexing + +When servers start up, Smart Routing automatically: + +- Discovers all available tools from MCP servers +- Extracts tool metadata (names, descriptions, parameters) +- Converts tool information to vector embeddings +- Stores embeddings in PostgreSQL with pgvector + +### 2. Semantic Search + +When a query is made: + +- User queries are converted to vector embeddings +- Similarity search finds matching tools using cosine similarity +- Dynamic thresholds filter out irrelevant results +- Results are ranked by relevance score + +### 3. Intelligent Filtering + +Smart Routing applies several filters: + +- **Relevance Threshold**: Only returns tools above similarity threshold +- **Context Awareness**: Considers conversation context +- **Tool Availability**: Ensures tools are currently accessible +- **Permission Filtering**: Respects user access permissions + +### 4. 
Tool Execution + +Found tools can be directly executed: + +- Parameter validation ensures correct tool usage +- Error handling provides helpful feedback +- Response formatting maintains consistency +- Logging tracks tool usage for analytics + +## Prerequisites + +Smart Routing requires additional setup compared to basic MCPHub usage: + +### Required Components + +1. **PostgreSQL with pgvector**: Vector database for embeddings storage +2. **Embedding Service**: OpenAI API or compatible service +3. **Environment Configuration**: Proper configuration variables + +### Quick Setup + + + + Use this `docker-compose.yml` for complete setup: + + ```yaml + version: '3.8' + services: + mcphub: + image: samanhappy/mcphub:latest + ports: + - "3000:3000" + environment: + - DATABASE_URL=postgresql://mcphub:password@postgres:5432/mcphub + - OPENAI_API_KEY=your_openai_api_key + - ENABLE_SMART_ROUTING=true + depends_on: + - postgres + volumes: + - ./mcp_settings.json:/app/mcp_settings.json + + postgres: + image: pgvector/pgvector:pg16 + environment: + - POSTGRES_DB=mcphub + - POSTGRES_USER=mcphub + - POSTGRES_PASSWORD=password + volumes: + - postgres_data:/var/lib/postgresql/data + ports: + - "5432:5432" + + volumes: + postgres_data: + ``` + + Start with: + ```bash + docker-compose up -d + ``` + + + + + 1. **Install PostgreSQL with pgvector**: + ```bash + # Using Docker + docker run -d \ + --name mcphub-postgres \ + -e POSTGRES_DB=mcphub \ + -e POSTGRES_USER=mcphub \ + -e POSTGRES_PASSWORD=your_password \ + -p 5432:5432 \ + pgvector/pgvector:pg16 + ``` + + 2. **Set Environment Variables**: + ```bash + export DATABASE_URL="postgresql://mcphub:your_password@localhost:5432/mcphub" + export OPENAI_API_KEY="your_openai_api_key" + export ENABLE_SMART_ROUTING="true" + ``` + + 3. 
**Start MCPHub**: + ```bash + mcphub + ``` + + + + + Deploy with these Kubernetes manifests: + + ```yaml + # postgres-deployment.yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: postgres + spec: + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: pgvector/pgvector:pg16 + env: + - name: POSTGRES_DB + value: mcphub + - name: POSTGRES_USER + value: mcphub + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: postgres-secret + key: password + ports: + - containerPort: 5432 + --- + # mcphub-deployment.yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: mcphub + spec: + selector: + matchLabels: + app: mcphub + template: + metadata: + labels: + app: mcphub + spec: + containers: + - name: mcphub + image: samanhappy/mcphub:latest + env: + - name: DATABASE_URL + value: "postgresql://mcphub:password@postgres:5432/mcphub" + - name: OPENAI_API_KEY + valueFrom: + secretKeyRef: + name: openai-secret + key: api-key + - name: ENABLE_SMART_ROUTING + value: "true" + ports: + - containerPort: 3000 + ``` + + + + +## Configuration + +### Environment Variables + +Configure Smart Routing with these environment variables: + +```bash +# Required +DATABASE_URL=postgresql://user:password@host:5432/database +OPENAI_API_KEY=your_openai_api_key + +# Optional +ENABLE_SMART_ROUTING=true +EMBEDDING_MODEL=text-embedding-3-small +SIMILARITY_THRESHOLD=0.7 +MAX_TOOLS_RETURNED=10 +EMBEDDING_BATCH_SIZE=100 +``` + +### Configuration Options + + + + ```bash + # Full PostgreSQL connection string + DATABASE_URL=postgresql://username:password@host:port/database?schema=public + + # SSL configuration for cloud databases + DATABASE_URL=postgresql://user:pass@host:5432/db?sslmode=require + + # Connection pool settings + DATABASE_POOL_SIZE=20 + DATABASE_TIMEOUT=30000 + ``` + + + + + ```bash + # OpenAI (default) + OPENAI_API_KEY=sk-your-api-key + EMBEDDING_MODEL=text-embedding-3-small 
+ + # Azure OpenAI + AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com + AZURE_OPENAI_API_KEY=your-api-key + AZURE_OPENAI_DEPLOYMENT=your-embedding-deployment + + # Custom embedding service + EMBEDDING_SERVICE_URL=https://your-embedding-service.com + EMBEDDING_SERVICE_API_KEY=your-api-key + ``` + + + + + ```bash + # Similarity threshold (0.0 to 1.0) + SIMILARITY_THRESHOLD=0.7 + + # Maximum tools to return + MAX_TOOLS_RETURNED=10 + + # Minimum query length for smart routing + MIN_QUERY_LENGTH=5 + + # Cache TTL for embeddings (seconds) + EMBEDDING_CACHE_TTL=3600 + ``` + + + + +## Using Smart Routing + +### Smart Routing Endpoint + +Access Smart Routing through the special `$smart` endpoint: + + + + ``` + http://localhost:3000/mcp/$smart + ``` + + + + ``` + http://localhost:3000/sse/$smart + ``` + + + +### Basic Usage + +Connect your AI client to the Smart Routing endpoint and make natural language requests: + +```bash +# Example: Find tools for web scraping +curl -X POST http://localhost:3000/mcp/$smart \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/search", + "params": { + "query": "scrape website content and extract text" + } + }' +``` + +Response: + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": "fetch_html", + "server": "fetch", + "description": "Fetch and parse HTML content from a URL", + "relevanceScore": 0.92, + "parameters": { ... } + }, + { + "name": "playwright_navigate", + "server": "playwright", + "description": "Navigate to a web page and extract content", + "relevanceScore": 0.87, + "parameters": { ... 
} + } + ] + } +} +``` + +### Advanced Queries + +Smart Routing supports various query types: + + + + ```bash + # What you want to accomplish + curl -X POST http://localhost:3000/mcp/$smart \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/search", + "params": { + "query": "send a message to a slack channel" + } + }' + ``` + + + + ```bash + # Specific domain or technology + curl -X POST http://localhost:3000/mcp/$smart \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/search", + "params": { + "query": "database operations SQL queries" + } + }' + ``` + + + + ```bash + # Specific actions + curl -X POST http://localhost:3000/mcp/$smart \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/search", + "params": { + "query": "create file upload to github repository" + } + }' + ``` + + + + ```bash + # Include context for better results + curl -X POST http://localhost:3000/mcp/$smart \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/search", + "params": { + "query": "automated testing web application", + "context": { + "project": "e-commerce website", + "technologies": ["React", "Node.js"], + "environment": "staging" + } + } + }' + ``` + + + +### Tool Execution + +Once Smart Routing finds relevant tools, you can execute them directly: + +```bash +# Execute a found tool +curl -X POST http://localhost:3000/mcp/$smart \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "id": 2, + "method": "tools/call", + "params": { + "name": "fetch_html", + "arguments": { + "url": "https://example.com" + } + } + }' +``` + +## Performance Optimization + +### Embedding Cache + +Smart Routing caches embeddings to improve performance: + +```bash +# Configure cache settings +EMBEDDING_CACHE_TTL=3600 # Cache for 1 hour +EMBEDDING_CACHE_SIZE=10000 # Cache up to 10k embeddings 
+EMBEDDING_CACHE_CLEANUP=300 # Cleanup every 5 minutes +``` + +### Batch Processing + +Tools are indexed in batches for efficiency: + +```bash +# Batch size for embedding generation +EMBEDDING_BATCH_SIZE=100 + +# Concurrent embedding requests +EMBEDDING_CONCURRENCY=5 + +# Index update frequency +INDEX_UPDATE_INTERVAL=3600 # Re-index every hour +``` + +### Database Optimization + +Optimize PostgreSQL for vector operations: + +```sql +-- Create indexes for better performance +CREATE INDEX ON tool_embeddings USING hnsw (embedding vector_cosine_ops); + +-- Adjust PostgreSQL settings +ALTER SYSTEM SET shared_preload_libraries = 'vector'; +ALTER SYSTEM SET max_connections = 200; +ALTER SYSTEM SET shared_buffers = '256MB'; +ALTER SYSTEM SET effective_cache_size = '1GB'; +``` + +## Monitoring and Analytics + +### Smart Routing Metrics + +Monitor Smart Routing performance: + +```bash +# Get Smart Routing statistics +curl http://localhost:3000/api/smart-routing/stats \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" +``` + +Response includes: + +- Query count and frequency +- Average response time +- Embedding cache hit rate +- Most popular tools +- Query patterns + +### Tool Usage Analytics + +Track which tools are found and used: + +```bash +# Get tool usage analytics +curl http://localhost:3000/api/smart-routing/analytics \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" +``` + +Metrics include: + +- Tool discovery rates +- Execution success rates +- User satisfaction scores +- Query-to-execution conversion + +### Performance Monitoring + +Monitor system performance: + +```bash +# Database performance +curl http://localhost:3000/api/smart-routing/db-stats \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" + +# Embedding service status +curl http://localhost:3000/api/smart-routing/embedding-stats \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" +``` + +## Advanced Features + +### Custom Embeddings + +Use custom embedding models: + +```bash +# Hugging Face models 
+EMBEDDING_SERVICE=huggingface +HUGGINGFACE_MODEL=sentence-transformers/all-MiniLM-L6-v2 +HUGGINGFACE_API_KEY=your_api_key + +# Local embedding service +EMBEDDING_SERVICE=local +EMBEDDING_SERVICE_URL=http://localhost:8080/embeddings +``` + +### Query Enhancement + +Enhance queries for better results: + +```json +{ + "queryEnhancement": { + "enabled": true, + "expandAcronyms": true, + "addSynonyms": true, + "contextualExpansion": true + } +} +``` + +### Result Filtering + +Filter results based on criteria: + +```json +{ + "resultFiltering": { + "minRelevanceScore": 0.7, + "maxResults": 10, + "preferredServers": ["fetch", "playwright"], + "excludeServers": ["deprecated-server"] + } +} +``` + +### Feedback Learning + +Improve results based on user feedback: + +```bash +# Provide feedback on search results +curl -X POST http://localhost:3000/api/smart-routing/feedback \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -d '{ + "queryId": "search-123", + "toolName": "fetch_html", + "rating": 5, + "successful": true, + "comments": "Perfect tool for the task" + }' +``` + +## Troubleshooting + + + + **Symptoms:** + - Smart Routing not available + - Database connection errors + - Embedding storage failures + + **Solutions:** + 1. Verify PostgreSQL is running + 2. Check DATABASE_URL format + 3. Ensure pgvector extension is installed + 4. Test connection manually: + ```bash + psql $DATABASE_URL -c "SELECT 1;" + ``` + + + + + **Symptoms:** + - Tool indexing failures + - Query processing errors + - API rate limit errors + + **Solutions:** + 1. Verify API key validity + 2. Check network connectivity + 3. Monitor rate limits + 4. 
Test embedding service: + ```bash + curl -X POST https://api.openai.com/v1/embeddings \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{"input": "test", "model": "text-embedding-3-small"}' + ``` + + + + + **Symptoms:** + - Irrelevant tools returned + - Low relevance scores + - Missing expected tools + + **Solutions:** + 1. Adjust similarity threshold + 2. Re-index tools with better descriptions + 3. Use more specific queries + 4. Check tool metadata quality + ```bash + # Re-index all tools + curl -X POST http://localhost:3000/api/smart-routing/reindex \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" + ``` + + + + + **Symptoms:** + - Slow query responses + - High database load + - Memory usage spikes + + **Solutions:** + 1. Optimize database configuration + 2. Increase cache sizes + 3. Reduce batch sizes + 4. Monitor system resources + ```bash + # Check system performance + curl http://localhost:3000/api/smart-routing/performance \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" + ``` + + + + +## Best Practices + +### Query Writing + + + **Be Descriptive**: Use specific, descriptive language in queries for better tool matching. + + + + **Include Context**: Provide relevant context about your task or domain for more accurate results. + + +**Use Natural Language**: Write queries as you would describe the task to a human. + +### Tool Descriptions + + + **Quality Metadata**: Ensure MCP servers provide high-quality tool descriptions and metadata. + + +**Regular Updates**: Keep tool descriptions current as functionality evolves. + + + **Consistent Naming**: Use consistent naming conventions across tools and servers. + + +### System Maintenance + +**Regular Re-indexing**: Periodically re-index tools to ensure embedding quality. + +**Monitor Performance**: Track query patterns and optimize based on usage. + + + **Update Models**: Consider updating to newer embedding models as they become available. 
+ + +## Next Steps + + + + User management and access control + + + System monitoring and analytics + + + Complete Smart Routing API documentation + + + Advanced configuration options + + diff --git a/docs/images/checks-passed.png b/docs/images/checks-passed.png new file mode 100644 index 0000000000000000000000000000000000000000..97603943defbd694eacebf9559b61ddc13020f60 --- /dev/null +++ b/docs/images/checks-passed.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93add382731d1e6d443b128bbe1ac747b62d0efa1b8372ee3fcd37a59d86da30 +size 160724 diff --git a/docs/images/hero-dark.png b/docs/images/hero-dark.png new file mode 100644 index 0000000000000000000000000000000000000000..bb88cc1e9ac845ba84d05b743b0d4d5821b9f083 --- /dev/null +++ b/docs/images/hero-dark.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b01bca41fd68bb5a9279596e131578a734d719f1ff6840f230014a421fc919a8 +size 110614 diff --git a/docs/images/hero-light.png b/docs/images/hero-light.png new file mode 100644 index 0000000000000000000000000000000000000000..b72ce28ca0aaeca4f173474c603272bd75eb272e --- /dev/null +++ b/docs/images/hero-light.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41a7a3663ae3226dc0838af887963f143c3554d891781ec5c7c0f6e7c5fb0b32 +size 104264 diff --git a/docs/index.mdx b/docs/index.mdx new file mode 100644 index 0000000000000000000000000000000000000000..141acc652b72da13847f5a78fbc7644e6d8e76cf --- /dev/null +++ b/docs/index.mdx @@ -0,0 +1,95 @@ +--- +title: MCPHub Documentation +description: 'The Unified Hub for Model Context Protocol (MCP) Servers' +--- + +Hero Light +Hero Dark + +# Welcome to MCPHub + +MCPHub makes it easy to manage and scale multiple MCP (Model Context Protocol) servers by organizing them into flexible Streamable HTTP (SSE) endpoints—supporting access to all servers, individual servers, or logical server groups. 
+ +## Key Features + + + + Centrally manage multiple MCP servers with hot-swappable configuration + + + AI-powered tool discovery using vector semantic search + + + Organize servers into logical groups for streamlined access control + + + Monitor server status and performance from a unified dashboard + + + +## Quick Start + +Get MCPHub running in minutes with Docker: + +```bash +docker run -p 3000:3000 samanhappy/mcphub +``` + +Or with custom configuration: + +```bash +docker run -p 3000:3000 -v $(pwd)/mcp_settings.json:/app/mcp_settings.json samanhappy/mcphub +``` + +Access the dashboard at `http://localhost:3000` with default credentials: + +- Username: `admin` +- Password: `admin123` + +## Core Concepts + +### MCP Endpoints + +MCPHub provides multiple ways to access your MCP servers: + +- **Unified Access**: `http://localhost:3000/mcp` - Access all servers +- **Group Access**: `http://localhost:3000/mcp/{group}` - Access specific groups +- **Server Access**: `http://localhost:3000/mcp/{server}` - Access individual servers +- **Smart Routing**: `http://localhost:3000/mcp/$smart` - AI-powered tool discovery + +### Protocol Support + +- **HTTP MCP**: Modern streamable HTTP interface (recommended) +- **SSE**: Server-Sent Events for legacy compatibility +- **stdio**: Native MCP protocol for server communication + +## Getting Started + + + + Get MCPHub running in 5 minutes + + + Detailed installation instructions for all platforms + + + Learn how to configure your MCP servers + + + Complete API documentation + + + +## Community & Support + + + + Source code and issue tracking + + + Join our community discussions + + + Support the project development + + diff --git a/docs/installation.mdx b/docs/installation.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cae64f49574e1b875390df6c9e8f7b1d2b9a7df3 --- /dev/null +++ b/docs/installation.mdx @@ -0,0 +1,575 @@ +--- +title: 'Installation Guide' +description: 'Detailed installation instructions for all 
platforms' +--- + +## Prerequisites + +Before installing MCPHub, ensure you have the following prerequisites: + +- **Node.js** 18+ (for local development) +- **Docker** (recommended for production) +- **pnpm** (for local development) + +Optional for Smart Routing: + +- **PostgreSQL** with pgvector extension +- **OpenAI API Key** or compatible embedding service + +## Installation Methods + + + + ### Docker Installation + + Docker is the recommended way to deploy MCPHub in production. + + #### 1. Basic Installation + + ```bash + # Pull the latest image + docker pull samanhappy/mcphub:latest + + # Run with default settings + docker run -d \ + --name mcphub \ + -p 3000:3000 \ + samanhappy/mcphub:latest + ``` + + #### 2. With Custom Configuration + + ```bash + # Create your configuration file + cat > mcp_settings.json << 'EOF' + { + "mcpServers": { + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"] + }, + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"] + } + } + } + EOF + + # Run with mounted config + docker run -d \ + --name mcphub \ + -p 3000:3000 \ + -v $(pwd)/mcp_settings.json:/app/mcp_settings.json \ + samanhappy/mcphub:latest + ``` + + #### 3. With Environment Variables + + ```bash + docker run -d \ + --name mcphub \ + -p 3000:3000 \ + -e PORT=3000 \ + -e BASE_PATH="" \ + -e REQUEST_TIMEOUT=60000 \ + samanhappy/mcphub:latest + ``` + + #### 4. 
Docker Compose + + Create a `docker-compose.yml` file: + + ```yaml + version: '3.8' + services: + mcphub: + image: samanhappy/mcphub:latest + ports: + - "3000:3000" + volumes: + - ./mcp_settings.json:/app/mcp_settings.json + environment: + - PORT=3000 + - BASE_PATH="" + - REQUEST_TIMEOUT=60000 + restart: unless-stopped + + # Optional: PostgreSQL for Smart Routing + postgres: + image: pgvector/pgvector:pg16 + environment: + POSTGRES_DB: mcphub + POSTGRES_USER: mcphub + POSTGRES_PASSWORD: mcphub_password + volumes: + - postgres_data:/var/lib/postgresql/data + ports: + - "5432:5432" + + volumes: + postgres_data: + ``` + + Run with: + ```bash + docker-compose up -d + ``` + + + + + ### npm Package Installation + + Install MCPHub as a global npm package: + + #### 1. Global Installation + + ```bash + # Install globally + npm install -g @samanhappy/mcphub + + # Or with yarn + yarn global add @samanhappy/mcphub + + # Or with pnpm + pnpm add -g @samanhappy/mcphub + ``` + + #### 2. Running MCPHub + + ```bash + # Run with default settings + mcphub + + # Run with custom port + PORT=8080 mcphub + + # Run with custom config path + MCP_SETTINGS_PATH=/path/to/mcp_settings.json mcphub + ``` + + #### 3. Local Installation + + You can also install MCPHub locally in a project: + + ```bash + # Create a new directory + mkdir my-mcphub + cd my-mcphub + + # Initialize package.json + npm init -y + + # Install MCPHub locally + npm install @samanhappy/mcphub + + # Create a start script + printf '#!/bin/bash\nnpx mcphub\n' > start.sh + chmod +x start.sh + + # Run MCPHub + ./start.sh + ``` + + + + + ### Local Development Setup + + For development, customization, or contribution: + + #### 1. Clone Repository + + ```bash + # Clone the repository + git clone https://github.com/samanhappy/mcphub.git + cd mcphub + ``` + + #### 2. 
Install Dependencies + + ```bash + # Install dependencies with pnpm (recommended) + pnpm install + + # Or with npm + npm install + + # Or with yarn + yarn install + ``` + + #### 3. Development Mode + + ```bash + # Start both backend and frontend in development mode + pnpm dev + + # This will start: + # - Backend on http://localhost:3001 + # - Frontend on http://localhost:5173 + # - Frontend proxies API calls to backend + ``` + + #### 4. Build for Production + + ```bash + # Build both backend and frontend + pnpm build + + # Start production server + pnpm start + ``` + + #### 5. Development Scripts + + ```bash + # Backend only (for API development) + pnpm backend:dev + + # Frontend only (when backend is running separately) + pnpm frontend:dev + + # Run tests + pnpm test + + # Lint code + pnpm lint + + # Format code + pnpm format + ``` + + + On Windows, you may need to run backend and frontend separately: + ```bash + # Terminal 1: Backend + pnpm backend:dev + + # Terminal 2: Frontend + pnpm frontend:dev + ``` + + + + + + ### Kubernetes Deployment + + Deploy MCPHub on Kubernetes with these manifests: + + #### 1. ConfigMap for Settings + + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + name: mcphub-config + data: + mcp_settings.json: | + { + "mcpServers": { + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"] + }, + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"] + } + } + } + ``` + + #### 2. Deployment + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: mcphub + spec: + replicas: 1 + selector: + matchLabels: + app: mcphub + template: + metadata: + labels: + app: mcphub + spec: + containers: + - name: mcphub + image: samanhappy/mcphub:latest + ports: + - containerPort: 3000 + env: + - name: PORT + value: "3000" + volumeMounts: + - name: config + mountPath: /app/mcp_settings.json + subPath: mcp_settings.json + volumes: + - name: config + configMap: + name: mcphub-config + ``` + + #### 3. 
Service + + ```yaml + apiVersion: v1 + kind: Service + metadata: + name: mcphub-service + spec: + selector: + app: mcphub + ports: + - port: 80 + targetPort: 3000 + type: ClusterIP + ``` + + #### 4. Ingress (Optional) + + ```yaml + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + name: mcphub-ingress + annotations: + nginx.ingress.kubernetes.io/proxy-buffering: "off" + spec: + rules: + - host: mcphub.yourdomain.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: mcphub-service + port: + number: 80 + ``` + + Deploy with: + ```bash + kubectl apply -f mcphub-configmap.yaml + kubectl apply -f mcphub-deployment.yaml + kubectl apply -f mcphub-service.yaml + kubectl apply -f mcphub-ingress.yaml + ``` + + + + +## Smart Routing Setup (Optional) + +Smart Routing provides AI-powered tool discovery using vector semantic search. + +### Prerequisites + +1. **PostgreSQL with pgvector** +2. **OpenAI API Key** (or compatible embedding service) + +### Database Setup + + + + ```bash + # Run PostgreSQL with pgvector + docker run -d \ + --name mcphub-postgres \ + -e POSTGRES_DB=mcphub \ + -e POSTGRES_USER=mcphub \ + -e POSTGRES_PASSWORD=your_password \ + -p 5432:5432 \ + pgvector/pgvector:pg16 + ``` + + + + If you have an existing PostgreSQL instance: + + ```sql + -- Connect to your PostgreSQL instance + -- Create database + CREATE DATABASE mcphub; + + -- Connect to the mcphub database + \c mcphub; + + -- Enable pgvector extension + CREATE EXTENSION IF NOT EXISTS vector; + ``` + + + + + For cloud providers (AWS RDS, Google Cloud SQL, etc.): + + 1. Enable the pgvector extension in your cloud provider's console + 2. Create a database named `mcphub` + 3. 
Note down the connection details + + + + +### Environment Configuration + +Set the following environment variables: + +```bash +# Database connection +DATABASE_URL=postgresql://mcphub:your_password@localhost:5432/mcphub + +# OpenAI API for embeddings +OPENAI_API_KEY=your_openai_api_key + +# Optional: Custom embedding model +EMBEDDING_MODEL=text-embedding-3-small + +# Optional: Enable smart routing +ENABLE_SMART_ROUTING=true +``` + +## Verification + +After installation, verify MCPHub is working: + +### 1. Health Check + +```bash +curl http://localhost:3000/api/health +``` + +Expected response: + +```json +{ + "status": "ok", + "version": "x.x.x", + "uptime": 123 +} +``` + +### 2. Dashboard Access + +Open your browser and navigate to: + +``` +http://localhost:3000 +``` + +### 3. API Test + +```bash +curl -X POST http://localhost:3000/mcp \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/list", + "params": {} + }' +``` + +## Troubleshooting + + + + **Port already in use:** + ```bash + # Check what's using port 3000 + lsof -i :3000 + + # Use a different port + docker run -p 8080:3000 samanhappy/mcphub + ``` + + **Container won't start:** + ```bash + # Check container logs + docker logs mcphub + + # Run interactively for debugging + docker run -it --rm samanhappy/mcphub /bin/bash + ``` + + + + + **Permission errors:** + ```bash + # Use npx instead of global install + npx @samanhappy/mcphub + + # Or fix npm permissions + npm config set prefix ~/.npm-global + export PATH=~/.npm-global/bin:$PATH + ``` + + **Node version issues:** + ```bash + # Check Node version + node --version + + # Install Node 18+ using nvm + nvm install 18 + nvm use 18 + ``` + + + + + **Can't access dashboard:** + - Check if MCPHub is running: `ps aux | grep mcphub` + - Verify port binding: `netstat -tlnp | grep 3000` + - Check firewall settings + - Try accessing via `127.0.0.1:3000` instead of `localhost:3000` + + **AI clients can't connect:** + - 
Ensure the endpoint URL is correct + - Check if MCPHub is behind a proxy + - Verify network policies in Kubernetes/Docker environments + + + + + **Database connection failed:** + ```bash + # Test database connection + psql $DATABASE_URL -c "SELECT 1;" + + # Check if pgvector is installed + psql $DATABASE_URL -c "CREATE EXTENSION IF NOT EXISTS vector;" + ``` + + **Embedding service errors:** + - Verify OpenAI API key is valid + - Check internet connectivity + - Monitor rate limits + + + + +## Next Steps + + + + Configure your MCP servers and settings + + + Get up and running in 5 minutes + + + Learn how to manage your MCP servers + + + Explore the complete API documentation + + diff --git a/docs/logo/dark.svg b/docs/logo/dark.svg new file mode 100644 index 0000000000000000000000000000000000000000..8b343cd6fc9095a51d8d5287ff1ad6297ef9937f --- /dev/null +++ b/docs/logo/dark.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/logo/light.svg b/docs/logo/light.svg new file mode 100644 index 0000000000000000000000000000000000000000..03e62bf1d9fcb79434827929c323ea2e2d5676cf --- /dev/null +++ b/docs/logo/light.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/quickstart.mdx b/docs/quickstart.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a7635076170afa67f7d4821e818a62bc6ce49a84 --- /dev/null +++ b/docs/quickstart.mdx @@ -0,0 +1,228 @@ +--- +title: 'Quick Start Guide' +description: 'Get MCPHub running in 5 minutes' +--- + +## Installation + + + + The fastest way to get started with MCPHub is using Docker: + + ```bash + # Run with default configuration + docker run -p 3000:3000 samanhappy/mcphub + ``` + + Or mount your custom configuration: + + ```bash + # Run with custom MCP settings + docker run -p 3000:3000 \ + -v $(pwd)/mcp_settings.json:/app/mcp_settings.json \ + samanhappy/mcphub + ``` + + + + For development or customization: + + ```bash + # Clone the repository + git clone 
https://github.com/samanhappy/mcphub.git + cd mcphub + + # Install dependencies + pnpm install + + # Start development servers + pnpm dev + ``` + + This starts both backend (port 3001) and frontend (port 5173) in development mode. + + + + Install MCPHub as a global package: + + ```bash + # Install globally + npm install -g @samanhappy/mcphub + + # Run MCPHub + mcphub + ``` + + + + +## Initial Setup + +### 1. Access the Dashboard + +Open your browser and navigate to: + +``` +http://localhost:3000 +``` + +### 2. Login + +Use the default credentials: + +- **Username**: `admin` +- **Password**: `admin123` + +Change these default credentials immediately after first login for security. + +### 3. Configure Your First MCP Server + +1. Click **"Add Server"** in the dashboard +2. Enter server details: + - **Name**: A unique identifier (e.g., `fetch`) + - **Command**: The executable command (`uvx`) + - **Args**: Command arguments (`["mcp-server-fetch"]`) + - **Environment**: Any required environment variables + +Example configuration for a fetch server: + +```json +{ + "name": "fetch", + "command": "uvx", + "args": ["mcp-server-fetch"], + "env": {} +} +``` + +## Basic Usage + +### Connecting AI Clients + +Once your servers are configured, connect your AI clients using MCPHub endpoints: + + + + Access all configured MCP servers: ``` http://localhost:3000/mcp ``` + + + Access servers in a specific group: ``` http://localhost:3000/mcp/{group-name} + ``` + + + Access a single server: ``` http://localhost:3000/mcp/{server-name} + ``` + + + Use AI-powered tool discovery: ``` http://localhost:3000/mcp/$smart ``` + Smart routing requires PostgreSQL with pgvector and an OpenAI API key. 
+ + + +### Example: Adding Popular MCP Servers + +Here are some popular MCP servers you can add: + + + + ```json + { + "name": "fetch", + "command": "uvx", + "args": ["mcp-server-fetch"] + } + ``` + + + + ```json + { + "name": "playwright", + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"] + } + ``` + + + + ```json + { + "name": "amap", + "command": "npx", + "args": ["-y", "@amap/amap-maps-mcp-server"], + "env": { + "AMAP_MAPS_API_KEY": "your-api-key-here" + } + } + ``` + + + + ```json + { + "name": "slack", + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-slack"], + "env": { + "SLACK_BOT_TOKEN": "your-bot-token", + "SLACK_TEAM_ID": "your-team-id" + } + } + ``` + + + +## Verification + +Test your setup by making a simple request: + +```bash +curl -X POST http://localhost:3000/mcp \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/list", + "params": {} + }' +``` + +You should receive a list of available tools from your configured MCP servers. + +## Next Steps + + + + Learn advanced server configuration and management + + + Organize servers into logical groups + + + Set up AI-powered tool discovery + + + Explore the complete API documentation + + + +## Troubleshooting + + + + - Check if the MCP server command is accessible in your PATH - Verify environment variables are + correctly set - Check MCPHub logs for detailed error messages + + + + - Ensure MCPHub is running on the correct port - Check firewall settings - Verify the endpoint + URL format + + + + - Verify credentials are correct - Check if JWT token is valid - Try clearing browser cache and + cookies + + + +Need more help? Join our [Discord community](https://discord.gg/qMKNsn5Q) for support! 
diff --git a/docs/snippets/snippet-intro.mdx b/docs/snippets/snippet-intro.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c57e7c756477da11be3899f7eabc2ba4c85cf888 --- /dev/null +++ b/docs/snippets/snippet-intro.mdx @@ -0,0 +1,4 @@ +One of the core principles of software development is DRY (Don't Repeat +Yourself). This is a principle that applies to documentation as +well. If you find yourself repeating the same content in multiple places, you +should consider creating a custom snippet to keep your content in sync. diff --git a/docs/zh/api-reference/endpoint/create.mdx b/docs/zh/api-reference/endpoint/create.mdx new file mode 100644 index 0000000000000000000000000000000000000000..19987104eb870ecef107e560ed872c2f3fb51894 --- /dev/null +++ b/docs/zh/api-reference/endpoint/create.mdx @@ -0,0 +1,572 @@ +--- +title: '创建资源' +description: '创建新的 MCP 服务器、用户和组' +--- + +## 创建服务器 + +### 端点 + +```http +POST /api/servers +``` + +### 请求 + +#### 请求头 + +```http +Content-Type: application/json +Authorization: Bearer YOUR_JWT_TOKEN +``` + +#### 请求体 + +```json +{ + "name": "文件系统服务器", + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/allowed/path"], + "env": { + "NODE_ENV": "production", + "DEBUG": "mcp:*", + "MAX_FILES": "1000" + }, + "cwd": "/app/workspace", + "timeout": 30000, + "retries": 3, + "enabled": true, + "description": "提供文件系统访问的 MCP 服务器", + "tags": ["filesystem", "production"], + "healthCheck": { + "enabled": true, + "interval": 30000, + "timeout": 5000, + "retries": 3, + "endpoint": "/health" + }, + "resources": { + "memory": { + "limit": "512MB", + "warning": "400MB" + }, + "cpu": { + "limit": "50%" + } + }, + "logging": { + "level": "info", + "file": "/var/log/mcphub/server.log", + "maxSize": "100MB", + "maxFiles": 5 + } +} +``` + +#### 必填字段 + +- `name` (string): 服务器唯一名称 +- `command` (string): 执行命令 +- `args` (array): 命令参数数组 + +#### 可选字段 + +- `env` (object): 环境变量键值对 +- `cwd` (string): 工作目录 +- `timeout` (number): 
超时时间(毫秒) +- `retries` (number): 重试次数 +- `enabled` (boolean): 是否启用(默认 true) +- `description` (string): 服务器描述 +- `tags` (array): 标签数组 +- `healthCheck` (object): 健康检查配置 +- `resources` (object): 资源限制配置 +- `logging` (object): 日志配置 + +### 响应 + +#### 成功响应 (201 Created) + +```json +{ + "success": true, + "data": { + "id": "server-abc123", + "name": "文件系统服务器", + "status": "stopped", + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/allowed/path"], + "env": { + "NODE_ENV": "production", + "DEBUG": "mcp:*", + "MAX_FILES": "1000" + }, + "cwd": "/app/workspace", + "timeout": 30000, + "retries": 3, + "enabled": true, + "description": "提供文件系统访问的 MCP 服务器", + "tags": ["filesystem", "production"], + "healthCheck": { + "enabled": true, + "interval": 30000, + "timeout": 5000, + "retries": 3, + "endpoint": "/health", + "status": "unknown" + }, + "resources": { + "memory": { + "limit": "512MB", + "warning": "400MB", + "current": "0MB" + }, + "cpu": { + "limit": "50%", + "current": "0%" + } + }, + "logging": { + "level": "info", + "file": "/var/log/mcphub/server.log", + "maxSize": "100MB", + "maxFiles": 5, + "currentSize": "0MB" + }, + "createdAt": "2024-01-01T12:00:00Z", + "updatedAt": "2024-01-01T12:00:00Z", + "createdBy": "user123" + }, + "message": "服务器创建成功" +} +``` + +#### 错误响应 + +**400 Bad Request - 参数错误** + +```json +{ + "success": false, + "error": { + "code": "VALIDATION_ERROR", + "message": "请求数据验证失败", + "details": [ + { + "field": "name", + "message": "服务器名称不能为空" + }, + { + "field": "command", + "message": "执行命令不能为空" + } + ] + } +} +``` + +**409 Conflict - 名称冲突** + +```json +{ + "success": false, + "error": { + "code": "RESOURCE_CONFLICT", + "message": "服务器名称已存在", + "details": { + "field": "name", + "value": "文件系统服务器", + "conflictingResourceId": "server-xyz789" + } + } +} +``` + +### 示例 + +#### cURL + +```bash +curl -X POST http://localhost:3000/api/servers \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + 
-d '{ + "name": "文件系统服务器", + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/data"], + "env": { + "NODE_ENV": "production" + }, + "description": "生产环境文件系统服务器" + }' +``` + +#### JavaScript + +```javascript +const response = await fetch('/api/servers', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${token}`, + }, + body: JSON.stringify({ + name: '文件系统服务器', + command: 'npx', + args: ['-y', '@modelcontextprotocol/server-filesystem', '/data'], + env: { + NODE_ENV: 'production', + }, + description: '生产环境文件系统服务器', + }), +}); + +const result = await response.json(); +if (result.success) { + console.log('服务器创建成功:', result.data); +} else { + console.error('创建失败:', result.error); +} +``` + +#### Python + +```python +import requests + +response = requests.post( + 'http://localhost:3000/api/servers', + headers={ + 'Content-Type': 'application/json', + 'Authorization': f'Bearer {token}' + }, + json={ + 'name': '文件系统服务器', + 'command': 'npx', + 'args': ['-y', '@modelcontextprotocol/server-filesystem', '/data'], + 'env': { + 'NODE_ENV': 'production' + }, + 'description': '生产环境文件系统服务器' + } +) + +if response.status_code == 201: + result = response.json() + print('服务器创建成功:', result['data']) +else: + error = response.json() + print('创建失败:', error['error']) +``` + +## 创建用户 + +### 端点 + +```http +POST /api/users +``` + +### 请求体 + +```json +{ + "username": "newuser", + "email": "user@example.com", + "password": "SecurePassword123!", + "role": "user", + "groups": ["dev-team", "qa-team"], + "profile": { + "firstName": "张", + "lastName": "三", + "department": "开发部", + "title": "软件工程师", + "phone": "+86-138-0013-8000", + "location": "北京" + }, + "preferences": { + "language": "zh-CN", + "timezone": "Asia/Shanghai", + "notifications": { + "email": true, + "slack": false, + "browser": true + } + }, + "enabled": true +} +``` + +### 响应 (201 Created) + +```json +{ + "success": true, + "data": { + "id": "user-abc123", + 
"username": "newuser", + "email": "user@example.com", + "role": "user", + "groups": [ + { + "id": "dev-team", + "name": "开发团队", + "role": "member" + } + ], + "profile": { + "firstName": "张", + "lastName": "三", + "fullName": "张三", + "department": "开发部", + "title": "软件工程师", + "phone": "+86-138-0013-8000", + "location": "北京", + "avatar": null + }, + "preferences": { + "language": "zh-CN", + "timezone": "Asia/Shanghai", + "notifications": { + "email": true, + "slack": false, + "browser": true + } + }, + "enabled": true, + "lastLoginAt": null, + "createdAt": "2024-01-01T12:00:00Z", + "updatedAt": "2024-01-01T12:00:00Z" + }, + "message": "用户创建成功" +} +``` + +## 创建组 + +### 端点 + +```http +POST /api/groups +``` + +### 请求体 + +```json +{ + "name": "dev-team", + "displayName": "开发团队", + "description": "前端和后端开发人员", + "parentGroup": null, + "permissions": { + "servers": { + "create": false, + "read": true, + "update": true, + "delete": false, + "execute": true + }, + "tools": { + "filesystem": { + "read": true, + "write": true, + "paths": ["/app/data", "/tmp"] + }, + "web-search": { + "enabled": true, + "maxQueries": 100 + } + }, + "monitoring": { + "viewLogs": true, + "viewMetrics": true, + "exportData": false + } + }, + "settings": { + "autoAssign": false, + "maxMembers": 50, + "requireApproval": true, + "sessionTimeout": "8h" + }, + "quotas": { + "requests": { + "daily": 1000, + "monthly": 30000 + }, + "storage": { + "maxSize": "10GB" + } + } +} +``` + +### 响应 (201 Created) + +```json +{ + "success": true, + "data": { + "id": "group-abc123", + "name": "dev-team", + "displayName": "开发团队", + "description": "前端和后端开发人员", + "parentGroup": null, + "permissions": { + "servers": { + "create": false, + "read": true, + "update": true, + "delete": false, + "execute": true + }, + "tools": { + "filesystem": { + "read": true, + "write": true, + "paths": ["/app/data", "/tmp"] + }, + "web-search": { + "enabled": true, + "maxQueries": 100 + } + }, + "monitoring": { + "viewLogs": true, + 
"viewMetrics": true, + "exportData": false + } + }, + "settings": { + "autoAssign": false, + "maxMembers": 50, + "requireApproval": true, + "sessionTimeout": "8h" + }, + "quotas": { + "requests": { + "daily": 1000, + "monthly": 30000 + }, + "storage": { + "maxSize": "10GB" + } + }, + "memberCount": 0, + "serverCount": 0, + "createdAt": "2024-01-01T12:00:00Z", + "updatedAt": "2024-01-01T12:00:00Z", + "createdBy": "admin" + }, + "message": "组创建成功" +} +``` + +## 批量创建 + +### 批量创建服务器 + +```http +POST /api/servers/bulk +``` + +#### 请求体 + +```json +{ + "servers": [ + { + "name": "dev-server-1", + "command": "python", + "args": ["-m", "mcp_server"], + "env": { "ENV": "development" } + }, + { + "name": "dev-server-2", + "command": "node", + "args": ["server.js"], + "env": { "ENV": "development" } + } + ], + "options": { + "skipExisting": true, + "validateAll": true, + "startAfterCreate": false + } +} +``` + +#### 响应 (201 Created) + +```json +{ + "success": true, + "data": { + "created": [ + { + "id": "server-1", + "name": "dev-server-1", + "status": "created" + }, + { + "id": "server-2", + "name": "dev-server-2", + "status": "created" + } + ], + "skipped": [], + "failed": [], + "summary": { + "total": 2, + "created": 2, + "skipped": 0, + "failed": 0 + } + }, + "message": "批量创建完成,成功创建 2 个服务器" +} +``` + +## 验证 + +### 预验证创建请求 + +在实际创建资源之前验证请求: + +```http +POST /api/servers/validate +``` + +#### 请求体 + +```json +{ + "name": "test-server", + "command": "invalid-command", + "args": [] +} +``` + +#### 响应 + +```json +{ + "success": false, + "data": { + "valid": false, + "errors": [ + { + "field": "command", + "message": "命令 'invalid-command' 不存在或无法执行" + } + ], + "warnings": [ + { + "field": "args", + "message": "参数数组为空,服务器可能无法正常启动" + } + ] + } +} +``` + +有关更多 API 端点信息,请参阅 [获取资源](/zh/api-reference/endpoint/get)、[删除资源](/zh/api-reference/endpoint/delete) 和 [WebHooks](/zh/api-reference/endpoint/webhook) 文档。 diff --git a/docs/zh/api-reference/endpoint/delete.mdx 
b/docs/zh/api-reference/endpoint/delete.mdx new file mode 100644 index 0000000000000000000000000000000000000000..51aa07a466fae9a2985ff0d3308fb9fd782d1b87 --- /dev/null +++ b/docs/zh/api-reference/endpoint/delete.mdx @@ -0,0 +1,303 @@ +--- +title: 删除资源 API +description: 删除各种资源的 API 端点,包括服务器、组和配置等 +--- + +# 删除资源 API + +本文档描述了用于删除各种资源的 API 端点。 + +## 删除 MCP 服务器 + +删除指定的 MCP 服务器配置。 + +### 端点 + +```http +DELETE /api/servers/{id} +``` + +### 参数 + +| 参数名 | 类型 | 位置 | 必需 | 描述 | +| ------ | ------ | ---- | ---- | ------------------ | +| id | string | path | 是 | 服务器的唯一标识符 | + +### 请求示例 + +```bash +curl -X DELETE \ + 'https://api.mcphub.io/api/servers/mcp-server-123' \ + -H 'Authorization: Bearer YOUR_API_TOKEN' \ + -H 'Content-Type: application/json' +``` + +### 响应 + +#### 成功响应 (200 OK) + +```json +{ + "success": true, + "message": "服务器已成功删除", + "data": { + "id": "mcp-server-123", + "deletedAt": "2024-01-15T10:30:00Z" + } +} +``` + +#### 错误响应 + +**404 Not Found** + +```json +{ + "error": { + "code": "SERVER_NOT_FOUND", + "message": "指定的服务器不存在", + "details": { + "serverId": "mcp-server-123" + } + } +} +``` + +**409 Conflict** + +```json +{ + "error": { + "code": "SERVER_IN_USE", + "message": "服务器正在使用中,无法删除", + "details": { + "activeConnections": 5, + "associatedGroups": ["group-1", "group-2"] + } + } +} +``` + +## 删除服务器组 + +删除指定的服务器组。 + +### 端点 + +```http +DELETE /api/groups/{id} +``` + +### 参数 + +| 参数名 | 类型 | 位置 | 必需 | 描述 | +| ------ | ------- | ----- | ---- | ------------------------------ | +| id | string | path | 是 | 组的唯一标识符 | +| force | boolean | query | 否 | 是否强制删除(包含服务器的组) | + +### 请求示例 + +```bash +curl -X DELETE \ + 'https://api.mcphub.io/api/groups/production-group?force=true' \ + -H 'Authorization: Bearer YOUR_API_TOKEN' \ + -H 'Content-Type: application/json' +``` + +### 响应 + +#### 成功响应 (200 OK) + +```json +{ + "success": true, + "message": "服务器组已成功删除", + "data": { + "id": "production-group", + "deletedServers": ["server-1", "server-2"], + "deletedAt": 
"2024-01-15T10:30:00Z" + } +} +``` + +## 删除配置项 + +删除指定的配置项。 + +### 端点 + +```http +DELETE /api/config/{key} +``` + +### 参数 + +| 参数名 | 类型 | 位置 | 必需 | 描述 | +| ------ | ------ | ---- | ---- | -------- | +| key | string | path | 是 | 配置键名 | + +### 请求示例 + +```bash +curl -X DELETE \ + 'https://api.mcphub.io/api/config/custom-setting' \ + -H 'Authorization: Bearer YOUR_API_TOKEN' +``` + +### 响应 + +#### 成功响应 (200 OK) + +```json +{ + "success": true, + "message": "配置项已删除", + "data": { + "key": "custom-setting", + "previousValue": "old-value", + "deletedAt": "2024-01-15T10:30:00Z" + } +} +``` + +## 批量删除 + +### 批量删除服务器 + +删除多个 MCP 服务器。 + +#### 端点 + +```http +DELETE /api/servers/batch +``` + +#### 请求体 + +```json +{ + "serverIds": ["server-1", "server-2", "server-3"], + "force": false +} +``` + +#### 响应 + +```json +{ + "success": true, + "message": "批量删除完成", + "data": { + "deleted": ["server-1", "server-3"], + "failed": [ + { + "id": "server-2", + "reason": "服务器正在使用中" + } + ], + "summary": { + "total": 3, + "deleted": 2, + "failed": 1 + } + } +} +``` + +## 软删除 vs 硬删除 + +### 软删除 + +默认情况下,MCPHub 使用软删除机制: + +- 资源被标记为已删除但保留在数据库中 +- 可以通过恢复 API 恢复删除的资源 +- 删除的资源在列表 API 中默认不显示 + +### 硬删除 + +使用 `permanent=true` 参数执行硬删除: + +```bash +curl -X DELETE \ + 'https://api.mcphub.io/api/servers/mcp-server-123?permanent=true' \ + -H 'Authorization: Bearer YOUR_API_TOKEN' +``` + +硬删除操作不可逆,请谨慎使用。 + +## 权限要求 + +| 操作 | 所需权限 | +| ---------- | ------------------------ | +| 删除服务器 | `servers:delete` | +| 删除组 | `groups:delete` | +| 删除配置 | `config:delete` | +| 硬删除 | `admin:permanent_delete` | + +## 错误代码 + +| 错误代码 | HTTP 状态码 | 描述 | +| -------------------------- | ----------- | ---------------- | +| `RESOURCE_NOT_FOUND` | 404 | 资源不存在 | +| `RESOURCE_IN_USE` | 409 | 资源正在使用中 | +| `INSUFFICIENT_PERMISSIONS` | 403 | 权限不足 | +| `VALIDATION_ERROR` | 400 | 请求参数验证失败 | +| `INTERNAL_ERROR` | 500 | 服务器内部错误 | + +## 最佳实践 + +### 1. 
删除前检查 + +在删除资源前,建议先检查资源的使用情况: + +```bash +# 检查服务器使用情况 +curl -X GET \ + 'https://api.mcphub.io/api/servers/mcp-server-123/usage' \ + -H 'Authorization: Bearer YOUR_API_TOKEN' +``` + +### 2. 备份重要数据 + +对于重要资源,建议在删除前进行备份: + +```bash +# 导出服务器配置 +curl -X GET \ + 'https://api.mcphub.io/api/servers/mcp-server-123/export' \ + -H 'Authorization: Bearer YOUR_API_TOKEN' \ + > server-backup.json +``` + +### 3. 使用事务删除 + +对于复杂的删除操作,使用事务确保数据一致性: + +```json +{ + "transaction": true, + "operations": [ + { + "type": "delete", + "resource": "server", + "id": "server-1" + }, + { + "type": "delete", + "resource": "group", + "id": "group-1" + } + ] +} +``` + +## 恢复删除的资源 + +软删除的资源可以通过恢复 API 恢复: + +```bash +curl -X POST \ + 'https://api.mcphub.io/api/servers/mcp-server-123/restore' \ + -H 'Authorization: Bearer YOUR_API_TOKEN' +``` diff --git a/docs/zh/api-reference/endpoint/get.mdx b/docs/zh/api-reference/endpoint/get.mdx new file mode 100644 index 0000000000000000000000000000000000000000..da0e52750f14a86393315395f09b99271c5ba9d8 --- /dev/null +++ b/docs/zh/api-reference/endpoint/get.mdx @@ -0,0 +1,607 @@ +--- +title: '获取资源' +description: '查询和检索 MCP 服务器、用户和组信息' +--- + +## 获取服务器列表 + +### 端点 + +```http +GET /api/servers +``` + +### 查询参数 + +| 参数 | 类型 | 描述 | 示例 | +| ---------------- | ------- | ------------------------------- | ---------------------------- | +| `page` | integer | 页码(从 1 开始) | `?page=2` | +| `limit` | integer | 每页记录数(默认 20,最大 100) | `?limit=50` | +| `sort` | string | 排序字段 | `?sort=name` | +| `order` | string | 排序顺序(asc/desc) | `?order=desc` | +| `status` | string | 过滤服务器状态 | `?status=running` | +| `search` | string | 搜索服务器名称或描述 | `?search=python` | +| `group` | string | 过滤所属组 | `?group=dev-team` | +| `tags` | string | 过滤标签(逗号分隔) | `?tags=python,production` | +| `enabled` | boolean | 过滤启用状态 | `?enabled=true` | +| `created_after` | string | 创建时间起始 | `?created_after=2024-01-01` | +| `created_before` | string | 创建时间结束 | `?created_before=2024-01-31` | + +### 响应 + +```json +{ + 
"success": true, + "data": { + "items": [ + { + "id": "server-abc123", + "name": "文件系统服务器", + "status": "running", + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/data"], + "env": { + "NODE_ENV": "production" + }, + "cwd": "/app", + "pid": 12345, + "uptime": 3600000, + "enabled": true, + "description": "提供文件系统访问的 MCP 服务器", + "tags": ["filesystem", "production"], + "health": { + "status": "healthy", + "lastCheck": "2024-01-01T12:00:00Z", + "responseTime": "45ms" + }, + "resources": { + "memory": { + "used": "128MB", + "limit": "512MB", + "percentage": 25 + }, + "cpu": { + "used": "15%", + "limit": "50%" + } + }, + "stats": { + "totalRequests": 1523, + "errorCount": 2, + "avgResponseTime": "234ms" + }, + "lastRestart": "2024-01-01T08:00:00Z", + "createdAt": "2024-01-01T00:00:00Z", + "updatedAt": "2024-01-01T12:00:00Z" + } + ], + "pagination": { + "page": 1, + "limit": 20, + "total": 45, + "pages": 3, + "hasNext": true, + "hasPrev": false + }, + "filters": { + "status": "running", + "totalFiltered": 12 + } + } +} +``` + +### 示例 + +```bash +# 获取运行中的服务器,按名称排序 +curl -X GET "http://localhost:3000/api/servers?status=running&sort=name&order=asc" \ + -H "Authorization: Bearer $TOKEN" + +# 搜索包含 "python" 的服务器 +curl -X GET "http://localhost:3000/api/servers?search=python&limit=10" \ + -H "Authorization: Bearer $TOKEN" + +# 获取开发团队的服务器 +curl -X GET "http://localhost:3000/api/servers?group=dev-team" \ + -H "Authorization: Bearer $TOKEN" +``` + +## 获取服务器详情 + +### 端点 + +```http +GET /api/servers/{serverId} +``` + +### 路径参数 + +- `serverId` (string): 服务器唯一标识符 + +### 查询参数 + +| 参数 | 类型 | 描述 | +| --------------- | ------ | ----------------------------------------------- | +| `include` | string | 包含额外信息(逗号分隔):`logs,metrics,events` | +| `metrics_range` | string | 指标时间范围:`1h`, `24h`, `7d` | + +### 响应 + +```json +{ + "success": true, + "data": { + "id": "server-abc123", + "name": "文件系统服务器", + "status": "running", + "command": "npx", + "args": ["-y", 
"@modelcontextprotocol/server-filesystem", "/data"], + "env": { + "NODE_ENV": "production", + "DEBUG": "mcp:*" + }, + "cwd": "/app", + "pid": 12345, + "uptime": 3600000, + "enabled": true, + "description": "提供文件系统访问的 MCP 服务器", + "tags": ["filesystem", "production"], + "healthCheck": { + "enabled": true, + "interval": 30000, + "timeout": 5000, + "retries": 3, + "endpoint": "/health", + "status": "healthy", + "lastCheck": "2024-01-01T12:00:00Z", + "responseTime": "45ms", + "consecutiveFailures": 0 + }, + "resources": { + "memory": { + "used": "128MB", + "limit": "512MB", + "warning": "400MB", + "percentage": 25 + }, + "cpu": { + "used": "15%", + "limit": "50%", + "cores": 4 + }, + "network": { + "bytesIn": "1.2GB", + "bytesOut": "890MB" + } + }, + "stats": { + "totalRequests": 1523, + "successfulRequests": 1521, + "errorCount": 2, + "avgResponseTime": "234ms", + "p95ResponseTime": "450ms", + "requestsPerMinute": 25, + "lastError": { + "timestamp": "2024-01-01T11:30:00Z", + "message": "Temporary connection timeout", + "count": 1 + } + }, + "capabilities": [ + { + "type": "tool", + "name": "read_file", + "description": "读取文件内容", + "schema": { + "type": "object", + "properties": { + "path": { "type": "string" } + } + } + }, + { + "type": "tool", + "name": "write_file", + "description": "写入文件内容", + "schema": { + "type": "object", + "properties": { + "path": { "type": "string" }, + "content": { "type": "string" } + } + } + } + ], + "groups": [ + { + "id": "dev-team", + "name": "开发团队", + "permissions": ["read", "write", "execute"] + } + ], + "events": [ + { + "id": "event-123", + "type": "started", + "timestamp": "2024-01-01T08:00:00Z", + "message": "服务器启动成功", + "metadata": { + "pid": 12345, + "startupTime": "2.3s" + } + } + ], + "lastRestart": "2024-01-01T08:00:00Z", + "createdAt": "2024-01-01T00:00:00Z", + "updatedAt": "2024-01-01T12:00:00Z", + "createdBy": "admin" + } +} +``` + +### 示例 + +```bash +# 获取服务器基本信息 +curl -X GET 
"http://localhost:3000/api/servers/server-abc123" \ + -H "Authorization: Bearer $TOKEN" + +# 获取服务器详情包含日志和指标 +curl -X GET "http://localhost:3000/api/servers/server-abc123?include=logs,metrics&metrics_range=24h" \ + -H "Authorization: Bearer $TOKEN" +``` + +## 获取服务器状态 + +### 端点 + +```http +GET /api/servers/{serverId}/status +``` + +### 响应 + +```json +{ + "success": true, + "data": { + "serverId": "server-abc123", + "status": "running", + "health": "healthy", + "pid": 12345, + "uptime": 3600000, + "startedAt": "2024-01-01T08:00:00Z", + "lastHealthCheck": "2024-01-01T12:00:00Z", + "resources": { + "memory": { + "rss": 134217728, + "heapTotal": 67108864, + "heapUsed": 45088768, + "external": 8388608 + }, + "cpu": { + "user": 1000000, + "system": 500000, + "percentage": 15.5 + } + }, + "connections": { + "active": 5, + "total": 127 + }, + "performance": { + "requestsPerSecond": 12.5, + "avgResponseTime": "234ms", + "errorRate": "0.1%" + } + } +} +``` + +## 获取服务器日志 + +### 端点 + +```http +GET /api/servers/{serverId}/logs +``` + +### 查询参数 + +| 参数 | 类型 | 描述 | +| -------- | ------- | ---------------------------------------------- | +| `level` | string | 日志级别过滤:`error`, `warn`, `info`, `debug` | +| `limit` | integer | 返回日志条数(默认 100,最大 1000) | +| `since` | string | 开始时间(ISO 8601 格式) | +| `until` | string | 结束时间(ISO 8601 格式) | +| `follow` | boolean | 实时跟踪日志流 | +| `search` | string | 搜索日志内容 | + +### 响应 + +```json +{ + "success": true, + "data": { + "logs": [ + { + "id": "log-123", + "timestamp": "2024-01-01T12:00:00Z", + "level": "info", + "message": "处理请求: read_file", + "source": "mcp-server", + "metadata": { + "requestId": "req-456", + "userId": "user-789", + "duration": "45ms" + } + }, + { + "id": "log-124", + "timestamp": "2024-01-01T12:00:05Z", + "level": "error", + "message": "文件不存在: /nonexistent/file.txt", + "source": "filesystem", + "metadata": { + "requestId": "req-457", + "path": "/nonexistent/file.txt", + "error": "ENOENT" + } + } + ], + "pagination": { + "limit": 100, 
+ "total": 1523, + "hasMore": true, + "nextCursor": "cursor-abc123" + } + } +} +``` + +### 实时日志流 + +```bash +# 实时跟踪日志 +curl -X GET "http://localhost:3000/api/servers/server-abc123/logs?follow=true" \ + -H "Authorization: Bearer $TOKEN" \ + -H "Accept: text/event-stream" +``` + +## 获取服务器指标 + +### 端点 + +```http +GET /api/servers/{serverId}/metrics +``` + +### 查询参数 + +| 参数 | 类型 | 描述 | +| ------------- | ------ | ------------------------------------------- | +| `timeRange` | string | 时间范围:`1h`, `24h`, `7d`, `30d` | +| `granularity` | string | 数据粒度:`1m`, `5m`, `1h`, `1d` | +| `metrics` | string | 指定指标(逗号分隔):`cpu,memory,requests` | + +### 响应 + +```json +{ + "success": true, + "data": { + "timeRange": "1h", + "granularity": "5m", + "metrics": { + "cpu": { + "data": [ + { "timestamp": "2024-01-01T11:00:00Z", "value": 12.5 }, + { "timestamp": "2024-01-01T11:05:00Z", "value": 15.2 } + ], + "summary": { + "avg": 13.8, + "min": 8.1, + "max": 18.5, + "current": 15.2 + } + }, + "memory": { + "data": [ + { "timestamp": "2024-01-01T11:00:00Z", "value": 125 }, + { "timestamp": "2024-01-01T11:05:00Z", "value": 128 } + ], + "summary": { + "avg": 126.5, + "min": 120, + "max": 135, + "current": 128 + } + }, + "requests": { + "data": [ + { "timestamp": "2024-01-01T11:00:00Z", "value": 45 }, + { "timestamp": "2024-01-01T11:05:00Z", "value": 52 } + ], + "summary": { + "total": 2847, + "avg": 48.5, + "peak": 67 + } + }, + "responseTime": { + "data": [ + { "timestamp": "2024-01-01T11:00:00Z", "avg": 230, "p95": 450 }, + { "timestamp": "2024-01-01T11:05:00Z", "avg": 245, "p95": 480 } + ], + "summary": { + "avgResponseTime": "237ms", + "p95ResponseTime": "465ms" + } + } + } + } +} +``` + +## 获取用户列表 + +### 端点 + +```http +GET /api/users +``` + +### 查询参数 + +| 参数 | 类型 | 描述 | +| ------------------ | ------- | ---------------- | +| `role` | string | 过滤用户角色 | +| `group` | string | 过滤所属组 | +| `enabled` | boolean | 过滤启用状态 | +| `search` | string | 搜索用户名或邮箱 | +| `last_login_after` | string | 最后登录时间起始 | 
+ +### 响应 + +```json +{ + "success": true, + "data": { + "items": [ + { + "id": "user-abc123", + "username": "zhangsan", + "email": "zhangsan@example.com", + "role": "user", + "enabled": true, + "profile": { + "firstName": "张", + "lastName": "三", + "fullName": "张三", + "department": "开发部", + "title": "软件工程师" + }, + "groups": [ + { + "id": "dev-team", + "name": "开发团队", + "role": "member" + } + ], + "stats": { + "totalSessions": 45, + "totalRequests": 1234, + "lastRequestAt": "2024-01-01T11:30:00Z" + }, + "lastLoginAt": "2024-01-01T08:00:00Z", + "createdAt": "2023-12-01T00:00:00Z" + } + ], + "pagination": { + "page": 1, + "limit": 20, + "total": 89, + "pages": 5 + } + } +} +``` + +## 获取组列表 + +### 端点 + +```http +GET /api/groups +``` + +### 响应 + +```json +{ + "success": true, + "data": { + "items": [ + { + "id": "group-abc123", + "name": "dev-team", + "displayName": "开发团队", + "description": "前端和后端开发人员", + "memberCount": 12, + "serverCount": 8, + "parentGroup": null, + "children": [], + "permissions": { + "servers": ["read", "write", "execute"], + "tools": ["read", "execute"] + }, + "quotas": { + "requests": { + "used": 750, + "limit": 1000 + } + }, + "createdAt": "2023-12-01T00:00:00Z" + } + ] + } +} +``` + +## 搜索 + +### 全局搜索 + +```http +GET /api/search +``` + +### 查询参数 + +| 参数 | 类型 | 描述 | +| ------- | ------- | ---------------------------------------------- | +| `q` | string | 搜索关键词 | +| `type` | string | 资源类型:`servers`, `users`, `groups`, `logs` | +| `limit` | integer | 每种类型的最大结果数 | + +### 响应 + +```json +{ + "success": true, + "data": { + "query": "python", + "results": { + "servers": [ + { + "id": "server-1", + "name": "Python MCP Server", + "type": "server", + "relevance": 0.95 + } + ], + "users": [], + "groups": [ + { + "id": "python-devs", + "name": "Python 开发者", + "type": "group", + "relevance": 0.8 + } + ], + "logs": [ + { + "id": "log-123", + "message": "Starting Python server...", + "type": "log", + "relevance": 0.7 + } + ] + }, + "total": 3 + } +} +``` + 
+有关更多信息,请参阅 [创建资源](/zh/api-reference/endpoint/create)、[删除资源](/zh/api-reference/endpoint/delete) 和 [WebHooks](/zh/api-reference/endpoint/webhook) 文档。 diff --git a/docs/zh/api-reference/endpoint/webhook.mdx b/docs/zh/api-reference/endpoint/webhook.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a38b4c90e9e5d593ebf493bebe9722b84eae58cd --- /dev/null +++ b/docs/zh/api-reference/endpoint/webhook.mdx @@ -0,0 +1,615 @@ +--- +title: WebHooks API +description: 配置和管理 WebHook 事件通知的完整指南 +--- + +# WebHooks API + +WebHooks 允许 MCPHub 在特定事件发生时向您的应用程序发送实时通知。 + +## 概述 + +MCPHub WebHooks 系统支持以下功能: + +- 实时事件通知 +- 自定义过滤器 +- 重试机制 +- 签名验证 +- 批量事件处理 + +## 支持的事件类型 + +| 事件类型 | 描述 | +| ----------------------- | -------------- | +| `server.created` | MCP 服务器创建 | +| `server.updated` | MCP 服务器更新 | +| `server.deleted` | MCP 服务器删除 | +| `server.status_changed` | 服务器状态变更 | +| `group.created` | 服务器组创建 | +| `group.updated` | 服务器组更新 | +| `group.deleted` | 服务器组删除 | +| `user.login` | 用户登录 | +| `user.logout` | 用户登出 | +| `config.changed` | 配置变更 | +| `system.error` | 系统错误 | + +## 创建 WebHook + +### 端点 + +```http +POST /api/webhooks +``` + +### 请求体 + +```json +{ + "url": "https://your-app.com/webhook", + "events": ["server.created", "server.status_changed"], + "secret": "your-webhook-secret", + "active": true, + "config": { + "contentType": "application/json", + "insecureSsl": false, + "retryCount": 3, + "timeout": 30 + }, + "filters": { + "serverGroups": ["production", "staging"], + "serverTypes": ["ai-assistant", "data-processor"] + } +} +``` + +### 响应 + +```json +{ + "success": true, + "data": { + "id": "webhook-123", + "url": "https://your-app.com/webhook", + "events": ["server.created", "server.status_changed"], + "active": true, + "secret": "your-webhook-secret", + "config": { + "contentType": "application/json", + "insecureSsl": false, + "retryCount": 3, + "timeout": 30 + }, + "filters": { + "serverGroups": ["production", "staging"], + "serverTypes": ["ai-assistant", 
"data-processor"] + }, + "createdAt": "2024-01-15T10:30:00Z", + "updatedAt": "2024-01-15T10:30:00Z" + } +} +``` + +## 获取 WebHook 列表 + +### 端点 + +```http +GET /api/webhooks +``` + +### 查询参数 + +| 参数名 | 类型 | 描述 | +| ------ | ------- | -------------------- | +| page | integer | 页码(默认:1) | +| limit | integer | 每页数量(默认:20) | +| active | boolean | 过滤活跃状态 | +| event | string | 过滤事件类型 | + +### 请求示例 + +```bash +curl -X GET \ + 'https://api.mcphub.io/api/webhooks?active=true&limit=10' \ + -H 'Authorization: Bearer YOUR_API_TOKEN' +``` + +### 响应 + +```json +{ + "success": true, + "data": { + "webhooks": [ + { + "id": "webhook-123", + "url": "https://your-app.com/webhook", + "events": ["server.created", "server.status_changed"], + "active": true, + "lastDelivery": "2024-01-15T09:30:00Z", + "deliveryCount": 145, + "failureCount": 2, + "createdAt": "2024-01-10T10:30:00Z" + } + ], + "pagination": { + "page": 1, + "limit": 10, + "total": 25, + "pages": 3 + } + } +} +``` + +## 获取单个 WebHook + +### 端点 + +```http +GET /api/webhooks/{id} +``` + +### 响应 + +```json +{ + "success": true, + "data": { + "id": "webhook-123", + "url": "https://your-app.com/webhook", + "events": ["server.created", "server.status_changed"], + "active": true, + "secret": "your-webhook-secret", + "config": { + "contentType": "application/json", + "insecureSsl": false, + "retryCount": 3, + "timeout": 30 + }, + "filters": { + "serverGroups": ["production", "staging"], + "serverTypes": ["ai-assistant", "data-processor"] + }, + "stats": { + "totalDeliveries": 145, + "successfulDeliveries": 143, + "failedDeliveries": 2, + "lastDelivery": "2024-01-15T09:30:00Z", + "lastSuccess": "2024-01-15T09:30:00Z", + "lastFailure": "2024-01-14T15:20:00Z" + }, + "createdAt": "2024-01-10T10:30:00Z", + "updatedAt": "2024-01-15T10:30:00Z" + } +} +``` + +## 更新 WebHook + +### 端点 + +```http +PUT /api/webhooks/{id} +``` + +### 请求体 + +```json +{ + "url": "https://your-app.com/new-webhook", + "events": ["server.created", "server.updated", 
"server.deleted"], + "active": true, + "config": { + "retryCount": 5, + "timeout": 45 + } +} +``` + +## 删除 WebHook + +### 端点 + +```http +DELETE /api/webhooks/{id} +``` + +### 响应 + +```json +{ + "success": true, + "message": "WebHook 已成功删除" +} +``` + +## WebHook 事件格式 + +### 基本结构 + +所有 WebHook 事件都遵循以下基本结构: + +```json +{ + "id": "event-123", + "type": "server.created", + "timestamp": "2024-01-15T10:30:00Z", + "version": "1.0", + "data": { + // 事件特定数据 + }, + "metadata": { + "source": "mcphub", + "environment": "production", + "triggeredBy": "user-456" + } +} +``` + +### 服务器事件示例 + +#### server.created + +```json +{ + "id": "event-123", + "type": "server.created", + "timestamp": "2024-01-15T10:30:00Z", + "version": "1.0", + "data": { + "server": { + "id": "mcp-server-123", + "name": "AI Assistant Server", + "type": "ai-assistant", + "endpoint": "https://ai-assistant.example.com", + "group": "production", + "status": "active", + "capabilities": ["chat", "completion"], + "createdAt": "2024-01-15T10:30:00Z" + } + }, + "metadata": { + "source": "mcphub", + "environment": "production", + "triggeredBy": "user-456" + } +} +``` + +#### server.status_changed + +```json +{ + "id": "event-124", + "type": "server.status_changed", + "timestamp": "2024-01-15T11:30:00Z", + "version": "1.0", + "data": { + "server": { + "id": "mcp-server-123", + "name": "AI Assistant Server", + "previousStatus": "active", + "currentStatus": "inactive", + "reason": "Health check failed", + "lastHealthCheck": "2024-01-15T11:25:00Z" + } + }, + "metadata": { + "source": "mcphub", + "environment": "production", + "triggeredBy": "system" + } +} +``` + +## 签名验证 + +MCPHub 使用 HMAC-SHA256 签名来验证 WebHook 的真实性。 + +### 签名生成 + +签名在 `X-MCPHub-Signature-256` 头中发送: + +``` +X-MCPHub-Signature-256: sha256=5757107ea39eca8e35d1e8... 
+``` + +### 验证示例 + +#### Node.js + +```javascript +const crypto = require('crypto'); + +function verifySignature(payload, signature, secret) { + const expectedSignature = crypto + .createHmac('sha256', secret) + .update(payload, 'utf8') + .digest('hex'); + + const actualSignature = signature.replace('sha256=', ''); + + return crypto.timingSafeEqual( + Buffer.from(expectedSignature, 'hex'), + Buffer.from(actualSignature, 'hex'), + ); +} + +// Express.js 中间件示例 +app.use('/webhook', express.raw({ type: 'application/json' }), (req, res) => { + const signature = req.headers['x-mcphub-signature-256']; + const payload = req.body; + + if (!verifySignature(payload, signature, process.env.WEBHOOK_SECRET)) { + return res.status(401).send('Unauthorized'); + } + + // 处理 WebHook 事件 + const event = JSON.parse(payload); + console.log('收到事件:', event.type); + + res.status(200).send('OK'); +}); +``` + +#### Python + +```python +import hmac +import hashlib + +def verify_signature(payload, signature, secret): + expected_signature = hmac.new( + secret.encode('utf-8'), + payload, + hashlib.sha256 + ).hexdigest() + + actual_signature = signature.replace('sha256=', '') + + return hmac.compare_digest(expected_signature, actual_signature) + +# Flask 示例 +from flask import Flask, request, jsonify +import json + +app = Flask(__name__) + +@app.route('/webhook', methods=['POST']) +def webhook(): + signature = request.headers.get('X-MCPHub-Signature-256') + payload = request.get_data() + + if not verify_signature(payload, signature, 'your-webhook-secret'): + return jsonify({'error': 'Unauthorized'}), 401 + + event = json.loads(payload) + print(f'收到事件: {event["type"]}') + + return jsonify({'status': 'success'}), 200 +``` + +## 重试机制 + +MCPHub 对失败的 WebHook 交付实施指数退避重试: + +- **重试次数**: 可配置(默认 3 次) +- **重试间隔**: 2^n 秒(n 为重试次数) +- **最大间隔**: 300 秒(5 分钟) +- **超时设置**: 可配置(默认 30 秒) + +### 重试时间表 + +| 尝试次数 | 延迟时间 | +| -------- | -------- | +| 1 | 立即 | +| 2 | 2 秒 | +| 3 | 4 秒 | +| 4 | 8 秒 | +| 5 | 16 秒 | + +## 
获取交付历史 + +### 端点 + +```http +GET /api/webhooks/{id}/deliveries +``` + +### 查询参数 + +| 参数名 | 类型 | 描述 | +| ---------- | ------- | ------------------------------------ | +| page | integer | 页码 | +| limit | integer | 每页数量 | +| status | string | 过滤状态(success, failed, pending) | +| event_type | string | 过滤事件类型 | + +### 响应 + +```json +{ + "success": true, + "data": { + "deliveries": [ + { + "id": "delivery-123", + "eventId": "event-123", + "eventType": "server.created", + "url": "https://your-app.com/webhook", + "status": "success", + "responseCode": 200, + "responseTime": 145, + "attempts": 1, + "deliveredAt": "2024-01-15T10:30:15Z", + "nextRetry": null + }, + { + "id": "delivery-124", + "eventId": "event-124", + "eventType": "server.status_changed", + "url": "https://your-app.com/webhook", + "status": "failed", + "responseCode": 500, + "responseTime": 30000, + "attempts": 3, + "error": "Connection timeout", + "deliveredAt": null, + "nextRetry": "2024-01-15T11:45:00Z" + } + ], + "pagination": { + "page": 1, + "limit": 20, + "total": 145, + "pages": 8 + } + } +} +``` + +## 测试 WebHook + +### 端点 + +```http +POST /api/webhooks/{id}/test +``` + +### 请求体 + +```json +{ + "eventType": "server.created", + "customData": { + "test": true, + "message": "这是一个测试事件" + } +} +``` + +### 响应 + +```json +{ + "success": true, + "data": { + "deliveryId": "delivery-test-123", + "status": "delivered", + "responseCode": 200, + "responseTime": 124, + "sentAt": "2024-01-15T10:30:00Z" + } +} +``` + +## 最佳实践 + +### 1. 幂等性处理 + +确保您的 WebHook 端点能够处理重复事件: + +```javascript +const processedEvents = new Set(); + +app.post('/webhook', (req, res) => { + const event = req.body; + + // 检查事件是否已处理 + if (processedEvents.has(event.id)) { + return res.status(200).send('Already processed'); + } + + // 处理事件 + processEvent(event); + + // 记录已处理的事件 + processedEvents.add(event.id); + + res.status(200).send('OK'); +}); +``` + +### 2. 
异步处理 + +对于复杂的处理逻辑,使用异步处理避免阻塞: + +```javascript +app.post('/webhook', async (req, res) => { + const event = req.body; + + // 立即响应 + res.status(200).send('OK'); + + // 异步处理事件 + setImmediate(() => { + processEventAsync(event); + }); +}); +``` + +### 3. 错误处理 + +实施适当的错误处理和日志记录: + +```javascript +app.post('/webhook', (req, res) => { + try { + const event = req.body; + processEvent(event); + res.status(200).send('OK'); + } catch (error) { + console.error('WebHook 处理错误:', error); + res.status(500).send('Internal Server Error'); + } +}); +``` + +### 4. 监控和告警 + +监控 WebHook 的交付状态: + +```bash +# 检查失败的交付 +curl -X GET \ + 'https://api.mcphub.io/api/webhooks/webhook-123/deliveries?status=failed' \ + -H 'Authorization: Bearer YOUR_API_TOKEN' +``` + +## 故障排除 + +### 常见问题 + +1. **签名验证失败** + + - 检查密钥是否正确 + - 确保使用原始请求体进行验证 + - 验证 HMAC 计算实现 + +2. **超时错误** + + - 增加 WebHook 超时设置 + - 优化端点响应时间 + - 使用异步处理 + +3. **重复事件** + - 实施幂等性检查 + - 使用事件 ID 去重 + - 记录处理状态 + +### 调试工具 + +使用 MCPHub 提供的调试工具: + +```bash +# 查看最近的交付日志 +curl -X GET \ + 'https://api.mcphub.io/api/webhooks/webhook-123/deliveries?limit=5' \ + -H 'Authorization: Bearer YOUR_API_TOKEN' + +# 重新发送失败的事件 +curl -X POST \ + 'https://api.mcphub.io/api/webhooks/delivery-124/redeliver' \ + -H 'Authorization: Bearer YOUR_API_TOKEN' +``` diff --git a/docs/zh/api-reference/introduction.mdx b/docs/zh/api-reference/introduction.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a488d2262194c539c39092273bf63678e0770a90 --- /dev/null +++ b/docs/zh/api-reference/introduction.mdx @@ -0,0 +1,717 @@ +--- +title: 'API 参考' +description: 'MCPHub REST API 完整参考文档' +--- + +## 概述 + +MCPHub 提供全面的 REST API,用于管理 MCP 服务器、用户、组和监控。所有 API 端点都需要身份验证,并支持 JSON 格式的请求和响应。 + +## 基础信息 + +### 基础 URL + +``` +https://your-mcphub-instance.com/api +``` + +### 身份验证 + +所有 API 请求都需要身份验证。支持以下方法: + +#### JWT 令牌认证 + +```bash +curl -X GET https://api.mcphub.com/servers \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" +``` + +#### API 密钥认证 + +```bash +curl -X GET 
https://api.mcphub.com/servers \ + -H "X-API-Key: YOUR_API_KEY" +``` + +### 请求格式 + +- **Content-Type**: `application/json` +- **Accept**: `application/json` +- **User-Agent**: 建议包含您的应用程序名称和版本 + +### 响应格式 + +所有响应都采用 JSON 格式: + +```json +{ + "success": true, + "data": { + // 响应数据 + }, + "message": "操作成功", + "timestamp": "2024-01-01T12:00:00Z" +} +``` + +错误响应格式: + +```json +{ + "success": false, + "error": { + "code": "VALIDATION_ERROR", + "message": "请求数据无效", + "details": { + "field": "name", + "reason": "名称不能为空" + } + }, + "timestamp": "2024-01-01T12:00:00Z" +} +``` + +## 状态码 + +| 状态码 | 说明 | +| ------ | -------------------- | +| 200 | 请求成功 | +| 201 | 资源创建成功 | +| 204 | 请求成功,无返回内容 | +| 400 | 请求参数错误 | +| 401 | 未授权访问 | +| 403 | 权限不足 | +| 404 | 资源不存在 | +| 409 | 资源冲突 | +| 422 | 请求数据验证失败 | +| 429 | 请求频率超限 | +| 500 | 服务器内部错误 | + +## 分页 + +支持分页的端点使用以下参数: + +- `page`: 页码(从 1 开始) +- `limit`: 每页记录数(默认 20,最大 100) +- `sort`: 排序字段 +- `order`: 排序顺序(`asc` 或 `desc`) + +```bash +curl -X GET "https://api.mcphub.com/servers?page=2&limit=50&sort=name&order=asc" \ + -H "Authorization: Bearer $TOKEN" +``` + +分页响应格式: + +```json +{ + "success": true, + "data": { + "items": [...], + "pagination": { + "page": 2, + "limit": 50, + "total": 234, + "pages": 5, + "hasNext": true, + "hasPrev": true + } + } +} +``` + +## 过滤和搜索 + +支持过滤的端点可以使用以下参数: + +- `search`: 全文搜索 +- `filter[field]`: 字段过滤 +- `status`: 状态过滤 +- `created_after`: 创建时间筛选 +- `created_before`: 创建时间筛选 + +```bash +curl -X GET "https://api.mcphub.com/servers?search=python&filter[status]=running&created_after=2024-01-01" \ + -H "Authorization: Bearer $TOKEN" +``` + +## API 端点 + +### 服务器管理 + +#### 获取服务器列表 + +```http +GET /api/servers +``` + +参数: + +- `status` (可选): 过滤服务器状态 (`running`, `stopped`, `error`) +- `group` (可选): 过滤所属组 +- `search` (可选): 搜索服务器名称或描述 + +示例响应: + +```json +{ + "success": true, + "data": { + "items": [ + { + "id": "server-1", + "name": "文件系统服务器", + "status": "running", + "command": "npx", + "args": ["-y", 
"@modelcontextprotocol/server-filesystem", "/data"], + "env": { + "NODE_ENV": "production" + }, + "cwd": "/app", + "pid": 12345, + "uptime": 3600000, + "lastRestart": "2024-01-01T12:00:00Z", + "createdAt": "2024-01-01T10:00:00Z", + "updatedAt": "2024-01-01T12:00:00Z" + } + ] + } +} +``` + +#### 创建服务器 + +```http +POST /api/servers +``` + +请求体: + +```json +{ + "name": "新服务器", + "command": "python", + "args": ["-m", "mcp_server"], + "env": { + "API_KEY": "your-api-key", + "LOG_LEVEL": "INFO" + }, + "cwd": "/app/python-server", + "enabled": true, + "description": "Python MCP 服务器", + "tags": ["python", "production"] +} +``` + +#### 获取服务器详情 + +```http +GET /api/servers/{serverId} +``` + +#### 更新服务器 + +```http +PUT /api/servers/{serverId} +``` + +#### 删除服务器 + +```http +DELETE /api/servers/{serverId} +``` + +#### 启动服务器 + +```http +POST /api/servers/{serverId}/start +``` + +#### 停止服务器 + +```http +POST /api/servers/{serverId}/stop +``` + +请求体(可选): + +```json +{ + "graceful": true, + "timeout": 30000 +} +``` + +#### 重启服务器 + +```http +POST /api/servers/{serverId}/restart +``` + +#### 获取服务器日志 + +```http +GET /api/servers/{serverId}/logs +``` + +参数: + +- `level` (可选): 日志级别过滤 +- `limit` (可选): 返回日志条数 +- `since` (可选): 开始时间 +- `follow` (可选): 实时跟踪日志 + +### 用户管理 + +#### 获取用户列表 + +```http +GET /api/users +``` + +#### 创建用户 + +```http +POST /api/users +``` + +请求体: + +```json +{ + "username": "newuser", + "email": "user@example.com", + "password": "securepassword", + "role": "user", + "groups": ["dev-team"], + "profile": { + "firstName": "张", + "lastName": "三", + "department": "开发部" + } +} +``` + +#### 获取用户详情 + +```http +GET /api/users/{userId} +``` + +#### 更新用户 + +```http +PUT /api/users/{userId} +``` + +#### 删除用户 + +```http +DELETE /api/users/{userId} +``` + +### 组管理 + +#### 获取组列表 + +```http +GET /api/groups +``` + +#### 创建组 + +```http +POST /api/groups +``` + +请求体: + +```json +{ + "name": "dev-team", + "displayName": "开发团队", + "description": "前端和后端开发人员", + "parentGroup": null, + 
"permissions": { + "servers": ["read", "write", "execute"], + "tools": ["read", "execute"] + }, + "settings": { + "autoAssign": false, + "maxMembers": 50, + "requireApproval": true + } +} +``` + +#### 添加用户到组 + +```http +POST /api/groups/{groupId}/members +``` + +请求体: + +```json +{ + "userId": "user123", + "role": "member" +} +``` + +#### 从组中移除用户 + +```http +DELETE /api/groups/{groupId}/members/{userId} +``` + +#### 分配服务器到组 + +```http +POST /api/groups/{groupId}/servers +``` + +请求体: + +```json +{ + "serverId": "server-1", + "permissions": ["read", "write", "execute"] +} +``` + +### 身份验证 + +#### 登录 + +```http +POST /api/auth/login +``` + +请求体: + +```json +{ + "username": "admin", + "password": "password", + "mfaCode": "123456" +} +``` + +响应: + +```json +{ + "success": true, + "data": { + "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", + "refreshToken": "refresh_token_here", + "expiresIn": 86400, + "user": { + "id": "user123", + "username": "admin", + "role": "admin", + "permissions": ["*"] + } + } +} +``` + +#### 刷新令牌 + +```http +POST /api/auth/refresh +``` + +#### 注销 + +```http +POST /api/auth/logout +``` + +#### 验证令牌 + +```http +GET /api/auth/verify +``` + +### 监控 + +#### 获取系统状态 + +```http +GET /api/monitoring/status +``` + +响应: + +```json +{ + "success": true, + "data": { + "system": { + "uptime": 86400, + "version": "2.1.0", + "nodeVersion": "18.17.0" + }, + "servers": { + "total": 12, + "running": 10, + "stopped": 1, + "error": 1 + }, + "performance": { + "requestsPerMinute": 85, + "avgResponseTime": "245ms", + "errorRate": "0.3%" + } + } +} +``` + +#### 获取性能指标 + +```http +GET /api/monitoring/metrics +``` + +参数: + +- `timeRange`: 时间范围 (`1h`, `24h`, `7d`, `30d`) +- `granularity`: 数据粒度 (`1m`, `5m`, `1h`, `1d`) +- `metrics`: 指定指标名称(逗号分隔) + +#### 获取日志 + +```http +GET /api/monitoring/logs +``` + +参数: + +- `level`: 日志级别 +- `source`: 日志源 +- `limit`: 返回条数 +- `since`: 开始时间 +- `until`: 结束时间 + +### 配置管理 + +#### 获取系统配置 + +```http +GET /api/config +``` + +#### 更新系统配置 + 
+```http +PUT /api/config +``` + +请求体: + +```json +{ + "smtp": { + "host": "smtp.example.com", + "port": 587, + "secure": false, + "auth": { + "user": "noreply@example.com", + "pass": "password" + } + }, + "notifications": { + "email": true, + "slack": true, + "webhook": "https://hooks.example.com/notifications" + } +} +``` + +## WebSocket API + +MCPHub 支持 WebSocket 连接以获取实时更新。 + +### 连接 + +```javascript +const ws = new WebSocket('wss://api.mcphub.com/ws'); +ws.onopen = function () { + // 发送认证消息 + ws.send( + JSON.stringify({ + type: 'auth', + token: 'YOUR_JWT_TOKEN', + }), + ); +}; +``` + +### 订阅事件 + +```javascript +// 订阅服务器状态更新 +ws.send( + JSON.stringify({ + type: 'subscribe', + channel: 'server-status', + filters: { + serverId: 'server-1', + }, + }), +); + +// 订阅系统监控 +ws.send( + JSON.stringify({ + type: 'subscribe', + channel: 'monitoring', + metrics: ['cpu', 'memory', 'requests'], + }), +); +``` + +### 事件类型 + +- `server-status`: 服务器状态变化 +- `server-logs`: 实时日志流 +- `monitoring`: 系统监控指标 +- `alerts`: 系统警报 +- `user-activity`: 用户活动事件 + +## 错误处理 + +### 错误代码 + +| 错误代码 | 描述 | +| ----------------------- | -------------- | +| `INVALID_REQUEST` | 请求格式无效 | +| `AUTHENTICATION_FAILED` | 身份验证失败 | +| `AUTHORIZATION_FAILED` | 权限不足 | +| `RESOURCE_NOT_FOUND` | 资源不存在 | +| `RESOURCE_CONFLICT` | 资源冲突 | +| `VALIDATION_ERROR` | 数据验证失败 | +| `RATE_LIMIT_EXCEEDED` | 请求频率超限 | +| `SERVER_ERROR` | 服务器内部错误 | + +### 错误处理示例 + +```javascript +async function handleApiRequest() { + try { + const response = await fetch('/api/servers', { + headers: { + Authorization: `Bearer ${token}`, + 'Content-Type': 'application/json', + }, + }); + + const data = await response.json(); + + if (!data.success) { + switch (data.error.code) { + case 'AUTHENTICATION_FAILED': + // 重新登录 + redirectToLogin(); + break; + case 'RATE_LIMIT_EXCEEDED': + // 延迟重试 + setTimeout(() => handleApiRequest(), 5000); + break; + default: + // 显示错误消息 + showError(data.error.message); + } + return; + } + + // 处理成功响应 + 
handleSuccessResponse(data.data); + } catch (error) { + // 处理网络错误 + console.error('网络请求失败:', error); + } +} +``` + +## 速率限制 + +API 实施速率限制以防止滥用: + +- **默认限制**: 每分钟 100 请求 +- **认证用户**: 每分钟 1000 请求 +- **管理员**: 每分钟 5000 请求 + +响应头包含速率限制信息: + +``` +X-RateLimit-Limit: 1000 +X-RateLimit-Remaining: 999 +X-RateLimit-Reset: 1609459200 +``` + +## SDK 和客户端库 + +### JavaScript/Node.js + +```bash +npm install @mcphub/sdk +``` + +```javascript +import { MCPHubClient } from '@mcphub/sdk'; + +const client = new MCPHubClient({ + baseURL: 'https://api.mcphub.com', + token: 'YOUR_JWT_TOKEN', +}); + +// 获取服务器列表 +const servers = await client.servers.list(); + +// 创建服务器 +const newServer = await client.servers.create({ + name: '新服务器', + command: 'python', + args: ['-m', 'mcp_server'], +}); +``` + +### Python + +```bash +pip install mcphub-sdk +``` + +```python +from mcphub_sdk import MCPHubClient + +client = MCPHubClient( + base_url='https://api.mcphub.com', + token='YOUR_JWT_TOKEN' +) + +# 获取服务器列表 +servers = client.servers.list() + +# 创建服务器 +new_server = client.servers.create( + name='新服务器', + command='python', + args=['-m', 'mcp_server'] +) +``` + +## 最佳实践 + +1. **使用 HTTPS**: 始终通过 HTTPS 访问 API +2. **安全存储令牌**: 不要在客户端代码中硬编码令牌 +3. **处理错误**: 实施适当的错误处理和重试逻辑 +4. **遵守速率限制**: 监控速率限制并实施退避策略 +5. **使用分页**: 对于大数据集使用分页参数 +6. **缓存响应**: 适当缓存 API 响应以减少请求 +7. 
**版本控制**: 使用 API 版本号以确保兼容性 + +有关更多信息,请参阅我们的 [SDK 文档](https://docs.mcphub.com/sdk) 和 [示例代码](https://github.com/mcphub/examples)。 diff --git a/docs/zh/configuration/docker-setup.mdx b/docs/zh/configuration/docker-setup.mdx new file mode 100644 index 0000000000000000000000000000000000000000..250e66b76114f99f97f3032d02f268227e19bf1f --- /dev/null +++ b/docs/zh/configuration/docker-setup.mdx @@ -0,0 +1,539 @@ +--- +title: 'Docker 部署' +description: '使用 Docker 和 Docker Compose 部署 MCPHub' +--- + +# Docker 部署 + +本指南介绍使用 Docker 部署 MCPHub,包括开发和生产配置。 + +## Docker 快速开始 + +### 使用预构建镜像 + +```bash +# 拉取最新镜像 +docker pull mcphub/mcphub:latest + +# 使用默认配置运行 +docker run -d \ + --name mcphub \ + -p 3000:3000 \ + -v $(pwd)/mcp_settings.json:/app/mcp_settings.json \ + mcphub/mcphub:latest +``` + +### 从源码构建 + +```bash +# 克隆仓库 +git clone https://github.com/your-username/mcphub.git +cd mcphub + +# 构建 Docker 镜像 +docker build -t mcphub:local . + +# 运行容器 +docker run -d \ + --name mcphub \ + -p 3000:3000 \ + -v $(pwd)/mcp_settings.json:/app/mcp_settings.json \ + mcphub:local +``` + +## Docker Compose 设置 + +### 基本配置 + +创建 `docker-compose.yml` 文件: + +```yaml +version: '3.8' + +services: + mcphub: + image: mcphub/mcphub:latest + # 本地开发时使用: + # build: . 
+ container_name: mcphub + ports: + - '3000:3000' + environment: + - NODE_ENV=production + - PORT=3000 + - JWT_SECRET=${JWT_SECRET:-your-jwt-secret} + - DATABASE_URL=postgresql://mcphub:password@postgres:5432/mcphub + volumes: + - ./mcp_settings.json:/app/mcp_settings.json:ro + - ./servers.json:/app/servers.json:ro + - mcphub_data:/app/data + depends_on: + postgres: + condition: service_healthy + restart: unless-stopped + networks: + - mcphub-network + + postgres: + image: postgres:15-alpine + container_name: mcphub-postgres + environment: + - POSTGRES_DB=mcphub + - POSTGRES_USER=mcphub + - POSTGRES_PASSWORD=password + volumes: + - postgres_data:/var/lib/postgresql/data + - ./scripts/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql:ro + ports: + - '5432:5432' + healthcheck: + test: ['CMD-SHELL', 'pg_isready -U mcphub -d mcphub'] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + networks: + - mcphub-network + +volumes: + postgres_data: + mcphub_data: + +networks: + mcphub-network: + driver: bridge +``` + +### 生产配置(包含 Nginx) + +```yaml +version: '3.8' + +services: + nginx: + image: nginx:alpine + container_name: mcphub-nginx + ports: + - '80:80' + - '443:443' + volumes: + - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro + - ./ssl:/etc/nginx/ssl:ro + - nginx_logs:/var/log/nginx + depends_on: + - mcphub + restart: unless-stopped + networks: + - mcphub-network + + mcphub: + image: mcphub/mcphub:latest + container_name: mcphub-app + expose: + - '3000' + environment: + - NODE_ENV=production + - PORT=3000 + - JWT_SECRET=${JWT_SECRET} + - JWT_EXPIRES_IN=${JWT_EXPIRES_IN:-24h} + - DATABASE_URL=postgresql://mcphub:${POSTGRES_PASSWORD}@postgres:5432/mcphub + - OPENAI_API_KEY=${OPENAI_API_KEY} + - REDIS_URL=redis://redis:6379 + volumes: + - ./mcp_settings.json:/app/mcp_settings.json:ro + - ./servers.json:/app/servers.json:ro + - mcphub_data:/app/data + - mcphub_logs:/app/logs + depends_on: + postgres: + condition: service_healthy + redis: + condition: 
service_healthy + restart: unless-stopped + networks: + - mcphub-network + healthcheck: + test: ['CMD', 'wget', '--quiet', '--tries=1', '--spider', 'http://localhost:3000/health'] + interval: 30s + timeout: 10s + retries: 3 + + postgres: + image: postgres:15-alpine + container_name: mcphub-postgres + environment: + - POSTGRES_DB=mcphub + - POSTGRES_USER=mcphub + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + volumes: + - postgres_data:/var/lib/postgresql/data + - ./backups:/backups + healthcheck: + test: ['CMD-SHELL', 'pg_isready -U mcphub -d mcphub'] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + networks: + - mcphub-network + + redis: + image: redis:7-alpine + container_name: mcphub-redis + command: redis-server --appendonly yes --requirepass ${REDIS_PASSWORD} + volumes: + - redis_data:/data + healthcheck: + test: ['CMD', 'redis-cli', 'ping'] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + networks: + - mcphub-network + +volumes: + postgres_data: + redis_data: + mcphub_data: + mcphub_logs: + nginx_logs: + +networks: + mcphub-network: + driver: bridge +``` + +### 环境变量 + +为 Docker Compose 创建 `.env` 文件: + +```env +# 应用程序 +NODE_ENV=production +JWT_SECRET=your-super-secret-jwt-key-change-this +JWT_EXPIRES_IN=24h + +# 数据库 +POSTGRES_PASSWORD=your-secure-database-password + +# Redis +REDIS_PASSWORD=your-secure-redis-password + +# 外部 API +OPENAI_API_KEY=your-openai-api-key + +# 可选:自定义端口 +# PORT=3000 +``` + +## 开发设置 + +### 开发 Docker Compose + +创建 `docker-compose.dev.yml`: + +```yaml +version: '3.8' + +services: + mcphub-dev: + build: + context: . 
+ dockerfile: Dockerfile.dev + container_name: mcphub-dev + ports: + - '3000:3000' + - '5173:5173' # 前端开发服务器 + - '9229:9229' # 调试端口 + environment: + - NODE_ENV=development + - PORT=3000 + - DATABASE_URL=postgresql://mcphub:password@postgres:5432/mcphub + volumes: + - .:/app + - /app/node_modules + - /app/frontend/node_modules + depends_on: + - postgres + command: pnpm dev + networks: + - mcphub-dev + + postgres: + image: postgres:15-alpine + container_name: mcphub-postgres-dev + environment: + - POSTGRES_DB=mcphub + - POSTGRES_USER=mcphub + - POSTGRES_PASSWORD=password + ports: + - '5432:5432' + volumes: + - postgres_dev_data:/var/lib/postgresql/data + networks: + - mcphub-dev + +volumes: + postgres_dev_data: + +networks: + mcphub-dev: + driver: bridge +``` + +### 开发 Dockerfile + +创建 `Dockerfile.dev`: + +```dockerfile +FROM node:20-alpine + +# 安装 pnpm +RUN npm install -g pnpm + +# 设置工作目录 +WORKDIR /app + +# 复制包文件 +COPY package.json pnpm-lock.yaml ./ +COPY frontend/package.json ./frontend/ + +# 安装依赖 +RUN pnpm install + +# 复制源代码 +COPY . . 
+ +# 暴露端口 +EXPOSE 3000 5173 9229 + +# 启动开发服务器 +CMD ["pnpm", "dev"] +``` + +## 运行应用程序 + +### 开发模式 + +```bash +# 启动开发环境 +docker-compose -f docker-compose.dev.yml up -d + +# 查看日志 +docker-compose -f docker-compose.dev.yml logs -f mcphub-dev + +# 停止开发环境 +docker-compose -f docker-compose.dev.yml down +``` + +### 生产模式 + +```bash +# 启动生产环境 +docker-compose up -d + +# 查看日志 +docker-compose logs -f mcphub + +# 停止生产环境 +docker-compose down +``` + +## 配置管理 + +### MCP 设置卷挂载 + +创建您的 `mcp_settings.json`: + +```json +{ + "mcpServers": { + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"] + }, + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"] + }, + "amap": { + "command": "npx", + "args": ["-y", "@amap/amap-maps-mcp-server"], + "env": { + "AMAP_MAPS_API_KEY": "your-api-key" + } + } + } +} +``` + +### 密钥管理 + +对于生产环境,使用 Docker 密钥: + +```yaml +version: '3.8' + +services: + mcphub: + image: mcphub/mcphub:latest + environment: + - JWT_SECRET_FILE=/run/secrets/jwt_secret + - DATABASE_PASSWORD_FILE=/run/secrets/db_password + secrets: + - jwt_secret + - db_password + +secrets: + jwt_secret: + file: ./secrets/jwt_secret.txt + db_password: + file: ./secrets/db_password.txt +``` + +## 数据持久化 + +### 数据库备份 + +在 `docker-compose.yml` 中添加备份服务: + +```yaml +services: + backup: + image: postgres:15-alpine + container_name: mcphub-backup + environment: + - PGPASSWORD=${POSTGRES_PASSWORD} + volumes: + - ./backups:/backups + - ./scripts/backup.sh:/backup.sh:ro + command: /bin/sh -c "chmod +x /backup.sh && /backup.sh" + depends_on: + - postgres + profiles: + - backup + networks: + - mcphub-network +``` + +创建 `scripts/backup.sh`: + +```bash +#!/bin/sh +BACKUP_FILE="/backups/mcphub_$(date +%Y%m%d_%H%M%S).sql" +pg_dump -h postgres -U mcphub -d mcphub > "$BACKUP_FILE" +echo "备份已创建:$BACKUP_FILE" + +# 只保留最近 7 天的备份 +find /backups -name "mcphub_*.sql" -mtime +7 -delete +``` + +运行备份: + +```bash +docker-compose --profile backup run --rm backup +``` + +## 监控和健康检查 + 
+### 健康检查端点 + +在您的应用程序中添加: + +```javascript +// 在您的 Express 应用中 +app.get('/health', (req, res) => { + res.json({ + status: 'healthy', + timestamp: new Date().toISOString(), + uptime: process.uptime(), + memory: process.memoryUsage(), + version: process.env.npm_package_version, + }); +}); +``` + +### Docker 健康检查 + +```yaml +services: + mcphub: + # ... 其他配置 + healthcheck: + test: ['CMD', 'wget', '--quiet', '--tries=1', '--spider', 'http://localhost:3000/health'] + interval: 30s + timeout: 10s + retries: 3 + start_period: 60s +``` + +### 使用 Watchtower 监控 + +添加自动更新: + +```yaml +services: + watchtower: + image: containrrr/watchtower + container_name: mcphub-watchtower + volumes: + - /var/run/docker.sock:/var/run/docker.sock + environment: + - WATCHTOWER_CLEANUP=true + - WATCHTOWER_POLL_INTERVAL=3600 + - WATCHTOWER_INCLUDE_STOPPED=true + restart: unless-stopped +``` + +## 故障排除 + +### 常见问题 + +**容器启动失败**:使用 `docker-compose logs mcphub` 检查日志 + +**数据库连接错误**:确保 PostgreSQL 健康且可访问 + +**端口冲突**:检查端口 3000/5432 是否已被占用 + +**卷挂载问题**:验证文件路径和权限 + +### 调试命令 + +```bash +# 检查容器状态 +docker-compose ps + +# 查看日志 +docker-compose logs -f [service_name] + +# 在容器中执行命令 +docker-compose exec mcphub sh + +# 检查数据库连接 +docker-compose exec postgres psql -U mcphub -d mcphub + +# 重启特定服务 +docker-compose restart mcphub + +# 重新构建并重启 +docker-compose up --build -d +``` + +### 性能优化 + +```yaml +services: + mcphub: + # ... 
其他配置 + deploy: + resources: + limits: + memory: 512M + cpus: '0.5' + reservations: + memory: 256M + cpus: '0.25' +``` + +此 Docker 设置为 MCPHub 提供了完整的容器化环境,包含开发和生产配置。 diff --git a/docs/zh/configuration/environment-variables.mdx b/docs/zh/configuration/environment-variables.mdx new file mode 100644 index 0000000000000000000000000000000000000000..ca80d3373b2021cdf854c966844d5cee987547ea --- /dev/null +++ b/docs/zh/configuration/environment-variables.mdx @@ -0,0 +1,389 @@ +--- +title: '环境变量配置' +description: '使用环境变量配置 MCPHub' +--- + +# 环境变量配置 + +MCPHub 使用环境变量进行配置。本指南涵盖所有可用变量及其用法。 + +## 核心应用设置 + +### 服务器配置 + +| 变量 | 默认值 | 描述 | +| ----------- | ------------- | ----------------------------------------------- | +| `PORT` | `3000` | HTTP 服务器端口号 | +| `HOST` | `0.0.0.0` | 服务器绑定的主机地址 | +| `NODE_ENV` | `development` | 应用环境(`development`、`production`、`test`) | +| `LOG_LEVEL` | `info` | 日志级别(`error`、`warn`、`info`、`debug`) | + +```env +PORT=3000 +HOST=0.0.0.0 +NODE_ENV=production +LOG_LEVEL=info +``` + +### 数据库配置 + +| 变量 | 默认值 | 描述 | +| -------------- | ----------- | --------------------- | +| `DATABASE_URL` | - | PostgreSQL 连接字符串 | +| `DB_HOST` | `localhost` | 数据库主机 | +| `DB_PORT` | `5432` | 数据库端口 | +| `DB_NAME` | `mcphub` | 数据库名称 | +| `DB_USER` | `mcphub` | 数据库用户名 | +| `DB_PASSWORD` | - | 数据库密码 | +| `DB_SSL` | `false` | 启用数据库 SSL 连接 | +| `DB_POOL_MIN` | `2` | 最小数据库连接池大小 | +| `DB_POOL_MAX` | `10` | 最大数据库连接池大小 | + +```env +# 选项 1:完整连接字符串 +DATABASE_URL=postgresql://username:password@localhost:5432/mcphub + +# 选项 2:单独组件 +DB_HOST=localhost +DB_PORT=5432 +DB_NAME=mcphub +DB_USER=mcphub +DB_PASSWORD=your-password +DB_SSL=false +``` + +## 认证与安全 + +### JWT 配置 + +| 变量 | 默认值 | 描述 | +| ------------------------ | ------- | ------------------------ | +| `JWT_SECRET` | - | JWT 令牌签名密钥(必需) | +| `JWT_EXPIRES_IN` | `24h` | JWT 令牌过期时间 | +| `JWT_REFRESH_EXPIRES_IN` | `7d` | 刷新令牌过期时间 | +| `JWT_ALGORITHM` | `HS256` | JWT 签名算法 | + +```env +JWT_SECRET=your-super-secret-key-change-this-in-production 
+JWT_EXPIRES_IN=24h +JWT_REFRESH_EXPIRES_IN=7d +``` + +### 会话与安全 + +| 变量 | 默认值 | 描述 | +| ------------------- | ------ | -------------------- | +| `SESSION_SECRET` | - | 会话加密密钥 | +| `BCRYPT_ROUNDS` | `12` | bcrypt 哈希轮数 | +| `RATE_LIMIT_WINDOW` | `15` | 速率限制窗口(分钟) | +| `RATE_LIMIT_MAX` | `100` | 每个窗口最大请求数 | +| `CORS_ORIGIN` | `*` | 允许的 CORS 来源 | + +```env +SESSION_SECRET=your-session-secret +BCRYPT_ROUNDS=12 +RATE_LIMIT_WINDOW=15 +RATE_LIMIT_MAX=100 +CORS_ORIGIN=https://your-domain.com,https://admin.your-domain.com +``` + +## 外部服务 + +### OpenAI 配置 + +| 变量 | 默认值 | 描述 | +| ------------------------ | ------------------------ | ------------------------------- | +| `OPENAI_API_KEY` | - | OpenAI API 密钥(用于智能路由) | +| `OPENAI_MODEL` | `gpt-3.5-turbo` | OpenAI 嵌入模型 | +| `OPENAI_EMBEDDING_MODEL` | `text-embedding-ada-002` | 向量嵌入模型 | +| `OPENAI_MAX_TOKENS` | `1000` | 每个请求最大令牌数 | +| `OPENAI_TEMPERATURE` | `0.1` | AI 响应温度 | + +```env +OPENAI_API_KEY=sk-your-openai-api-key +OPENAI_MODEL=gpt-3.5-turbo +OPENAI_EMBEDDING_MODEL=text-embedding-ada-002 +OPENAI_MAX_TOKENS=1000 +OPENAI_TEMPERATURE=0.1 +``` + +### Redis 配置(可选) + +| 变量 | 默认值 | 描述 | +| ---------------- | ----------- | ---------------- | +| `REDIS_URL` | - | Redis 连接字符串 | +| `REDIS_HOST` | `localhost` | Redis 主机 | +| `REDIS_PORT` | `6379` | Redis 端口 | +| `REDIS_PASSWORD` | - | Redis 密码 | +| `REDIS_DB` | `0` | Redis 数据库编号 | +| `REDIS_PREFIX` | `mcphub:` | Redis 键前缀 | + +```env +# 选项 1:完整连接字符串 +REDIS_URL=redis://username:password@localhost:6379/0 + +# 选项 2:单独组件 +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_PASSWORD=your-redis-password +REDIS_DB=0 +REDIS_PREFIX=mcphub: +``` + +## MCP 服务器配置 + +### 默认设置 + +| 变量 | 默认值 | 描述 | +| ------------------- | ------------------- | ---------------------------- | +| `MCP_SETTINGS_FILE` | `mcp_settings.json` | MCP 设置文件路径 | +| `MCP_SERVERS_FILE` | `servers.json` | 服务器配置文件路径 | +| `MCP_TIMEOUT` | `30000` | MCP 操作默认超时(毫秒) | +| `MCP_MAX_RETRIES` | `3` | 失败操作最大重试次数 | +| `MCP_RESTART_DELAY` | `5000` | 
重启失败服务器的延迟(毫秒) | + +```env +MCP_SETTINGS_FILE=./config/mcp_settings.json +MCP_SERVERS_FILE=./config/servers.json +MCP_TIMEOUT=30000 +MCP_MAX_RETRIES=3 +MCP_RESTART_DELAY=5000 +``` + +### 智能路由 + +| 变量 | 默认值 | 描述 | +| --------------------------- | ------ | ---------------------- | +| `SMART_ROUTING_ENABLED` | `true` | 启用 AI 驱动的智能路由 | +| `SMART_ROUTING_THRESHOLD` | `0.7` | 路由相似度阈值 | +| `SMART_ROUTING_MAX_RESULTS` | `5` | 返回的最大工具数 | +| `VECTOR_CACHE_TTL` | `3600` | 向量缓存 TTL(秒) | + +```env +SMART_ROUTING_ENABLED=true +SMART_ROUTING_THRESHOLD=0.7 +SMART_ROUTING_MAX_RESULTS=5 +VECTOR_CACHE_TTL=3600 +``` + +## 文件存储与上传 + +| 变量 | 默认值 | 描述 | +| -------------------- | ---------------- | -------------------------------- | +| `UPLOAD_DIR` | `./uploads` | 文件上传目录 | +| `MAX_FILE_SIZE` | `10485760` | 最大文件大小(字节,10MB) | +| `ALLOWED_FILE_TYPES` | `image/*,text/*` | 允许的 MIME 类型 | +| `STORAGE_TYPE` | `local` | 存储类型(`local`、`s3`、`gcs`) | + +```env +UPLOAD_DIR=./data/uploads +MAX_FILE_SIZE=10485760 +ALLOWED_FILE_TYPES=image/*,text/*,application/json +STORAGE_TYPE=local +``` + +### S3 存储(可选) + +| 变量 | 默认值 | 描述 | +| ---------------------- | ----------- | -------------- | +| `S3_BUCKET` | - | S3 存储桶名称 | +| `S3_REGION` | `us-east-1` | S3 区域 | +| `S3_ACCESS_KEY_ID` | - | S3 访问密钥 | +| `S3_SECRET_ACCESS_KEY` | - | S3 密钥 | +| `S3_ENDPOINT` | - | 自定义 S3 端点 | + +```env +S3_BUCKET=mcphub-uploads +S3_REGION=us-east-1 +S3_ACCESS_KEY_ID=your-access-key +S3_SECRET_ACCESS_KEY=your-secret-key +``` + +## 监控与日志 + +### 应用监控 + +| 变量 | 默认值 | 描述 | +| ------------------------ | ------- | -------------------- | +| `METRICS_ENABLED` | `true` | 启用指标收集 | +| `METRICS_PORT` | `9090` | 指标端点端口 | +| `HEALTH_CHECK_INTERVAL` | `30000` | 健康检查间隔(毫秒) | +| `PERFORMANCE_MONITORING` | `false` | 启用性能监控 | + +```env +METRICS_ENABLED=true +METRICS_PORT=9090 +HEALTH_CHECK_INTERVAL=30000 +PERFORMANCE_MONITORING=true +``` + +### 日志配置 + +| 变量 | 默认值 | 描述 | +| ------------------ | ------------ | -------------------------------- | +| 
`LOG_FORMAT` | `json` | 日志格式(`json`、`text`) | +| `LOG_FILE` | - | 日志文件路径(如果启用文件日志) | +| `LOG_MAX_SIZE` | `10m` | 最大日志文件大小 | +| `LOG_MAX_FILES` | `5` | 最大日志文件数 | +| `LOG_DATE_PATTERN` | `YYYY-MM-DD` | 日志轮换日期模式 | + +```env +LOG_FORMAT=json +LOG_FILE=./logs/mcphub.log +LOG_MAX_SIZE=10m +LOG_MAX_FILES=5 +LOG_DATE_PATTERN=YYYY-MM-DD +``` + +## 开发与调试 + +| 变量 | 默认值 | 描述 | +| ------------------------ | ------- | ------------------------------- | +| `DEBUG` | - | 调试命名空间(例如 `mcphub:*`) | +| `DEV_TOOLS_ENABLED` | `false` | 启用开发工具 | +| `HOT_RELOAD` | `true` | 在开发中启用热重载 | +| `MOCK_EXTERNAL_SERVICES` | `false` | 模拟外部 API 调用 | + +```env +DEBUG=mcphub:* +DEV_TOOLS_ENABLED=true +HOT_RELOAD=true +MOCK_EXTERNAL_SERVICES=false +``` + +## 生产优化 + +| 变量 | 默认值 | 描述 | +| ------------------ | ------- | ---------------------- | +| `CLUSTER_MODE` | `false` | 启用集群模式 | +| `WORKER_PROCESSES` | `0` | 工作进程数(0 = 自动) | +| `MEMORY_LIMIT` | - | 每个进程内存限制 | +| `CPU_LIMIT` | - | 每个进程 CPU 限制 | +| `GC_OPTIMIZE` | `false` | 启用垃圾回收优化 | + +```env +CLUSTER_MODE=true +WORKER_PROCESSES=4 +MEMORY_LIMIT=512M +GC_OPTIMIZE=true +``` + +## 配置示例 + +### 开发环境 + +```env +# .env.development +NODE_ENV=development +PORT=3000 +LOG_LEVEL=debug + +# 数据库 +DATABASE_URL=postgresql://mcphub:password@localhost:5432/mcphub_dev + +# 认证 +JWT_SECRET=dev-secret-key +JWT_EXPIRES_IN=24h + +# OpenAI(开发时可选) +# OPENAI_API_KEY=your-dev-key + +# 调试 +DEBUG=mcphub:* +DEV_TOOLS_ENABLED=true +HOT_RELOAD=true +``` + +### 生产环境 + +```env +# .env.production +NODE_ENV=production +PORT=3000 +LOG_LEVEL=info +LOG_FORMAT=json + +# 数据库 +DATABASE_URL=postgresql://mcphub:secure-password@db.example.com:5432/mcphub +DB_SSL=true +DB_POOL_MAX=20 + +# 安全 +JWT_SECRET=your-super-secure-production-secret +SESSION_SECRET=your-session-secret +BCRYPT_ROUNDS=14 + +# 外部服务 +OPENAI_API_KEY=your-production-openai-key +REDIS_URL=redis://redis.example.com:6379 + +# 监控 +METRICS_ENABLED=true +PERFORMANCE_MONITORING=true + +# 优化 +CLUSTER_MODE=true +GC_OPTIMIZE=true +``` + +### 
Docker 环境 + +```env +# .env.docker +NODE_ENV=production +HOST=0.0.0.0 +PORT=3000 + +# 使用 Docker 网络的服务名 +DATABASE_URL=postgresql://mcphub:password@postgres:5432/mcphub +REDIS_URL=redis://redis:6379 + +# 安全 +JWT_SECRET_FILE=/run/secrets/jwt_secret +DB_PASSWORD_FILE=/run/secrets/db_password + +# 容器中的文件路径 +MCP_SETTINGS_FILE=/app/mcp_settings.json +UPLOAD_DIR=/app/data/uploads +LOG_FILE=/app/logs/mcphub.log +``` + +## 环境变量加载 + +MCPHub 按以下顺序加载环境变量: + +1. 系统环境变量 +2. `.env.local`(被 git 忽略) +3. `.env.{NODE_ENV}`(例如 `.env.production`) +4. `.env` + +### 使用 dotenv-expand + +MCPHub 支持变量扩展: + +```env +BASE_URL=https://api.example.com +API_ENDPOINT=${BASE_URL}/v1 +DATABASE_URL=postgresql://${DB_USER}:${DB_PASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME} +``` + +## 安全最佳实践 + +1. **永远不要提交密钥**到版本控制 +2. **为生产使用强唯一密钥** +3. **定期轮换密钥** +4. **使用特定于环境的文件** +5. **在启动时验证所有环境变量** +6. **为容器部署使用 Docker 密钥** + +## 验证 + +MCPHub 在启动时验证环境变量。无效配置将阻止应用程序启动并提供有用的错误消息。 + +生产环境必需变量: + +- `JWT_SECRET` +- `DATABASE_URL` 或单独的数据库组件 +- `OPENAI_API_KEY`(如果启用智能路由) + +这个全面的环境配置确保 MCPHub 可以为任何部署场景正确配置。 diff --git a/docs/zh/configuration/mcp-settings.mdx b/docs/zh/configuration/mcp-settings.mdx new file mode 100644 index 0000000000000000000000000000000000000000..fdfdd89f52e9c86769b3836dc8a520612e689be6 --- /dev/null +++ b/docs/zh/configuration/mcp-settings.mdx @@ -0,0 +1,564 @@ +--- +title: 'MCP 设置配置' +description: '配置 MCPHub 的 MCP 服务器及其设置' +--- + +# MCP 设置配置 + +本指南说明如何使用 `mcp_settings.json` 文件和相关配置在 MCPHub 中配置 MCP 服务器。 + +## 配置文件概述 + +MCPHub 使用几个配置文件: + +- **`mcp_settings.json`**:主要的 MCP 服务器配置 +- **`servers.json`**:服务器元数据和分组 +- **`.env`**:环境变量和密钥 + +## 基本 MCP 设置结构 + +### mcp_settings.json + +```json +{ + "mcpServers": { + "server-name": { + "command": "command-to-run", + "args": ["arg1", "arg2"], + "env": { + "ENV_VAR": "value" + }, + "cwd": "/working/directory", + "timeout": 30000, + "restart": true + } + } +} +``` + +### 示例配置 + +```json +{ + "mcpServers": { + "fetch": { + "command": "uvx", + "args": 
["mcp-server-fetch"], + "env": { + "USER_AGENT": "MCPHub/1.0" + } + }, + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"], + "timeout": 60000 + }, + "slack": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-slack"], + "env": { + "SLACK_BOT_TOKEN": "${SLACK_BOT_TOKEN}", + "SLACK_TEAM_ID": "${SLACK_TEAM_ID}" + } + } + } +} +``` + +## 服务器配置选项 + +### 必需字段 + +| 字段 | 类型 | 描述 | +| --------- | ------ | ---------------- | +| `command` | string | 可执行命令或路径 | +| `args` | array | 命令行参数 | + +### 可选字段 + +| 字段 | 类型 | 默认值 | 描述 | +| -------------- | ------- | --------------- | ------------------ | +| `env` | object | `{}` | 环境变量 | +| `cwd` | string | `process.cwd()` | 工作目录 | +| `timeout` | number | `30000` | 启动超时(毫秒) | +| `restart` | boolean | `true` | 失败时自动重启 | +| `maxRestarts` | number | `5` | 最大重启次数 | +| `restartDelay` | number | `5000` | 重启间延迟(毫秒) | +| `stdio` | string | `pipe` | stdio 配置 | + +## 常见 MCP 服务器示例 + +### Web 和 API 服务器 + +#### Fetch 服务器 + +```json +{ + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"], + "env": { + "USER_AGENT": "MCPHub/1.0", + "MAX_REDIRECTS": "10" + } + } +} +``` + +#### 使用 Playwright 进行网页抓取 + +```json +{ + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"], + "timeout": 60000, + "env": { + "PLAYWRIGHT_BROWSERS_PATH": "/tmp/browsers" + } + } +} +``` + +### 文件和系统服务器 + +#### 文件系统服务器 + +```json +{ + "filesystem": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/allowed/path"], + "env": { + "ALLOWED_OPERATIONS": "read,write,list" + } + } +} +``` + +#### SQLite 服务器 + +```json +{ + "sqlite": { + "command": "uvx", + "args": ["mcp-server-sqlite", "--db-path", "/path/to/database.db"], + "env": { + "SQLITE_READONLY": "false" + } + } +} +``` + +### 通信服务器 + +#### Slack 服务器 + +```json +{ + "slack": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-slack"], + "env": { + "SLACK_BOT_TOKEN": 
"${SLACK_BOT_TOKEN}", + "SLACK_TEAM_ID": "${SLACK_TEAM_ID}", + "SLACK_APP_TOKEN": "${SLACK_APP_TOKEN}" + } + } +} +``` + +#### 邮件服务器 + +```json +{ + "email": { + "command": "python", + "args": ["-m", "mcp_server_email"], + "env": { + "SMTP_HOST": "smtp.gmail.com", + "SMTP_PORT": "587", + "EMAIL_USER": "${EMAIL_USER}", + "EMAIL_PASSWORD": "${EMAIL_PASSWORD}" + } + } +} +``` + +### 开发和 API 服务器 + +#### GitHub 服务器 + +```json +{ + "github": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-github"], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_TOKEN}" + } + } +} +``` + +#### Google Drive 服务器 + +```json +{ + "gdrive": { + "command": "npx", + "args": ["-y", "@google/mcp-server-gdrive"], + "env": { + "GOOGLE_CLIENT_ID": "${GOOGLE_CLIENT_ID}", + "GOOGLE_CLIENT_SECRET": "${GOOGLE_CLIENT_SECRET}", + "GOOGLE_REFRESH_TOKEN": "${GOOGLE_REFRESH_TOKEN}" + } + } +} +``` + +### 地图和位置服务 + +#### 高德地图服务器 + +```json +{ + "amap": { + "command": "npx", + "args": ["-y", "@amap/amap-maps-mcp-server"], + "env": { + "AMAP_MAPS_API_KEY": "${AMAP_API_KEY}", + "AMAP_LANGUAGE": "zh-cn" + } + } +} +``` + +#### OpenStreetMap 服务器 + +```json +{ + "osm": { + "command": "python", + "args": ["-m", "mcp_server_osm"], + "env": { + "OSM_USER_AGENT": "MCPHub/1.0" + } + } +} +``` + +## 高级配置 + +### 环境变量替换 + +MCPHub 支持使用 `${VAR_NAME}` 语法进行环境变量替换: + +```json +{ + "mcpServers": { + "api-server": { + "command": "python", + "args": ["-m", "api_server"], + "env": { + "API_KEY": "${API_KEY}", + "API_URL": "${API_BASE_URL}/v1", + "DEBUG": "${NODE_ENV:development}" + } + } + } +} +``` + +可以使用 `${VAR_NAME:default}` 指定默认值: + +```json +{ + "timeout": "${MCP_TIMEOUT:30000}", + "maxRestarts": "${MCP_MAX_RESTARTS:5}" +} +``` + +### 条件配置 + +根据环境使用不同配置: + +```json +{ + "mcpServers": { + "database": { + "command": "python", + "args": ["-m", "db_server"], + "env": { + "DB_URL": "${NODE_ENV:development == 'production' ? 
DATABASE_URL : DEV_DATABASE_URL}" + } + } + } +} +``` + +### 自定义服务器脚本 + +#### 本地 Python 服务器 + +```json +{ + "custom-python": { + "command": "python", + "args": ["./servers/custom_server.py"], + "cwd": "/app/custom-servers", + "env": { + "PYTHONPATH": "/app/custom-servers", + "CONFIG_FILE": "./config.json" + } + } +} +``` + +#### 本地 Node.js 服务器 + +```json +{ + "custom-node": { + "command": "node", + "args": ["./servers/custom-server.js"], + "cwd": "/app/custom-servers", + "env": { + "NODE_ENV": "production" + } + } +} +``` + +## 服务器元数据配置 + +### servers.json + +使用服务器元数据补充 `mcp_settings.json`: + +```json +{ + "servers": { + "fetch": { + "name": "Fetch 服务器", + "description": "用于网络请求的 HTTP 客户端", + "category": "web", + "tags": ["http", "api", "web"], + "version": "1.0.0", + "author": "MCPHub 团队", + "documentation": "https://docs.mcphub.com/servers/fetch", + "enabled": true + }, + "playwright": { + "name": "Playwright 浏览器", + "description": "网页自动化和抓取", + "category": "automation", + "tags": ["browser", "scraping", "automation"], + "version": "2.0.0", + "enabled": true + } + }, + "groups": { + "web-tools": { + "name": "网页工具", + "description": "用于网页交互的工具", + "servers": ["fetch", "playwright"], + "access": "public" + }, + "admin-tools": { + "name": "管理工具", + "description": "管理实用程序", + "servers": ["filesystem", "database"], + "access": "admin" + } + } +} +``` + +## 组管理 + +### 组配置 + +```json +{ + "groups": { + "production": { + "name": "生产工具", + "description": "稳定的生产服务器", + "servers": ["fetch", "slack", "github"], + "access": "authenticated", + "rateLimit": { + "requestsPerMinute": 100, + "burstLimit": 20 + } + }, + "experimental": { + "name": "实验功能", + "description": "测试版和实验性服务器", + "servers": ["experimental-ai", "beta-search"], + "access": "admin", + "enabled": false + } + } +} +``` + +### 访问控制 + +| 访问级别 | 描述 | +| --------------- | ------------------- | +| `public` | 无需认证 | +| `authenticated` | 需要有效的 JWT 令牌 | +| `admin` | 需要管理员角色 | +| `custom` | 自定义权限逻辑 | + +## 动态配置 + +### 
热重载 + +MCPHub 支持配置热重载: + +```bash +# 不重启重新加载配置 +curl -X POST http://localhost:3000/api/admin/reload-config \ + -H "Authorization: Bearer your-admin-token" +``` + +### 配置验证 + +MCPHub 在启动和重新加载时验证配置: + +```json +{ + "validation": { + "strict": true, + "allowUnknownServers": false, + "requireDocumentation": true + } +} +``` + +## 最佳实践 + +### 安全 + +1. **对敏感数据使用环境变量**: + + ```json + { + "env": { + "API_KEY": "${API_KEY}", + "DATABASE_PASSWORD": "${DB_PASSWORD}" + } + } + ``` + +2. **限制服务器权限**: + ```json + { + "filesystem": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/restricted/path"], + "env": { + "READONLY": "true" + } + } + } + ``` + +### 性能 + +1. **设置适当的超时**: + + ```json + { + "timeout": 30000, + "maxRestarts": 3, + "restartDelay": 5000 + } + ``` + +2. **资源限制**: + ```json + { + "env": { + "NODE_OPTIONS": "--max-old-space-size=512", + "MEMORY_LIMIT": "512MB" + } + } + ``` + +### 监控 + +1. **启用健康检查**: + + ```json + { + "healthCheck": { + "enabled": true, + "interval": 30000, + "timeout": 5000 + } + } + ``` + +2. **日志配置**: + ```json + { + "env": { + "LOG_LEVEL": "info", + "LOG_FORMAT": "json" + } + } + ``` + +## 故障排除 + +### 常见问题 + +**服务器无法启动**:检查命令和参数 + +```bash +# 手动测试命令 +uvx mcp-server-fetch +``` + +**找不到环境变量**:验证 `.env` 文件 + +```bash +# 检查环境 +printenv | grep API_KEY +``` + +**权限错误**:检查文件权限和路径 + +```bash +# 验证可执行权限 +ls -la /path/to/server +``` + +### 调试配置 + +启用调试模式进行详细日志记录: + +```json +{ + "debug": { + "enabled": true, + "logLevel": "debug", + "includeEnv": false, + "logStartup": true + } +} +``` + +### 验证错误 + +常见验证错误和解决方案: + +1. **缺少必需字段**:添加 `command` 和 `args` +2. **无效超时**:使用数字,不是字符串 +3. **找不到环境变量**:检查 `.env` 文件 +4. 
**找不到命令**:验证安装和 PATH + +这个全面的指南涵盖了在 MCPHub 中为各种用例和环境配置 MCP 服务器的所有方面。 diff --git a/docs/zh/configuration/nginx.mdx b/docs/zh/configuration/nginx.mdx new file mode 100644 index 0000000000000000000000000000000000000000..51bfb0a71d3eae091ce332b4df574912b48f5016 --- /dev/null +++ b/docs/zh/configuration/nginx.mdx @@ -0,0 +1,373 @@ +--- +title: 'Nginx 配置' +description: '配置 Nginx 作为 MCPHub 的反向代理' +--- + +# Nginx 配置 + +本指南说明如何配置 Nginx 作为 MCPHub 的反向代理,包括 SSL 终止、负载均衡和缓存策略。 + +## 基本反向代理设置 + +### 配置文件 + +创建或更新您的 Nginx 配置文件(`/etc/nginx/sites-available/mcphub`): + +```nginx +server { + listen 80; + server_name your-domain.com; + + # 将 HTTP 重定向到 HTTPS + return 301 https://$server_name$request_uri; +} + +server { + listen 443 ssl http2; + server_name your-domain.com; + + # SSL 配置 + ssl_certificate /path/to/your/certificate.crt; + ssl_certificate_key /path/to/your/private.key; + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384; + ssl_prefer_server_ciphers off; + + # 安全头 + add_header X-Frame-Options DENY; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always; + + # Gzip 压缩 + gzip on; + gzip_vary on; + gzip_min_length 1024; + gzip_types + text/plain + text/css + text/xml + text/javascript + application/json + application/javascript + application/xml+rss + application/atom+xml + image/svg+xml; + + # 主应用程序 + location / { + proxy_pass http://127.0.0.1:3000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + proxy_read_timeout 86400; + } + + # API 端点,为 MCP 操作设置更长的超时 + location /api/ { + proxy_pass http://127.0.0.1:3000; + proxy_http_version 
1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + proxy_read_timeout 300; + proxy_connect_timeout 60; + proxy_send_timeout 60; + } + + # 静态资源缓存 + location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ { + proxy_pass http://127.0.0.1:3000; + proxy_cache_valid 200 1d; + proxy_cache_valid 404 1m; + add_header Cache-Control "public, immutable"; + expires 1y; + } +} +``` + +### 启用配置 + +```bash +# 创建符号链接启用站点 +sudo ln -s /etc/nginx/sites-available/mcphub /etc/nginx/sites-enabled/ + +# 测试配置 +sudo nginx -t + +# 重新加载 Nginx +sudo systemctl reload nginx +``` + +## 负载均衡配置 + +对于具有多个 MCPHub 实例的高可用性设置: + +```nginx +upstream mcphub_backend { + least_conn; + server 127.0.0.1:3000 weight=1 max_fails=3 fail_timeout=30s; + server 127.0.0.1:3001 weight=1 max_fails=3 fail_timeout=30s; + server 127.0.0.1:3002 weight=1 max_fails=3 fail_timeout=30s; + + # 健康检查(Nginx Plus 功能) + # health_check interval=5s fails=3 passes=2; +} + +server { + listen 443 ssl http2; + server_name your-domain.com; + + # SSL 和其他配置... + + location / { + proxy_pass http://mcphub_backend; + # 其他代理设置... 
+ } +} +``` + +## 缓存配置 + +### 浏览器缓存 + +```nginx +# 缓存静态资源 +location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ { + proxy_pass http://127.0.0.1:3000; + expires 1y; + add_header Cache-Control "public, immutable"; +} + +# 缓存 API 响应(小心动态内容) +location /api/public/ { + proxy_pass http://127.0.0.1:3000; + proxy_cache mcphub_cache; + proxy_cache_valid 200 5m; + proxy_cache_key "$scheme$request_method$host$request_uri"; + add_header X-Cache-Status $upstream_cache_status; +} +``` + +### Nginx 代理缓存 + +在 `nginx.conf` 的 `http` 块中添加: + +```nginx +http { + # 代理缓存配置 + proxy_cache_path /var/cache/nginx/mcphub + levels=1:2 + keys_zone=mcphub_cache:10m + max_size=1g + inactive=60m + use_temp_path=off; + + # 其他配置... +} +``` + +## WebSocket 支持 + +对于实时功能和 SSE(服务器发送事件): + +```nginx +location /api/stream { + proxy_pass http://127.0.0.1:3000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # 禁用实时响应的缓冲 + proxy_buffering off; + proxy_cache off; + + # 长连接超时 + proxy_read_timeout 24h; + proxy_send_timeout 24h; +} +``` + +## 安全配置 + +### 速率限制 + +```nginx +http { + # 定义速率限制区域 + limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s; + limit_req_zone $binary_remote_addr zone=login:10m rate=1r/s; + + server { + # 对 API 端点应用速率限制 + location /api/ { + limit_req zone=api burst=20 nodelay; + # 其他配置... + } + + # 登录端点的严格速率限制 + location /api/auth/login { + limit_req zone=login burst=5; + # 其他配置... + } + } +} +``` + +### IP 白名单 + +```nginx +# 为管理端点允许特定 IP +location /api/admin/ { + allow 192.168.1.0/24; + allow 10.0.0.0/8; + deny all; + + proxy_pass http://127.0.0.1:3000; + # 其他代理设置... 
+} +``` + +## 监控和日志 + +### 访问日志 + +```nginx +http { + # 自定义日志格式 + log_format mcphub_format '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent" ' + '$request_time $upstream_response_time'; + + server { + # 启用访问日志 + access_log /var/log/nginx/mcphub_access.log mcphub_format; + error_log /var/log/nginx/mcphub_error.log; + + # 其他配置... + } +} +``` + +### 状态页面 + +```nginx +location /nginx_status { + stub_status; + allow 127.0.0.1; + deny all; +} +``` + +## Docker 集成 + +当在 Docker 中运行 MCPHub 时,更新代理配置: + +```nginx +upstream mcphub_docker { + server mcphub:3000; # Docker 服务名 +} + +server { + location / { + proxy_pass http://mcphub_docker; + # 其他代理设置... + } +} +``` + +## 完整示例配置 + +使用提供的 `nginx.conf.example` 的生产就绪示例: + +```bash +# 复制示例配置 +cp nginx.conf.example /etc/nginx/sites-available/mcphub + +# 使用您的域名和路径更新配置 +sudo nano /etc/nginx/sites-available/mcphub + +# 启用站点 +sudo ln -s /etc/nginx/sites-available/mcphub /etc/nginx/sites-enabled/ + +# 测试并重新加载 +sudo nginx -t && sudo systemctl reload nginx +``` + +## 故障排除 + +### 常见问题 + +**502 Bad Gateway**:检查 MCPHub 是否正在运行且可访问 + +**504 Gateway Timeout**:为长时间运行的操作增加 `proxy_read_timeout` + +**WebSocket 连接失败**:确保正确的 `Upgrade` 和 `Connection` 头 + +**缓存问题**:清除代理缓存或在开发中禁用 + +### 调试命令 + +```bash +# 测试 Nginx 配置 +sudo nginx -t + +# 检查 Nginx 状态 +sudo systemctl status nginx + +# 查看错误日志 +sudo tail -f /var/log/nginx/error.log + +# 检查 MCPHub 是否响应 +curl -I http://localhost:3000 +``` + +## 性能优化 + +### 工作进程 + +```nginx +# 在 nginx.conf 中 +worker_processes auto; +worker_connections 1024; +``` + +### 缓冲区大小 + +```nginx +proxy_buffering on; +proxy_buffer_size 128k; +proxy_buffers 4 256k; +proxy_busy_buffers_size 256k; +``` + +### Keep-Alive + +```nginx +upstream mcphub_backend { + server 127.0.0.1:3000; + keepalive 32; +} + +location / { + proxy_pass http://mcphub_backend; + proxy_http_version 1.1; + proxy_set_header Connection ""; +} +``` + +此配置为在 Nginx 后运行 MCPHub 
提供了坚实的基础,具有适当的安全性、性能和可靠性功能。 diff --git a/docs/zh/development.mdx b/docs/zh/development.mdx new file mode 100644 index 0000000000000000000000000000000000000000..5eff9cbf7adbddc3a3b2a7064d9f0d4889ad925f --- /dev/null +++ b/docs/zh/development.mdx @@ -0,0 +1,421 @@ +--- +title: '开发指南' +description: 'MCPHub 本地开发环境搭建和开发工作流指南' +--- + +## 概述 + +本指南将帮助您搭建 MCPHub 的本地开发环境,了解项目结构,并掌握开发工作流。 + +**前提条件**:请确保已安装 Node.js 18+ 和 Git。 + +## 环境准备 + +### 系统要求 + +在开始开发之前,请确保您的系统满足以下要求: + + + + - **Node.js**: 18.0+ 版本 - **npm**: 8.0+ 版本 - **Git**: 最新版本 - **Docker**: + 可选,用于容器化开发 + + + - **VS Code**: 推荐的代码编辑器 - **Postman**: API 测试工具 - **TablePlus**: 数据库管理工具 - + **Docker Desktop**: 容器管理 + + + +### 验证环境 + +```bash +# 检查 Node.js 版本 +node --version # 应该 >= 18.0.0 + +# 检查 npm 版本 +npm --version # 应该 >= 8.0.0 + +# 检查 Git 版本 +git --version + +# 检查 Docker(可选) +docker --version +``` + +## 克隆项目 + +### 获取源代码 + +```bash +# 克隆主仓库 +git clone https://github.com/mcphub/mcphub.git +cd mcphub + +# 或者克隆您的 fork +git clone https://github.com/YOUR_USERNAME/mcphub.git +cd mcphub +``` + +### 项目结构 + +``` +mcphub/ +├── src/ # 源代码目录 +│ ├── controllers/ # 控制器层 +│ ├── middleware/ # 中间件 +│ ├── models/ # 数据模型 +│ ├── routes/ # 路由定义 +│ ├── services/ # 业务逻辑层 +│ ├── utils/ # 工具函数 +│ └── index.ts # 应用入口 +├── tests/ # 测试文件 +├── docs/ # 文档源码 +├── docker/ # Docker 配置 +├── scripts/ # 构建脚本 +├── prisma/ # 数据库模式 +├── package.json # 项目依赖 +├── tsconfig.json # TypeScript 配置 +├── .env.example # 环境变量示例 +└── README.md # 项目说明 +``` + +## 安装依赖 + +### 安装项目依赖 + +```bash +# 安装生产和开发依赖 +npm install + +# 仅安装生产依赖 +npm ci --only=production +``` + +### 全局工具安装 + +```bash +# 安装 TypeScript 编译器 +npm install -g typescript + +# 安装开发工具 +npm install -g tsx nodemon prisma + +# 安装 MCPHub CLI(可选) +npm install -g @mcphub/cli +``` + +## 配置开发环境 + +### 环境变量配置 + +```bash +# 复制环境变量模板 +cp .env.example .env + +# 编辑环境变量 +nano .env +``` + +开发环境的 `.env` 配置示例: + +```bash title=".env" +# 应用配置 +NODE_ENV=development +PORT=3000 +HOST=localhost + +# 数据库配置 
+DATABASE_URL=sqlite:./data/dev.db + +# JWT 配置 +JWT_SECRET=dev-jwt-secret-key +JWT_EXPIRES_IN=7d + +# 日志配置 +LOG_LEVEL=debug +LOG_FORMAT=dev + +# CORS 配置 +CORS_ORIGIN=http://localhost:3000,http://localhost:3001 + +# 管理员账户 +ADMIN_EMAIL=dev@mcphub.io +ADMIN_PASSWORD=dev123 + +# 开发功能开关 +ENABLE_DEBUG_ROUTES=true +ENABLE_SWAGGER=true +ENABLE_HOT_RELOAD=true +``` + +### 数据库初始化 + +```bash +# 生成 Prisma 客户端 +npx prisma generate + +# 运行数据库迁移 +npx prisma migrate dev --name init + +# 填充测试数据 +npm run db:seed +``` + +## 启动开发服务器 + +### 开发模式启动 + +```bash +# 启动开发服务器(带热重载) +npm run dev + +# 或者使用 tsx 直接运行 +npx tsx watch src/index.ts +``` + +### 后台模式启动 + +```bash +# 使用 PM2 启动(需要先安装 PM2) +npm install -g pm2 +npm run dev:pm2 + +# 查看进程状态 +pm2 status + +# 查看日志 +pm2 logs mcphub-dev +``` + +### 验证启动 + +访问以下 URL 验证服务是否正常启动: + +- **主页**: http://localhost:3000 +- **健康检查**: http://localhost:3000/health +- **API 文档**: http://localhost:3000/api/docs +- **管理界面**: http://localhost:3000/admin + +## 开发工作流 + +### 1. 功能开发流程 + +```bash +# 1. 创建功能分支 +git checkout -b feature/your-feature-name + +# 2. 进行开发... + +# 3. 运行测试 +npm test + +# 4. 代码格式化 +npm run lint:fix + +# 5. 提交代码 +git add . +git commit -m "feat: add your feature description" + +# 6. 推送分支 +git push origin feature/your-feature-name + +# 7. 创建 Pull Request +``` + +### 2. 代码规范 + +MCPHub 项目使用以下代码规范工具: + +```bash +# 代码检查 +npm run lint + +# 自动修复 +npm run lint:fix + +# 格式化代码 +npm run format + +# 类型检查 +npm run type-check +``` + +### 3. 测试开发 + +```bash +# 运行所有测试 +npm test + +# 运行单元测试 +npm run test:unit + +# 运行集成测试 +npm run test:integration + +# 运行测试并生成覆盖率报告 +npm run test:coverage + +# 监听模式运行测试 +npm run test:watch +``` + +## 调试技巧 + +### 1. 
VS Code 调试配置 + +创建 `.vscode/launch.json` 文件: + +```json title=".vscode/launch.json" +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Debug MCPHub", + "type": "node", + "request": "launch", + "program": "${workspaceFolder}/src/index.ts", + "runtimeArgs": ["-r", "tsx/cjs"], + "env": { + "NODE_ENV": "development" + }, + "console": "integratedTerminal", + "skipFiles": ["/**"] + } + ] +} +``` + +### 2. 日志调试 + +使用内置的日志系统进行调试: + +```typescript +import { logger } from '@/utils/logger'; + +// 不同级别的日志 +logger.debug('调试信息', { data }); +logger.info('信息日志', { userId }); +logger.warn('警告信息', { error }); +logger.error('错误信息', { error, stack }); +``` + +### 3. 数据库调试 + +```bash +# 查看数据库内容 +npx prisma studio + +# 重置数据库 +npx prisma migrate reset + +# 查看迁移状态 +npx prisma migrate status +``` + +## 常用开发命令 + +### 项目管理 + +```bash +# 安装新依赖 +npm install package-name +npm install -D package-name # 开发依赖 + +# 更新依赖 +npm update + +# 清理缓存 +npm cache clean --force + +# 重新安装依赖 +rm -rf node_modules package-lock.json +npm install +``` + +### 构建和部署 + +```bash +# 构建项目 +npm run build + +# 预览构建结果 +npm run preview + +# 构建 Docker 镜像 +npm run docker:build + +# 运行 Docker 容器 +npm run docker:run +``` + +### 数据库操作 + +```bash +# 创建新迁移 +npx prisma migrate dev --name your-migration-name + +# 重置数据库 +npx prisma migrate reset + +# 推送模式变更 +npx prisma db push + +# 生成客户端 +npx prisma generate +``` + +## 常见问题 + + + + **错误信息**: `Error: listen EADDRINUSE :::3000` + + **解决方案**: + ```bash + # 查找占用端口的进程 + lsof -i :3000 + + # 杀死进程 + kill -9 PID + + # 或者使用不同端口 + PORT=3001 npm run dev + ``` + + + + **可能原因**: 数据库文件权限或路径问题 **解决方案**: ```bash # 检查数据库文件 ls -la data/ # + 重新初始化数据库 rm data/dev.db npx prisma migrate dev ``` + + + + **解决方案**: + ```bash + # 清理构建缓存 + npm run clean + + # 重新安装类型定义 + npm install @types/node @types/express + + # 重新生成 Prisma 客户端 + npx prisma generate + ``` + + + +## 进阶主题 + + + + 了解 MCPHub 的整体架构和设计模式 + + + 学习如何开发和设计 RESTful API + + + 掌握性能分析和优化技巧 + + + 了解生产环境部署的最佳实践 + + diff --git 
a/docs/zh/development/getting-started.mdx b/docs/zh/development/getting-started.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c319b0da3e92089e8943464c78662cf76c7a8bb5 --- /dev/null +++ b/docs/zh/development/getting-started.mdx @@ -0,0 +1,244 @@ +--- +title: '开发环境搭建' +description: '学习如何为 MCPHub 搭建开发环境' +--- + +# 开发环境搭建 + +本指南将帮助您搭建 MCPHub 的开发环境,为项目贡献代码。 + +## 先决条件 + +在开始之前,请确保您已安装以下软件: + +- **Node.js**(版本 18 或更高) +- **pnpm**(推荐的包管理器) +- **Git** +- **Docker**(可选,用于容器化开发) + +## 搭建开发环境 + +### 1. 克隆仓库 + +```bash +git clone https://github.com/your-username/mcphub.git +cd mcphub +``` + +### 2. 安装依赖 + +```bash +pnpm install +``` + +### 3. 环境配置 + +在根目录创建 `.env` 文件: + +```bash +cp .env.example .env +``` + +配置以下环境变量: + +```env +# 服务器配置 +PORT=3000 +NODE_ENV=development + +# 数据库配置 +DATABASE_URL=postgresql://username:password@localhost:5432/mcphub + +# JWT 配置 +JWT_SECRET=your-secret-key +JWT_EXPIRES_IN=24h + +# OpenAI 配置(用于智能路由) +OPENAI_API_KEY=your-openai-api-key +``` + +### 4. 数据库设置 + +如果使用 PostgreSQL,创建数据库: + +```bash +createdb mcphub +``` + +### 5. 
MCP 服务器配置 + +创建或修改 `mcp_settings.json`: + +```json +{ + "mcpServers": { + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"] + }, + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"] + } + } +} +``` + +## 开发工作流 + +### 运行开发服务器 + +同时启动后端和前端开发模式: + +```bash +pnpm dev +``` + +这将启动: + +- 后端服务器:`http://localhost:3000` +- 前端开发服务器:`http://localhost:5173` + +### 仅运行后端 + +```bash +pnpm backend:dev +``` + +### 仅运行前端 + +```bash +pnpm frontend:dev +``` + +### 构建项目 + +构建后端和前端: + +```bash +pnpm build +``` + +## 项目结构 + +``` +mcphub/ +├── src/ # 后端源代码 +│ ├── controllers/ # Express 控制器 +│ ├── routes/ # API 路由 +│ ├── services/ # 业务逻辑 +│ ├── models/ # 数据库模型 +│ └── utils/ # 工具函数 +├── frontend/ # 前端 React 应用 +│ ├── src/ +│ │ ├── components/ # React 组件 +│ │ ├── pages/ # 页面组件 +│ │ ├── services/ # API 服务 +│ │ └── utils/ # 前端工具 +├── docs/ # 文档 +├── bin/ # CLI 脚本 +└── scripts/ # 构建和工具脚本 +``` + +## 开发工具 + +### 代码检查和格式化 + +```bash +# 运行 ESLint +pnpm lint + +# 使用 Prettier 格式化代码 +pnpm format +``` + +### 测试 + +```bash +# 运行测试 +pnpm test + +# 监视模式运行测试 +pnpm test --watch +``` + +### 调试 + +使用 Node.js 检查器调试后端: + +```bash +pnpm backend:debug +``` + +然后将调试器连接到 `http://localhost:9229`。 + +## 进行修改 + +### 后端开发 + +1. **控制器**:处理 HTTP 请求和响应 +2. **服务**:实现业务逻辑 +3. **模型**:定义数据库架构 +4. **路由**:定义 API 端点 + +### 前端开发 + +1. **组件**:可重用的 React 组件 +2. **页面**:特定路由的组件 +3. **服务**:API 通信 +4. **钩子**:自定义 React 钩子 + +### 添加新的 MCP 服务器 + +1. 使用新的服务器配置更新 `mcp_settings.json` +2. 测试服务器集成 +3. 必要时更新文档 + +## 常见开发任务 + +### 添加新的 API 端点 + +1. 在 `src/controllers/` 中创建控制器 +2. 在 `src/routes/` 中定义路由 +3. 添加必要的中间件 +4. 为新端点编写测试 + +### 添加新的前端功能 + +1. 在 `frontend/src/components/` 中创建组件 +2. 根据需要添加路由 +3. 实现 API 集成 +4. 使用 Tailwind CSS 进行样式设计 + +### 数据库迁移 + +修改数据库架构时: + +1. 更新 `src/models/` 中的模型 +2. 如果使用 TypeORM,创建迁移脚本 +3. 
在本地测试迁移 + +## 故障排除 + +### 常见问题 + +**端口冲突**:确保端口 3000 和 5173 可用 + +**数据库连接**:验证 PostgreSQL 正在运行且凭据正确 + +**MCP 服务器启动**:检查 `mcp_settings.json` 中的服务器配置 + +**权限问题**:确保 MCP 服务器具有必要的权限 + +### 获取帮助 + +- 查看[贡献指南](/zh/development/contributing) +- 阅读[架构文档](/zh/development/architecture) +- 在 GitHub 上提交问题报告 bug +- 加入我们的社区讨论 + +## 下一步 + +- 阅读[架构概述](/zh/development/architecture) +- 了解[贡献指南](/zh/development/contributing) +- 探索[配置选项](/zh/configuration/environment-variables) diff --git a/docs/zh/essentials/code.mdx b/docs/zh/essentials/code.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d68342371d480bc4d208552cefad820cdd04784a --- /dev/null +++ b/docs/zh/essentials/code.mdx @@ -0,0 +1,892 @@ +--- +title: '代码块' +description: 'MCPHub 文档中代码块的编写和展示指南' +--- + +## 内联代码 + +在 MCPHub 文档中使用内联代码来标记命令、配置键、文件名或短代码片段: + +```md +使用 `mcphub start` 命令启动服务器,配置 `MCPHUB_PORT` 环境变量。 +``` + +使用 `mcphub start` 命令启动服务器,配置 `MCPHUB_PORT` 环境变量。 + +## 代码块语法 + +### 基本代码块 + +MCPHub 支持多种编程语言的语法高亮: + +````md +```javascript +// JavaScript 示例 +const mcpClient = new MCPClient({ + endpoint: process.env.MCPHUB_ENDPOINT, + apiKey: process.env.MCPHUB_API_KEY, +}); +``` +```` + +```javascript +// JavaScript 示例 +const mcpClient = new MCPClient({ + endpoint: process.env.MCPHUB_ENDPOINT, + apiKey: process.env.MCPHUB_API_KEY, +}); +``` + +### TypeScript 代码 + +````md +```typescript +interface MCPServerConfig { + id: string; + name: string; + endpoint: string; + capabilities: string[]; + metadata?: Record; +} + +class MCPServer implements MCPServerConfig { + constructor( + public id: string, + public name: string, + public endpoint: string, + public capabilities: string[], + ) {} +} +``` +```` + +```typescript +interface MCPServerConfig { + id: string; + name: string; + endpoint: string; + capabilities: string[]; + metadata?: Record; +} + +class MCPServer implements MCPServerConfig { + constructor( + public id: string, + public name: string, + public endpoint: string, + public capabilities: string[], + 
) {} +} +``` + +### Python 代码 + +````md +```python +import requests +from typing import Dict, List, Optional + +class MCPHubClient: + def __init__(self, endpoint: str, api_key: str): + self.endpoint = endpoint + self.api_key = api_key + self.headers = { + 'Authorization': f'Bearer {api_key}', + 'Content-Type': 'application/json' + } + + def create_server(self, config: Dict) -> Dict: + response = requests.post( + f'{self.endpoint}/api/servers', + json=config, + headers=self.headers + ) + return response.json() +``` +```` + +```python +import requests +from typing import Dict, List, Optional + +class MCPHubClient: + def __init__(self, endpoint: str, api_key: str): + self.endpoint = endpoint + self.api_key = api_key + self.headers = { + 'Authorization': f'Bearer {api_key}', + 'Content-Type': 'application/json' + } + + def create_server(self, config: Dict) -> Dict: + response = requests.post( + f'{self.endpoint}/api/servers', + json=config, + headers=self.headers + ) + return response.json() +``` + +## 配置文件 + +### YAML 配置 + +````md +```yaml title="mcphub.yml" +server: + port: 3000 + host: 0.0.0.0 + +database: + type: postgresql + host: localhost + port: 5432 + database: mcphub + username: mcphub_user + password: secure_password + +mcp: + servers: + - id: ai-assistant + name: AI Assistant Server + endpoint: https://ai.example.com + capabilities: + - chat + - completion + - id: data-processor + name: Data Processing Server + endpoint: https://data.example.com + capabilities: + - analysis + - transformation + +routing: + strategy: round_robin + health_check: + enabled: true + interval: 30s + timeout: 5s + +logging: + level: info + format: json + file: /var/log/mcphub/app.log +``` +```` + +```yaml title="mcphub.yml" +server: + port: 3000 + host: 0.0.0.0 + +database: + type: postgresql + host: localhost + port: 5432 + database: mcphub + username: mcphub_user + password: secure_password + +mcp: + servers: + - id: ai-assistant + name: AI Assistant Server + endpoint: 
https://ai.example.com + capabilities: + - chat + - completion + - id: data-processor + name: Data Processing Server + endpoint: https://data.example.com + capabilities: + - analysis + - transformation + +routing: + strategy: round_robin + health_check: + enabled: true + interval: 30s + timeout: 5s + +logging: + level: info + format: json + file: /var/log/mcphub/app.log +``` + +### JSON 配置 + +````md +```json title="package.json" +{ + "name": "@mcphub/server", + "version": "2.1.0", + "description": "Model Context Protocol Hub Server", + "main": "dist/index.js", + "scripts": { + "start": "node dist/index.js", + "dev": "tsx watch src/index.ts", + "build": "tsc", + "test": "jest", + "test:watch": "jest --watch", + "lint": "eslint src/**/*.ts", + "migrate": "prisma migrate deploy" + }, + "dependencies": { + "@prisma/client": "^5.7.0", + "express": "^4.18.2", + "helmet": "^7.1.0", + "cors": "^2.8.5", + "jsonwebtoken": "^9.0.2", + "bcryptjs": "^2.4.3", + "winston": "^3.11.0" + }, + "devDependencies": { + "@types/node": "^20.10.0", + "@types/express": "^4.17.21", + "typescript": "^5.3.0", + "tsx": "^4.6.0", + "jest": "^29.7.0", + "eslint": "^8.55.0" + } +} +``` +```` + +```json title="package.json" +{ + "name": "@mcphub/server", + "version": "2.1.0", + "description": "Model Context Protocol Hub Server", + "main": "dist/index.js", + "scripts": { + "start": "node dist/index.js", + "dev": "tsx watch src/index.ts", + "build": "tsc", + "test": "jest", + "test:watch": "jest --watch", + "lint": "eslint src/**/*.ts", + "migrate": "prisma migrate deploy" + }, + "dependencies": { + "@prisma/client": "^5.7.0", + "express": "^4.18.2", + "helmet": "^7.1.0", + "cors": "^2.8.5", + "jsonwebtoken": "^9.0.2", + "bcryptjs": "^2.4.3", + "winston": "^3.11.0" + }, + "devDependencies": { + "@types/node": "^20.10.0", + "@types/express": "^4.17.21", + "typescript": "^5.3.0", + "tsx": "^4.6.0", + "jest": "^29.7.0", + "eslint": "^8.55.0" + } +} +``` + +### Docker 配置 + +````md +```dockerfile 
title="Dockerfile" +FROM node:18-alpine AS builder + +WORKDIR /app + +# 复制依赖文件 +COPY package*.json ./ +COPY tsconfig.json ./ + +# 安装依赖 +RUN npm ci --only=production + +# 复制源码 +COPY src/ ./src/ + +# 构建应用 +RUN npm run build + +# 生产环境镜像 +FROM node:18-alpine AS production + +WORKDIR /app + +# 创建非 root 用户 +RUN addgroup -g 1001 -S nodejs +RUN adduser -S mcphub -u 1001 + +# 复制构建产物 +COPY --from=builder /app/dist ./dist +COPY --from=builder /app/node_modules ./node_modules +COPY --from=builder /app/package*.json ./ + +# 设置权限 +USER mcphub + +# 健康检查 +HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ + CMD node dist/health-check.js + +EXPOSE 3000 + +CMD ["node", "dist/index.js"] +``` +```` + +```dockerfile title="Dockerfile" +FROM node:18-alpine AS builder + +WORKDIR /app + +# 复制依赖文件 +COPY package*.json ./ +COPY tsconfig.json ./ + +# 安装依赖 +RUN npm ci --only=production + +# 复制源码 +COPY src/ ./src/ + +# 构建应用 +RUN npm run build + +# 生产环境镜像 +FROM node:18-alpine AS production + +WORKDIR /app + +# 创建非 root 用户 +RUN addgroup -g 1001 -S nodejs +RUN adduser -S mcphub -u 1001 + +# 复制构建产物 +COPY --from=builder /app/dist ./dist +COPY --from=builder /app/node_modules ./node_modules +COPY --from=builder /app/package*.json ./ + +# 设置权限 +USER mcphub + +# 健康检查 +HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ + CMD node dist/health-check.js + +EXPOSE 3000 + +CMD ["node", "dist/index.js"] +``` + +## 终端命令 + +### Bash/Shell 命令 + +````md +```bash +# 克隆 MCPHub 仓库 +git clone https://github.com/mcphub/mcphub.git +cd mcphub + +# 安装依赖 +npm install + +# 复制环境变量文件 +cp .env.example .env + +# 设置数据库 +npm run db:setup + +# 启动开发服务器 +npm run dev + +# 构建生产版本 +npm run build + +# 启动生产服务器 +npm start +``` +```` + +```bash +# 克隆 MCPHub 仓库 +git clone https://github.com/mcphub/mcphub.git +cd mcphub + +# 安装依赖 +npm install + +# 复制环境变量文件 +cp .env.example .env + +# 设置数据库 +npm run db:setup + +# 启动开发服务器 +npm run dev + +# 构建生产版本 +npm run build + +# 启动生产服务器 +npm start +``` + +### 
PowerShell 命令 + +````md +```powershell +# Windows PowerShell 安装步骤 +# 克隆仓库 +git clone https://github.com/mcphub/mcphub.git +Set-Location mcphub + +# 安装 Node.js 依赖 +npm install + +# 复制环境变量文件 +Copy-Item .env.example .env + +# 启动开发服务器 +npm run dev +``` +```` + +```powershell +# Windows PowerShell 安装步骤 +# 克隆仓库 +git clone https://github.com/mcphub/mcphub.git +Set-Location mcphub + +# 安装 Node.js 依赖 +npm install + +# 复制环境变量文件 +Copy-Item .env.example .env + +# 启动开发服务器 +npm run dev +``` + +### Docker 命令 + +````md +```bash +# 使用 Docker 运行 MCPHub +docker run -d \ + --name mcphub \ + -p 3000:3000 \ + -e NODE_ENV=production \ + -e DATABASE_URL=postgresql://user:pass@host:5432/mcphub \ + -e JWT_SECRET=your-secret-key \ + mcphub/server:latest + +# 查看日志 +docker logs mcphub + +# 进入容器 +docker exec -it mcphub sh + +# 停止容器 +docker stop mcphub + +# 使用 Docker Compose +docker-compose up -d +``` +```` + +```bash +# 使用 Docker 运行 MCPHub +docker run -d \ + --name mcphub \ + -p 3000:3000 \ + -e NODE_ENV=production \ + -e DATABASE_URL=postgresql://user:pass@host:5432/mcphub \ + -e JWT_SECRET=your-secret-key \ + mcphub/server:latest + +# 查看日志 +docker logs mcphub + +# 进入容器 +docker exec -it mcphub sh + +# 停止容器 +docker stop mcphub + +# 使用 Docker Compose +docker-compose up -d +``` + +## API 请求示例 + +### cURL 命令 + +````md +```bash +# 创建新的 MCP 服务器 +curl -X POST https://api.mcphub.io/api/servers \ + -H "Authorization: Bearer YOUR_API_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "AI Assistant Server", + "endpoint": "https://ai.example.com", + "capabilities": ["chat", "completion"], + "groupId": "production" + }' + +# 获取服务器列表 +curl -X GET "https://api.mcphub.io/api/servers?limit=10&active=true" \ + -H "Authorization: Bearer YOUR_API_TOKEN" + +# 更新服务器配置 +curl -X PUT https://api.mcphub.io/api/servers/server-123 \ + -H "Authorization: Bearer YOUR_API_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Updated AI Assistant", + "active": true + }' + +# 删除服务器 +curl -X 
DELETE https://api.mcphub.io/api/servers/server-123 \ + -H "Authorization: Bearer YOUR_API_TOKEN" +``` +```` + +```bash +# 创建新的 MCP 服务器 +curl -X POST https://api.mcphub.io/api/servers \ + -H "Authorization: Bearer YOUR_API_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "AI Assistant Server", + "endpoint": "https://ai.example.com", + "capabilities": ["chat", "completion"], + "groupId": "production" + }' + +# 获取服务器列表 +curl -X GET "https://api.mcphub.io/api/servers?limit=10&active=true" \ + -H "Authorization: Bearer YOUR_API_TOKEN" + +# 更新服务器配置 +curl -X PUT https://api.mcphub.io/api/servers/server-123 \ + -H "Authorization: Bearer YOUR_API_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Updated AI Assistant", + "active": true + }' + +# 删除服务器 +curl -X DELETE https://api.mcphub.io/api/servers/server-123 \ + -H "Authorization: Bearer YOUR_API_TOKEN" +``` + +### HTTP 请求示例 + +````md +```http +POST /api/servers HTTP/1.1 +Host: api.mcphub.io +Authorization: Bearer YOUR_API_TOKEN +Content-Type: application/json + +{ + "name": "AI Assistant Server", + "endpoint": "https://ai.example.com", + "capabilities": ["chat", "completion"], + "groupId": "production" +} +``` +```` + +```http +POST /api/servers HTTP/1.1 +Host: api.mcphub.io +Authorization: Bearer YOUR_API_TOKEN +Content-Type: application/json + +{ + "name": "AI Assistant Server", + "endpoint": "https://ai.example.com", + "capabilities": ["chat", "completion"], + "groupId": "production" +} +``` + +## 数据库查询 + +### SQL 查询 + +````md +```sql +-- 查询活跃的 MCP 服务器 +SELECT + id, + name, + endpoint, + status, + created_at +FROM mcp_servers +WHERE status = 'active' +ORDER BY created_at DESC; + +-- 统计每个组的服务器数量 +SELECT + g.name as group_name, + COUNT(s.id) as server_count +FROM server_groups g +LEFT JOIN mcp_servers s ON g.id = s.group_id +GROUP BY g.id, g.name +ORDER BY server_count DESC; + +-- 查询最近的错误日志 +SELECT + timestamp, + level, + message, + metadata +FROM logs +WHERE level = 'error' + AND 
timestamp >= NOW() - INTERVAL '1 hour' +ORDER BY timestamp DESC +LIMIT 50; +``` +```` + +```sql +-- 查询活跃的 MCP 服务器 +SELECT + id, + name, + endpoint, + status, + created_at +FROM mcp_servers +WHERE status = 'active' +ORDER BY created_at DESC; + +-- 统计每个组的服务器数量 +SELECT + g.name as group_name, + COUNT(s.id) as server_count +FROM server_groups g +LEFT JOIN mcp_servers s ON g.id = s.group_id +GROUP BY g.id, g.name +ORDER BY server_count DESC; + +-- 查询最近的错误日志 +SELECT + timestamp, + level, + message, + metadata +FROM logs +WHERE level = 'error' + AND timestamp >= NOW() - INTERVAL '1 hour' +ORDER BY timestamp DESC +LIMIT 50; +``` + +## 代码块最佳实践 + +### 1. 语言标识 + +始终为代码块指定正确的语言: + +````md +````javascript // ✅ 正确 +```js // ✅ 也可以 +```; // ❌ 避免无语言标识 +```` +```` + +### 2. 文件名标题 + +为配置文件和示例添加文件名: + +````md +```yaml title="docker-compose.yml" +version: '3.8' +services: + mcphub: + image: mcphub/server:latest +``` +```` + +### 3. 突出显示重要行 + +使用行号高亮重要代码: + +````md +```javascript {3,7-9} +const express = require('express'); +const app = express(); +const port = process.env.PORT || 3000; // 重要:端口配置 + +app.get('/health', (req, res) => { + res.json({ status: 'ok' }); +}); +app.listen(port, () => { + // 重要:服务器启动 + console.log(`Server running on port ${port}`); +}); // 重要:结束 +``` +```` + +### 4. 代码注释 + +添加有意义的中文注释: + +```javascript +// 初始化 MCPHub 客户端 +const client = new MCPHubClient({ + endpoint: 'https://api.mcphub.io', + apiKey: process.env.API_KEY, + timeout: 30000, // 30 秒超时 + retries: 3, // 重试 3 次 +}); + +// 配置路由策略 +client.setRoutingStrategy({ + type: 'weighted', // 加权轮询 + healthCheck: true, // 启用健康检查 + fallback: 'round_robin', // 降级策略 +}); +``` + +### 5. 
错误处理示例 + +展示完整的错误处理: + +```javascript +try { + const server = await mcpClient.createServer({ + name: 'AI Assistant', + endpoint: 'https://ai.example.com', + }); + + console.log('服务器创建成功:', server.id); +} catch (error) { + if (error.code === 'DUPLICATE_SERVER') { + console.log('服务器已存在,跳过创建'); + } else if (error.code === 'INVALID_ENDPOINT') { + console.error('无效的端点地址:', error.message); + } else { + console.error('创建失败:', error.message); + throw error; // 重新抛出未知错误 + } +} +``` + +## 支持的语言 + +MCPHub 文档支持以下编程语言的语法高亮: + +- **JavaScript/TypeScript**: `javascript`, `js`, `typescript`, `ts` +- **Python**: `python`, `py` +- **Shell/Bash**: `bash`, `shell`, `sh` +- **PowerShell**: `powershell`, `ps1` +- **SQL**: `sql`, `postgresql`, `mysql` +- **YAML**: `yaml`, `yml` +- **JSON**: `json` +- **XML**: `xml` +- **HTML**: `html` +- **CSS**: `css` +- **Dockerfile**: `dockerfile` +- **Go**: `go` +- **Rust**: `rust` +- **Java**: `java` +- **C#**: `csharp`, `cs` +- **PHP**: `php` +- **Ruby**: `ruby` +- **HTTP**: `http` +- **Markdown**: `markdown`, `md` + +````` + +### 使用三个反引号 + +````md +```javascript +console.log('Hello World'); +````` + +````` + +### 语法高亮 + +我们使用 [Prism](https://prismjs.com/) 来语法高亮显示。Prism 支持 [各种编程语言](https://prismjs.com/#supported-languages)。 + +要添加语法高亮显示,请在代码块的第一行指定语言。 + +````md +```python +def hello(): + print("Hello World") +``` +````` + +```python +def hello(): + print("Hello World") +``` + +## 代码组 + + + +```bash npm +npm i mintlify +``` + +```bash yarn +yarn add mintlify +``` + +```bash pnpm +pnpm add mintlify +``` + + + +`CodeGroup` 允许您将多个代码块组合在一起,并为它们提供选项卡。 + +````md + + +```bash npm +npm i mintlify +``` + +```bash yarn +yarn add mintlify +``` + +```bash pnpm +pnpm add mintlify +``` + + +```` + +### 代码标题 + +您也可以为代码块设置标题: + +```javascript hello.js +const hello = 'world'; +console.log(hello); +``` + +````md +```javascript hello.js +const hello = 'world'; +console.log(hello); +``` +```` diff --git a/docs/zh/essentials/images.mdx b/docs/zh/essentials/images.mdx 
new file mode 100644 index 0000000000000000000000000000000000000000..7c5e885737364a6fa257c833932baf0032ec2ea3 --- /dev/null +++ b/docs/zh/essentials/images.mdx @@ -0,0 +1,134 @@ +--- +title: '图片和视频' +description: '在您的文档中添加图片和视频' +--- + +## 图片 + +### 使用 Markdown + +您可以使用标准的 Markdown 语法添加图片: + +![描述](/images/hero-light.png) + +```md +![描述](/images/hero-light.png) +``` + +### 使用 HTML + +您也可以使用原始 HTML 获得更多自定义选项: + + + +```html + +``` + +### 图片组件 + +使用内置的 `` 组件来显示响应式的明暗主题图片: + +Hero Light +Hero Dark + +```jsx +Hero Light +Hero Dark +``` + +## 图片缩放 + +您可以使图片在点击时可缩放(类似于中等缩放)使用 `zoom` 属性。 + +可缩放 + +```jsx +可缩放 +``` + +## 嵌入视频 + +### YouTube + + + +```html + +``` + +### Loom + + + +```html + +``` + +## 图片最佳实践 + +### 大小优化 + +- 保持图片尺寸在合理范围内(通常不超过 1000px 宽度) +- 使用适当的图片格式(PNG 用于图标,JPG 用于照片) +- 考虑使用现代格式如 WebP 来减少文件大小 + +### 无障碍性 + +- 始终包含描述性的 `alt` 文本 +- 确保图片在各种屏幕尺寸下都能正常显示 + +### 组织 + +- 将图片存储在 `/images` 或 `/assets` 文件夹中 +- 使用描述性的文件名 diff --git a/docs/zh/essentials/markdown.mdx b/docs/zh/essentials/markdown.mdx new file mode 100644 index 0000000000000000000000000000000000000000..bc182965cbb6651771bd6cb0c7c3bb0d89170125 --- /dev/null +++ b/docs/zh/essentials/markdown.mdx @@ -0,0 +1,412 @@ +--- +title: 'Markdown 语法' +description: 'MCPHub 文档的 Markdown 编写指南和最佳实践' +--- + +## 标题 + +在 MCPHub 文档中,每个页面应该只使用一个 `#` 标题,它会自动成为页面标题。 + +```md +# MCP 服务器配置指南 + +## 快速开始 + +### 安装依赖 + +#### 系统要求 + +##### Node.js 版本 + +###### 推荐版本 +``` + + + +# 标题 1 + +## 标题 2 + +### 标题 3 + +#### 标题 4 + +##### 标题 5 + +###### 标题 6 + + + +## 文本格式 + +### 基本格式 + +MCPHub 文档支持标准的 Markdown 文本格式: + +```md +**粗体文本** - 用于强调重要概念 +_斜体文本_ - 用于强调或引用 +`行内代码` - 用于命令、配置键或代码片段 +~~删除线~~ - 用于标记过时的内容 +``` + +**粗体文本** - 用于强调重要概念 +_斜体文本_ - 用于强调或引用 +`行内代码` - 用于命令、配置键或代码片段 +~~删除线~~ - 用于标记过时的内容 + +### 链接 + +#### 内部链接 + +链接到其他文档页面: + +```md +查看 [服务器配置指南](/zh/configuration/mcp-settings) 获取详细信息。 +``` + +查看 [服务器配置指南](/zh/configuration/mcp-settings) 获取详细信息。 + +#### 外部链接 + +```md +访问 [Model Context Protocol 官网](https://modelcontextprotocol.io) 
了解更多。 +``` + +访问 [Model Context Protocol 官网](https://modelcontextprotocol.io) 了解更多。 + +## 列表 + +### 无序列表 + +适用于功能列表、要求等: + +```md +MCPHub 主要功能: + +- 智能路由分发 +- 服务器组管理 +- 实时监控 +- 身份认证 + - JWT 令牌 + - API 密钥 + - OAuth 2.0 +``` + +MCPHub 主要功能: + +- 智能路由分发 +- 服务器组管理 +- 实时监控 +- 身份认证 + - JWT 令牌 + - API 密钥 + - OAuth 2.0 + +### 有序列表 + +适用于步骤说明、安装指南等: + +```md +快速部署步骤: + +1. 克隆仓库 +2. 安装依赖 +3. 配置环境变量 +4. 启动服务 +5. 验证部署 +``` + +快速部署步骤: + +1. 克隆仓库 +2. 安装依赖 +3. 配置环境变量 +4. 启动服务 +5. 验证部署 + +## 代码块 + +### 基本代码块 + +````md +```javascript +// MCPHub 客户端初始化 +const mcpClient = new MCPClient({ + endpoint: 'https://api.mcphub.io', + apiKey: process.env.MCPHUB_API_KEY, +}); +``` +```` + +```javascript +// MCPHub 客户端初始化 +const mcpClient = new MCPClient({ + endpoint: 'https://api.mcphub.io', + apiKey: process.env.MCPHUB_API_KEY, +}); +``` + +### 配置文件示例 + +````md +```yaml title="docker-compose.yml" +version: '3.8' +services: + mcphub: + image: mcphub/server:latest + ports: + - '3000:3000' + environment: + - NODE_ENV=production + - DATABASE_URL=postgresql://user:pass@db:5432/mcphub +``` +```` + +```yaml title="docker-compose.yml" +version: '3.8' +services: + mcphub: + image: mcphub/server:latest + ports: + - '3000:3000' + environment: + - NODE_ENV=production + - DATABASE_URL=postgresql://user:pass@db:5432/mcphub +``` + +### 终端命令 + +````md +```bash +# 安装 MCPHub CLI +npm install -g @mcphub/cli + +# 初始化项目 +mcphub init my-project + +# 启动开发服务器 +mcphub dev +``` +```` + +```bash +# 安装 MCPHub CLI +npm install -g @mcphub/cli + +# 初始化项目 +mcphub init my-project + +# 启动开发服务器 +mcphub dev +``` + +## 表格 + +### 基本表格 + +```md +| 功能 | 开源版 | 企业版 | +| ------------ | ------ | ------ | +| 基础路由 | ✅ | ✅ | +| 智能负载均衡 | ❌ | ✅ | +| 高级监控 | ❌ | ✅ | +| 24/7 支持 | ❌ | ✅ | +``` + +| 功能 | 开源版 | 企业版 | +| ------------ | ------ | ------ | +| 基础路由 | ✅ | ✅ | +| 智能负载均衡 | ❌ | ✅ | +| 高级监控 | ❌ | ✅ | +| 24/7 支持 | ❌ | ✅ | + +### API 参数表格 + +```md +| 参数名 | 类型 | 必需 | 描述 | +| ---------- | ------- | ---- | ---------------------- | +| `serverId` 
| string | 是 | 服务器唯一标识符 | +| `groupId` | string | 否 | 服务器组 ID | +| `active` | boolean | 否 | 是否激活(默认:true) | +``` + +| 参数名 | 类型 | 必需 | 描述 | +| ---------- | ------- | ---- | ---------------------- | +| `serverId` | string | 是 | 服务器唯一标识符 | +| `groupId` | string | 否 | 服务器组 ID | +| `active` | boolean | 否 | 是否激活(默认:true) | + +## 引用块 + +### 信息提示 + +```md +> 📝 **提示** +> 在生产环境中部署前,请确保已正确配置所有环境变量。 +``` + +> 📝 **提示** +> 在生产环境中部署前,请确保已正确配置所有环境变量。 + +### 警告信息 + +```md +> ⚠️ **警告** +> 修改核心配置可能会影响系统稳定性,请谨慎操作。 +``` + +> ⚠️ **警告** +> 修改核心配置可能会影响系统稳定性,请谨慎操作。 + +## 任务列表 + +```md +- [x] 完成服务器配置 +- [x] 设置数据库连接 +- [ ] 配置负载均衡 +- [ ] 设置监控告警 +- [ ] 编写单元测试 +``` + +- [x] 完成服务器配置 +- [x] 设置数据库连接 +- [ ] 配置负载均衡 +- [ ] 设置监控告警 +- [ ] 编写单元测试 + +## 水平分割线 + +用于分隔不同的内容部分: + +```md +## 第一部分 + +内容... + +--- + +## 第二部分 + +更多内容... +``` + +--- + +## 转义字符 + +当需要显示 Markdown 特殊字符时: + +```md +\*这不是斜体\* +\`这不是代码\` +\[这不是链接\] +``` + +\*这不是斜体\* +\`这不是代码\` +\[这不是链接\] + +## MCPHub 文档特定约定 + +### 配置项格式 + +环境变量和配置项使用特定格式: + +```md +设置 `MCPHUB_PORT` 环境变量为 `3000`。 +``` + +设置 `MCPHUB_PORT` 环境变量为 `3000`。 + +### API 端点格式 + +```md +`GET /api/servers/{id}` - 获取服务器详情 +``` + +`GET /api/servers/{id}` - 获取服务器详情 + +### 版本标记 + +```md +该功能在 v2.1.0+ 版本中可用。 +``` + +该功能在 v2.1.0+ 版本中可用。 + +## 最佳实践 + +1. **标题层级**:保持清晰的标题层级结构 +2. **代码示例**:为所有代码块指定语言 +3. **链接检查**:确保所有内部链接有效 +4. **图片描述**:为图片添加有意义的 alt 文本 +5. **一致性**:在整个文档中保持术语和格式一致 + +### 文档模板示例 + +````md +--- +title: '功能名称' +description: '简短的功能描述' +--- + +## 概述 + +简要介绍该功能的用途和重要性。 + +## 快速开始 + +### 前提条件 + +- 系统要求 +- 依赖软件 + +### 安装步骤 + +1. 第一步 +2. 第二步 +3. 第三步 + +```bash +# 示例命令 +npm install example +``` + +## 配置 + +### 基本配置 + +| 配置项 | 类型 | 描述 | +| --------- | ------ | -------- | +| `option1` | string | 选项描述 | + +### 高级配置 + +详细的配置说明... + +## 示例 + +### 基本用法 + +```javascript +// 代码示例 +const example = new Example(); +``` + +### 高级用法 + +更复杂的使用场景... 
+ +## 故障排除 + +### 常见问题 + +**问题**:描述问题 +**解决方案**:解决步骤 + +## 参考资料 + +- [相关文档链接](/link) +- [外部资源](https://example.com) +```` diff --git a/docs/zh/essentials/navigation.mdx b/docs/zh/essentials/navigation.mdx new file mode 100644 index 0000000000000000000000000000000000000000..80902a6e9a2f1dba714c7be6594b916afc5b45b0 --- /dev/null +++ b/docs/zh/essentials/navigation.mdx @@ -0,0 +1,596 @@ +--- +title: '导航配置' +description: 'MCPHub 文档的导航结构配置指南' +--- + +## 基础导航 + +MCPHub 文档的导航在 `docs.json` 文件中配置。基本导航结构包含组和页面: + +```json title="docs.json" +{ + "navigation": [ + { + "group": "快速开始", + "pages": ["zh/index", "zh/quickstart"] + }, + { + "group": "开发指南", + "pages": [ + "zh/development/getting-started", + "zh/development/api-integration", + "zh/development/testing" + ] + }, + { + "group": "配置管理", + "pages": [ + "zh/configuration/environment-variables", + "zh/configuration/mcp-settings", + "zh/configuration/docker-setup", + "zh/configuration/nginx" + ] + } + ] +} +``` + +## 标签导航 + +当您的文档有多个主要部分时,可以使用标签来组织内容。 + +```json docs.json +{ + "navigation": { + "tabs": [ + { + "tab": "指南", + "groups": [ + { + "group": "基础", + "pages": ["basics/introduction"] + } + ] + }, + { + "tab": "API 参考", + "groups": [ + { + "group": "端点", + "pages": ["api/users", "api/products"] + } + ] + } + ] + } +} +``` + +## 页面引用 + +### 文件路径引用 + +最常见的方式是通过文件路径引用页面(不包含 `.mdx` 扩展名): + +```json +{ + "pages": ["quickstart", "advanced/configuration"] +} +``` + +### 外部链接 + +您也可以在导航中包含外部链接: + +```json +{ + "pages": [ + "introduction", + { + "page": "GitHub", + "href": "https://github.com" + } + ] +} +``` + +## 分组 + +### 基本分组 + +每个组都有一个名称和页面列表: + +```json +{ + "group": "API 基础", + "pages": ["api/authentication", "api/errors", "api/rate-limits"] +} +``` + +### 分组版本控制 + +您可以为组指定版本: + +```json +{ + "group": "API v2", + "version": "v2.0", + "pages": ["api/v2/users"] +} +``` + +## 全局导航元素 + +### 锚点 + +在所有页面上显示的持久链接: + +```json docs.json +{ + "navigation": { + "global": { + "anchors": [ + { + "anchor": "API 参考", + "href": 
"/api-reference", + "icon": "square-terminal" + }, + { + "anchor": "社区", + "href": "https://community.example.com", + "icon": "discord" + } + ] + } + } +} +``` + +### 导航栏 + +配置顶部导航栏的链接: + +```json docs.json +{ + "navbar": { + "links": [ + { + "label": "支持", + "href": "mailto:support@example.com" + } + ], + "primary": { + "type": "button", + "label": "仪表板", + "href": "https://dashboard.example.com" + } + } +} +``` + +## 分层导航结构 + +### 多级导航 + +MCPHub 文档支持多级分层导航: + +```json title="docs.json" +{ + "navigation": [ + { + "group": "核心功能", + "pages": [ + { + "group": "服务器管理", + "pages": [ + "zh/features/server-management", + "zh/features/server-health", + "zh/features/server-scaling" + ] + }, + { + "group": "智能路由", + "pages": [ + "zh/features/smart-routing", + "zh/features/load-balancing", + "zh/features/failover" + ] + } + ] + } + ] +} +``` + +### 条件导航 + +根据用户权限或版本显示不同的导航项: + +```json title="docs.json" +{ + "navigation": [ + { + "group": "API 参考", + "pages": [ + "zh/api-reference/introduction", + "zh/api-reference/authentication", + { + "group": "端点", + "pages": [ + "zh/api-reference/endpoint/get", + "zh/api-reference/endpoint/create", + "zh/api-reference/endpoint/delete", + "zh/api-reference/endpoint/webhook" + ] + } + ] + }, + { + "group": "企业功能", + "icon": "crown", + "version": "enterprise", + "pages": ["zh/enterprise/sso", "zh/enterprise/audit-logs", "zh/enterprise/compliance"] + } + ] +} +``` + +## 标签导航 + +对于多产品或多语言文档,使用标签组织内容: + +```json title="docs.json" +{ + "tabs": [ + { + "name": "文档", + "url": "https://docs.mcphub.io" + }, + { + "name": "API", + "url": "https://api.mcphub.io" + }, + { + "name": "SDK", + "url": "https://sdk.mcphub.io" + } + ], + "navigation": { + "文档": [ + { + "group": "开始使用", + "pages": ["zh/index", "zh/quickstart"] + } + ], + "API": [ + { + "group": "API 参考", + "pages": ["zh/api-reference/introduction", "zh/api-reference/authentication"] + } + ] + } +} +``` + +## 导航图标 + +为导航项添加图标以提高可读性: + +```json title="docs.json" +{ + "navigation": [ + { + 
"group": "快速开始", + "icon": "rocket", + "pages": ["zh/index", "zh/quickstart"] + }, + { + "group": "配置", + "icon": "gear", + "pages": ["zh/configuration/environment-variables", "zh/configuration/mcp-settings"] + }, + { + "group": "监控", + "icon": "chart-line", + "pages": ["zh/features/monitoring", "zh/features/analytics"] + } + ] +} +``` + +### 支持的图标 + +MCPHub 文档支持以下图标库的图标: + +- **Heroicons**: `hero-icon-name` +- **Font Awesome**: `fa-icon-name` +- **Feather**: `feather-icon-name` +- **Lucide**: `lucide-icon-name` + +常用图标示例: + +| 功能 | 图标 | 代码 | +| ---- | ---- | ------------- | +| 首页 | 🏠 | `"home"` | +| 设置 | ⚙️ | `"gear"` | +| API | 🔌 | `"plug"` | +| 安全 | 🔒 | `"lock"` | +| 监控 | 📊 | `"chart-bar"` | +| 文档 | 📖 | `"book"` | +| 开发 | 💻 | `"code"` | + +## 外部链接 + +在导航中包含外部资源链接: + +```json title="docs.json" +{ + "navigation": [ + { + "group": "资源", + "pages": [ + { + "name": "GitHub 仓库", + "url": "https://github.com/mcphub/mcphub", + "icon": "github" + }, + { + "name": "Discord 社区", + "url": "https://discord.gg/mcphub", + "icon": "discord" + }, + { + "name": "状态页面", + "url": "https://status.mcphub.io", + "icon": "status" + } + ] + } + ] +} +``` + +## 导航排序 + +### 自动排序 + +默认情况下,导航项按字母顺序排列。可以通过文件名前缀控制排序: + +``` +zh/ +├── 01-index.mdx +├── 02-quickstart.mdx +├── development/ +│ ├── 01-getting-started.mdx +│ ├── 02-api-integration.mdx +│ └── 03-testing.mdx +└── configuration/ + ├── 01-environment-variables.mdx + ├── 02-mcp-settings.mdx + └── 03-docker-setup.mdx +``` + +### 手动排序 + +在 `docs.json` 中明确指定顺序: + +```json title="docs.json" +{ + "navigation": [ + { + "group": "核心概念", + "pages": [ + "zh/concepts/introduction", + "zh/concepts/architecture", + "zh/concepts/mcp-protocol", + "zh/concepts/routing" + ] + } + ] +} +``` + +## 隐藏导航项 + +### 草稿页面 + +使用 `draft: true` 隐藏未完成的页面: + +```yaml title="draft-page.mdx" +--- +title: '开发中的功能' +description: '此功能正在开发中' +draft: true +--- +``` + +### 条件显示 + +根据用户角色或环境显示导航: + +```json title="docs.json" +{ + "navigation": [ + { + "group": "管理功能", + 
"hidden": "user", + "pages": ["zh/admin/user-management", "zh/admin/system-settings"] + } + ] +} +``` + +## 导航元数据 + +### 页面元数据 + +在页面头部添加导航相关的元数据: + +```yaml title="page.mdx" +--- +title: '服务器管理' +description: 'MCPHub 服务器管理功能详解' +icon: 'server' +order: 1 +hidden: false +version: '2.0+' +tags: ['管理', '服务器', '配置'] +--- +``` + +### 组元数据 + +为导航组添加描述和图标: + +```json title="docs.json" +{ + "navigation": [ + { + "group": "API 参考", + "icon": "api", + "description": "完整的 API 接口文档", + "version": "v2", + "pages": ["zh/api-reference/introduction"] + } + ] +} +``` + +## 搜索优化 + +### 搜索关键词 + +为页面添加搜索关键词: + +```yaml title="page.mdx" +--- +title: 'Docker 部署' +description: '使用 Docker 部署 MCPHub' +keywords: ['docker', '部署', '容器', '生产环境'] +searchable: true +--- +``` + +### 搜索权重 + +控制页面在搜索结果中的权重: + +```yaml title="important-page.mdx" +--- +title: '快速开始' +description: '5 分钟快速部署 MCPHub' +searchWeight: 10 +featured: true +--- +``` + +## 面包屑导航 + +自动生成面包屑导航: + +```json title="docs.json" +{ + "breadcrumbs": { + "enabled": true, + "separator": "›", + "home": "首页" + }, + "navigation": [ + { + "group": "配置管理", + "pages": ["zh/configuration/environment-variables"] + } + ] +} +``` + +显示效果:`首页 › 配置管理 › 环境变量` + +## 导航最佳实践 + +### 1. 逻辑分组 + +按功能和用户需求逻辑分组: + +```json +{ + "navigation": [ + { + "group": "新手指南", + "pages": ["introduction", "quickstart", "first-server"] + }, + { + "group": "进阶配置", + "pages": ["advanced-routing", "scaling", "monitoring"] + }, + { + "group": "参考文档", + "pages": ["api-reference", "cli-reference", "troubleshooting"] + } + ] +} +``` + +### 2. 渐进式学习路径 + +设计符合学习曲线的导航结构: + +1. **入门** → 快速开始、基础概念 +2. **实践** → 配置、部署、集成 +3. **进阶** → 优化、监控、故障排除 +4. **参考** → API 文档、CLI 手册 + +### 3. 移动端友好 + +确保导航在移动设备上的可用性: + +```json title="docs.json" +{ + "navigation": [ + { + "group": "快速开始", + "collapsed": false, + "pages": ["zh/index", "zh/quickstart"] + }, + { + "group": "详细文档", + "collapsed": true, + "pages": ["zh/advanced/..."] + } + ] +} +``` + +### 4. 
国际化支持 + +为多语言文档配置导航: + +```json title="docs.json" +{ + "i18n": { + "defaultLocale": "zh", + "locales": ["zh", "en"] + }, + "navigation": { + "zh": [ + { + "group": "快速开始", + "pages": ["zh/index", "zh/quickstart"] + } + ], + "en": [ + { + "group": "Getting Started", + "pages": ["en/index", "en/quickstart"] + } + ] + } +} +``` + +### 5. 性能优化 + +- 使用懒加载减少初始加载时间 +- 合理设置导航深度(建议不超过 3 层) +- 避免过多的外部链接 +- 定期清理无效的导航项 diff --git a/docs/zh/essentials/reusable-snippets.mdx b/docs/zh/essentials/reusable-snippets.mdx new file mode 100644 index 0000000000000000000000000000000000000000..908768c37341efd9182ceca156fb9c399db6f669 --- /dev/null +++ b/docs/zh/essentials/reusable-snippets.mdx @@ -0,0 +1,144 @@ +--- +title: '可重用代码片段' +description: '学习如何创建和使用代码片段来保持文档的一致性' +--- + +## 什么是代码片段? + +代码片段允许您在文档的多个位置重用内容块。这有助于保持一致性并减少重复内容的维护工作。 + +## 创建代码片段 + +代码片段存储在 `snippets/` 文件夹中,使用 `.mdx` 扩展名。 + +### 基本代码片段 + +创建 `snippets/api-key-setup.mdx`: + +```md +获取您的 API 密钥: + +1. 登录到您的仪表板 +2. 导航到 **设置** > **API 密钥** +3. 点击 **生成新密钥** +4. 复制密钥并安全地存储 +``` + +### 带参数的代码片段 + +您可以创建接受参数的动态代码片段。创建 `snippets/code-example.mdx`: + +````jsx + + +```bash {props.packageManager} +{props.packageManager} install {props.packageName} +```` + + +``` + +## 使用代码片段 + +### 基本使用 + +使用 `` 组件来包含代码片段: + +```jsx + +``` + + + +### 带参数使用 + +```jsx + +``` + +## 代码片段最佳实践 + +### 文件组织 + +``` +snippets/ +├── setup/ +│ ├── installation.mdx +│ └── configuration.mdx +├── examples/ +│ ├── basic-usage.mdx +│ └── advanced-usage.mdx +└── common/ + ├── prerequisites.mdx + └── troubleshooting.mdx +``` + +### 命名约定 + +- 使用描述性文件名 +- 使用连字符分隔单词 +- 按主题分组到子文件夹 + +### 内容指导原则 + +1. **保持简洁** - 代码片段应该是独立的内容块 +2. **避免硬编码** - 对可变内容使用参数 +3. **文档化参数** - 在代码片段中注释必需的参数 + +### 参数文档 + +在代码片段文件的顶部记录所需参数: + +```md + + +安装说明... +``` + +## 高级代码片段 + +### 条件内容 + +您可以使用条件逻辑来根据参数显示不同的内容: + +```jsx +{ + props.framework === 'react' &&
<div>React 特定的内容...</div>
; +} + +{ + props.framework === 'vue' &&
<div>Vue 特定的内容...</div>
; +} +``` + +### 嵌套代码片段 + +代码片段可以包含其他代码片段: + +```jsx + + +## 安装步骤 + + +``` + +## 维护代码片段 + +### 版本控制 + +当更新代码片段时: + +1. 考虑向后兼容性 +2. 更新所有使用该代码片段的页面 +3. 测试更改在所有上下文中的效果 + +### 重构检查清单 + +- [ ] 确认所有参数仍然有效 +- [ ] 验证代码片段在所有使用位置正确渲染 +- [ ] 更新相关文档 diff --git a/docs/zh/essentials/settings.mdx b/docs/zh/essentials/settings.mdx new file mode 100644 index 0000000000000000000000000000000000000000..43c05920ac4d37a03aa4c15ebcd2311c3093b669 --- /dev/null +++ b/docs/zh/essentials/settings.mdx @@ -0,0 +1,172 @@ +--- +title: '设置' +description: '了解如何配置您的文档' +--- + +## 全局配置 + +所有的全局配置都在项目根目录的 `docs.json` 文件中设置。 + +### 名称 + +在配置的顶层设置文档的名称。 + +```json docs.json +{ + "name": "Mintlify 文档" +} +``` + +### Logo + +您可以显示浅色和深色模式的 logo。 + +```json docs.json +{ + "logo": { + "light": "/logo/light.svg", + "dark": "/logo/dark.svg" + } +} +``` + +### Favicon + +```json docs.json +{ + "favicon": "/favicon.ico" +} +``` + +### 颜色 + +自定义文档的主色调以匹配您的品牌。 + +```json docs.json +{ + "colors": { + "primary": "#9563FF", + "light": "#AE87FF", + "dark": "#9563FF" + } +} +``` + +设置一种颜色系统,通过仅更改主色调来协调您文档的配色方案。 + +### 导航 + +您的侧边栏导航在 `navigation` 字段中设置。文档页面必须嵌套在组下,组必须嵌套在导航下。 + +```json docs.json +{ + "navigation": [ + { + "group": "开始使用", + "pages": ["introduction", "quickstart", "development"] + } + ] +} +``` + +#### 标签 + +您可以将页面分组为不同的标签。当您想要将概念或 API 参考组织到不同的部分时,这很有用。 + +```json docs.json +{ + "navigation": { + "tabs": [ + { + "tab": "主要", + "groups": [ + { + "group": "开始使用", + "pages": ["introduction"] + } + ] + }, + { + "tab": "API 参考", + "groups": [ + { + "group": "端点", + "pages": ["api-reference/users"] + } + ] + } + ] + } +} +``` + +### 页脚 + +您可以在 `footer` 字段中配置页脚链接。 + +```json docs.json +{ + "footer": { + "socials": { + "website": "https://mintlify.com", + "github": "https://github.com/mintlify", + "slack": "https://mintlify.com/community" + } + } +} +``` + +### 搜索 + +您可以通过多种方式配置搜索,包括替换默认搜索或添加搜索锚点。 + +```json docs.json +{ + "search": { + "prompt": "搜索..." 
+ } +} +``` + +## 页面配置 + +页面配置在每个 MDX 文件顶部的 frontmatter 中设置。 + +### 标题和描述 + +```md +--- +title: '介绍' +description: '欢迎来到我们的产品!' +--- +``` + +### 侧边栏标题 + +```md +--- +sidebarTitle: '主页' +--- +``` + +设置不同于页面标题的侧边栏标题。 + +### 图标 + +```md +--- +icon: 'star' +--- +``` + +为侧边栏中的页面设置 [FontAwesome](https://fontawesome.com/search?s=solid&m=free) 图标。 + +### 模式 + +```md +--- +mode: 'wide' +--- +``` + +设置页面的显示模式。选项包括 `"default"` 和 `"wide"`。 diff --git a/docs/zh/features/authentication.mdx b/docs/zh/features/authentication.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b409bc0a69261714b09487eb7375235f8f371227 --- /dev/null +++ b/docs/zh/features/authentication.mdx @@ -0,0 +1,330 @@ +--- +title: '身份认证与安全' +description: '为 MCPHub 配置身份认证和安全设置' +--- + +## 概述 + +MCPHub 提供灵活的身份认证机制来保护您的 MCP 服务器管理平台。系统支持多种身份认证方法和基于角色的访问控制。 + +## 身份认证方法 + +### 基于环境变量的认证 + +使用环境变量配置基础认证: + +```bash +# 基础认证凭据 +AUTH_USERNAME=admin +AUTH_PASSWORD=your-secure-password + +# JWT 设置 +JWT_SECRET=your-jwt-secret-key +JWT_EXPIRES_IN=24h +``` + +### 数据库认证 + +对于生产环境部署,启用基于数据库的用户管理: + +```json +{ + "auth": { + "provider": "database", + "database": { + "url": "postgresql://user:pass@localhost:5432/mcphub", + "userTable": "users" + } + } +} +``` + +## 用户管理 + +### 创建用户 + +通过管理界面或 API 创建用户: + +```bash +# 通过 API +curl -X POST http://localhost:3000/api/auth/users \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $ADMIN_TOKEN" \ + -d '{ + "username": "newuser", + "email": "user@example.com", + "password": "securepassword", + "role": "user" + }' +``` + +### 用户角色 + +MCPHub 支持基于角色的访问控制: + +- **管理员**: 完整系统访问权限、用户管理、服务器配置 +- **管理者**: 服务器管理、组管理、监控 +- **用户**: 在分配组内的基本服务器访问权限 +- **查看者**: 对分配资源的只读访问权限 + +## 基于组的访问控制 + +### 将用户分配到组 + +```bash +# 添加用户到组 +curl -X POST http://localhost:3000/api/groups/{groupId}/users \ + -H "Authorization: Bearer $TOKEN" \ + -d '{"userId": "user123"}' +``` + +### 组权限 + +配置组级别权限: + +```json +{ + "groupId": "dev-team", + "permissions": { + "servers": ["read", 
"write", "execute"], + "tools": ["read", "execute"], + "logs": ["read"], + "config": ["read"] + } +} +``` + +## API 认证 + +### JWT 令牌认证 + +```javascript +// 获取认证令牌 +const response = await fetch('/api/auth/login', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + username: 'your-username', + password: 'your-password', + }), +}); + +const { token } = await response.json(); + +// 在后续请求中使用令牌 +const protectedResponse = await fetch('/api/servers', { + headers: { + Authorization: `Bearer ${token}`, + }, +}); +``` + +### API 密钥认证 + +为系统集成生成 API 密钥: + +```bash +# 生成新的 API 密钥 +curl -X POST http://localhost:3000/api/auth/api-keys \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "name": "Integration Key", + "permissions": ["servers:read", "servers:write"], + "expiresAt": "2024-12-31T23:59:59.000Z" + }' +``` + +## 安全设置 + +### HTTPS 配置 + +为生产环境启用 HTTPS: + +```nginx +server { + listen 443 ssl http2; + server_name mcphub.example.com; + + ssl_certificate /path/to/certificate.crt; + ssl_certificate_key /path/to/private.key; + + location / { + proxy_pass http://localhost:3000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } +} +``` + +### 会话安全 + +配置安全的会话设置: + +```javascript +// 会话配置 +{ + "session": { + "secret": "your-session-secret", + "secure": true, // 生产环境中需要 HTTPS + "httpOnly": true, + "maxAge": 86400000, // 24 小时 + "sameSite": "strict" + } +} +``` + +### 速率限制 + +实施 API 速率限制: + +```javascript +{ + "rateLimit": { + "windowMs": 900000, // 15 分钟 + "max": 100, // 每个 IP 限制 100 个请求 + "message": "请求过于频繁,请稍后再试", + "standardHeaders": true, + "legacyHeaders": false + } +} +``` + +## 多因素认证 (MFA) + +### 启用 TOTP + +为管理员帐户启用基于时间的一次性密码: + +```bash +# 启用 MFA +curl -X POST http://localhost:3000/api/auth/mfa/enable \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "type": "totp", + "appName": "MCPHub" + }' 
+``` + +### 验证 MFA 代码 + +```javascript +// 登录时验证 MFA +const loginResponse = await fetch('/api/auth/login', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + username: 'admin', + password: 'password', + mfaCode: '123456', // 来自认证器应用的 6 位数字 + }), +}); +``` + +## 审计日志 + +### 启用审计日志 + +跟踪所有认证和授权事件: + +```json +{ + "audit": { + "enabled": true, + "logLevel": "info", + "events": [ + "login", + "logout", + "password_change", + "role_change", + "permission_change", + "server_access", + "config_change" + ], + "storage": { + "type": "database", + "retention": "90d" + } + } +} +``` + +### 查看审计日志 + +```bash +# 获取审计日志 +curl -X GET "http://localhost:3000/api/audit/logs?startDate=2024-01-01&endDate=2024-01-31" \ + -H "Authorization: Bearer $TOKEN" +``` + +## 密码策略 + +### 配置密码要求 + +```json +{ + "passwordPolicy": { + "minLength": 12, + "requireUppercase": true, + "requireLowercase": true, + "requireNumbers": true, + "requireSpecialChars": true, + "preventCommonPasswords": true, + "preventReuse": 5, // 防止重复使用最近 5 个密码 + "maxAge": 7776000 // 90 天后过期 + } +} +``` + +## 故障排除 + +### 常见认证问题 + +1. **JWT 令牌过期** + + ```bash + # 检查令牌有效期 + curl -X GET http://localhost:3000/api/auth/verify \ + -H "Authorization: Bearer $TOKEN" + ``` + +2. **权限被拒绝** + + ```bash + # 检查用户权限 + curl -X GET http://localhost:3000/api/auth/permissions \ + -H "Authorization: Bearer $TOKEN" + ``` + +3. **会话问题** + - 清除浏览器 cookies + - 检查会话配置 + - 验证服务器时间同步 + +### 调试认证流程 + +启用调试日志: + +```bash +# 设置环境变量 +export DEBUG=auth:* +export LOG_LEVEL=debug + +# 启动服务器 +npm start +``` + +## 安全最佳实践 + +1. **定期更新凭据**: 定期轮换 JWT 密钥和 API 密钥 +2. **最小权限原则**: 只授予用户执行其任务所需的最小权限 +3. **监控异常活动**: 设置警报以检测可疑的登录模式 +4. **备份配置**: 定期备份认证配置和用户数据 +5. 
**安全更新**: 保持 MCPHub 和依赖项的最新状态 + +更多安全配置选项,请参阅 [环境变量配置](/zh/configuration/environment-variables) 和 [Docker 设置](/zh/configuration/docker-setup) 文档。 diff --git a/docs/zh/features/group-management.mdx b/docs/zh/features/group-management.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c430b82ed90f41b4ff61c2757683604a7092abf7 --- /dev/null +++ b/docs/zh/features/group-management.mdx @@ -0,0 +1,567 @@ +--- +title: '组管理' +description: '组织用户和服务器为逻辑组,实现高效的访问控制' +--- + +## 概述 + +MCPHub 的组管理系统允许您将用户和服务器组织成逻辑组,从而简化权限管理和访问控制。组提供了一种灵活的方式来管理大规模部署中的资源。 + +## 创建组 + +### 通过仪表板 + +1. **导航到组部分**: 在主仪表板中点击"组" +2. **点击"创建组"**: 开始组创建流程 +3. **填写组详细信息**: + - **组名**: 唯一的组标识符 + - **显示名称**: 用户友好的组名称 + - **描述**: 组的目的和范围 + - **父组**: 可选的层次结构 + +### 通过 API + +```bash +curl -X POST http://localhost:3000/api/groups \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "name": "development-team", + "displayName": "开发团队", + "description": "前端和后端开发人员", + "parentGroup": null, + "settings": { + "autoAssign": false, + "maxMembers": 50, + "requireApproval": true + } + }' +``` + +### 通过配置文件 + +在 `groups.json` 中定义组: + +```json +{ + "groups": { + "dev-team": { + "displayName": "开发团队", + "description": "应用程序开发人员", + "permissions": { + "servers": ["read", "write", "execute"], + "tools": ["read", "execute"], + "logs": ["read"] + }, + "members": ["user1", "user2"], + "servers": ["dev-server-1", "dev-server-2"] + }, + "qa-team": { + "displayName": "质量保证团队", + "description": "测试和质量保证", + "permissions": { + "servers": ["read", "execute"], + "tools": ["read", "execute"], + "logs": ["read"] + }, + "members": ["qa1", "qa2"], + "servers": ["test-server", "staging-server"] + } + } +} +``` + +## 组层次结构 + +### 嵌套组 + +创建组层次结构以实现更好的组织: + +```json +{ + "groups": { + "engineering": { + "displayName": "工程部", + "description": "所有工程团队", + "children": ["frontend", "backend", "devops"] + }, + "frontend": { + "displayName": "前端团队", + "parent": "engineering", + "servers": 
["frontend-dev", "frontend-staging"] + }, + "backend": { + "displayName": "后端团队", + "parent": "engineering", + "servers": ["api-server", "database-server"] + }, + "devops": { + "displayName": "运维团队", + "parent": "engineering", + "servers": ["monitoring", "deployment"] + } + } +} +``` + +### 继承权限 + +子组从父组继承权限: + +```bash +# 检查继承的权限 +curl -X GET http://localhost:3000/api/groups/frontend/permissions?inherited=true \ + -H "Authorization: Bearer $TOKEN" +``` + +## 用户管理 + +### 添加用户到组 + +```bash +# 添加单个用户 +curl -X POST http://localhost:3000/api/groups/dev-team/members \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "userId": "user123", + "role": "member" + }' + +# 批量添加用户 +curl -X POST http://localhost:3000/api/groups/dev-team/members/bulk \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "users": [ + {"userId": "user1", "role": "member"}, + {"userId": "user2", "role": "admin"}, + {"userId": "user3", "role": "member"} + ] + }' +``` + +### 用户角色 + +每个组内支持不同的用户角色: + +- **组管理员**: 完整的组管理权限 +- **成员**: 标准组访问权限 +- **查看者**: 只读访问权限 +- **访客**: 有限的临时访问权限 + +### 移除用户 + +```bash +# 从组中移除用户 +curl -X DELETE http://localhost:3000/api/groups/dev-team/members/user123 \ + -H "Authorization: Bearer $TOKEN" +``` + +## 服务器分配 + +### 分配服务器到组 + +```bash +# 分配单个服务器 +curl -X POST http://localhost:3000/api/groups/dev-team/servers \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "serverId": "my-server", + "permissions": ["read", "write", "execute"] + }' + +# 批量分配服务器 +curl -X POST http://localhost:3000/api/groups/dev-team/servers/bulk \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "servers": [ + { + "serverId": "server1", + "permissions": ["read", "write"] + }, + { + "serverId": "server2", + "permissions": ["read", "execute"] + } + ] + }' +``` + +### 服务器权限级别 + +为每个组-服务器对定义细粒度权限: + +```json +{ + "serverId": "my-server", + "permissions": { + 
"execute": { + "allowed": true, + "tools": ["filesystem", "web-search"], + "restrictions": { + "maxRequests": 100, + "timeWindow": "1h" + } + }, + "configure": { + "allowed": false + }, + "logs": { + "allowed": true, + "level": ["info", "warn", "error"] + } + } +} +``` + +## 权限管理 + +### 组权限模型 + +```json +{ + "groupId": "dev-team", + "permissions": { + "servers": { + "create": false, + "read": true, + "update": true, + "delete": false, + "execute": true + }, + "tools": { + "filesystem": { + "read": true, + "write": true, + "paths": ["/app/data", "/tmp"] + }, + "web-search": { + "enabled": true, + "maxQueries": 50 + } + }, + "monitoring": { + "viewLogs": true, + "viewMetrics": true, + "exportData": false + }, + "administration": { + "manageUsers": false, + "manageServers": true, + "manageGroups": false + } + } +} +``` + +### 动态权限 + +基于条件的动态权限: + +```json +{ + "permissions": { + "servers": { + "execute": { + "condition": "time.hour >= 9 && time.hour <= 17", + "message": "服务器执行仅在工作时间内允许" + } + }, + "tools": { + "filesystem": { + "write": { + "condition": "user.role === 'admin' || group.name === 'senior-devs'", + "message": "写入权限需要管理员或高级开发者角色" + } + } + } + } +} +``` + +## 配额管理 + +### 设置组配额 + +```bash +curl -X PUT http://localhost:3000/api/groups/dev-team/quotas \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "requests": { + "daily": 1000, + "monthly": 30000 + }, + "storage": { + "maxSize": "10GB", + "retention": "30d" + }, + "compute": { + "maxConcurrentRequests": 10, + "maxExecutionTime": "5m" + } + }' +``` + +### 监控配额使用 + +```bash +# 获取当前配额使用情况 +curl -X GET http://localhost:3000/api/groups/dev-team/quotas/usage \ + -H "Authorization: Bearer $TOKEN" +``` + +响应示例: + +```json +{ + "groupId": "dev-team", + "period": "2024-01-01T00:00:00Z", + "usage": { + "requests": { + "used": 750, + "limit": 1000, + "remaining": 250 + }, + "storage": { + "used": "7.2GB", + "limit": "10GB", + "remaining": "2.8GB" + }, + "compute": { + 
"currentConcurrent": 3, + "maxConcurrent": 10, + "avgExecutionTime": "2m 15s" + } + } +} +``` + +## 组策略 + +### 定义组策略 + +```json +{ + "groupId": "dev-team", + "policies": { + "security": { + "requireMFA": false, + "sessionTimeout": "8h", + "ipWhitelist": ["192.168.1.0/24", "10.0.0.0/8"] + }, + "usage": { + "allowWeekendAccess": true, + "restrictHolidays": false, + "maxSessionDuration": "12h" + }, + "data": { + "encryptionRequired": true, + "dataRetention": "90d", + "exportAllowed": false + } + } +} +``` + +### 策略继承 + +```bash +# 应用策略模板 +curl -X POST http://localhost:3000/api/groups/dev-team/policies/apply \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "template": "development-team-template", + "overrides": { + "security.sessionTimeout": "4h" + } + }' +``` + +## 自动化组管理 + +### 自动用户分配 + +基于属性自动分配用户: + +```json +{ + "autoAssignment": { + "enabled": true, + "rules": [ + { + "condition": "user.department === '开发'", + "action": { + "addToGroup": "dev-team", + "role": "member" + } + }, + { + "condition": "user.title.includes('高级')", + "action": { + "addToGroup": "senior-devs", + "role": "admin" + } + } + ] + } +} +``` + +### 定时任务 + +```bash +# 创建定时清理任务 +curl -X POST http://localhost:3000/api/groups/dev-team/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "name": "cleanup-inactive-users", + "schedule": "0 2 * * *", + "action": "removeInactiveUsers", + "params": { + "inactiveDays": 30 + } + }' +``` + +## 组通知 + +### 配置通知 + +```json +{ + "groupId": "dev-team", + "notifications": { + "channels": { + "email": { + "enabled": true, + "recipients": ["team-lead@company.com"] + }, + "slack": { + "enabled": true, + "webhook": "https://hooks.slack.com/...", + "channel": "#dev-team" + } + }, + "events": ["userJoined", "userLeft", "serverAdded", "quotaExceeded", "securityAlert"] + } +} +``` + +### 发送组通知 + +```bash +curl -X POST http://localhost:3000/api/groups/dev-team/notifications \ + -H 
"Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "type": "announcement", + "title": "维护通知", + "message": "服务器将在今晚 10 点进行维护", + "priority": "high", + "channels": ["email", "slack"] + }' +``` + +## 组分析 + +### 使用统计 + +```bash +# 获取组使用统计 +curl -X GET http://localhost:3000/api/groups/dev-team/analytics \ + -H "Authorization: Bearer $TOKEN" +``` + +响应示例: + +```json +{ + "groupId": "dev-team", + "period": "30d", + "stats": { + "activeUsers": 12, + "totalRequests": 15750, + "avgResponseTime": "250ms", + "errorRate": "0.5%", + "mostUsedTools": [ + { "name": "filesystem", "usage": 8500 }, + { "name": "web-search", "usage": 4200 }, + { "name": "database", "usage": 3050 } + ], + "peakUsageHours": [9, 10, 14, 15, 16] + } +} +``` + +### 生成报告 + +```bash +# 生成月度报告 +curl -X POST http://localhost:3000/api/groups/dev-team/reports \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "type": "monthly", + "format": "pdf", + "includeDetails": true, + "recipients": ["manager@company.com"] + }' +``` + +## 故障排除 + +### 常见问题 + +1. **用户无法访问组资源** + + ```bash + # 检查用户组成员身份 + curl -X GET http://localhost:3000/api/users/user123/groups \ + -H "Authorization: Bearer $TOKEN" + ``` + +2. **权限配置错误** + + ```bash + # 验证权限设置 + curl -X GET http://localhost:3000/api/groups/dev-team/permissions/validate \ + -H "Authorization: Bearer $TOKEN" + ``` + +3. **配额超限** + ```bash + # 检查配额状态 + curl -X GET http://localhost:3000/api/groups/dev-team/quotas/status \ + -H "Authorization: Bearer $TOKEN" + ``` + +### 调试组权限 + +启用权限调试: + +```bash +# 调试用户权限 +curl -X GET http://localhost:3000/api/debug/permissions \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "userId": "user123", + "resource": "server:my-server", + "action": "execute" + }' +``` + +## 最佳实践 + +1. **组织结构**: 使用层次化组结构镜像您的组织架构 +2. **权限最小化**: 只授予执行任务所需的最小权限 +3. **定期审核**: 定期审核组成员身份和权限 +4. **自动化**: 使用自动化规则减少手动管理开销 +5. 
**监控**: 设置监控和警报以跟踪组活动 + +有关更多信息,请参阅 [身份认证与安全](/zh/features/authentication) 和 [监控](/zh/features/monitoring) 文档。 diff --git a/docs/zh/features/monitoring.mdx b/docs/zh/features/monitoring.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2d3a3e43cbb379679f8f1530b8d6ef8cad95ea52 --- /dev/null +++ b/docs/zh/features/monitoring.mdx @@ -0,0 +1,613 @@ +--- +title: '监控与分析' +description: '全面监控 MCP 服务器性能、健康状况和使用情况' +--- + +## 概述 + +MCPHub 提供全面的监控和分析功能,帮助您跟踪 MCP 服务器的性能、健康状况和使用模式。系统提供实时指标、历史分析和智能警报。 + +## 实时监控 + +### 仪表板概览 + +主监控仪表板显示关键指标: + +```bash +# 访问监控仪表板 +curl -X GET http://localhost:3000/api/monitoring/dashboard \ + -H "Authorization: Bearer $TOKEN" +``` + +响应示例: + +```json +{ + "overview": { + "totalServers": 12, + "activeServers": 10, + "totalRequests": 15420, + "avgResponseTime": "245ms", + "errorRate": "0.3%", + "uptime": "99.9%" + }, + "realtime": { + "requestsPerMinute": 85, + "activeConnections": 156, + "memoryUsage": "68%", + "cpuUsage": "42%" + } +} +``` + +### 服务器健康状态 + +```bash +# 获取所有服务器状态 +curl -X GET http://localhost:3000/api/monitoring/servers/health \ + -H "Authorization: Bearer $TOKEN" +``` + +## 性能指标 + +### 响应时间监控 + +```json +{ + "responseTime": { + "metrics": [ + { + "serverId": "server-1", + "average": "180ms", + "p50": "150ms", + "p95": "300ms", + "p99": "500ms" + } + ], + "alerts": { + "slowResponse": { + "threshold": "1s", + "current": "180ms", + "status": "normal" + } + } + } +} +``` + +### 吞吐量分析 + +```bash +# 获取吞吐量统计 +curl -X GET http://localhost:3000/api/monitoring/throughput \ + -H "Authorization: Bearer $TOKEN" \ + -G -d "timeRange=1h" -d "granularity=5m" +``` + +响应示例: + +```json +{ + "timeRange": "1h", + "granularity": "5m", + "data": [ + { + "timestamp": "2024-01-01T12:00:00Z", + "totalRequests": 450, + "successfulRequests": 448, + "failedRequests": 2, + "avgResponseTime": "230ms" + } + ], + "summary": { + "peakThroughput": 520, + "averageThroughput": 412, + "totalRequests": 4944 + } +} +``` + +### 资源使用监控 + +```json 
+{ + "resources": { + "cpu": { + "usage": "42%", + "cores": 8, + "processes": [ + { "name": "mcp-server-1", "cpu": "15%" }, + { "name": "mcp-server-2", "cpu": "12%" } + ] + }, + "memory": { + "total": "32GB", + "used": "21.6GB", + "available": "10.4GB", + "byProcess": [ + { "name": "mcp-server-1", "memory": "2.1GB" }, + { "name": "mcp-server-2", "memory": "1.8GB" } + ] + }, + "network": { + "bytesIn": "1.2GB", + "bytesOut": "890MB", + "packetsIn": 1250000, + "packetsOut": 980000 + }, + "disk": { + "total": "1TB", + "used": "340GB", + "available": "660GB", + "iops": 1200 + } + } +} +``` + +## 日志管理 + +### 集中化日志收集 + +配置日志聚合: + +```json +{ + "logging": { + "centralized": true, + "storage": { + "type": "elasticsearch", + "config": { + "hosts": ["localhost:9200"], + "index": "mcphub-logs", + "retention": "30d" + } + }, + "levels": ["error", "warn", "info", "debug"], + "sources": ["application", "mcp-servers", "nginx", "system"] + } +} +``` + +### 实时日志查看 + +```bash +# 获取实时日志流 +curl -X GET http://localhost:3000/api/monitoring/logs/stream \ + -H "Authorization: Bearer $TOKEN" \ + -H "Accept: text/event-stream" + +# 过滤日志 +curl -X GET http://localhost:3000/api/monitoring/logs \ + -H "Authorization: Bearer $TOKEN" \ + -G -d "level=error" -d "server=server-1" -d "limit=100" +``` + +### 日志分析 + +```bash +# 错误模式分析 +curl -X GET http://localhost:3000/api/monitoring/logs/analysis \ + -H "Authorization: Bearer $TOKEN" \ + -G -d "type=error-patterns" -d "timeRange=24h" +``` + +响应示例: + +```json +{ + "analysis": { + "errorPatterns": [ + { + "pattern": "Connection timeout", + "occurrences": 45, + "trend": "increasing", + "affectedServers": ["server-2", "server-3"] + }, + { + "pattern": "Memory allocation failed", + "occurrences": 12, + "trend": "stable", + "affectedServers": ["server-1"] + } + ], + "recommendations": ["检查服务器网络连接", "增加内存限制配置"] + } +} +``` + +## 警报系统 + +### 配置警报规则 + +```json +{ + "alerts": [ + { + "name": "high-response-time", + "description": "响应时间过高警报", + "condition": 
"avg(response_time) > 1000ms over 5m", + "severity": "warning", + "notifications": { + "email": ["admin@company.com"], + "slack": "#alerts", + "webhook": "https://hooks.company.com/alerts" + } + }, + { + "name": "server-down", + "description": "服务器宕机警报", + "condition": "server_status == 'down'", + "severity": "critical", + "notifications": { + "email": ["oncall@company.com"], + "sms": ["+1234567890"], + "pagerduty": "service-key" + } + }, + { + "name": "high-error-rate", + "description": "错误率过高", + "condition": "error_rate > 5% over 10m", + "severity": "error", + "notifications": { + "slack": "#dev-team" + } + } + ] +} +``` + +### 警报通知 + +```bash +# 测试警报通知 +curl -X POST http://localhost:3000/api/monitoring/alerts/test \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "alertName": "high-response-time", + "channel": "email" + }' +``` + +### 警报历史 + +```bash +# 获取警报历史 +curl -X GET http://localhost:3000/api/monitoring/alerts/history \ + -H "Authorization: Bearer $TOKEN" \ + -G -d "timeRange=7d" -d "severity=critical" +``` + +## 用户活动监控 + +### 用户会话跟踪 + +```json +{ + "userActivity": { + "activeSessions": 156, + "totalUsers": 89, + "sessionsByUser": [ + { + "userId": "user123", + "sessions": 3, + "lastActivity": "2024-01-01T12:30:00Z", + "totalRequests": 245 + } + ], + "topUsers": [ + { "userId": "power-user", "requests": 1250 }, + { "userId": "regular-user", "requests": 890 } + ] + } +} +``` + +### 使用模式分析 + +```bash +# 获取使用模式分析 +curl -X GET http://localhost:3000/api/monitoring/usage-patterns \ + -H "Authorization: Bearer $TOKEN" \ + -G -d "timeRange=30d" +``` + +响应示例: + +```json +{ + "patterns": { + "peakHours": [9, 10, 14, 15, 16], + "peakDays": ["tuesday", "wednesday", "thursday"], + "toolUsage": [ + {"tool": "filesystem", "usage": 45%}, + {"tool": "web-search", "usage": 30%}, + {"tool": "database", "usage": 25%} + ], + "userBehavior": { + "avgSessionDuration": "45m", + "avgRequestsPerSession": 23, + "returnRate": "78%" + } + } +} 
+``` + +## 容量规划 + +### 资源预测 + +```json +{ + "capacityPlanning": { + "currentCapacity": { + "cpu": "42%", + "memory": "68%", + "network": "35%", + "storage": "34%" + }, + "predictions": { + "timeHorizon": "30d", + "cpuForecast": [ + { "date": "2024-01-07", "usage": "48%" }, + { "date": "2024-01-14", "usage": "52%" }, + { "date": "2024-01-21", "usage": "56%" }, + { "date": "2024-01-28", "usage": "61%" } + ], + "recommendations": [ + "考虑在第3周增加CPU资源", + "内存使用稳定,暂不需要扩容", + "建议监控网络带宽趋势" + ] + } + } +} +``` + +### 自动扩缩容 + +```json +{ + "autoScaling": { + "enabled": true, + "policies": [ + { + "metric": "cpu_usage", + "scaleUp": { + "threshold": "80%", + "duration": "5m", + "action": "add_instance" + }, + "scaleDown": { + "threshold": "30%", + "duration": "15m", + "action": "remove_instance" + } + }, + { + "metric": "request_queue_length", + "scaleUp": { + "threshold": 100, + "duration": "2m", + "action": "add_instance" + } + } + ] + } +} +``` + +## 性能分析报告 + +### 生成性能报告 + +```bash +# 生成周报 +curl -X POST http://localhost:3000/api/monitoring/reports \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "type": "performance", + "timeRange": "7d", + "format": "pdf", + "sections": [ + "overview", + "response_times", + "error_analysis", + "resource_usage", + "recommendations" + ], + "recipients": ["manager@company.com"] + }' +``` + +### 自定义报告 + +```json +{ + "customReport": { + "name": "monthly-summary", + "schedule": "0 0 1 * *", + "template": "executive-summary", + "data": { + "kpis": ["uptime", "avg_response_time", "total_requests", "error_rate", "user_satisfaction"], + "comparisons": { + "previousMonth": true, + "yearOverYear": true + } + }, + "distribution": { + "email": ["executives@company.com"], + "dashboard": true, + "archive": true + } + } +} +``` + +## 监控 API + +### 指标查询 + +```bash +# 查询自定义指标 +curl -X POST http://localhost:3000/api/monitoring/query \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d 
'{ + "query": "avg(response_time) by (server_id)", + "timeRange": "1h", + "step": "5m" + }' +``` + +### 事件跟踪 + +```bash +# 记录自定义事件 +curl -X POST http://localhost:3000/api/monitoring/events \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "type": "deployment", + "title": "服务器更新", + "description": "部署新版本 v2.1.0", + "tags": ["deployment", "version-2.1.0"], + "metadata": { + "version": "2.1.0", + "servers": ["server-1", "server-2"] + } + }' +``` + +## 第三方集成 + +### Prometheus 集成 + +```yaml +# prometheus.yml +global: + scrape_interval: 15s + +scrape_configs: + - job_name: 'mcphub' + static_configs: + - targets: ['localhost:3000'] + metrics_path: '/api/monitoring/prometheus' + scrape_interval: 30s +``` + +### Grafana 仪表板 + +```json +{ + "dashboard": { + "title": "MCPHub 监控", + "panels": [ + { + "title": "请求率", + "type": "graph", + "targets": [ + { + "expr": "rate(mcphub_requests_total[5m])", + "legendFormat": "{{server_id}}" + } + ] + }, + { + "title": "响应时间", + "type": "graph", + "targets": [ + { + "expr": "histogram_quantile(0.95, mcphub_response_time_histogram)", + "legendFormat": "95th percentile" + } + ] + } + ] + } +} +``` + +### ELK Stack 集成 + +```json +{ + "logstash": { + "input": { + "beats": { + "port": 5044 + } + }, + "filter": { + "if": "[fields][service] == 'mcphub'", + "json": { + "source": "message" + }, + "date": { + "match": ["timestamp", "ISO8601"] + } + }, + "output": { + "elasticsearch": { + "hosts": ["localhost:9200"], + "index": "mcphub-logs-%{+YYYY.MM.dd}" + } + } + } +} +``` + +## 故障排除 + +### 监控问题诊断 + +```bash +# 检查监控服务状态 +curl -X GET http://localhost:3000/api/monitoring/health \ + -H "Authorization: Bearer $TOKEN" + +# 验证指标收集 +curl -X GET http://localhost:3000/api/monitoring/metrics/validation \ + -H "Authorization: Bearer $TOKEN" +``` + +### 性能问题分析 + +```bash +# 性能瓶颈分析 +curl -X GET http://localhost:3000/api/monitoring/performance/bottlenecks \ + -H "Authorization: Bearer $TOKEN" \ + -G -d "timeRange=1h" 
+``` + +### 常见监控问题 + +1. **指标丢失** + + - 检查收集器配置 + - 验证网络连接 + - 检查存储空间 + +2. **警报不触发** + + - 验证警报规则语法 + - 检查通知配置 + - 测试通知渠道 + +3. **仪表板加载缓慢** + - 优化查询时间范围 + - 增加数据聚合 + - 检查数据库性能 + +## 最佳实践 + +1. **监控层次化**: 建立应用、基础设施和业务三层监控 +2. **合理的警报**: 避免警报疲劳,设置合适的阈值 +3. **数据保留**: 根据业务需求设置适当的数据保留期 +4. **安全监控**: 监控安全相关的指标和事件 +5. **持续优化**: 定期审查和优化监控配置 + +有关更多信息,请参阅 [智能路由](/zh/features/smart-routing) 和 [服务器管理](/zh/features/server-management) 文档。 diff --git a/docs/zh/features/server-management.mdx b/docs/zh/features/server-management.mdx new file mode 100644 index 0000000000000000000000000000000000000000..31ae1d84be7e6689d5d2b501427dba92523b9a86 --- /dev/null +++ b/docs/zh/features/server-management.mdx @@ -0,0 +1,496 @@ +--- +title: '服务器管理' +description: '通过热插拔配置集中管理多个 MCP 服务器' +--- + +## 概述 + +MCPHub 的服务器管理系统允许您从单个仪表板集中配置、监控和控制多个 MCP(模型上下文协议)服务器。所有更改都会实时应用,无需重启服务器。 + +## 添加 MCP 服务器 + +### 通过仪表板 + +1. **访问仪表板**: 导航到 `http://localhost:3000` 并登录 +2. **点击"添加服务器"**: 位于服务器部分 +3. **填写服务器详细信息**: + - **名称**: 服务器的唯一标识符 + - **命令**: 可执行命令(例如 `npx`、`uvx`、`python`) + - **参数**: 命令参数数组 + - **环境变量**: 环境设置的键值对 + - **工作目录**: 命令的可选工作目录 + +### 通过配置文件 + +编辑您的 `mcp_settings.json` 文件: + +```json +{ + "mcpServers": { + "server-name": { + "command": "command-to-run", + "args": ["arg1", "arg2"], + "env": { + "API_KEY": "your-api-key", + "CONFIG_VALUE": "some-value" + }, + "cwd": "/optional/working/directory" + } + } +} +``` + +### 通过 API + +使用 REST API 以编程方式添加服务器: + +```bash +curl -X POST http://localhost:3000/api/servers \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -d '{ + "name": "my-server", + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/files"], + "env": { + "NODE_ENV": "production" + }, + "cwd": "/app" + }' +``` + +## 服务器配置 + +### 通用配置选项 + +```json +{ + "name": "filesystem-server", + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/allowed/path"], + "env": { + "NODE_ENV": "production", + 
"DEBUG": "mcp:*", + "MAX_FILES": "1000" + }, + "cwd": "/app/workspace", + "timeout": 30000, + "retries": 3, + "enabled": true +} +``` + +### Python 服务器示例 + +```json +{ + "name": "python-server", + "command": "python", + "args": ["-m", "mcp_server", "--config", "config.json"], + "env": { + "PYTHONPATH": "/app/python", + "API_KEY": "${API_KEY}", + "LOG_LEVEL": "INFO" + }, + "cwd": "/app/python-server" +} +``` + +### Node.js 服务器示例 + +```json +{ + "name": "node-server", + "command": "node", + "args": ["server.js", "--port", "3001"], + "env": { + "NODE_ENV": "production", + "PORT": "3001", + "DATABASE_URL": "${DATABASE_URL}" + }, + "cwd": "/app/node-server" +} +``` + +## 服务器生命周期管理 + +### 启动服务器 + +```bash +# 启动特定服务器 +curl -X POST http://localhost:3000/api/servers/my-server/start \ + -H "Authorization: Bearer $TOKEN" + +# 启动所有服务器 +curl -X POST http://localhost:3000/api/servers/start-all \ + -H "Authorization: Bearer $TOKEN" +``` + +### 停止服务器 + +```bash +# 停止特定服务器 +curl -X POST http://localhost:3000/api/servers/my-server/stop \ + -H "Authorization: Bearer $TOKEN" + +# 优雅停止(等待当前请求完成) +curl -X POST http://localhost:3000/api/servers/my-server/stop \ + -H "Authorization: Bearer $TOKEN" \ + -d '{"graceful": true, "timeout": 30000}' +``` + +### 重启服务器 + +```bash +# 重启服务器 +curl -X POST http://localhost:3000/api/servers/my-server/restart \ + -H "Authorization: Bearer $TOKEN" +``` + +## 热配置重载 + +### 更新服务器配置 + +无需重启即可更新配置: + +```bash +curl -X PUT http://localhost:3000/api/servers/my-server/config \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "env": { + "DEBUG": "mcp:verbose", + "NEW_SETTING": "value" + }, + "args": ["--verbose", "--new-flag"] + }' +``` + +### 批量配置更新 + +```bash +curl -X PUT http://localhost:3000/api/servers/bulk-update \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "servers": ["server1", "server2"], + "config": { + "env": { + "LOG_LEVEL": "DEBUG" + } + } + }' +``` + +## 服务器状态监控 + 
+### 检查服务器状态 + +```bash +# 获取所有服务器状态 +curl -X GET http://localhost:3000/api/servers/status \ + -H "Authorization: Bearer $TOKEN" + +# 获取特定服务器状态 +curl -X GET http://localhost:3000/api/servers/my-server/status \ + -H "Authorization: Bearer $TOKEN" +``` + +响应示例: + +```json +{ + "name": "my-server", + "status": "running", + "pid": 12345, + "uptime": 3600000, + "memory": { + "rss": 123456789, + "heapTotal": 98765432, + "heapUsed": 87654321 + }, + "cpu": { + "user": 1000000, + "system": 500000 + }, + "lastRestart": "2024-01-01T12:00:00.000Z" +} +``` + +### 健康检查 + +配置自动健康检查: + +```json +{ + "name": "my-server", + "command": "node", + "args": ["server.js"], + "healthCheck": { + "enabled": true, + "interval": 30000, + "timeout": 5000, + "retries": 3, + "endpoint": "/health", + "expectedStatus": 200 + } +} +``` + +## 负载均衡 + +### 配置多实例 + +```json +{ + "name": "load-balanced-server", + "instances": 3, + "command": "node", + "args": ["server.js"], + "loadBalancer": { + "strategy": "round-robin", + "healthCheck": true, + "stickySession": false + }, + "env": { + "PORT": "${PORT}" + } +} +``` + +### 负载均衡策略 + +- **round-robin**: 轮询分发请求 +- **least-connections**: 分发到连接数最少的实例 +- **weighted**: 基于权重分发 +- **ip-hash**: 基于客户端 IP 的一致性哈希 + +## 资源限制 + +### 设置资源限制 + +```json +{ + "name": "resource-limited-server", + "command": "python", + "args": ["server.py"], + "resources": { + "memory": { + "limit": "512MB", + "warning": "400MB" + }, + "cpu": { + "limit": "50%", + "priority": "normal" + }, + "processes": { + "max": 10 + } + } +} +``` + +### 监控资源使用 + +```bash +# 获取资源使用统计 +curl -X GET http://localhost:3000/api/servers/my-server/resources \ + -H "Authorization: Bearer $TOKEN" +``` + +## 日志管理 + +### 配置日志记录 + +```json +{ + "name": "my-server", + "command": "node", + "args": ["server.js"], + "logging": { + "level": "info", + "file": "/var/log/mcphub/my-server.log", + "maxSize": "100MB", + "maxFiles": 5, + "rotate": true, + "format": "json" + } +} +``` + +### 查看日志 + +```bash +# 获取实时日志 +curl -X GET 
http://localhost:3000/api/servers/my-server/logs \ + -H "Authorization: Bearer $TOKEN" + +# 获取带过滤器的日志 +curl -X GET "http://localhost:3000/api/servers/my-server/logs?level=error&limit=100" \ + -H "Authorization: Bearer $TOKEN" +``` + +## 环境变量管理 + +### 动态环境变量 + +```json +{ + "name": "dynamic-server", + "command": "python", + "args": ["server.py"], + "env": { + "API_KEY": "${secrets:api_key}", + "DATABASE_URL": "${vault:db_url}", + "CURRENT_TIME": "${time:iso}", + "SERVER_ID": "${server:id}", + "HOSTNAME": "${system:hostname}" + } +} +``` + +### 环境变量模板 + +支持的模板变量: + +- `${secrets:key}`: 从密钥存储获取 +- `${vault:path}`: 从 Vault 获取 +- `${env:VAR}`: 从系统环境变量获取 +- `${time:format}`: 当前时间戳 +- `${server:property}`: 服务器属性 +- `${system:property}`: 系统属性 + +## 服务发现 + +### 自动服务发现 + +```json +{ + "serviceDiscovery": { + "enabled": true, + "provider": "consul", + "config": { + "host": "localhost", + "port": 8500, + "serviceName": "mcp-server", + "tags": ["mcp", "ai", "api"] + } + } +} +``` + +### 注册服务 + +```bash +# 手动注册服务 +curl -X POST http://localhost:3000/api/servers/my-server/register \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "service": { + "name": "my-mcp-service", + "tags": ["mcp", "production"], + "port": 3001, + "check": { + "http": "http://localhost:3001/health", + "interval": "30s" + } + } + }' +``` + +## 故障排除 + +### 常见问题 + +1. **服务器启动失败** + + ```bash + # 检查服务器日志 + curl -X GET http://localhost:3000/api/servers/my-server/logs?level=error \ + -H "Authorization: Bearer $TOKEN" + ``` + +2. **配置无效** + + ```bash + # 验证配置 + curl -X POST http://localhost:3000/api/servers/validate \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d @server-config.json + ``` + +3. 
**性能问题** + ```bash + # 获取性能指标 + curl -X GET http://localhost:3000/api/servers/my-server/metrics \ + -H "Authorization: Bearer $TOKEN" + ``` + +### 调试模式 + +启用详细调试: + +```json +{ + "name": "debug-server", + "command": "node", + "args": ["--inspect=0.0.0.0:9229", "server.js"], + "env": { + "DEBUG": "*", + "LOG_LEVEL": "debug", + "NODE_ENV": "development" + }, + "debugging": { + "enabled": true, + "port": 9229, + "breakOnStart": false + } +} +``` + +## 高级配置 + +### 自定义钩子 + +```json +{ + "name": "hooked-server", + "command": "node", + "args": ["server.js"], + "hooks": { + "beforeStart": ["./scripts/setup.sh"], + "afterStart": ["./scripts/notify.sh"], + "beforeStop": ["./scripts/cleanup.sh"], + "onError": ["./scripts/alert.sh"] + } +} +``` + +### 配置模板 + +```json +{ + "templates": { + "python-server": { + "command": "python", + "args": ["-m", "mcp_server"], + "env": { + "PYTHONPATH": "/app/python", + "LOG_LEVEL": "INFO" + } + } + }, + "servers": { + "my-python-server": { + "extends": "python-server", + "args": ["-m", "mcp_server", "--config", "custom.json"], + "env": { + "API_KEY": "custom-key" + } + } + } +} +``` + +有关更多配置选项,请参阅 [MCP 设置配置](/zh/configuration/mcp-settings) 和 [环境变量](/zh/configuration/environment-variables) 文档。 diff --git a/docs/zh/features/smart-routing.mdx b/docs/zh/features/smart-routing.mdx new file mode 100644 index 0000000000000000000000000000000000000000..e3c8c470d5d31a88842424192f34e597aebf18c4 --- /dev/null +++ b/docs/zh/features/smart-routing.mdx @@ -0,0 +1,691 @@ +--- +title: '智能路由' +description: '自动负载均衡和请求路由到最佳的 MCP 服务器实例' +--- + +## 概述 + +MCPHub 的智能路由系统自动将传入请求路由到最适合的 MCP 服务器实例。系统考虑服务器负载、响应时间、功能可用性和业务规则来做出路由决策。 + +## 路由策略 + +### 轮询路由 + +最简单的路由策略,按顺序分发请求: + +```json +{ + "routing": { + "strategy": "round-robin", + "targets": [ + { + "serverId": "server-1", + "weight": 1, + "enabled": true + }, + { + "serverId": "server-2", + "weight": 1, + "enabled": true + }, + { + "serverId": "server-3", + "weight": 1, + "enabled": true + } + ] + } +} +``` + +### 
加权轮询 + +基于服务器容量分配不同权重: + +```json +{ + "routing": { + "strategy": "weighted-round-robin", + "targets": [ + { + "serverId": "high-performance-server", + "weight": 3, + "specs": { + "cpu": "8 cores", + "memory": "32GB" + } + }, + { + "serverId": "standard-server-1", + "weight": 2, + "specs": { + "cpu": "4 cores", + "memory": "16GB" + } + }, + { + "serverId": "standard-server-2", + "weight": 1, + "specs": { + "cpu": "2 cores", + "memory": "8GB" + } + } + ] + } +} +``` + +### 最少连接数 + +将请求路由到当前连接数最少的服务器: + +```json +{ + "routing": { + "strategy": "least-connections", + "balancingMode": "dynamic", + "healthCheck": { + "enabled": true, + "interval": 10000 + } + } +} +``` + +### 基于响应时间 + +路由到响应时间最短的服务器: + +```json +{ + "routing": { + "strategy": "fastest-response", + "metrics": { + "measurementWindow": "5m", + "sampleSize": 100, + "excludeSlowRequests": true, + "slowRequestThreshold": "5s" + } + } +} +``` + +## 基于功能的路由 + +### 工具特定路由 + +根据请求的工具类型路由到专门的服务器: + +```json +{ + "routing": { + "strategy": "capability-based", + "rules": [ + { + "condition": { + "tool": "filesystem" + }, + "targets": ["filesystem-server-1", "filesystem-server-2"], + "strategy": "least-connections" + }, + { + "condition": { + "tool": "web-search" + }, + "targets": ["search-server-1", "search-server-2"], + "strategy": "round-robin" + }, + { + "condition": { + "tool": "database" + }, + "targets": ["db-server"], + "strategy": "single" + } + ], + "fallback": { + "targets": ["general-server-1", "general-server-2"], + "strategy": "round-robin" + } + } +} +``` + +### 内容感知路由 + +基于请求内容进行智能路由: + +```json +{ + "routing": { + "strategy": "content-aware", + "rules": [ + { + "condition": { + "content.language": "python" + }, + "targets": ["python-specialized-server"], + "reason": "Python代码分析专用服务器" + }, + { + "condition": { + "content.size": "> 1MB" + }, + "targets": ["high-memory-server"], + "reason": "大文件处理专用服务器" + }, + { + "condition": { + "content.type": "image" + }, + "targets": ["image-processing-server"], + 
"reason": "图像处理专用服务器" + } + ] + } +} +``` + +## 地理位置路由 + +### 基于客户端位置 + +根据客户端地理位置路由到最近的服务器: + +```json +{ + "routing": { + "strategy": "geo-location", + "regions": [ + { + "name": "北美", + "countries": ["US", "CA", "MX"], + "servers": ["us-east-1", "us-west-1", "ca-central-1"], + "strategy": "least-latency" + }, + { + "name": "欧洲", + "countries": ["DE", "FR", "UK", "NL"], + "servers": ["eu-west-1", "eu-central-1"], + "strategy": "round-robin" + }, + { + "name": "亚太", + "countries": ["CN", "JP", "KR", "SG"], + "servers": ["ap-southeast-1", "ap-northeast-1"], + "strategy": "fastest-response" + } + ], + "fallback": { + "servers": ["global-server-1"], + "strategy": "single" + } + } +} +``` + +### 延迟优化 + +```bash +# 配置延迟监控 +curl -X PUT http://localhost:3000/api/routing/latency-config \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "enabled": true, + "measurementInterval": 30000, + "regions": [ + {"id": "us-east", "endpoint": "ping.us-east.example.com"}, + {"id": "eu-west", "endpoint": "ping.eu-west.example.com"}, + {"id": "ap-southeast", "endpoint": "ping.ap-southeast.example.com"} + ], + "routing": { + "preferLowLatency": true, + "maxLatencyThreshold": "200ms", + "fallbackOnTimeout": true + } + }' +``` + +## 负载感知路由 + +### 实时负载监控 + +```json +{ + "routing": { + "strategy": "load-aware", + "loadMetrics": { + "cpu": { + "threshold": 80, + "weight": 0.4 + }, + "memory": { + "threshold": 85, + "weight": 0.3 + }, + "connections": { + "threshold": 1000, + "weight": 0.2 + }, + "responseTime": { + "threshold": "2s", + "weight": 0.1 + } + }, + "adaptation": { + "enabled": true, + "adjustmentInterval": 60000, + "emergencyThreshold": 95 + } + } +} +``` + +### 预测性负载均衡 + +```json +{ + "routing": { + "strategy": "predictive", + "prediction": { + "algorithm": "linear-regression", + "trainingWindow": "7d", + "predictionHorizon": "1h", + "factors": ["historical_load", "time_of_day", "day_of_week", "seasonal_patterns"] + }, + "adaptation": { + 
"preemptiveScaling": true, + "scaleUpThreshold": 70, + "scaleDownThreshold": 30 + } + } +} +``` + +## 故障转移和恢复 + +### 自动故障转移 + +```json +{ + "routing": { + "strategy": "high-availability", + "failover": { + "enabled": true, + "detection": { + "healthCheckFailures": 3, + "timeoutThreshold": "10s", + "checkInterval": 5000 + }, + "recovery": { + "automaticRecovery": true, + "recoveryChecks": 5, + "recoveryInterval": 30000 + } + }, + "clusters": [ + { + "name": "primary", + "servers": ["server-1", "server-2"], + "priority": 1 + }, + { + "name": "secondary", + "servers": ["backup-server-1", "backup-server-2"], + "priority": 2 + } + ] + } +} +``` + +### 断路器模式 + +```json +{ + "routing": { + "circuitBreaker": { + "enabled": true, + "failureThreshold": 10, + "timeWindow": 60000, + "halfOpenRetries": 3, + "fallback": { + "type": "cached-response", + "ttl": 300000 + } + } + } +} +``` + +## 会话亲和性 + +### 粘性会话 + +保持用户会话与特定服务器的关联: + +```json +{ + "routing": { + "strategy": "session-affinity", + "affinity": { + "type": "cookie", + "cookieName": "mcphub-server-id", + "ttl": 3600000, + "fallbackOnUnavailable": true + }, + "sessionStore": { + "type": "redis", + "config": { + "host": "localhost", + "port": 6379, + "db": 1 + } + } + } +} +``` + +### 基于用户 ID 的路由 + +```json +{ + "routing": { + "strategy": "user-based", + "userRouting": { + "algorithm": "consistent-hashing", + "hashFunction": "sha256", + "virtualNodes": 100, + "replicationFactor": 2 + } + } +} +``` + +## 动态路由配置 + +### 运行时配置更新 + +```bash +# 更新路由配置 +curl -X PUT http://localhost:3000/api/routing/config \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "strategy": "weighted-round-robin", + "weights": { + "server-1": 3, + "server-2": 2, + "server-3": 1 + }, + "applyImmediately": true + }' +``` + +### A/B 测试路由 + +```json +{ + "routing": { + "strategy": "ab-testing", + "experiments": [ + { + "name": "new-algorithm-test", + "enabled": true, + "trafficSplit": { + "control": 70, + "variant": 
30 + }, + "rules": { + "control": { + "strategy": "round-robin", + "servers": ["stable-server-1", "stable-server-2"] + }, + "variant": { + "strategy": "ai-optimized", + "servers": ["experimental-server-1"] + } + }, + "metrics": ["response_time", "error_rate", "user_satisfaction"] + } + ] + } +} +``` + +## 路由分析和监控 + +### 实时路由指标 + +```bash +# 获取路由统计 +curl -X GET http://localhost:3000/api/routing/metrics \ + -H "Authorization: Bearer $TOKEN" +``` + +响应示例: + +```json +{ + "timestamp": "2024-01-01T12:00:00Z", + "totalRequests": 15420, + "routingDistribution": { + "server-1": { "requests": 6168, "percentage": 40 }, + "server-2": { "requests": 4626, "percentage": 30 }, + "server-3": { "requests": 3084, "percentage": 20 }, + "backup-server": { "requests": 1542, "percentage": 10 } + }, + "performance": { + "avgResponseTime": "245ms", + "p95ResponseTime": "580ms", + "errorRate": "0.3%" + }, + "failovers": { + "total": 2, + "byServer": { + "server-2": 1, + "server-3": 1 + } + } +} +``` + +### 路由决策日志 + +```bash +# 启用路由决策日志 +curl -X PUT http://localhost:3000/api/routing/logging \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "enabled": true, + "level": "info", + "includeDecisionFactors": true, + "sampleRate": 0.1 + }' +``` + +## 自定义路由规则 + +### 基于业务逻辑的路由 + +```json +{ + "routing": { + "strategy": "custom-rules", + "rules": [ + { + "name": "premium-users", + "priority": 1, + "condition": "user.tier === 'premium'", + "action": { + "targetServers": ["premium-server-1", "premium-server-2"], + "strategy": "least-connections", + "qos": { + "maxResponseTime": "1s", + "priority": "high" + } + } + }, + { + "name": "high-volume-requests", + "priority": 2, + "condition": "request.size > 10MB", + "action": { + "targetServers": ["high-capacity-server"], + "strategy": "single", + "timeout": "60s" + } + }, + { + "name": "batch-processing", + "priority": 3, + "condition": "request.type === 'batch'", + "action": { + "targetServers": ["batch-server-1", 
"batch-server-2"], + "strategy": "queue-based", + "queueConfig": { + "maxSize": 1000, + "timeout": "5m" + } + } + } + ] + } +} +``` + +### JavaScript 路由函数 + +```javascript +// 自定义路由函数 +function customRouting(request, servers, metrics) { + const { user, content, timestamp } = request; + + // 工作时间优先使用高性能服务器 + const isBusinessHours = + new Date(timestamp).getHours() >= 9 && new Date(timestamp).getHours() <= 17; + + if (isBusinessHours && user.priority === 'high') { + return servers.filter((s) => s.tags.includes('high-performance')); + } + + // 基于内容类型的特殊路由 + if (content.type === 'code-analysis') { + return servers.filter((s) => s.capabilities.includes('code-analysis')); + } + + // 默认负载均衡 + return servers.sort((a, b) => a.currentLoad - b.currentLoad); +} +``` + +## 路由优化 + +### 机器学习优化 + +```json +{ + "routing": { + "strategy": "ml-optimized", + "mlConfig": { + "algorithm": "reinforcement-learning", + "rewardFunction": "response_time_weighted", + "trainingData": { + "features": [ + "server_load", + "response_time_history", + "request_complexity", + "user_pattern", + "time_of_day" + ], + "targetMetric": "overall_satisfaction" + }, + "updateFrequency": "hourly", + "explorationRate": 0.1 + } + } +} +``` + +### 缓存感知路由 + +```json +{ + "routing": { + "strategy": "cache-aware", + "caching": { + "enabled": true, + "levels": [ + { + "type": "local", + "ttl": 300, + "maxSize": "100MB" + }, + { + "type": "distributed", + "provider": "redis", + "ttl": 3600, + "maxSize": "1GB" + } + ], + "routing": { + "preferCachedServers": true, + "cacheHitBonus": 0.3, + "cacheMissThreshold": 0.8 + } + } + } +} +``` + +## 故障排除 + +### 路由调试 + +```bash +# 调试特定请求的路由决策 +curl -X POST http://localhost:3000/api/routing/debug \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "request": { + "userId": "user123", + "tool": "filesystem", + "content": {"type": "read", "path": "/data/file.txt"} + }, + "traceRoute": true + }' +``` + +### 路由性能分析 + +```bash +# 获取路由性能报告 +curl -X 
GET http://localhost:3000/api/routing/performance \ + -H "Authorization: Bearer $TOKEN" \ + -G -d "timeRange=1h" -d "detailed=true" +``` + +### 常见问题 + +1. **不均匀的负载分布** + + - 检查服务器权重配置 + - 验证健康检查设置 + - 分析请求模式 + +2. **频繁的故障转移** + + - 调整健康检查阈值 + - 检查网络连接稳定性 + - 优化服务器资源 + +3. **路由延迟过高** + - 简化路由规则 + - 优化路由算法 + - 使用缓存加速决策 + +有关更多信息,请参阅 [监控](/zh/features/monitoring) 和 [服务器管理](/zh/features/server-management) 文档。 diff --git a/docs/zh/index.mdx b/docs/zh/index.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a392401981e8fc12c45b13b32405ac6ca1f24f53 --- /dev/null +++ b/docs/zh/index.mdx @@ -0,0 +1,97 @@ +--- +title: '欢迎使用 MCPHub' +description: 'MCPHub 是一个强大的 Model Context Protocol (MCP) 服务器管理平台,提供智能路由、负载均衡和实时监控功能' +--- + +MCPHub Hero Light +MCPHub Hero Dark + +## 什么是 MCPHub? + +MCPHub 是一个现代化的 Model Context Protocol (MCP) 服务器管理平台,旨在简化 AI 模型服务的部署、管理和监控。通过智能路由和负载均衡技术,MCPHub 帮助您构建高可用、可扩展的 AI 服务架构。 + +### 核心功能 + +- **🚀 智能路由** - 基于负载、延迟和健康状态的智能请求分发 +- **⚖️ 负载均衡** - 多种负载均衡策略,确保最优性能 +- **📊 实时监控** - 全面的性能指标和健康检查 +- **🔐 安全认证** - 企业级身份认证和访问控制 +- **🏗️ 服务器组管理** - 灵活的服务器分组和配置管理 +- **🔄 故障转移** - 自动故障检测和流量切换 + +## 快速开始 + +立即开始使用 MCPHub,只需几分钟即可部署您的第一个 MCP 服务器。 + + + + 跟随我们的快速开始指南,5 分钟内部署 MCPHub 并连接您的第一个 MCP 服务器 + + + 设置本地开发环境,了解 MCPHub 的架构和开发工作流 + + + +## 核心概念 + +了解 MCPHub 的核心概念,为深入使用做好准备。 + + + + 深入了解 Model Context Protocol 的工作原理和最佳实践 + + + 学习 MCPHub 的智能路由算法和配置策略 + + + 掌握 MCP 服务器的添加、配置和管理技巧 + + + 使用内置的监控工具跟踪性能和识别问题 + + + +## 部署选项 + +MCPHub 支持多种部署方式,满足不同规模和场景的需求。 + + + + 使用 Docker 容器快速部署,支持单机和集群模式 + + + 在 AWS、GCP、Azure 等云平台上部署 MCPHub + + + 在 Kubernetes 集群中部署高可用的 MCPHub 服务 + + + +## API 和集成 + +MCPHub 提供完整的 RESTful API 和多语言 SDK,方便与现有系统集成。 + + + + 完整的 API 接口文档,包含详细的请求示例和响应格式 + + + 官方 SDK 和命令行工具,加速开发集成 + + + +## 社区和支持 + +加入 MCPHub 社区,获取帮助和分享经验。 + + + + 查看源代码、提交问题和贡献代码 + + + 与其他开发者交流,获取实时帮助 + + + 支持 MCPHub 的开发和维护,帮助我们持续改进 + + diff --git a/docs/zh/quickstart.mdx b/docs/zh/quickstart.mdx new file mode 100644 index 
0000000000000000000000000000000000000000..ae397191590180dcfac02aee95a70276885928ab --- /dev/null +++ b/docs/zh/quickstart.mdx @@ -0,0 +1,304 @@ +--- +title: '快速开始' +description: '5 分钟内部署 MCPHub 并连接您的第一个 MCP 服务器' +--- + +## 欢迎使用 MCPHub! + +本指南将帮助您在 5 分钟内完成 MCPHub 的部署和配置,并连接您的第一个 MCP 服务器。 + +## 前提条件 + +在开始之前,请确保您的系统满足以下要求: + + + + - **操作系统**: Linux、macOS 或 Windows + - **内存**: 最少 2GB RAM(推荐 4GB+) + - **存储**: 至少 1GB 可用空间 + - **网络**: 稳定的互联网连接 + + + + - **Node.js**: 18.0+ 版本 + - **Docker**: 最新版本(可选,用于容器化部署) + - **Git**: 用于代码管理 + + 检查版本: + ```bash + node --version # 应该 >= 18.0.0 + npm --version # 应该 >= 8.0.0 + docker --version # 可选 + ``` + + + +## 安装 MCPHub + +### 方式一:使用 npm(推荐) + + + + 首先安装 MCPHub 命令行工具: + + ```bash + npm install -g @mcphub/cli + ``` + + 验证安装: + ```bash + mcphub --version + ``` + + + + + 创建一个新的 MCPHub 项目: + + ```bash + # 创建项目 + mcphub init my-mcphub-project + cd my-mcphub-project + + # 安装依赖 + npm install + ``` + + + + + 复制并编辑环境变量文件: + + ```bash + cp .env.example .env + ``` + + 编辑 `.env` 文件,设置基本配置: + ```bash + # 服务器配置 + PORT=3000 + NODE_ENV=development + + # 数据库配置(使用内置 SQLite) + DATABASE_URL=sqlite:./data/mcphub.db + + # JWT 密钥(请更改为安全的随机字符串) + JWT_SECRET=your-super-secret-jwt-key-change-me + + # 管理员账户 + ADMIN_EMAIL=admin@example.com + ADMIN_PASSWORD=admin123 + ``` + + + + +### 方式二:使用 Docker + + + + 使用 Docker Compose 一键部署: + + ```bash + # 下载配置文件 + curl -O https://raw.githubusercontent.com/mcphub/mcphub/main/docker-compose.yml + + # 启动服务 + docker-compose up -d + ``` + + 或者直接运行 Docker 容器: + ```bash + docker run -d \ + --name mcphub \ + -p 3000:3000 \ + -e NODE_ENV=production \ + -e JWT_SECRET=your-secret-key \ + mcphub/server:latest + ``` + + + + +## 启动 MCPHub + +### 开发模式启动 + +```bash +# 初始化数据库 +npm run db:setup + +# 启动开发服务器 +npm run dev +``` + +### 生产模式启动 + +```bash +# 构建应用 +npm run build + +# 启动生产服务器 +npm start +``` + +开发模式下,MCPHub 会在 `http://localhost:3000` 启动,并具有热重载功能。 + +## 首次访问和配置 + +### 1. 
访问管理界面 + +打开浏览器,访问 `http://localhost:3000`,您将看到 MCPHub 的欢迎页面。 + +### 2. 登录管理员账户 + +使用您在 `.env` 文件中设置的管理员凭据登录: + +- **邮箱**: `admin@example.com` +- **密码**: `admin123` + +首次登录后,请立即更改默认密码以确保安全! + +### 3. 完成初始配置 + +登录后,系统会引导您完成初始配置: + +1. **更改管理员密码** +2. **设置组织信息** +3. **配置基本设置** + +## 添加您的第一个 MCP 服务器 + +### 1. 准备 MCP 服务器 + +如果您还没有 MCP 服务器,可以使用我们的示例服务器进行测试: + +```bash +# 克隆示例服务器 +git clone https://github.com/mcphub/example-mcp-server.git +cd example-mcp-server + +# 安装依赖并启动 +npm install +npm start +``` + +示例服务器将在 `http://localhost:3001` 启动。 + +### 2. 在 MCPHub 中添加服务器 + +在 MCPHub 管理界面中: + +1. 点击 **"添加服务器"** 按钮 +2. 填写服务器信息: + ``` + 名称: Example MCP Server + 端点: http://localhost:3001 + 描述: 示例 MCP 服务器用于测试 + ``` +3. 选择功能类型(如:chat、completion、analysis) +4. 点击 **"测试连接"** 验证服务器可达性 +5. 点击 **"保存"** 完成添加 + +### 3. 验证服务器状态 + +添加成功后,您应该能在服务器列表中看到新添加的服务器,状态显示为 **"活跃"**(绿色)。 + +## 测试路由功能 + +### 发送测试请求 + +使用 cURL 或其他 HTTP 客户端测试路由功能: + +```bash +# 发送聊天请求 +curl -X POST http://localhost:3000/api/chat \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { + "role": "user", + "content": "Hello, this is a test message!" + } + ] + }' +``` + +### 查看请求日志 + +在 MCPHub 管理界面的 **"监控"** 页面中,您可以实时查看: + +- 请求数量和响应时间 +- 服务器健康状态 +- 错误日志和统计 + +## 后续步骤 + +恭喜!您已经成功部署了 MCPHub 并添加了第一个 MCP 服务器。接下来您可以: + + + + 学习如何配置智能路由和负载均衡策略 + + + 了解服务器管理的高级功能 + + + 配置性能监控和告警通知 + + + 将 MCPHub 集成到您的应用程序中 + + + +## 常见问题 + + + + **可能原因**: + - 服务器地址错误或服务器未启动 + - 防火墙阻止连接 + - 网络配置问题 + + **解决方案**: + 1. 验证服务器是否正在运行:`curl http://localhost:3001/health` + 2. 检查防火墙设置 + 3. 确认网络连接正常 + + + + + **可能原因**: + - 健康检查失败 + - 服务器响应超时 + - 服务器崩溃或重启 + + **解决方案**: + 1. 检查服务器日志 + 2. 调整健康检查间隔 + 3. 
重启服务器进程 + + + + + **解决方案**: + ```bash + # 重置管理员密码 + npm run reset-admin-password + ``` + 或者删除数据库文件重新初始化: + ```bash + rm data/mcphub.db + npm run db:setup + ``` + + + +## 获取帮助 + +如果您在设置过程中遇到问题: + +- 📖 查看 [完整文档](/zh/development/getting-started) +- 🐛 在 [GitHub](https://github.com/mcphub/mcphub/issues) 上报告问题 +- 💬 加入 [Discord 社区](https://discord.gg/mcphub) 获取实时帮助 +- 📧 发送邮件至 support@mcphub.io diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100644 index 0000000000000000000000000000000000000000..03f3d71adc89a57f74709aa890cd439dc41d6f64 --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +NPM_REGISTRY=${NPM_REGISTRY:-https://registry.npmjs.org/} +echo "Setting npm registry to ${NPM_REGISTRY}" +npm config set registry "$NPM_REGISTRY" + +# 处理 HTTP_PROXY 和 HTTPS_PROXY 环境变量 +if [ -n "$HTTP_PROXY" ]; then + echo "Setting HTTP proxy to ${HTTP_PROXY}" + npm config set proxy "$HTTP_PROXY" + export HTTP_PROXY="$HTTP_PROXY" +fi + +if [ -n "$HTTPS_PROXY" ]; then + echo "Setting HTTPS proxy to ${HTTPS_PROXY}" + npm config set https-proxy "$HTTPS_PROXY" + export HTTPS_PROXY="$HTTPS_PROXY" +fi + +echo "Using REQUEST_TIMEOUT: $REQUEST_TIMEOUT" + +exec "$@" diff --git a/frontend/favicon.ico b/frontend/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..c8150a9c4e1ed7bd4b4612bd6c27bc321e5fd810 Binary files /dev/null and b/frontend/favicon.ico differ diff --git a/frontend/index.html b/frontend/index.html new file mode 100644 index 0000000000000000000000000000000000000000..8449b960b41c699d38d8da48a525e2386b743647 --- /dev/null +++ b/frontend/index.html @@ -0,0 +1,13 @@ + + + + + + MCP Hub Dashboard + + + +
+ + + \ No newline at end of file diff --git a/frontend/postcss.config.js b/frontend/postcss.config.js new file mode 100644 index 0000000000000000000000000000000000000000..51a6e4e62be6d68903fc0f44a0bd8b4ea4eae7e5 --- /dev/null +++ b/frontend/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + '@tailwindcss/postcss': {}, + autoprefixer: {}, + }, +}; diff --git a/frontend/public/assets/reward.png b/frontend/public/assets/reward.png new file mode 100644 index 0000000000000000000000000000000000000000..dafae0267b16b00758af018ae68f13e92dcfcac2 Binary files /dev/null and b/frontend/public/assets/reward.png differ diff --git a/frontend/public/assets/wexin.png b/frontend/public/assets/wexin.png new file mode 100644 index 0000000000000000000000000000000000000000..b72f4c59e10ae7d2809af2cd780cee3069c4917d Binary files /dev/null and b/frontend/public/assets/wexin.png differ diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx new file mode 100644 index 0000000000000000000000000000000000000000..07cc2b70019950b10eeac751106e264938d68bd8 --- /dev/null +++ b/frontend/src/App.tsx @@ -0,0 +1,51 @@ +import React from 'react'; +import { BrowserRouter as Router, Route, Routes, Navigate } from 'react-router-dom'; +import { AuthProvider } from './contexts/AuthContext'; +import { ToastProvider } from './contexts/ToastContext'; +import { ThemeProvider } from './contexts/ThemeContext'; +import MainLayout from './layouts/MainLayout'; +import ProtectedRoute from './components/ProtectedRoute'; +import LoginPage from './pages/LoginPage'; +import DashboardPage from './pages/Dashboard'; +import ServersPage from './pages/ServersPage'; +import GroupsPage from './pages/GroupsPage'; +import SettingsPage from './pages/SettingsPage'; +import MarketPage from './pages/MarketPage'; +import LogsPage from './pages/LogsPage'; +import { getBasePath } from './utils/runtime'; + +function App() { + const basename = getBasePath(); + return ( + + + + + + {/* 公共路由 */} + } /> + + {/* 受保护的路由,使用 
MainLayout 作为布局容器 */} + }> + }> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + + + + {/* 未匹配的路由重定向到首页 */} + } /> + + + + + + ); +} + +export default App; \ No newline at end of file diff --git a/frontend/src/components/AddGroupForm.tsx b/frontend/src/components/AddGroupForm.tsx new file mode 100644 index 0000000000000000000000000000000000000000..ad40ef6a862a7cda571d64758e49ca6f0b3ef38a --- /dev/null +++ b/frontend/src/components/AddGroupForm.tsx @@ -0,0 +1,132 @@ +import { useState, useEffect } from 'react' +import { useTranslation } from 'react-i18next' +import { useGroupData } from '@/hooks/useGroupData' +import { useServerData } from '@/hooks/useServerData' +import { GroupFormData, Server } from '@/types' +import { ToggleGroup } from './ui/ToggleGroup' + +interface AddGroupFormProps { + onAdd: () => void + onCancel: () => void +} + +const AddGroupForm = ({ onAdd, onCancel }: AddGroupFormProps) => { + const { t } = useTranslation() + const { createGroup } = useGroupData() + const { servers } = useServerData() + const [availableServers, setAvailableServers] = useState([]) + const [error, setError] = useState(null) + const [isSubmitting, setIsSubmitting] = useState(false) + + const [formData, setFormData] = useState({ + name: '', + description: '', + servers: [] + }) + + useEffect(() => { + // Filter available servers (enabled only) + setAvailableServers(servers.filter(server => server.enabled !== false)) + }, [servers]) + + const handleChange = (e: React.ChangeEvent) => { + const { name, value } = e.target + setFormData(prev => ({ + ...prev, + [name]: value + })) + } + + const handleSubmit = async (e: React.FormEvent) => { + e.preventDefault() + setIsSubmitting(true) + setError(null) + + try { + if (!formData.name.trim()) { + setError(t('groups.nameRequired')) + setIsSubmitting(false) + return + } + + const result = await createGroup(formData.name, formData.description, formData.servers) + + if (!result) { + setError(t('groups.createError')) + 
setIsSubmitting(false) + return + } + + onAdd() + } catch (err) { + setError(err instanceof Error ? err.message : String(err)) + setIsSubmitting(false) + } + } + + return ( +
+
+
+

{t('groups.addNew')}

+ + {error && ( +
+ {error} +
+ )} + +
+
+ + +
+ + ({ + value: server.name, + label: server.name + }))} + onChange={(servers) => setFormData(prev => ({ ...prev, servers }))} + /> + +
+ + +
+ +
+
+
+ ) +} + +export default AddGroupForm \ No newline at end of file diff --git a/frontend/src/components/AddServerForm.tsx b/frontend/src/components/AddServerForm.tsx new file mode 100644 index 0000000000000000000000000000000000000000..b0337e220fac7e6451a4638b2b00eb6b257e0a26 --- /dev/null +++ b/frontend/src/components/AddServerForm.tsx @@ -0,0 +1,94 @@ +import { useState } from 'react' +import { useTranslation } from 'react-i18next' +import ServerForm from './ServerForm' +import { getApiUrl } from '../utils/runtime'; + +interface AddServerFormProps { + onAdd: () => void +} + +const AddServerForm = ({ onAdd }: AddServerFormProps) => { + const { t } = useTranslation() + const [modalVisible, setModalVisible] = useState(false) + const [error, setError] = useState(null) + + const toggleModal = () => { + setModalVisible(!modalVisible) + setError(null) // Clear any previous errors when toggling modal + } + + const handleSubmit = async (payload: any) => { + try { + setError(null) + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/servers'), { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '' + }, + body: JSON.stringify(payload), + }) + + const result = await response.json() + + if (!response.ok) { + // Use specific error message from the response if available + if (result && result.message) { + setError(result.message) + } else if (response.status === 400) { + setError(t('server.invalidData')) + } else if (response.status === 409) { + setError(t('server.alreadyExists', { serverName: payload.name })) + } else { + setError(t('server.addError')) + } + return + } + + setModalVisible(false) + onAdd() + } catch (err) { + console.error('Error adding server:', err) + + // Use friendly error messages based on error type + if (!navigator.onLine) { + setError(t('errors.network')) + } else if (err instanceof TypeError && ( + err.message.includes('NetworkError') || + 
err.message.includes('Failed to fetch') + )) { + setError(t('errors.serverConnection')) + } else { + setError(t('errors.serverAdd')) + } + } + } + + return ( +
+ + + {modalVisible && ( +
+ +
+ )} +
+ ) +} + +export default AddServerForm \ No newline at end of file diff --git a/frontend/src/components/ChangePasswordForm.tsx b/frontend/src/components/ChangePasswordForm.tsx new file mode 100644 index 0000000000000000000000000000000000000000..59ef9504658559766ca018ae3b068e7a37c7c5ce --- /dev/null +++ b/frontend/src/components/ChangePasswordForm.tsx @@ -0,0 +1,158 @@ +import React, { useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { ChangePasswordCredentials } from '../types'; +import { changePassword } from '../services/authService'; + +interface ChangePasswordFormProps { + onSuccess?: () => void; + onCancel?: () => void; +} + +const ChangePasswordForm: React.FC = ({ onSuccess, onCancel }) => { + const { t } = useTranslation(); + const [formData, setFormData] = useState({ + currentPassword: '', + newPassword: '', + }); + const [confirmPassword, setConfirmPassword] = useState(''); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState(null); + const [success, setSuccess] = useState(false); + + const handleChange = (e: React.ChangeEvent) => { + const { name, value } = e.target; + if (name === 'confirmPassword') { + setConfirmPassword(value); + } else { + setFormData(prev => ({ ...prev, [name]: value })); + } + }; + + const handleSubmit = async (e: React.FormEvent) => { + e.preventDefault(); + setError(null); + + // Validate passwords match + if (formData.newPassword !== confirmPassword) { + setError(t('auth.passwordsNotMatch')); + return; + } + + setIsLoading(true); + try { + const response = await changePassword(formData); + + if (response.success) { + setSuccess(true); + if (onSuccess) { + onSuccess(); + } + } else { + setError(response.message || t('auth.changePasswordError')); + } + } catch (err) { + setError(t('auth.changePasswordError')); + } finally { + setIsLoading(false); + } + }; + + return ( +
+

{t('auth.changePassword')}

+ + {success ? ( +
+ {t('auth.changePasswordSuccess')} +
+ ) : ( +
+ {error && ( +
+ {error} +
+ )} + +
+ + +
+ +
+ + +
+ +
+ + +
+ +
+ {onCancel && ( + + )} + +
+
+ )} +
+ ); +}; + +export default ChangePasswordForm; \ No newline at end of file diff --git a/frontend/src/components/EditGroupForm.tsx b/frontend/src/components/EditGroupForm.tsx new file mode 100644 index 0000000000000000000000000000000000000000..dcadf3ec9b18784a98ed5fd1a5590b1c68f0b673 --- /dev/null +++ b/frontend/src/components/EditGroupForm.tsx @@ -0,0 +1,149 @@ +import { useState, useEffect } from 'react' +import { useTranslation } from 'react-i18next' +import { Group, GroupFormData, Server } from '@/types' +import { useGroupData } from '@/hooks/useGroupData' +import { useServerData } from '@/hooks/useServerData' +import { ToggleGroup } from './ui/ToggleGroup' + +interface EditGroupFormProps { + group: Group + onEdit: () => void + onCancel: () => void +} + +const EditGroupForm = ({ group, onEdit, onCancel }: EditGroupFormProps) => { + const { t } = useTranslation() + const { updateGroup } = useGroupData() + const { servers } = useServerData() + const [availableServers, setAvailableServers] = useState([]) + const [error, setError] = useState(null) + const [isSubmitting, setIsSubmitting] = useState(false) + + const [formData, setFormData] = useState({ + name: group.name, + description: group.description || '', + servers: group.servers || [] + }) + + useEffect(() => { + // Filter available servers (enabled only) + setAvailableServers(servers.filter(server => server.enabled !== false)) + }, [servers]) + + const handleChange = (e: React.ChangeEvent) => { + const { name, value } = e.target + setFormData(prev => ({ + ...prev, + [name]: value + })) + } + + const handleServerToggle = (serverName: string) => { + setFormData(prev => { + const isSelected = prev.servers.includes(serverName) + return { + ...prev, + servers: isSelected + ? 
prev.servers.filter(name => name !== serverName) + : [...prev.servers, serverName] + } + }) + } + + const handleSubmit = async (e: React.FormEvent) => { + e.preventDefault() + setIsSubmitting(true) + setError(null) + + try { + if (!formData.name.trim()) { + setError(t('groups.nameRequired')) + setIsSubmitting(false) + return + } + + const result = await updateGroup(group.id, { + name: formData.name, + description: formData.description, + servers: formData.servers + }) + + if (!result) { + setError(t('groups.updateError')) + setIsSubmitting(false) + return + } + + onEdit() + } catch (err) { + setError(err instanceof Error ? err.message : String(err)) + setIsSubmitting(false) + } + } + + return ( +
+
+
+

{t('groups.edit')}

+ + {error && ( +
+ {error} +
+ )} + +
+
+ + +
+ + ({ + value: server.name, + label: server.name + }))} + onChange={(servers) => setFormData(prev => ({ ...prev, servers }))} + /> + +
+ + +
+ +
+
+
+ ) +} + +export default EditGroupForm \ No newline at end of file diff --git a/frontend/src/components/EditServerForm.tsx b/frontend/src/components/EditServerForm.tsx new file mode 100644 index 0000000000000000000000000000000000000000..fce9cb4ac407f0b162cb4e2df91f2b81b654b52a --- /dev/null +++ b/frontend/src/components/EditServerForm.tsx @@ -0,0 +1,77 @@ +import { useState } from 'react' +import { useTranslation } from 'react-i18next' +import { Server } from '@/types' +import { getApiUrl } from '../utils/runtime' +import ServerForm from './ServerForm' + +interface EditServerFormProps { + server: Server + onEdit: () => void + onCancel: () => void +} + +const EditServerForm = ({ server, onEdit, onCancel }: EditServerFormProps) => { + const { t } = useTranslation() + const [error, setError] = useState(null) + + const handleSubmit = async (payload: any) => { + try { + setError(null) + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl(`/servers/${server.name}`), { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '' + }, + body: JSON.stringify(payload), + }) + + const result = await response.json() + + if (!response.ok) { + // Use specific error message from the response if available + if (result && result.message) { + setError(result.message) + } else if (response.status === 404) { + setError(t('server.notFound', { serverName: server.name })) + } else if (response.status === 400) { + setError(t('server.invalidData')) + } else { + setError(t('server.updateError', { serverName: server.name })) + } + return + } + + onEdit() + } catch (err) { + console.error('Error updating server:', err) + + // Use friendly error messages based on error type + if (!navigator.onLine) { + setError(t('errors.network')) + } else if (err instanceof TypeError && ( + err.message.includes('NetworkError') || + err.message.includes('Failed to fetch') + )) { + setError(t('errors.serverConnection')) + } else { + 
setError(t('errors.serverUpdate', { serverName: server.name })) + } + } + } + + return ( +
+ +
+ ) +} + +export default EditServerForm \ No newline at end of file diff --git a/frontend/src/components/GroupCard.tsx b/frontend/src/components/GroupCard.tsx new file mode 100644 index 0000000000000000000000000000000000000000..d4ce4eee113e9c10041460647c877e9adf94b31f --- /dev/null +++ b/frontend/src/components/GroupCard.tsx @@ -0,0 +1,143 @@ +import { useState } from 'react' +import { useTranslation } from 'react-i18next' +import { Group, Server } from '@/types' +import { Edit, Trash, Copy, Check } from '@/components/icons/LucideIcons' +import DeleteDialog from '@/components/ui/DeleteDialog' +import { useToast } from '@/contexts/ToastContext' + +interface GroupCardProps { + group: Group + servers: Server[] + onEdit: (group: Group) => void + onDelete: (groupId: string) => void +} + +const GroupCard = ({ + group, + servers, + onEdit, + onDelete +}: GroupCardProps) => { + const { t } = useTranslation() + const { showToast } = useToast() + const [showDeleteDialog, setShowDeleteDialog] = useState(false) + const [copied, setCopied] = useState(false) + + const handleEdit = () => { + onEdit(group) + } + + const handleDelete = () => { + setShowDeleteDialog(true) + } + + const handleConfirmDelete = () => { + onDelete(group.id) + setShowDeleteDialog(false) + } + + const copyToClipboard = () => { + if (navigator.clipboard && window.isSecureContext) { + navigator.clipboard.writeText(group.id).then(() => { + setCopied(true) + setTimeout(() => setCopied(false), 2000) + }) + } else { + // Fallback for HTTP or unsupported clipboard API + const textArea = document.createElement('textarea') + textArea.value = group.id + // Avoid scrolling to bottom + textArea.style.position = 'fixed' + textArea.style.left = '-9999px' + document.body.appendChild(textArea) + textArea.focus() + textArea.select() + try { + document.execCommand('copy') + setCopied(true) + setTimeout(() => setCopied(false), 2000) + } catch (err) { + showToast(t('common.copyFailed') || 'Copy failed', 'error') + 
console.error('Copy to clipboard failed:', err) + } + document.body.removeChild(textArea) + } + } + + // Get servers that belong to this group + const groupServers = servers.filter(server => group.servers.includes(server.name)) + + return ( +
+
+
+
+

{group.name}

+
+ {group.id} + +
+
+ {group.description && ( +

{group.description}

+ )} +
+
+
+ {t('groups.serverCount', { count: group.servers.length })} +
+ + +
+
+ +
+ {groupServers.length === 0 ? ( +

{t('groups.noServers')}

+ ) : ( +
+ {groupServers.map(server => ( +
+ {server.name} + +
+ ))} +
+ )} +
+ + setShowDeleteDialog(false)} + onConfirm={handleConfirmDelete} + serverName={group.name} + isGroup={true} + /> +
+ ) +} + +export default GroupCard \ No newline at end of file diff --git a/frontend/src/components/LogViewer.tsx b/frontend/src/components/LogViewer.tsx new file mode 100644 index 0000000000000000000000000000000000000000..4d857bf8ad22b4636e6cec45828c60f36ed78bab --- /dev/null +++ b/frontend/src/components/LogViewer.tsx @@ -0,0 +1,191 @@ +import React, { useEffect, useRef, useState } from 'react'; +import { LogEntry } from '../services/logService'; +import { Button } from './ui/Button'; +import { Badge } from './ui/Badge'; +import { useTranslation } from 'react-i18next'; + +interface LogViewerProps { + logs: LogEntry[]; + isLoading?: boolean; + error?: Error | null; + onClear?: () => void; +} + +const LogViewer: React.FC = ({ logs, isLoading = false, error = null, onClear }) => { + const { t } = useTranslation(); + const logContainerRef = useRef(null); + const [autoScroll, setAutoScroll] = useState(true); + const [filter, setFilter] = useState(''); + const [typeFilter, setTypeFilter] = useState>(['info', 'error', 'warn', 'debug']); + const [sourceFilter, setSourceFilter] = useState>(['main', 'child']); + + // Auto scroll to bottom when new logs come in if autoScroll is enabled + useEffect(() => { + if (autoScroll && logContainerRef.current) { + logContainerRef.current.scrollTop = logContainerRef.current.scrollHeight; + } + }, [logs, autoScroll]); + + // Filter logs based on current filter settings + const filteredLogs = logs.filter(log => { + const matchesText = filter ? 
log.message.toLowerCase().includes(filter.toLowerCase()) : true; + const matchesType = typeFilter.includes(log.type); + const matchesSource = sourceFilter.includes(log.source as 'main' | 'child'); + return matchesText && matchesType && matchesSource; + }); + + // Format timestamp to readable format + const formatTimestamp = (timestamp: number) => { + const date = new Date(timestamp); + return date.toLocaleTimeString([], { + hour: '2-digit', + minute: '2-digit', + second: '2-digit', + hour12: false + }); + }; + + // Get badge color based on log type + const getLogTypeColor = (type: string) => { + switch (type) { + case 'error': return 'bg-red-400'; + case 'warn': return 'bg-yellow-400'; + case 'debug': return 'bg-purple-400'; + default: return 'bg-blue-400'; + } + }; + + // Get badge color based on log source + const getSourceColor = (source: string) => { + switch (source) { + case 'main': return 'bg-green-400'; + case 'child': return 'bg-orange-400'; + default: return 'bg-gray-400'; + } + }; + + return ( +
+
+
+ {t('logs.filters')}: + + {/* Text search filter */} + setFilter(e.target.value)} + /> + + {/* Log type filters */} +
+ {(['info', 'error', 'warn', 'debug'] as const).map(type => ( + { + if (typeFilter.includes(type)) { + setTypeFilter(prev => prev.filter(t => t !== type)); + } else { + setTypeFilter(prev => [...prev, type]); + } + }} + > + {type} + + ))} +
+ + {/* Log source filters */} +
+ {(['main', 'child'] as const).map(source => ( + { + if (sourceFilter.includes(source)) { + setSourceFilter(prev => prev.filter(s => s !== source)); + } else { + setSourceFilter(prev => [...prev, source]); + } + }} + > + {source === 'main' ? t('logs.mainProcess') : t('logs.childProcess')} + + ))} +
+
+ +
+ + +
+
+ +
+ {isLoading ? ( +
+ {t('logs.loading')} +
+ ) : error ? ( +
+ {error.message} +
+ ) : filteredLogs.length === 0 ? ( +
+ {filter || typeFilter.length < 4 || sourceFilter.length < 2 + ? t('logs.noMatch') + : t('logs.noLogs')} +
+ ) : ( + filteredLogs.map((log, index) => ( +
+ [{formatTimestamp(log.timestamp)}] + + {log.type} + + + {log.source === 'main' ? t('logs.main') : t('logs.child')} + {log.processId ? ` (${log.processId})` : ''} + + {log.message} +
+ )) + )} +
+
+ ); +}; + +export default LogViewer; \ No newline at end of file diff --git a/frontend/src/components/MarketServerCard.tsx b/frontend/src/components/MarketServerCard.tsx new file mode 100644 index 0000000000000000000000000000000000000000..ad047686e32e0c2c38bece8850ef959a11c1546b --- /dev/null +++ b/frontend/src/components/MarketServerCard.tsx @@ -0,0 +1,153 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { MarketServer } from '@/types'; + +interface MarketServerCardProps { + server: MarketServer; + onClick: (server: MarketServer) => void; +} + +const MarketServerCard: React.FC = ({ server, onClick }) => { + const { t } = useTranslation(); + + // Intelligently calculate how many tags to display to ensure they fit in a single line + const getTagsToDisplay = () => { + if (!server.tags || server.tags.length === 0) { + return { tagsToShow: [], hasMore: false, moreCount: 0 }; + } + + // Estimate available width in the card (in characters) + const estimatedAvailableWidth = 28; // Estimated number of characters that can fit in one line + + // Calculate the character space needed for tags and plus sign (including # and spacing) + const calculateTagWidth = (tag: string) => tag.length + 3; // +3 for # and spacing + + // Loop to determine the maximum number of tags that can be displayed + let totalWidth = 0; + let i = 0; + + // First, sort tags by length to prioritize displaying shorter tags + const sortedTags = [...server.tags].sort((a, b) => a.length - b.length); + + // Calculate how many tags can fit + for (i = 0; i < sortedTags.length; i++) { + const tagWidth = calculateTagWidth(sortedTags[i]); + + // If this tag would make the total width exceed available width, stop adding + if (totalWidth + tagWidth > estimatedAvailableWidth) { + break; + } + + totalWidth += tagWidth; + + // If this is the last tag but there's still space, no need to show "more" + if (i === sortedTags.length - 1) { + return { + tagsToShow: sortedTags, + hasMore: 
false, + moreCount: 0 + }; + } + } + + // If there's not enough space to display any tags, show at least one + if (i === 0 && sortedTags.length > 0) { + i = 1; + } + + // Calculate space needed for the "more" tag + const moreCount = sortedTags.length - i; + const moreTagWidth = 3 + String(moreCount).length + t('market.moreTags').length; + + // If there's enough remaining space to display the "more" tag + if (totalWidth + moreTagWidth <= estimatedAvailableWidth || i < 1) { + return { + tagsToShow: sortedTags.slice(0, i), + hasMore: true, + moreCount + }; + } + + // If there's not enough space for even the "more" tag, reduce one tag to make room + return { + tagsToShow: sortedTags.slice(0, Math.max(1, i - 1)), + hasMore: true, + moreCount: moreCount + 1 + }; + }; + + const { tagsToShow, hasMore, moreCount } = getTagsToDisplay(); + + return ( +
onClick(server)} + > +
+

{server.display_name}

+ {server.is_official && ( + + {t('market.official')} + + )} +
+

{server.description}

+ + {/* Categories */} +
+ {server.categories?.length > 0 ? ( + server.categories.map((category, index) => ( + + {category} + + )) + ) : ( + - + )} +
+ + {/* Tags */} +
+ {server.tags?.length > 0 ? ( +
+ {tagsToShow.map((tag, index) => ( + + #{tag} + + ))} + {hasMore && ( + + +{moreCount} {t('market.moreTags')} + + )} +
+ ) : ( + - + )} +
+ +
+
+ {t('market.by')} + + {server.author?.name || t('market.unknown')} + +
+
+ + + + {server.tools?.length || 0} {t('market.tools')} +
+
+
+ ); +}; + +export default MarketServerCard; \ No newline at end of file diff --git a/frontend/src/components/MarketServerDetail.tsx b/frontend/src/components/MarketServerDetail.tsx new file mode 100644 index 0000000000000000000000000000000000000000..ae054965bdf1b96e1670f8801c43da356a36423a --- /dev/null +++ b/frontend/src/components/MarketServerDetail.tsx @@ -0,0 +1,297 @@ +import React, { useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { MarketServer, MarketServerInstallation } from '@/types'; +import ServerForm from './ServerForm'; + +interface MarketServerDetailProps { + server: MarketServer; + onBack: () => void; + onInstall: (server: MarketServer) => void; + installing?: boolean; + isInstalled?: boolean; +} + +const MarketServerDetail: React.FC = ({ + server, + onBack, + onInstall, + installing = false, + isInstalled = false +}) => { + const { t } = useTranslation(); + const [modalVisible, setModalVisible] = useState(false); + const [error, setError] = useState(null); + + // Helper function to determine button state + const getButtonProps = () => { + if (isInstalled) { + return { + className: "bg-green-600 cursor-default px-4 py-2 rounded text-sm font-medium text-white", + disabled: true, + text: t('market.installed') + }; + } else if (installing) { + return { + className: "bg-gray-400 cursor-not-allowed px-4 py-2 rounded text-sm font-medium text-white", + disabled: true, + text: t('market.installing') + }; + } else { + return { + className: "bg-blue-600 hover:bg-blue-700 px-4 py-2 rounded text-sm font-medium text-white", + disabled: false, + text: t('market.install') + }; + } + }; + + const toggleModal = () => { + setModalVisible(!modalVisible); + setError(null); // Clear any previous errors when toggling modal + }; + + const handleInstall = () => { + if (!isInstalled) { + toggleModal(); + } + }; + + // Get the preferred installation configuration based on priority: + // npm > uvx > default + const getPreferredInstallation = 
(): MarketServerInstallation | undefined => { + if (!server.installations) { + return undefined; + } + + if (server.installations.npm) { + return server.installations.npm; + } else if (server.installations.uvx) { + return server.installations.uvx; + } else if (server.installations.default) { + return server.installations.default; + } + + // If none of the preferred types are available, get the first available installation type + const installTypes = Object.keys(server.installations); + if (installTypes.length > 0) { + return server.installations[installTypes[0]]; + } + + return undefined; + }; + + const handleSubmit = async (payload: any) => { + try { + setError(null); + // Pass the server object to the parent component for installation + onInstall(server); + setModalVisible(false); + } catch (err) { + console.error('Error installing server:', err); + setError(t('errors.serverInstall')); + } + }; + + const buttonProps = getButtonProps(); + const preferredInstallation = getPreferredInstallation(); + + return ( +
+
+ +
+ +
+
+

+ {server.display_name} + ({server.name}) + + {t('market.author')}: {server.author.name} • {t('market.license')}: {server.license} • + + {t('market.repository')} + + +

+
+ +
+ {server.is_official && ( + + {t('market.official')} + + )} + +
+
+ +

{server.description}

+ +
+

{t('market.categories')} & {t('market.tags')}

+
+ {server.categories?.map((category, index) => ( + + {category} + + ))} + {server.tags && server.tags.map((tag, index) => ( + + #{tag} + + ))} +
+
+ + {server.arguments && Object.keys(server.arguments).length > 0 && ( +
+

{t('market.arguments')}

+
+ + + + + + + + + + + {Object.entries(server.arguments).map(([name, arg], index) => ( + + + + + + + ))} + +
+ {t('market.argumentName')} + + {t('market.description')} + + {t('market.required')} + + {t('market.example')} +
+ {name} + + {arg.description} + + {arg.required ? ( + + ) : ( + + )} + + {arg.example} +
+
+
+ )} + +
+

{t('market.tools')}

+
+ {server.tools?.map((tool, index) => ( +
+

+ {tool.name} + +

+

{tool.description}

+
+
+                  {JSON.stringify(tool.inputSchema, null, 2)}
+                
+
+
+ ))} +
+
+ + {server.examples && server.examples.length > 0 && ( +
+

{t('market.examples')}

+
+ {server.examples.map((example, index) => ( +
+

{example.title}

+

{example.description}

+
+                  {example.prompt}
+                
+
+ ))} +
+
+ )} + +
+ +
+ + {modalVisible && ( +
+ +
+ )} +
+ ); +}; + +export default MarketServerDetail; \ No newline at end of file diff --git a/frontend/src/components/ProtectedRoute.tsx b/frontend/src/components/ProtectedRoute.tsx new file mode 100644 index 0000000000000000000000000000000000000000..00cdd6b3d31e56e7f4fc748363ffb66c54e08bee --- /dev/null +++ b/frontend/src/components/ProtectedRoute.tsx @@ -0,0 +1,27 @@ +import React from 'react'; +import { Navigate, Outlet } from 'react-router-dom'; +import { useTranslation } from 'react-i18next'; +import { useAuth } from '../contexts/AuthContext'; + +interface ProtectedRouteProps { + redirectPath?: string; +} + +const ProtectedRoute: React.FC = ({ + redirectPath = '/login' +}) => { + const { t } = useTranslation(); + const { auth } = useAuth(); + + if (auth.loading) { + return
{t('app.loading')}
; + } + + if (!auth.isAuthenticated) { + return ; + } + + return ; +}; + +export default ProtectedRoute; \ No newline at end of file diff --git a/frontend/src/components/ServerCard.tsx b/frontend/src/components/ServerCard.tsx new file mode 100644 index 0000000000000000000000000000000000000000..a1863f4409e388f5cd0796bb24b7c5c05e1fe494 --- /dev/null +++ b/frontend/src/components/ServerCard.tsx @@ -0,0 +1,237 @@ +import { useState, useRef, useEffect } from 'react' +import { useTranslation } from 'react-i18next' +import { Server } from '@/types' +import { ChevronDown, ChevronRight, AlertCircle, Copy, Check } from 'lucide-react' +import { StatusBadge } from '@/components/ui/Badge' +import ToolCard from '@/components/ui/ToolCard' +import DeleteDialog from '@/components/ui/DeleteDialog' +import { useToast } from '@/contexts/ToastContext' + +interface ServerCardProps { + server: Server + onRemove: (serverName: string) => void + onEdit: (server: Server) => void + onToggle?: (server: Server, enabled: boolean) => void +} + +const ServerCard = ({ server, onRemove, onEdit, onToggle }: ServerCardProps) => { + const { t } = useTranslation() + const { showToast } = useToast() + const [isExpanded, setIsExpanded] = useState(false) + const [showDeleteDialog, setShowDeleteDialog] = useState(false) + const [isToggling, setIsToggling] = useState(false) + const [showErrorPopover, setShowErrorPopover] = useState(false) + const [copied, setCopied] = useState(false) + const errorPopoverRef = useRef(null) + + useEffect(() => { + const handleClickOutside = (event: MouseEvent) => { + if (errorPopoverRef.current && !errorPopoverRef.current.contains(event.target as Node)) { + setShowErrorPopover(false) + } + } + + document.addEventListener('mousedown', handleClickOutside) + return () => { + document.removeEventListener('mousedown', handleClickOutside) + } + }, []) + + const handleRemove = (e: React.MouseEvent) => { + e.stopPropagation() + setShowDeleteDialog(true) + } + + const handleEdit = (e: 
React.MouseEvent) => { + e.stopPropagation() + onEdit(server) + } + + const handleToggle = async (e: React.MouseEvent) => { + e.stopPropagation() + if (isToggling || !onToggle) return + + setIsToggling(true) + try { + await onToggle(server, !(server.enabled !== false)) + } finally { + setIsToggling(false) + } + } + + const handleErrorIconClick = (e: React.MouseEvent) => { + e.stopPropagation() + setShowErrorPopover(!showErrorPopover) + } + + const copyToClipboard = (e: React.MouseEvent) => { + e.stopPropagation() + if (!server.error) return + + if (navigator.clipboard && window.isSecureContext) { + navigator.clipboard.writeText(server.error).then(() => { + setCopied(true) + showToast(t('common.copySuccess') || 'Copied to clipboard', 'success') + setTimeout(() => setCopied(false), 2000) + }) + } else { + // Fallback for HTTP or unsupported clipboard API + const textArea = document.createElement('textarea') + textArea.value = server.error + // Avoid scrolling to bottom + textArea.style.position = 'fixed' + textArea.style.left = '-9999px' + document.body.appendChild(textArea) + textArea.focus() + textArea.select() + try { + document.execCommand('copy') + setCopied(true) + showToast(t('common.copySuccess') || 'Copied to clipboard', 'success') + setTimeout(() => setCopied(false), 2000) + } catch (err) { + showToast(t('common.copyFailed') || 'Copy failed', 'error') + console.error('Copy to clipboard failed:', err) + } + document.body.removeChild(textArea) + } + } + + const handleConfirmDelete = () => { + onRemove(server.name) + setShowDeleteDialog(false) + } + + return ( + <> +
+
setIsExpanded(!isExpanded)} + > +
+

{server.name}

+ + + {/* Tool count display */} +
+ + + + {server.tools?.length || 0} {t('server.tools')} +
+ + {server.error && ( +
+
+ +
+ + {showErrorPopover && ( +
e.stopPropagation()} + > +
+
+

{t('server.errorDetails')}

+ +
+ +
+
+
{server.error}
+
+
+ )} +
+ )} +
+
+ +
+ +
+ + +
+
+ + {isExpanded && server.tools && ( +
+
{t('server.tools')}
+
+ {server.tools.map((tool, index) => ( + + ))} +
+
+ )} +
+ + setShowDeleteDialog(false)} + onConfirm={handleConfirmDelete} + serverName={server.name} + /> + + ) +} + +export default ServerCard \ No newline at end of file diff --git a/frontend/src/components/ServerForm.tsx b/frontend/src/components/ServerForm.tsx new file mode 100644 index 0000000000000000000000000000000000000000..2556cd1a62bf6187d7a49531a83ac6eaab243ec6 --- /dev/null +++ b/frontend/src/components/ServerForm.tsx @@ -0,0 +1,388 @@ +import { useState } from 'react' +import { useTranslation } from 'react-i18next' +import { Server, EnvVar, ServerFormData } from '@/types' + +interface ServerFormProps { + onSubmit: (payload: any) => void + onCancel: () => void + initialData?: Server | null + modalTitle: string + formError?: string | null +} + +const ServerForm = ({ onSubmit, onCancel, initialData = null, modalTitle, formError = null }: ServerFormProps) => { + const { t } = useTranslation() + + // Determine the initial server type from the initialData + const getInitialServerType = () => { + if (!initialData || !initialData.config) return 'stdio'; + + if (initialData.config.type) { + return initialData.config.type; // Use explicit type if available + } else if (initialData.config.url) { + return 'sse'; // Fallback to SSE if URL exists + } else { + return 'stdio'; // Default to stdio + } + }; + + const [serverType, setServerType] = useState<'stdio' | 'sse' | 'streamable-http'>(getInitialServerType()); + + const [formData, setFormData] = useState({ + name: (initialData && initialData.name) || '', + url: (initialData && initialData.config && initialData.config.url) || '', + command: (initialData && initialData.config && initialData.config.command) || '', + arguments: + initialData && initialData.config && initialData.config.args + ? Array.isArray(initialData.config.args) + ? 
initialData.config.args.join(' ') + : String(initialData.config.args) + : '', + args: (initialData && initialData.config && initialData.config.args) || [], + type: getInitialServerType(), // Initialize the type field + env: [], + headers: [] + }) + + const [envVars, setEnvVars] = useState( + initialData && initialData.config && initialData.config.env + ? Object.entries(initialData.config.env).map(([key, value]) => ({ key, value })) + : [], + ) + + const [headerVars, setHeaderVars] = useState( + initialData && initialData.config && initialData.config.headers + ? Object.entries(initialData.config.headers).map(([key, value]) => ({ key, value })) + : [], + ) + + const [error, setError] = useState(null) + const isEdit = !!initialData + + const handleInputChange = (e: React.ChangeEvent) => { + const { name, value } = e.target + setFormData({ ...formData, [name]: value }) + } + + // Transform space-separated arguments string into array + const handleArgsChange = (value: string) => { + let args = value.split(' ').filter((arg) => arg.trim() !== '') + setFormData({ ...formData, arguments: value, args }) + } + + const updateServerType = (type: 'stdio' | 'sse' | 'streamable-http') => { + setServerType(type); + setFormData(prev => ({ ...prev, type })); + } + + const handleEnvVarChange = (index: number, field: 'key' | 'value', value: string) => { + const newEnvVars = [...envVars] + newEnvVars[index][field] = value + setEnvVars(newEnvVars) + } + + const addEnvVar = () => { + setEnvVars([...envVars, { key: '', value: '' }]) + } + + const removeEnvVar = (index: number) => { + const newEnvVars = [...envVars] + newEnvVars.splice(index, 1) + setEnvVars(newEnvVars) + } + + const handleHeaderVarChange = (index: number, field: 'key' | 'value', value: string) => { + const newHeaderVars = [...headerVars] + newHeaderVars[index][field] = value + setHeaderVars(newHeaderVars) + } + + const addHeaderVar = () => { + setHeaderVars([...headerVars, { key: '', value: '' }]) + } + + const 
removeHeaderVar = (index: number) => { + const newHeaderVars = [...headerVars] + newHeaderVars.splice(index, 1) + setHeaderVars(newHeaderVars) + } + + // Submit handler for server configuration + const handleSubmit = async (e: React.FormEvent) => { + e.preventDefault() + setError(null) + + try { + const env: Record = {} + envVars.forEach(({ key, value }) => { + if (key.trim()) { + env[key.trim()] = value + } + }) + + const headers: Record = {} + headerVars.forEach(({ key, value }) => { + if (key.trim()) { + headers[key.trim()] = value + } + }) + + const payload = { + name: formData.name, + config: { + type: serverType, // Always include the type + ...(serverType === 'sse' || serverType === 'streamable-http' + ? { + url: formData.url, + ...(Object.keys(headers).length > 0 ? { headers } : {}) + } + : { + command: formData.command, + args: formData.args, + env: Object.keys(env).length > 0 ? env : undefined, + } + ) + } + } + + onSubmit(payload) + } catch (err) { + setError(`Error: ${err instanceof Error ? err.message : String(err)}`) + } + } + + return ( +
+
+

{modalTitle}

+ +
+ + {(error || formError) && ( +
+ {formError || error} +
+ )} + +
+
+ + +
+ +
+ +
+
+ updateServerType('stdio')} + className="mr-1" + /> + +
+
+ updateServerType('sse')} + className="mr-1" + /> + +
+
+ updateServerType('streamable-http')} + className="mr-1" + /> + +
+
+
+ + {serverType === 'sse' || serverType === 'streamable-http' ? ( + <> +
+ + +
+ +
+
+ + +
+ {headerVars.map((headerVar, index) => ( +
+
+ handleHeaderVarChange(index, 'key', e.target.value)} + className="shadow appearance-none border rounded py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline w-1/2" + placeholder="Authorization" + /> + : + handleHeaderVarChange(index, 'value', e.target.value)} + className="shadow appearance-none border rounded py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline w-1/2" + placeholder="Bearer token..." + /> +
+ +
+ ))} +
+ + ) : ( + <> +
+ + +
+
+ + handleArgsChange(e.target.value)} + className="shadow appearance-none border rounded w-full py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline" + placeholder="e.g.: -y time-mcp" + required={serverType === 'stdio'} + /> +
+ +
+
+ + +
+ {envVars.map((envVar, index) => ( +
+
+ handleEnvVarChange(index, 'key', e.target.value)} + className="shadow appearance-none border rounded py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline w-1/2" + placeholder={t('server.key')} + /> + : + handleEnvVarChange(index, 'value', e.target.value)} + className="shadow appearance-none border rounded py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline w-1/2" + placeholder={t('server.value')} + /> +
+ +
+ ))} +
+ + )} + +
+ + +
+
+
+ ) +} + +export default ServerForm \ No newline at end of file diff --git a/frontend/src/components/icons/DiscordIcon.tsx b/frontend/src/components/icons/DiscordIcon.tsx new file mode 100644 index 0000000000000000000000000000000000000000..b21e16dd10200fe083636c86f21cd3a0eb41ac62 --- /dev/null +++ b/frontend/src/components/icons/DiscordIcon.tsx @@ -0,0 +1,20 @@ +import React from 'react'; + +export const DiscordIcon: React.FC> = (props) => { + return ( + + Discord + + + ); +}; + +export default DiscordIcon; diff --git a/frontend/src/components/icons/GitHubIcon.tsx b/frontend/src/components/icons/GitHubIcon.tsx new file mode 100644 index 0000000000000000000000000000000000000000..afc9eb5c8e505b85006a37bed82ddce9fc86c309 --- /dev/null +++ b/frontend/src/components/icons/GitHubIcon.tsx @@ -0,0 +1,21 @@ +import React from 'react'; + +export const GitHubIcon: React.FC> = (props) => { + return ( + + GitHub + + + ); +}; + + +export default GitHubIcon; diff --git a/frontend/src/components/icons/LucideIcons.tsx b/frontend/src/components/icons/LucideIcons.tsx new file mode 100644 index 0000000000000000000000000000000000000000..eceab0d81919f68a88639e8d81b265f556b2c6c1 --- /dev/null +++ b/frontend/src/components/icons/LucideIcons.tsx @@ -0,0 +1,55 @@ +import { + ChevronDown, + ChevronRight, + Edit, + Trash, + Copy, + Check, + User, + Settings, + LogOut, + Info, + Play, + Loader, + CheckCircle, + XCircle, + AlertCircle +} from 'lucide-react' + +export { + ChevronDown, + ChevronRight, + Edit, + Trash, + Copy, + Check, + User, + Settings, + LogOut, + Info, + Play, + Loader, + CheckCircle, + XCircle, + AlertCircle +} + +const LucideIcons = { + ChevronDown, + ChevronRight, + Edit, + Trash, + Copy, + Check, + User, + Settings, + LogOut, + Info, + Play, + Loader, + CheckCircle, + XCircle, + AlertCircle +} + +export default LucideIcons \ No newline at end of file diff --git a/frontend/src/components/icons/SponsorIcon.tsx b/frontend/src/components/icons/SponsorIcon.tsx new file mode 
100644 index 0000000000000000000000000000000000000000..0facfe0840ed5effc9cd0ffd62d84ed8120b67a0 --- /dev/null +++ b/frontend/src/components/icons/SponsorIcon.tsx @@ -0,0 +1,20 @@ +import React from 'react'; + +export const SponsorIcon: React.FC> = (props) => { + return ( + + Sponsor + + + ); +}; + +export default SponsorIcon; diff --git a/frontend/src/components/icons/WeChatIcon.tsx b/frontend/src/components/icons/WeChatIcon.tsx new file mode 100644 index 0000000000000000000000000000000000000000..7e51a0b35c8c1a7d4d462bdc4a6bd1bcb0460788 --- /dev/null +++ b/frontend/src/components/icons/WeChatIcon.tsx @@ -0,0 +1,20 @@ +import React from 'react'; + +export const WeChatIcon: React.FC> = (props) => { + return ( + + WeChat + + + ); +}; + +export default WeChatIcon; diff --git a/frontend/src/components/icons/discord.svg b/frontend/src/components/icons/discord.svg new file mode 100644 index 0000000000000000000000000000000000000000..9d7796b8a3068f33744f39640c84b40138c81cf0 --- /dev/null +++ b/frontend/src/components/icons/discord.svg @@ -0,0 +1 @@ +Discord \ No newline at end of file diff --git a/frontend/src/components/icons/github.svg b/frontend/src/components/icons/github.svg new file mode 100644 index 0000000000000000000000000000000000000000..538ec5bf2a9a5724899daf728577cd0b8beaae90 --- /dev/null +++ b/frontend/src/components/icons/github.svg @@ -0,0 +1 @@ +GitHub \ No newline at end of file diff --git a/frontend/src/components/icons/sponsor.svg b/frontend/src/components/icons/sponsor.svg new file mode 100644 index 0000000000000000000000000000000000000000..a0c0e1ce83611cf953d45252529952f5b7e329a5 --- /dev/null +++ b/frontend/src/components/icons/sponsor.svg @@ -0,0 +1 @@ +GitHub Sponsors \ No newline at end of file diff --git a/frontend/src/components/icons/wechat.svg b/frontend/src/components/icons/wechat.svg new file mode 100644 index 0000000000000000000000000000000000000000..c3eb6c4a666f66fcda5df187936c5fee829d4ecb --- /dev/null +++ 
b/frontend/src/components/icons/wechat.svg @@ -0,0 +1 @@ +WeChat \ No newline at end of file diff --git a/frontend/src/components/layout/Content.tsx b/frontend/src/components/layout/Content.tsx new file mode 100644 index 0000000000000000000000000000000000000000..4c96c2f42abe78d2716c263f67e9a5d35235633a --- /dev/null +++ b/frontend/src/components/layout/Content.tsx @@ -0,0 +1,17 @@ +import React, { ReactNode } from 'react'; + +interface ContentProps { + children: ReactNode; +} + +const Content: React.FC = ({ children }) => { + return ( +
+
+ {children} +
+
+ ); +}; + +export default Content; \ No newline at end of file diff --git a/frontend/src/components/layout/Header.tsx b/frontend/src/components/layout/Header.tsx new file mode 100644 index 0000000000000000000000000000000000000000..1b12927f09f16d2445f93735cdd0187d6e99699a --- /dev/null +++ b/frontend/src/components/layout/Header.tsx @@ -0,0 +1,92 @@ +import React, { useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useAuth } from '@/contexts/AuthContext'; +import ThemeSwitch from '@/components/ui/ThemeSwitch'; +import GitHubIcon from '@/components/icons/GitHubIcon'; +import SponsorIcon from '@/components/icons/SponsorIcon'; +import WeChatIcon from '@/components/icons/WeChatIcon'; +import DiscordIcon from '@/components/icons/DiscordIcon'; +import SponsorDialog from '@/components/ui/SponsorDialog'; +import WeChatDialog from '@/components/ui/WeChatDialog'; + +interface HeaderProps { + onToggleSidebar: () => void; +} + +const Header: React.FC = ({ onToggleSidebar }) => { + const { t, i18n } = useTranslation(); + const { auth } = useAuth(); + const [sponsorDialogOpen, setSponsorDialogOpen] = useState(false); + const [wechatDialogOpen, setWechatDialogOpen] = useState(false); + + return ( +
+
+
+ {/* 侧边栏切换按钮 */} + + + {/* 应用标题 */} +

{t('app.title')}

+
+ + {/* Theme Switch and Version */} +
+ + {import.meta.env.PACKAGE_VERSION === 'dev' + ? import.meta.env.PACKAGE_VERSION + : `v${import.meta.env.PACKAGE_VERSION}`} + + + + + {i18n.language === 'zh' ? ( + + ) : ( + + + + )} + + +
+
+ + +
+ ); +}; + +export default Header; \ No newline at end of file diff --git a/frontend/src/components/layout/Sidebar.tsx b/frontend/src/components/layout/Sidebar.tsx new file mode 100644 index 0000000000000000000000000000000000000000..71b58390cb0c51616a8af761ca67518ba87d36ed --- /dev/null +++ b/frontend/src/components/layout/Sidebar.tsx @@ -0,0 +1,110 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { NavLink, useLocation } from 'react-router-dom'; +import UserProfileMenu from '@/components/ui/UserProfileMenu'; + +interface SidebarProps { + collapsed: boolean; +} + +interface MenuItem { + path: string; + label: string; + icon: React.ReactNode; +} + +const Sidebar: React.FC = ({ collapsed }) => { + const { t } = useTranslation(); + const location = useLocation(); + + // Application version from package.json (accessed via Vite environment variables) + const appVersion = import.meta.env.PACKAGE_VERSION as string; + + // Menu item configuration + const menuItems: MenuItem[] = [ + { + path: '/', + label: t('nav.dashboard'), + icon: ( + + + + + ), + }, + { + path: '/servers', + label: t('nav.servers'), + icon: ( + + + + ), + }, + { + path: '/groups', + label: t('nav.groups'), + icon: ( + + + + ), + }, + { + path: '/market', + label: t('nav.market'), + icon: ( + + + + ), + }, + { + path: '/logs', + label: t('nav.logs'), + icon: ( + + + + ), + }, + ]; + + return ( + + ); +}; + +export default Sidebar; \ No newline at end of file diff --git a/frontend/src/components/ui/AboutDialog.tsx b/frontend/src/components/ui/AboutDialog.tsx new file mode 100644 index 0000000000000000000000000000000000000000..9a63f4a5d76c2e4729e1b79b01eec771a4dd5d41 --- /dev/null +++ b/frontend/src/components/ui/AboutDialog.tsx @@ -0,0 +1,112 @@ +import React, { useEffect, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { X, RefreshCw } from 'lucide-react'; +import { checkLatestVersion, compareVersions } from '@/utils/version'; + 
+interface AboutDialogProps { + isOpen: boolean; + onClose: () => void; + version: string; +} + +const AboutDialog: React.FC = ({ isOpen, onClose, version }) => { + const { t } = useTranslation(); + const [hasNewVersion, setHasNewVersion] = useState(false); + const [latestVersion, setLatestVersion] = useState(""); + const [isChecking, setIsChecking] = useState(false); + + const checkForUpdates = async () => { + setIsChecking(true); + try { + const latest = await checkLatestVersion(); + if (latest) { + setLatestVersion(latest); + setHasNewVersion(compareVersions(version, latest) > 0); + } + } catch (error) { + console.error('Failed to check for updates:', error); + } finally { + setIsChecking(false); + } + }; + + useEffect(() => { + if (isOpen) { + checkForUpdates(); + } + }, [isOpen, version]); + + if (!isOpen) return null; + + return ( +
+
+
+ {/* Close button (X) in the top-right corner */} + + +

+ {t('about.title')} +

+ +
+
+ + {t('about.currentVersion')}: + + + {version} + +
+ + {hasNewVersion && latestVersion && ( +
+
+
+ + + +
+
+

{t('about.newVersionAvailable', { version: latestVersion })}

+

+ + {t('about.viewOnGitHub')} + +

+
+
+
+ )} + + +
+
+
+
+ ); +}; + +export default AboutDialog; diff --git a/frontend/src/components/ui/Badge.tsx b/frontend/src/components/ui/Badge.tsx new file mode 100644 index 0000000000000000000000000000000000000000..f5c29f6691a3d6ab51a7ddfdba1d006cb2a343f0 --- /dev/null +++ b/frontend/src/components/ui/Badge.tsx @@ -0,0 +1,67 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { ServerStatus } from '@/types'; +import { cn } from '../../utils/cn'; + +type BadgeVariant = 'default' | 'secondary' | 'outline' | 'destructive'; + +type BadgeProps = { + children: React.ReactNode; + variant?: BadgeVariant; + className?: string; + onClick?: () => void; +}; + +const badgeVariants = { + default: 'bg-blue-500 text-white hover:bg-blue-600', + secondary: 'bg-gray-100 text-gray-800 dark:bg-gray-700 dark:text-gray-300 hover:bg-gray-200 dark:hover:bg-gray-600', + outline: 'bg-transparent border border-gray-300 dark:border-gray-600 text-gray-700 dark:text-gray-300 hover:bg-gray-100 dark:hover:bg-gray-800', + destructive: 'bg-red-500 text-white hover:bg-red-600', +}; + +export function Badge({ + children, + variant = 'default', + className, + onClick +}: BadgeProps) { + return ( + + {children} + + ); +} + +// For backward compatibility with existing code +export const StatusBadge = ({ status }: { status: 'connected' | 'disconnected' | 'connecting' }) => { + const { t } = useTranslation(); + + const colors = { + connecting: 'bg-yellow-100 text-yellow-800', + connected: 'bg-green-100 text-green-800', + disconnected: 'bg-red-100 text-red-800', + }; + + // Map status to translation keys + const statusTranslations = { + connected: 'status.online', + disconnected: 'status.offline', + connecting: 'status.connecting' + }; + + return ( + + {t(statusTranslations[status] || status)} + + ); +}; \ No newline at end of file diff --git a/frontend/src/components/ui/Button.tsx b/frontend/src/components/ui/Button.tsx new file mode 100644 index 
0000000000000000000000000000000000000000..a60d9f8391660e1f04c49c0d82ac769f96ec90a2 --- /dev/null +++ b/frontend/src/components/ui/Button.tsx @@ -0,0 +1,51 @@ +import React from 'react'; +import { cn } from '../../utils/cn'; + +type ButtonVariant = 'default' | 'outline' | 'ghost' | 'link' | 'destructive'; +type ButtonSize = 'default' | 'sm' | 'lg' | 'icon'; + +interface ButtonProps extends React.ButtonHTMLAttributes { + variant?: ButtonVariant; + size?: ButtonSize; + asChild?: boolean; + children: React.ReactNode; +} + +const variantStyles: Record = { + default: 'bg-blue-500 text-white hover:bg-blue-600 focus:ring-blue-500', + outline: 'border border-gray-300 dark:border-gray-700 bg-transparent hover:bg-gray-100 dark:hover:bg-gray-800 text-gray-700 dark:text-gray-300', + ghost: 'bg-transparent hover:bg-gray-100 dark:hover:bg-gray-800 text-gray-700 dark:text-gray-300', + link: 'bg-transparent underline-offset-4 hover:underline text-blue-500 hover:text-blue-600', + destructive: 'bg-red-500 text-white hover:bg-red-600 focus:ring-red-500', +}; + +const sizeStyles: Record = { + default: 'h-10 py-2 px-4', + sm: 'h-8 px-3 text-sm', + lg: 'h-12 px-6', + icon: 'h-10 w-10 p-0', +}; + +export function Button({ + variant = 'default', + size = 'default', + className, + disabled, + children, + ...props +}: ButtonProps) { + return ( + + ); +} \ No newline at end of file diff --git a/frontend/src/components/ui/DeleteDialog.tsx b/frontend/src/components/ui/DeleteDialog.tsx new file mode 100644 index 0000000000000000000000000000000000000000..394e0dd67451c84ef524bf01479f707832787fe6 --- /dev/null +++ b/frontend/src/components/ui/DeleteDialog.tsx @@ -0,0 +1,48 @@ +import { useTranslation } from 'react-i18next' + +interface DeleteDialogProps { + isOpen: boolean + onClose: () => void + onConfirm: () => void + serverName: string + isGroup?: boolean +} + +const DeleteDialog = ({ isOpen, onClose, onConfirm, serverName, isGroup = false }: DeleteDialogProps) => { + const { t } = 
useTranslation() + + if (!isOpen) return null + + return ( +
+
+
+

+ {isGroup ? t('groups.confirmDelete') : t('server.confirmDelete')} +

+

+ {isGroup + ? t('groups.deleteWarning', { name: serverName }) + : t('server.deleteWarning', { name: serverName })} +

+
+ + +
+
+
+
+ ) +} + +export default DeleteDialog \ No newline at end of file diff --git a/frontend/src/components/ui/DynamicForm.tsx b/frontend/src/components/ui/DynamicForm.tsx new file mode 100644 index 0000000000000000000000000000000000000000..d1617650e2389054bbb4a7eb794dc3c6488ef3a4 --- /dev/null +++ b/frontend/src/components/ui/DynamicForm.tsx @@ -0,0 +1,363 @@ +import React, { useState, useEffect, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { ToolInputSchema } from '@/types'; + +interface JsonSchema { + type: string; + properties?: Record; + required?: string[]; + items?: JsonSchema; + enum?: any[]; + description?: string; + default?: any; +} + +interface DynamicFormProps { + schema: ToolInputSchema; + onSubmit: (values: Record) => void; + onCancel: () => void; + loading?: boolean; + storageKey?: string; // Optional key for localStorage persistence +} + +const DynamicForm: React.FC = ({ schema, onSubmit, onCancel, loading = false, storageKey }) => { + const { t } = useTranslation(); + const [formValues, setFormValues] = useState>({}); + const [errors, setErrors] = useState>({}); + + // Convert ToolInputSchema to JsonSchema - memoized to prevent infinite re-renders + const jsonSchema = useMemo(() => { + const convertToJsonSchema = (schema: ToolInputSchema): JsonSchema => { + const convertProperty = (prop: unknown): JsonSchema => { + if (typeof prop === 'object' && prop !== null) { + const obj = prop as any; + return { + type: obj.type || 'string', + description: obj.description, + enum: obj.enum, + default: obj.default, + properties: obj.properties ? Object.fromEntries( + Object.entries(obj.properties).map(([key, value]) => [key, convertProperty(value)]) + ) : undefined, + required: obj.required, + items: obj.items ? convertProperty(obj.items) : undefined, + }; + } + return { type: 'string' }; + }; + + return { + type: schema.type, + properties: schema.properties ? 
Object.fromEntries( + Object.entries(schema.properties).map(([key, value]) => [key, convertProperty(value)]) + ) : undefined, + required: schema.required, + }; + }; + + return convertToJsonSchema(schema); + }, [schema]); + + // Initialize form values with defaults or from localStorage + useEffect(() => { + const initializeValues = (schema: JsonSchema, path: string = ''): Record => { + const values: Record = {}; + + if (schema.type === 'object' && schema.properties) { + Object.entries(schema.properties).forEach(([key, propSchema]) => { + const fullPath = path ? `${path}.${key}` : key; + if (propSchema.default !== undefined) { + values[key] = propSchema.default; + } else if (propSchema.type === 'string') { + values[key] = ''; + } else if (propSchema.type === 'number' || propSchema.type === 'integer') { + values[key] = 0; + } else if (propSchema.type === 'boolean') { + values[key] = false; + } else if (propSchema.type === 'array') { + values[key] = []; + } else if (propSchema.type === 'object') { + values[key] = initializeValues(propSchema, fullPath); + } + }); + } + + return values; + }; + + let initialValues = initializeValues(jsonSchema); + + // Try to load saved form data from localStorage + if (storageKey) { + try { + const savedData = localStorage.getItem(storageKey); + if (savedData) { + const parsedData = JSON.parse(savedData); + // Merge saved data with initial values, preserving structure + initialValues = { ...initialValues, ...parsedData }; + } + } catch (error) { + console.warn('Failed to load saved form data:', error); + } + } + + setFormValues(initialValues); + }, [jsonSchema, storageKey]); + + const handleInputChange = (path: string, value: any) => { + setFormValues(prev => { + const newValues = { ...prev }; + const keys = path.split('.'); + let current = newValues; + + for (let i = 0; i < keys.length - 1; i++) { + if (!current[keys[i]]) { + current[keys[i]] = {}; + } + current = current[keys[i]]; + } + + current[keys[keys.length - 1]] = value; + + // 
Save to localStorage if storageKey is provided + if (storageKey) { + try { + localStorage.setItem(storageKey, JSON.stringify(newValues)); + } catch (error) { + console.warn('Failed to save form data to localStorage:', error); + } + } + + return newValues; + }); + + // Clear error for this field + if (errors[path]) { + setErrors(prev => { + const newErrors = { ...prev }; + delete newErrors[path]; + return newErrors; + }); + } + }; + + const validateForm = (): boolean => { + const newErrors: Record = {}; + + const validateObject = (schema: JsonSchema, values: any, path: string = '') => { + if (schema.type === 'object' && schema.properties) { + Object.entries(schema.properties).forEach(([key, propSchema]) => { + const fullPath = path ? `${path}.${key}` : key; + const value = values?.[key]; + + // Check required fields + if (schema.required?.includes(key) && (value === undefined || value === null || value === '')) { + newErrors[fullPath] = `${key} is required`; + return; + } + + // Validate type + if (value !== undefined && value !== null && value !== '') { + if (propSchema.type === 'string' && typeof value !== 'string') { + newErrors[fullPath] = `${key} must be a string`; + } else if (propSchema.type === 'number' && typeof value !== 'number') { + newErrors[fullPath] = `${key} must be a number`; + } else if (propSchema.type === 'integer' && (!Number.isInteger(value) || typeof value !== 'number')) { + newErrors[fullPath] = `${key} must be an integer`; + } else if (propSchema.type === 'boolean' && typeof value !== 'boolean') { + newErrors[fullPath] = `${key} must be a boolean`; + } else if (propSchema.type === 'object' && typeof value === 'object') { + validateObject(propSchema, value, fullPath); + } + } + }); + } + }; + + validateObject(jsonSchema, formValues); + setErrors(newErrors); + return Object.keys(newErrors).length === 0; + }; + + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault(); + if (validateForm()) { + onSubmit(formValues); + } + }; + + 
const renderField = (key: string, propSchema: JsonSchema, path: string = ''): React.ReactNode => { + const fullPath = path ? `${path}.${key}` : key; + const value = formValues[key]; + const error = errors[fullPath]; + + if (propSchema.type === 'string') { + if (propSchema.enum) { + return ( +
+ + {propSchema.description && ( +

{propSchema.description}

+ )} + + {error &&

{error}

} +
+ ); + } else { + return ( +
+ + {propSchema.description && ( +

{propSchema.description}

+ )} + handleInputChange(fullPath, e.target.value)} + className={`w-full border rounded-md px-3 py-2 ${error ? 'border-red-500' : 'border-gray-300'} focus:outline-none focus:ring-2 focus:ring-blue-500`} + /> + {error &&

{error}

} +
+ ); + } + } + + if (propSchema.type === 'number' || propSchema.type === 'integer') { + return ( +
+ + {propSchema.description && ( +

{propSchema.description}

+ )} + { + const val = e.target.value === '' ? '' : propSchema.type === 'integer' ? parseInt(e.target.value) : parseFloat(e.target.value); + handleInputChange(fullPath, val); + }} + className={`w-full border rounded-md px-3 py-2 ${error ? 'border-red-500' : 'border-gray-300'} focus:outline-none focus:ring-2 focus:ring-blue-500`} + /> + {error &&

{error}

} +
+ ); + } + + if (propSchema.type === 'boolean') { + return ( +
+
+ handleInputChange(fullPath, e.target.checked)} + className="h-4 w-4 text-blue-600 focus:ring-blue-500 border-gray-300 rounded" + /> + +
+ {propSchema.description && ( +

{propSchema.description}

+ )} + {error &&

{error}

} +
+ ); + } + + // For other types, show as text input with description + return ( +
+ + {propSchema.description && ( +

{propSchema.description}

+ )} + handleInputChange(fullPath, e.target.value)} + placeholder={t('tool.enterValue', { type: propSchema.type })} + className={`w-full border rounded-md px-3 py-2 ${error ? 'border-red-500' : 'border-gray-300'} focus:outline-none focus:ring-2 focus:ring-blue-500`} + /> + {error &&

{error}

} +
+ ); + }; + + if (!jsonSchema.properties) { + return ( +
+

{t('tool.noParameters')}

+
+ + +
+
+ ); + } + + return ( +
+ {Object.entries(jsonSchema.properties || {}).map(([key, propSchema]) => + renderField(key, propSchema) + )} + +
+ + +
+
+ ); +}; + +export default DynamicForm; diff --git a/frontend/src/components/ui/Pagination.tsx b/frontend/src/components/ui/Pagination.tsx new file mode 100644 index 0000000000000000000000000000000000000000..b3c5010a278d6f48065a3c5d4c02d49b963f7315 --- /dev/null +++ b/frontend/src/components/ui/Pagination.tsx @@ -0,0 +1,128 @@ +import React from 'react'; + +interface PaginationProps { + currentPage: number; + totalPages: number; + onPageChange: (page: number) => void; +} + +const Pagination: React.FC = ({ + currentPage, + totalPages, + onPageChange +}) => { + // Generate page buttons + const getPageButtons = () => { + const buttons = []; + const maxDisplayedPages = 5; // Maximum number of page buttons to display + + // Always display first page + buttons.push( + + ); + + // Start range + let startPage = Math.max(2, currentPage - Math.floor(maxDisplayedPages / 2)); + + // If we're showing ellipsis after first page + if (startPage > 2) { + buttons.push( + + ... + + ); + } + + // Middle pages + for (let i = startPage; i <= Math.min(totalPages - 1, startPage + maxDisplayedPages - 3); i++) { + buttons.push( + + ); + } + + // If we're showing ellipsis before last page + if (startPage + maxDisplayedPages - 3 < totalPages - 1) { + buttons.push( + + ... + + ); + } + + // Always display last page if there's more than one page + if (totalPages > 1) { + buttons.push( + + ); + } + + return buttons; + }; + + // If there's only one page, don't render pagination + if (totalPages <= 1) { + return null; + } + + return ( +
+ + +
{getPageButtons()}
+ + +
+ ); +}; + +export default Pagination; \ No newline at end of file diff --git a/frontend/src/components/ui/SponsorDialog.tsx b/frontend/src/components/ui/SponsorDialog.tsx new file mode 100644 index 0000000000000000000000000000000000000000..c36af4b51cf6432ae30c5f2200f6b563222ead98 --- /dev/null +++ b/frontend/src/components/ui/SponsorDialog.tsx @@ -0,0 +1,60 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { X } from 'lucide-react'; + +interface SponsorDialogProps { + open: boolean; + onOpenChange: (open: boolean) => void; +} + +const SponsorDialog: React.FC = ({ open, onOpenChange }) => { + const { i18n, t } = useTranslation(); + + if (!open) return null; + + return ( +
+
+
+ {/* Close button (X) in the top-right corner */} + + +

+ {t('sponsor.title')} +

+ +
+ {i18n.language === 'zh' ? ( + {t('sponsor.rewardAlt')} + ) : ( +
+

{t('sponsor.supportMessage')}

+ + {t('sponsor.supportButton')} + +
+ )} +
+
+
+
+ ); +}; + +export default SponsorDialog; diff --git a/frontend/src/components/ui/ThemeSwitch.tsx b/frontend/src/components/ui/ThemeSwitch.tsx new file mode 100644 index 0000000000000000000000000000000000000000..47c2baab64d09c125093610f752d7c42581d84e3 --- /dev/null +++ b/frontend/src/components/ui/ThemeSwitch.tsx @@ -0,0 +1,51 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { useTheme } from '@/contexts/ThemeContext'; +import { Sun, Moon, Monitor } from 'lucide-react'; + +const ThemeSwitch: React.FC = () => { + const { t } = useTranslation(); + const { theme, setTheme } = useTheme(); + + return ( +
+
+ + + {/* */} +
+
+ ); +}; + +export default ThemeSwitch; \ No newline at end of file diff --git a/frontend/src/components/ui/Toast.tsx b/frontend/src/components/ui/Toast.tsx new file mode 100644 index 0000000000000000000000000000000000000000..31ad521bcb928099d128e10e3c8c479ee8d9fbe2 --- /dev/null +++ b/frontend/src/components/ui/Toast.tsx @@ -0,0 +1,96 @@ +import React, { useEffect, useState } from 'react'; +import { Check, X } from 'lucide-react'; +import { cn } from '@/utils/cn'; + +export type ToastType = 'success' | 'error' | 'info' | 'warning'; + +export interface ToastProps { + message: string; + type?: ToastType; + duration?: number; + onClose: () => void; + visible: boolean; +} + +const Toast: React.FC = ({ + message, + type = 'info', + duration = 3000, + onClose, + visible +}) => { + useEffect(() => { + if (visible) { + const timer = setTimeout(() => { + onClose(); + }, duration); + + return () => clearTimeout(timer); + } + }, [visible, duration, onClose]); + + const icons = { + success: , + error: , + info: ( + + + + ), + warning: ( + + + + ) + }; + + const bgColors = { + success: 'bg-green-50 border-green-200', + error: 'bg-red-50 border-red-200', + info: 'bg-blue-50 border-blue-200', + warning: 'bg-yellow-50 border-yellow-200' + }; + + const textColors = { + success: 'text-green-800', + error: 'text-red-800', + info: 'text-blue-800', + warning: 'text-yellow-800' + }; + + return ( +
+
+
+ {icons[type]} +
+
+

+ {message} +

+
+
+
+ +
+
+
+
+ ); +}; + +export default Toast; \ No newline at end of file diff --git a/frontend/src/components/ui/ToggleGroup.tsx b/frontend/src/components/ui/ToggleGroup.tsx new file mode 100644 index 0000000000000000000000000000000000000000..d4e173f97132bf76e5f36f6fbcec03a62dcfd44f --- /dev/null +++ b/frontend/src/components/ui/ToggleGroup.tsx @@ -0,0 +1,134 @@ +import React, { ReactNode } from 'react'; +import { cn } from '@/utils/cn'; + +interface ToggleGroupItemProps { + value: string; + isSelected: boolean; + onClick: () => void; + children: ReactNode; +} + +export const ToggleGroupItem: React.FC = ({ + value, + isSelected, + onClick, + children +}) => { + return ( + + ); +}; + +interface ToggleGroupProps { + label: string; + helpText?: string; + noOptionsText?: string; + values: string[]; + options: { value: string; label: string }[]; + onChange: (values: string[]) => void; + className?: string; +} + +export const ToggleGroup: React.FC = ({ + label, + helpText, + noOptionsText = "No options available", + values, + options, + onChange, + className +}) => { + const handleToggle = (value: string) => { + const isSelected = values.includes(value); + if (isSelected) { + onChange(values.filter(v => v !== value)); + } else { + onChange([...values, value]); + } + }; + + return ( +
+ +
+ {options.length === 0 ? ( +

{noOptionsText}

+ ) : ( +
+ {options.map(option => ( + handleToggle(option.value)} + > + {option.label} + + ))} +
+ )} +
+ {helpText && ( +

+ {helpText} +

+ )} +
+ ); +}; + +interface SwitchProps { + checked: boolean; + onCheckedChange: (checked: boolean) => void; + disabled?: boolean; +} + +export const Switch: React.FC = ({ + checked, + onCheckedChange, + disabled = false +}) => { + return ( + + ); +}; \ No newline at end of file diff --git a/frontend/src/components/ui/ToolCard.tsx b/frontend/src/components/ui/ToolCard.tsx new file mode 100644 index 0000000000000000000000000000000000000000..9342c4f69857cc9c32761beaf31ee26adcba0579 --- /dev/null +++ b/frontend/src/components/ui/ToolCard.tsx @@ -0,0 +1,139 @@ +import { useState, useCallback } from 'react' +import { useTranslation } from 'react-i18next' +import { Tool } from '@/types' +import { ChevronDown, ChevronRight, Play, Loader } from '@/components/icons/LucideIcons' +import { callTool, ToolCallResult } from '@/services/toolService' +import DynamicForm from './DynamicForm' +import ToolResult from './ToolResult' + +interface ToolCardProps { + server: string + tool: Tool +} + +const ToolCard = ({ tool, server }: ToolCardProps) => { + const { t } = useTranslation() + const [isExpanded, setIsExpanded] = useState(false) + const [showRunForm, setShowRunForm] = useState(false) + const [isRunning, setIsRunning] = useState(false) + const [result, setResult] = useState(null) + + // Generate a unique key for localStorage based on tool name and server + const getStorageKey = useCallback(() => { + return `mcphub_tool_form_${server ? 
`${server}_` : ''}${tool.name}` + }, [tool.name, server]) + + // Clear form data from localStorage + const clearStoredFormData = useCallback(() => { + localStorage.removeItem(getStorageKey()) + }, [getStorageKey]) + + const handleRunTool = async (arguments_: Record) => { + setIsRunning(true) + try { + const result = await callTool({ + toolName: tool.name, + arguments: arguments_, + }, server) + + setResult(result) + // Clear form data on successful submission + // clearStoredFormData() + } catch (error) { + setResult({ + success: false, + error: error instanceof Error ? error.message : 'Unknown error occurred', + }) + } finally { + setIsRunning(false) + } + } + + const handleCancelRun = () => { + setShowRunForm(false) + // Clear form data when cancelled + clearStoredFormData() + setResult(null) + } + + const handleCloseResult = () => { + setResult(null) + } + + return ( +
+
setIsExpanded(!isExpanded)} + > +
+

+ {tool.name} + + {tool.description || t('tool.noDescription')} + +

+
+
+ + +
+
+ + {isExpanded && ( +
+ {/* Schema Display */} + {!showRunForm && ( +
+

{t('tool.inputSchema')}

+
+                {JSON.stringify(tool.inputSchema, null, 2)}
+              
+
+ )} + + {/* Run Form */} + {showRunForm && ( +
+

{t('tool.runToolWithName', { name: tool.name })}

+ + {/* Tool Result */} + {result && ( +
+ +
+ )} +
+ )} + + +
+ )} +
+ ) +} + +export default ToolCard \ No newline at end of file diff --git a/frontend/src/components/ui/ToolResult.tsx b/frontend/src/components/ui/ToolResult.tsx new file mode 100644 index 0000000000000000000000000000000000000000..97af435a1c528e3a7191e5eabb9643a264bb8be4 --- /dev/null +++ b/frontend/src/components/ui/ToolResult.tsx @@ -0,0 +1,159 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { CheckCircle, XCircle, AlertCircle } from '@/components/icons/LucideIcons'; + +interface ToolResultProps { + result: { + success: boolean; + content?: Array<{ + type: string; + text?: string; + [key: string]: any; + }>; + error?: string; + message?: string; + }; + onClose: () => void; +} + +const ToolResult: React.FC = ({ result, onClose }) => { + const { t } = useTranslation(); + // Extract content from data.content + const content = result.content; + + const renderContent = (content: any): React.ReactNode => { + if (Array.isArray(content)) { + return content.map((item, index) => ( +
+ {renderContentItem(item)} +
+ )); + } + + return renderContentItem(content); + }; + + const renderContentItem = (item: any): React.ReactNode => { + if (typeof item === 'string') { + return ( +
+
{item}
+
+ ); + } + + if (typeof item === 'object' && item !== null) { + if (item.type === 'text' && item.text) { + return ( +
+
{item.text}
+
+ ); + } + + if (item.type === 'image' && item.data) { + return ( +
+ {t('tool.toolResult')} +
+ ); + } + + // For other structured content, try to parse as JSON + try { + const jsonString = typeof item === 'string' ? item : JSON.stringify(item, null, 2); + const parsed = typeof item === 'string' ? JSON.parse(item) : item; + + return ( +
+
{t('tool.jsonResponse')}
+
{JSON.stringify(parsed, null, 2)}
+
+ ); + } catch { + // If not valid JSON, show as string + return ( +
+
{String(item)}
+
+ ); + } + } + + return ( +
+
{String(item)}
+
+ ); + }; + + return ( +
+
+
+
+ {result.success ? ( + + ) : ( + + )} +
+

+ {t('tool.execution')} {result.success ? t('tool.successful') : t('tool.failed')} +

+ +
+
+ +
+
+ +
+ {result.success ? ( +
+ {result.content && result.content.length > 0 ? ( +
+
{t('tool.result')}
+ {renderContent(result.content)} +
+ ) : ( +
+ {t('tool.noContent')} +
+ )} +
+ ) : ( +
+
+ + {t('tool.error')} +
+ {content && content.length > 0 ? ( +
+
{t('tool.errorDetails')}
+ {renderContent(content)} +
+ ) : ( +
+
+                  {result.error || result.message || t('tool.unknownError')}
+                
+
+ )} +
+ )} +
+
+ ); +}; + +export default ToolResult; diff --git a/frontend/src/components/ui/UserProfileMenu.tsx b/frontend/src/components/ui/UserProfileMenu.tsx new file mode 100644 index 0000000000000000000000000000000000000000..e06057945415663e5b80bb4c719a2033f16952ff --- /dev/null +++ b/frontend/src/components/ui/UserProfileMenu.tsx @@ -0,0 +1,131 @@ +import React, { useState, useRef, useEffect } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useNavigate } from 'react-router-dom'; +import { useAuth } from '@/contexts/AuthContext'; +import { User, Settings, LogOut, Info } from 'lucide-react'; +import AboutDialog from './AboutDialog'; +import { checkLatestVersion, compareVersions } from '@/utils/version'; + +interface UserProfileMenuProps { + collapsed: boolean; + version: string; +} + +const UserProfileMenu: React.FC = ({ collapsed, version }) => { + const { t } = useTranslation(); + const navigate = useNavigate(); + const { auth, logout } = useAuth(); + const [isOpen, setIsOpen] = useState(false); + const [showNewVersionInfo, setShowNewVersionInfo] = useState(false); + const [showAboutDialog, setShowAboutDialog] = useState(false); + const menuRef = useRef(null); + + // Check for new version on login and component mount + useEffect(() => { + const checkForNewVersion = async () => { + try { + const latestVersion = await checkLatestVersion(); + if (latestVersion) { + setShowNewVersionInfo(compareVersions(version, latestVersion) > 0); + } + } catch (error) { + console.error('Error checking for new version:', error); + } + }; + + checkForNewVersion(); + }, [version]); + + // Close the menu when clicking outside + useEffect(() => { + const handleClickOutside = (event: MouseEvent) => { + if (menuRef.current && !menuRef.current.contains(event.target as Node)) { + setIsOpen(false); + } + }; + + document.addEventListener('mousedown', handleClickOutside); + return () => { + document.removeEventListener('mousedown', handleClickOutside); + }; + }, []); + + const 
handleSettingsClick = () => { + navigate('/settings'); + setIsOpen(false); + }; + + const handleLogoutClick = () => { + logout(); + navigate('/login'); + }; + + const handleAboutClick = () => { + setShowAboutDialog(true); + setIsOpen(false); + }; + + return ( +
+ + + {isOpen && ( +
+ + + +
+ )} + + {/* About dialog */} + setShowAboutDialog(false)} + version={version} + /> +
+ ); +}; + +export default UserProfileMenu; diff --git a/frontend/src/components/ui/WeChatDialog.tsx b/frontend/src/components/ui/WeChatDialog.tsx new file mode 100644 index 0000000000000000000000000000000000000000..3edcd2fe77cd442b65a24aa815caea29e698cfe5 --- /dev/null +++ b/frontend/src/components/ui/WeChatDialog.tsx @@ -0,0 +1,49 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { X } from 'lucide-react'; + +interface WeChatDialogProps { + open: boolean; + onOpenChange: (open: boolean) => void; +} + +const WeChatDialog: React.FC = ({ open, onOpenChange }) => { + const { t } = useTranslation(); + + if (!open) return null; + + return ( +
+
+
+ {/* Close button (X) in the top-right corner */} + + +

+ {t('wechat.title')} +

+ +
+ {t('wechat.qrCodeAlt')} +

+ {t('wechat.scanMessage')} +

+
+
+
+
+ ); +}; + +export default WeChatDialog; diff --git a/frontend/src/contexts/AuthContext.tsx b/frontend/src/contexts/AuthContext.tsx new file mode 100644 index 0000000000000000000000000000000000000000..b18dab9bf0fa6acb6b8dc77e871bd5a2d2cafaaa --- /dev/null +++ b/frontend/src/contexts/AuthContext.tsx @@ -0,0 +1,159 @@ +import React, { createContext, useContext, useState, useEffect, ReactNode } from 'react'; +import { AuthState, IUser } from '../types'; +import * as authService from '../services/authService'; + +// Initial auth state +const initialState: AuthState = { + token: null, + isAuthenticated: false, + loading: true, + user: null, + error: null, +}; + +// Create auth context +const AuthContext = createContext<{ + auth: AuthState; + login: (username: string, password: string) => Promise; + register: (username: string, password: string, isAdmin?: boolean) => Promise; + logout: () => void; +}>({ + auth: initialState, + login: async () => false, + register: async () => false, + logout: () => {}, +}); + +// Auth provider component +export const AuthProvider: React.FC<{ children: ReactNode }> = ({ children }) => { + const [auth, setAuth] = useState(initialState); + + // Load user if token exists + useEffect(() => { + const loadUser = async () => { + const token = authService.getToken(); + + if (!token) { + setAuth({ + ...initialState, + loading: false, + }); + return; + } + + try { + const response = await authService.getCurrentUser(); + + if (response.success && response.user) { + setAuth({ + token, + isAuthenticated: true, + loading: false, + user: response.user, + error: null, + }); + } else { + authService.removeToken(); + setAuth({ + ...initialState, + loading: false, + }); + } + } catch (error) { + authService.removeToken(); + setAuth({ + ...initialState, + loading: false, + }); + } + }; + + loadUser(); + }, []); + + // Login function + const login = async (username: string, password: string): Promise => { + try { + const response = await authService.login({ 
username, password }); + + if (response.success && response.token && response.user) { + setAuth({ + token: response.token, + isAuthenticated: true, + loading: false, + user: response.user, + error: null, + }); + return true; + } else { + setAuth({ + ...initialState, + loading: false, + error: response.message || 'Authentication failed', + }); + return false; + } + } catch (error) { + setAuth({ + ...initialState, + loading: false, + error: 'Authentication failed', + }); + return false; + } + }; + + // Register function + const register = async ( + username: string, + password: string, + isAdmin = false + ): Promise => { + try { + const response = await authService.register({ username, password, isAdmin }); + + if (response.success && response.token && response.user) { + setAuth({ + token: response.token, + isAuthenticated: true, + loading: false, + user: response.user, + error: null, + }); + return true; + } else { + setAuth({ + ...initialState, + loading: false, + error: response.message || 'Registration failed', + }); + return false; + } + } catch (error) { + setAuth({ + ...initialState, + loading: false, + error: 'Registration failed', + }); + return false; + } + }; + + // Logout function + const logout = (): void => { + authService.logout(); + setAuth({ + ...initialState, + loading: false, + }); + }; + + return ( + + {children} + + ); +}; + +// Custom hook to use auth context +export const useAuth = () => useContext(AuthContext); \ No newline at end of file diff --git a/frontend/src/contexts/ThemeContext.tsx b/frontend/src/contexts/ThemeContext.tsx new file mode 100644 index 0000000000000000000000000000000000000000..355bfbc075dc073556b310c4732dc0360d0cab3d --- /dev/null +++ b/frontend/src/contexts/ThemeContext.tsx @@ -0,0 +1,76 @@ +import React, { createContext, useContext, useState, useEffect, ReactNode } from 'react'; + +type Theme = 'light' | 'dark' | 'system'; + +interface ThemeContextType { + theme: Theme; + setTheme: (theme: Theme) => void; + 
resolvedTheme: 'light' | 'dark'; // The actual theme used after resolving system preference +} + +const ThemeContext = createContext(undefined); + +export const useTheme = () => { + const context = useContext(ThemeContext); + if (!context) { + throw new Error('useTheme must be used within a ThemeProvider'); + } + return context; +}; + +export const ThemeProvider: React.FC<{ children: ReactNode }> = ({ children }) => { + // Get theme from localStorage or default to 'system' + const [theme, setTheme] = useState(() => { + const savedTheme = localStorage.getItem('theme') as Theme; + return savedTheme || 'system'; + }); + + const [resolvedTheme, setResolvedTheme] = useState<'light' | 'dark'>('light'); + + // Function to set theme and save to localStorage + const handleSetTheme = (newTheme: Theme) => { + setTheme(newTheme); + localStorage.setItem('theme', newTheme); + }; + + // Effect to handle system theme changes and apply theme to document + useEffect(() => { + const updateTheme = () => { + const root = window.document.documentElement; + const systemTheme = window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light'; + + // Determine which theme to use + const themeToApply = theme === 'system' ? 
systemTheme : theme; + setResolvedTheme(themeToApply as 'light' | 'dark'); + + // Apply or remove dark class based on theme + if (themeToApply === 'dark') { + console.log('Applying dark mode to HTML root element'); // 添加日志 + root.classList.add('dark'); + document.body.style.backgroundColor = '#111827'; // Force a dark background to ensure visible effect + } else { + console.log('Removing dark mode from HTML root element'); // 添加日志 + root.classList.remove('dark'); + document.body.style.backgroundColor = ''; // Reset background color + } + }; + + // Set up listeners for system theme changes + const mediaQuery = window.matchMedia('(prefers-color-scheme: dark)'); + mediaQuery.addEventListener('change', updateTheme); + + // Initial theme setup + updateTheme(); + + // Cleanup + return () => { + mediaQuery.removeEventListener('change', updateTheme); + }; + }, [theme]); + + return ( + + {children} + + ); +}; \ No newline at end of file diff --git a/frontend/src/contexts/ToastContext.tsx b/frontend/src/contexts/ToastContext.tsx new file mode 100644 index 0000000000000000000000000000000000000000..7760fdf00bab5c9a36fa60574185c89c51703894 --- /dev/null +++ b/frontend/src/contexts/ToastContext.tsx @@ -0,0 +1,60 @@ +import React, { createContext, useContext, useState, ReactNode, useCallback } from 'react'; +import Toast, { ToastType } from '@/components/ui/Toast'; + +interface ToastContextProps { + showToast: (message: string, type?: ToastType, duration?: number) => void; +} + +const ToastContext = createContext(undefined); + +export const useToast = () => { + const context = useContext(ToastContext); + if (!context) { + throw new Error('useToast must be used within a ToastProvider'); + } + return context; +}; + +interface ToastProviderProps { + children: ReactNode; +} + +export const ToastProvider: React.FC = ({ children }) => { + const [toast, setToast] = useState<{ + message: string; + type: ToastType; + visible: boolean; + duration: number; + }>({ + message: '', + type: 
'info', + visible: false, + duration: 3000, + }); + + const showToast = useCallback((message: string, type: ToastType = 'info', duration: number = 3000) => { + setToast({ + message, + type, + visible: true, + duration, + }); + }, []); + + const hideToast = useCallback(() => { + setToast((prev) => ({ ...prev, visible: false })); + }, []); + + return ( + + {children} + + + ); +}; \ No newline at end of file diff --git a/frontend/src/hooks/useGroupData.ts b/frontend/src/hooks/useGroupData.ts new file mode 100644 index 0000000000000000000000000000000000000000..852aa01c1d5c4b839a61f4b96f68c18a572f74f0 --- /dev/null +++ b/frontend/src/hooks/useGroupData.ts @@ -0,0 +1,236 @@ +import { useState, useEffect, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Group, ApiResponse } from '@/types'; +import { getApiUrl } from '../utils/runtime'; + +export const useGroupData = () => { + const { t } = useTranslation(); + const [groups, setGroups] = useState([]); + const [loading, setLoading] = useState(true); + const [error, setError] = useState(null); + const [refreshKey, setRefreshKey] = useState(0); + + const fetchGroups = useCallback(async () => { + try { + setLoading(true); + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/groups'), { + headers: { + 'x-auth-token': token || '', + }, + }); + + if (!response.ok) { + throw new Error(`Status: ${response.status}`); + } + + const data: ApiResponse = await response.json(); + + if (data && data.success && Array.isArray(data.data)) { + setGroups(data.data); + } else { + console.error('Invalid group data format:', data); + setGroups([]); + } + + setError(null); + } catch (err) { + console.error('Error fetching groups:', err); + setError(err instanceof Error ? 
err.message : 'Failed to fetch groups'); + setGroups([]); + } finally { + setLoading(false); + } + }, []); + + // Trigger a refresh of the groups data + const triggerRefresh = useCallback(() => { + setRefreshKey((prev) => prev + 1); + }, []); + + // Create a new group with server associations + const createGroup = async (name: string, description?: string, servers: string[] = []) => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/groups'), { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '', + }, + body: JSON.stringify({ name, description, servers }), + }); + + const result: ApiResponse = await response.json(); + + if (!response.ok) { + setError(result.message || t('groups.createError')); + return null; + } + + triggerRefresh(); + return result.data || null; + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to create group'); + return null; + } + }; + + // Update an existing group with server associations + const updateGroup = async ( + id: string, + data: { name?: string; description?: string; servers?: string[] }, + ) => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl(`/groups/${id}`), { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '', + }, + body: JSON.stringify(data), + }); + + const result: ApiResponse = await response.json(); + + if (!response.ok) { + setError(result.message || t('groups.updateError')); + return null; + } + + triggerRefresh(); + return result.data || null; + } catch (err) { + setError(err instanceof Error ? 
err.message : 'Failed to update group'); + return null; + } + }; + + // Update servers in a group (for batch updates) + const updateGroupServers = async (groupId: string, servers: string[]) => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl(`/groups/${groupId}/servers/batch`), { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '', + }, + body: JSON.stringify({ servers }), + }); + + const result: ApiResponse = await response.json(); + + if (!response.ok) { + setError(result.message || t('groups.updateError')); + return null; + } + + triggerRefresh(); + return result.data || null; + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to update group servers'); + return null; + } + }; + + // Delete a group + const deleteGroup = async (id: string) => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl(`/groups/${id}`), { + method: 'DELETE', + headers: { + 'x-auth-token': token || '', + }, + }); + + const result = await response.json(); + + if (!response.ok) { + setError(result.message || t('groups.deleteError')); + return false; + } + + triggerRefresh(); + return true; + } catch (err) { + setError(err instanceof Error ? 
err.message : 'Failed to delete group'); + return false; + } + }; + + // Add server to a group + const addServerToGroup = async (groupId: string, serverName: string) => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl(`/groups/${groupId}/servers`), { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '', + }, + body: JSON.stringify({ serverName }), + }); + + const result: ApiResponse = await response.json(); + + if (!response.ok) { + setError(result.message || t('groups.serverAddError')); + return null; + } + + triggerRefresh(); + return result.data || null; + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to add server to group'); + return null; + } + }; + + // Remove server from group + const removeServerFromGroup = async (groupId: string, serverName: string) => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl(`/groups/${groupId}/servers/${serverName}`), { + method: 'DELETE', + headers: { + 'x-auth-token': token || '', + }, + }); + + const result: ApiResponse = await response.json(); + + if (!response.ok) { + setError(result.message || t('groups.serverRemoveError')); + return null; + } + + triggerRefresh(); + return result.data || null; + } catch (err) { + setError(err instanceof Error ? 
err.message : 'Failed to remove server from group'); + return null; + } + }; + + // Fetch groups when the component mounts or refreshKey changes + useEffect(() => { + fetchGroups(); + }, [fetchGroups, refreshKey]); + + return { + groups, + loading, + error, + setError, + triggerRefresh, + createGroup, + updateGroup, + updateGroupServers, + deleteGroup, + addServerToGroup, + removeServerFromGroup, + }; +}; diff --git a/frontend/src/hooks/useMarketData.ts b/frontend/src/hooks/useMarketData.ts new file mode 100644 index 0000000000000000000000000000000000000000..5f61d85f48a09672b36e8bf60fa3b4864cfbb4b1 --- /dev/null +++ b/frontend/src/hooks/useMarketData.ts @@ -0,0 +1,448 @@ +import { useState, useEffect, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { MarketServer, ApiResponse } from '@/types'; +import { getApiUrl } from '../utils/runtime'; + +export const useMarketData = () => { + const { t } = useTranslation(); + const [servers, setServers] = useState([]); + const [allServers, setAllServers] = useState([]); + const [categories, setCategories] = useState([]); + const [tags, setTags] = useState([]); + const [selectedCategory, setSelectedCategory] = useState(''); + const [selectedTag, setSelectedTag] = useState(''); + const [searchQuery, setSearchQuery] = useState(''); + const [loading, setLoading] = useState(true); + const [error, setError] = useState(null); + const [currentServer, setCurrentServer] = useState(null); + const [installedServers, setInstalledServers] = useState([]); + + // Pagination states + const [currentPage, setCurrentPage] = useState(1); + const [serversPerPage, setServersPerPage] = useState(9); + const [totalPages, setTotalPages] = useState(1); + + // Fetch all market servers + const fetchMarketServers = useCallback(async () => { + try { + setLoading(true); + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/market/servers'), { + headers: { + 'x-auth-token': token 
|| '', + }, + }); + + if (!response.ok) { + throw new Error(`Status: ${response.status}`); + } + + const data: ApiResponse = await response.json(); + + if (data && data.success && Array.isArray(data.data)) { + setAllServers(data.data); + // Apply pagination to the fetched data + applyPagination(data.data, currentPage); + } else { + console.error('Invalid market servers data format:', data); + setError(t('market.fetchError')); + } + } catch (err) { + console.error('Error fetching market servers:', err); + setError(err instanceof Error ? err.message : String(err)); + } finally { + setLoading(false); + } + }, [t]); + + // Apply pagination to data + const applyPagination = useCallback( + (data: MarketServer[], page: number, itemsPerPage = serversPerPage) => { + const totalItems = data.length; + const calculatedTotalPages = Math.ceil(totalItems / itemsPerPage); + setTotalPages(calculatedTotalPages); + + // Ensure current page is valid + const validPage = Math.max(1, Math.min(page, calculatedTotalPages)); + if (validPage !== page) { + setCurrentPage(validPage); + } + + const startIndex = (validPage - 1) * itemsPerPage; + const paginatedServers = data.slice(startIndex, startIndex + itemsPerPage); + setServers(paginatedServers); + }, + [serversPerPage], + ); + + // Change page + const changePage = useCallback( + (page: number) => { + setCurrentPage(page); + applyPagination(allServers, page, serversPerPage); + }, + [allServers, applyPagination, serversPerPage], + ); + + // Fetch all categories + const fetchCategories = useCallback(async () => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/market/categories'), { + headers: { + 'x-auth-token': token || '', + }, + }); + + if (!response.ok) { + throw new Error(`Status: ${response.status}`); + } + + const data: ApiResponse = await response.json(); + + if (data && data.success && Array.isArray(data.data)) { + setCategories(data.data); + } else { + console.error('Invalid 
categories data format:', data); + } + } catch (err) { + console.error('Error fetching categories:', err); + } + }, []); + + // Fetch all tags + const fetchTags = useCallback(async () => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/market/tags'), { + headers: { + 'x-auth-token': token || '', + }, + }); + + if (!response.ok) { + throw new Error(`Status: ${response.status}`); + } + + const data: ApiResponse = await response.json(); + + if (data && data.success && Array.isArray(data.data)) { + setTags(data.data); + } else { + console.error('Invalid tags data format:', data); + } + } catch (err) { + console.error('Error fetching tags:', err); + } + }, []); + + // Fetch server by name + const fetchServerByName = useCallback( + async (name: string) => { + try { + setLoading(true); + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl(`/market/servers/${name}`), { + headers: { + 'x-auth-token': token || '', + }, + }); + + if (!response.ok) { + throw new Error(`Status: ${response.status}`); + } + + const data: ApiResponse = await response.json(); + + if (data && data.success && data.data) { + setCurrentServer(data.data); + return data.data; + } else { + console.error('Invalid server data format:', data); + setError(t('market.serverNotFound')); + return null; + } + } catch (err) { + console.error(`Error fetching server ${name}:`, err); + setError(err instanceof Error ? 
err.message : String(err)); + return null; + } finally { + setLoading(false); + } + }, + [t], + ); + + // Search servers by query + const searchServers = useCallback( + async (query: string) => { + try { + setLoading(true); + setSearchQuery(query); + + if (!query.trim()) { + // Fetch fresh data from server instead of just applying pagination + fetchMarketServers(); + return; + } + + const token = localStorage.getItem('mcphub_token'); + const response = await fetch( + getApiUrl(`/market/servers/search?query=${encodeURIComponent(query)}`), + { + headers: { + 'x-auth-token': token || '', + }, + }, + ); + + if (!response.ok) { + throw new Error(`Status: ${response.status}`); + } + + const data: ApiResponse = await response.json(); + + if (data && data.success && Array.isArray(data.data)) { + setAllServers(data.data); + setCurrentPage(1); + applyPagination(data.data, 1); + } else { + console.error('Invalid search results format:', data); + setError(t('market.searchError')); + } + } catch (err) { + console.error('Error searching servers:', err); + setError(err instanceof Error ? 
err.message : String(err)); + } finally { + setLoading(false); + } + }, + [t, allServers, applyPagination, fetchMarketServers], + ); + + // Filter servers by category + const filterByCategory = useCallback( + async (category: string) => { + try { + setLoading(true); + setSelectedCategory(category); + setSelectedTag(''); // Reset tag filter when filtering by category + + if (!category) { + fetchMarketServers(); + return; + } + + const token = localStorage.getItem('mcphub_token'); + const response = await fetch( + getApiUrl(`/market/categories/${encodeURIComponent(category)}`), + { + headers: { + 'x-auth-token': token || '', + }, + }, + ); + + if (!response.ok) { + throw new Error(`Status: ${response.status}`); + } + + const data: ApiResponse = await response.json(); + + if (data && data.success && Array.isArray(data.data)) { + setAllServers(data.data); + setCurrentPage(1); + applyPagination(data.data, 1); + } else { + console.error('Invalid category filter results format:', data); + setError(t('market.filterError')); + } + } catch (err) { + console.error('Error filtering servers by category:', err); + setError(err instanceof Error ? 
err.message : String(err)); + } finally { + setLoading(false); + } + }, + [t, fetchMarketServers, applyPagination], + ); + + // Filter servers by tag + const filterByTag = useCallback( + async (tag: string) => { + try { + setLoading(true); + setSelectedTag(tag); + setSelectedCategory(''); // Reset category filter when filtering by tag + + if (!tag) { + fetchMarketServers(); + return; + } + + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl(`/market/tags/${encodeURIComponent(tag)}`), { + headers: { + 'x-auth-token': token || '', + }, + }); + + if (!response.ok) { + throw new Error(`Status: ${response.status}`); + } + + const data: ApiResponse = await response.json(); + + if (data && data.success && Array.isArray(data.data)) { + setAllServers(data.data); + setCurrentPage(1); + applyPagination(data.data, 1); + } else { + console.error('Invalid tag filter results format:', data); + setError(t('market.tagFilterError')); + } + } catch (err) { + console.error('Error filtering servers by tag:', err); + setError(err instanceof Error ? 
err.message : String(err)); + } finally { + setLoading(false); + } + }, + [t, fetchMarketServers, applyPagination], + ); + + // Fetch installed servers + const fetchInstalledServers = useCallback(async () => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/servers'), { + headers: { + 'x-auth-token': token || '', + }, + }); + + if (!response.ok) { + throw new Error(`Status: ${response.status}`); + } + + const data = await response.json(); + + if (data && data.success && Array.isArray(data.data)) { + // Extract server names + const installedServerNames = data.data.map((server: any) => server.name); + setInstalledServers(installedServerNames); + } + } catch (err) { + console.error('Error fetching installed servers:', err); + } + }, []); + + // Check if a server is already installed + const isServerInstalled = useCallback( + (serverName: string) => { + return installedServers.includes(serverName); + }, + [installedServers], + ); + + // Install server to the local environment + const installServer = useCallback( + async (server: MarketServer) => { + try { + const installType = server.installations?.npm + ? 'npm' + : Object.keys(server.installations || {}).length > 0 + ? 
Object.keys(server.installations)[0] + : null; + + if (!installType || !server.installations?.[installType]) { + setError(t('market.noInstallationMethod')); + return false; + } + + const installation = server.installations[installType]; + + // Prepare server configuration + const serverConfig = { + name: server.name, + config: { + command: installation.command, + args: installation.args, + env: installation.env || {}, + }, + }; + + // Call the createServer API + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/servers'), { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '', + }, + body: JSON.stringify(serverConfig), + }); + + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.message || `Status: ${response.status}`); + } + + // Update installed servers list after successful installation + await fetchInstalledServers(); + return true; + } catch (err) { + console.error('Error installing server:', err); + setError(err instanceof Error ? 
err.message : String(err)); + return false; + } + }, + [t, fetchInstalledServers], + ); + + // Change servers per page + const changeServersPerPage = useCallback( + (perPage: number) => { + setServersPerPage(perPage); + setCurrentPage(1); + applyPagination(allServers, 1, perPage); + }, + [allServers, applyPagination], + ); + + // Load initial data + useEffect(() => { + fetchMarketServers(); + fetchCategories(); + fetchTags(); + fetchInstalledServers(); + }, [fetchMarketServers, fetchCategories, fetchTags, fetchInstalledServers]); + + return { + servers, + allServers, + categories, + tags, + selectedCategory, + selectedTag, + searchQuery, + loading, + error, + setError, + currentServer, + fetchMarketServers, + fetchServerByName, + searchServers, + filterByCategory, + filterByTag, + installServer, + // Pagination properties and methods + currentPage, + totalPages, + serversPerPage, + changePage, + changeServersPerPage, + // Installed servers methods + isServerInstalled, + }; +}; diff --git a/frontend/src/hooks/useServerData.ts b/frontend/src/hooks/useServerData.ts new file mode 100644 index 0000000000000000000000000000000000000000..49db4e67a846517785ff5399e0be4b79bd82f97e --- /dev/null +++ b/frontend/src/hooks/useServerData.ts @@ -0,0 +1,307 @@ +import { useState, useEffect, useRef, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Server, ApiResponse } from '@/types'; +import { getApiUrl } from '../utils/runtime'; + +// Configuration options +const CONFIG = { + // Initialization phase configuration + startup: { + maxAttempts: 60, // Maximum number of attempts during initialization + pollingInterval: 3000, // Polling interval during initialization (3 seconds) + }, + // Normal operation phase configuration + normal: { + pollingInterval: 10000, // Polling interval during normal operation (10 seconds) + }, +}; + +export const useServerData = () => { + const { t } = useTranslation(); + const [servers, setServers] = useState([]); + 
const [error, setError] = useState(null); + const [refreshKey, setRefreshKey] = useState(0); + const [isInitialLoading, setIsInitialLoading] = useState(true); + const [fetchAttempts, setFetchAttempts] = useState(0); + + // Timer reference for polling + const intervalRef = useRef(null); + // Track current attempt count to avoid dependency cycles + const attemptsRef = useRef(0); + + // Clear the timer + const clearTimer = () => { + if (intervalRef.current) { + clearInterval(intervalRef.current); + intervalRef.current = null; + } + }; + + // Start normal polling + const startNormalPolling = useCallback(() => { + // Ensure no other timers are running + clearTimer(); + + const fetchServers = async () => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/servers'), { + headers: { + 'x-auth-token': token || '', + }, + }); + const data = await response.json(); + + if (data && data.success && Array.isArray(data.data)) { + setServers(data.data); + } else if (data && Array.isArray(data)) { + setServers(data); + } else { + console.error('Invalid server data format:', data); + setServers([]); + } + + // Reset error state + setError(null); + } catch (err) { + console.error('Error fetching servers during normal polling:', err); + + // Use friendly error message + if (!navigator.onLine) { + setError(t('errors.network')); + } else if ( + err instanceof TypeError && + (err.message.includes('NetworkError') || err.message.includes('Failed to fetch')) + ) { + setError(t('errors.serverConnection')); + } else { + setError(t('errors.serverFetch')); + } + } + }; + + // Execute immediately + fetchServers(); + + // Set up regular polling + intervalRef.current = setInterval(fetchServers, CONFIG.normal.pollingInterval); + }, [t]); + + useEffect(() => { + // Reset attempt count + if (refreshKey > 0) { + attemptsRef.current = 0; + setFetchAttempts(0); + } + + // Initialization phase request function + const fetchInitialData = async () => { 
+ try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/servers'), { + headers: { + 'x-auth-token': token || '', + }, + }); + const data = await response.json(); + + // Handle API response wrapper object, extract data field + if (data && data.success && Array.isArray(data.data)) { + setServers(data.data); + setIsInitialLoading(false); + // Initialization successful, start normal polling + startNormalPolling(); + return true; + } else if (data && Array.isArray(data)) { + // Compatibility handling, if API directly returns array + setServers(data); + setIsInitialLoading(false); + // Initialization successful, start normal polling + startNormalPolling(); + return true; + } else { + // If data format is not as expected, set to empty array + console.error('Invalid server data format:', data); + setServers([]); + setIsInitialLoading(false); + // Initialization successful but data is empty, start normal polling + startNormalPolling(); + return true; + } + } catch (err) { + // Increment attempt count, use ref to avoid triggering effect rerun + attemptsRef.current += 1; + console.error(`Initial loading attempt ${attemptsRef.current} failed:`, err); + + // Update state for display + setFetchAttempts(attemptsRef.current); + + // Set appropriate error message + if (!navigator.onLine) { + setError(t('errors.network')); + } else { + setError(t('errors.initialStartup')); + } + + // If maximum attempt count is exceeded, give up initialization and switch to normal polling + if (attemptsRef.current >= CONFIG.startup.maxAttempts) { + console.log('Maximum startup attempts reached, switching to normal polling'); + setIsInitialLoading(false); + // Clear initialization polling + clearTimer(); + // Switch to normal polling mode + startNormalPolling(); + } + + return false; + } + }; + + // On component mount, set appropriate polling based on current state + if (isInitialLoading) { + // Ensure no other timers are running + clearTimer(); + + // 
Execute initial request immediately + fetchInitialData(); + + // Set polling interval for initialization phase + intervalRef.current = setInterval(fetchInitialData, CONFIG.startup.pollingInterval); + console.log(`Started initial polling with interval: ${CONFIG.startup.pollingInterval}ms`); + } else { + // Initialization completed, start normal polling + startNormalPolling(); + } + + // Cleanup function + return () => { + clearTimer(); + }; + }, [refreshKey, t, isInitialLoading, startNormalPolling]); + + // Manually trigger refresh + const triggerRefresh = () => { + // Clear current timer + clearTimer(); + + // If in initialization phase, reset initialization state + if (isInitialLoading) { + setIsInitialLoading(true); + attemptsRef.current = 0; + setFetchAttempts(0); + } + + // Change in refreshKey will trigger useEffect to run again + setRefreshKey((prevKey) => prevKey + 1); + }; + + // Server related operations + const handleServerAdd = () => { + setRefreshKey((prevKey) => prevKey + 1); + }; + + const handleServerEdit = async (server: Server) => { + try { + // Fetch settings to get the full server config before editing + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/settings'), { + headers: { + 'x-auth-token': token || '', + }, + }); + + const settingsData: ApiResponse<{ mcpServers: Record }> = await response.json(); + + if ( + settingsData && + settingsData.success && + settingsData.data && + settingsData.data.mcpServers && + settingsData.data.mcpServers[server.name] + ) { + const serverConfig = settingsData.data.mcpServers[server.name]; + return { + name: server.name, + status: server.status, + tools: server.tools || [], + config: serverConfig, + }; + } else { + console.error('Failed to get server config from settings:', settingsData); + setError(t('server.invalidConfig', { serverName: server.name })); + return null; + } + } catch (err) { + console.error('Error fetching server settings:', err); + setError(err 
instanceof Error ? err.message : String(err)); + return null; + } + }; + + const handleServerRemove = async (serverName: string) => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl(`/servers/${serverName}`), { + method: 'DELETE', + headers: { + 'x-auth-token': token || '', + }, + }); + const result = await response.json(); + + if (!response.ok) { + setError(result.message || t('server.deleteError', { serverName })); + return false; + } + + setRefreshKey((prevKey) => prevKey + 1); + return true; + } catch (err) { + setError(t('errors.general') + ': ' + (err instanceof Error ? err.message : String(err))); + return false; + } + }; + + const handleServerToggle = async (server: Server, enabled: boolean) => { + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl(`/servers/${server.name}/toggle`), { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '', + }, + body: JSON.stringify({ enabled }), + }); + + const result = await response.json(); + + if (!response.ok) { + console.error('Failed to toggle server:', result); + setError(t('server.toggleError', { serverName: server.name })); + return false; + } + + // Update the UI immediately to reflect the change + setRefreshKey((prevKey) => prevKey + 1); + return true; + } catch (err) { + console.error('Error toggling server:', err); + setError(err instanceof Error ? 
err.message : String(err)); + return false; + } + }; + + return { + servers, + error, + setError, + isLoading: isInitialLoading, + fetchAttempts, + triggerRefresh, + handleServerAdd, + handleServerEdit, + handleServerRemove, + handleServerToggle, + }; +}; diff --git a/frontend/src/hooks/useSettingsData.ts b/frontend/src/hooks/useSettingsData.ts new file mode 100644 index 0000000000000000000000000000000000000000..9ceb7557d4b485c80fe20fdb29cc09f5ecc54c93 --- /dev/null +++ b/frontend/src/hooks/useSettingsData.ts @@ -0,0 +1,403 @@ +import { useState, useCallback, useEffect } from 'react'; +import { useTranslation } from 'react-i18next'; +import { ApiResponse } from '@/types'; +import { useToast } from '@/contexts/ToastContext'; +import { getApiUrl } from '../utils/runtime'; + +// Define types for the settings data +interface RoutingConfig { + enableGlobalRoute: boolean; + enableGroupNameRoute: boolean; + enableBearerAuth: boolean; + bearerAuthKey: string; +} + +interface InstallConfig { + pythonIndexUrl: string; + npmRegistry: string; +} + +interface SmartRoutingConfig { + enabled: boolean; + dbUrl: string; + openaiApiBaseUrl: string; + openaiApiKey: string; + openaiApiEmbeddingModel: string; +} + +interface SystemSettings { + systemConfig?: { + routing?: RoutingConfig; + install?: InstallConfig; + smartRouting?: SmartRoutingConfig; + }; +} + +interface TempRoutingConfig { + bearerAuthKey: string; +} + +export const useSettingsData = () => { + const { t } = useTranslation(); + const { showToast } = useToast(); + + const [routingConfig, setRoutingConfig] = useState({ + enableGlobalRoute: true, + enableGroupNameRoute: true, + enableBearerAuth: false, + bearerAuthKey: '', + }); + + const [tempRoutingConfig, setTempRoutingConfig] = useState({ + bearerAuthKey: '', + }); + + const [installConfig, setInstallConfig] = useState({ + pythonIndexUrl: '', + npmRegistry: '', + }); + + const [smartRoutingConfig, setSmartRoutingConfig] = useState({ + enabled: false, + dbUrl: '', + 
openaiApiBaseUrl: '', + openaiApiKey: '', + openaiApiEmbeddingModel: '', + }); + + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + const [refreshKey, setRefreshKey] = useState(0); + + // Trigger a refresh of the settings data + const triggerRefresh = useCallback(() => { + setRefreshKey((prev) => prev + 1); + }, []); + + // Fetch current settings + const fetchSettings = useCallback(async () => { + setLoading(true); + setError(null); + + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/settings'), { + headers: { + 'x-auth-token': token || '', + }, + }); + + if (!response.ok) { + throw new Error(`HTTP error! Status: ${response.status}`); + } + + const data: ApiResponse = await response.json(); + + if (data.success && data.data?.systemConfig?.routing) { + setRoutingConfig({ + enableGlobalRoute: data.data.systemConfig.routing.enableGlobalRoute ?? true, + enableGroupNameRoute: data.data.systemConfig.routing.enableGroupNameRoute ?? true, + enableBearerAuth: data.data.systemConfig.routing.enableBearerAuth ?? false, + bearerAuthKey: data.data.systemConfig.routing.bearerAuthKey || '', + }); + } + if (data.success && data.data?.systemConfig?.install) { + setInstallConfig({ + pythonIndexUrl: data.data.systemConfig.install.pythonIndexUrl || '', + npmRegistry: data.data.systemConfig.install.npmRegistry || '', + }); + } + if (data.success && data.data?.systemConfig?.smartRouting) { + setSmartRoutingConfig({ + enabled: data.data.systemConfig.smartRouting.enabled ?? 
false, + dbUrl: data.data.systemConfig.smartRouting.dbUrl || '', + openaiApiBaseUrl: data.data.systemConfig.smartRouting.openaiApiBaseUrl || '', + openaiApiKey: data.data.systemConfig.smartRouting.openaiApiKey || '', + openaiApiEmbeddingModel: + data.data.systemConfig.smartRouting.openaiApiEmbeddingModel || '', + }); + } + } catch (error) { + console.error('Failed to fetch settings:', error); + setError(error instanceof Error ? error.message : 'Failed to fetch settings'); + // 使用一个稳定的 showToast 引用,避免将其加入依赖数组 + showToast(t('errors.failedToFetchSettings')); + } finally { + setLoading(false); + } + }, [t]); // 移除 showToast 依赖 + + // Update routing configuration + const updateRoutingConfig = async ( + key: T, + value: RoutingConfig[T], + ) => { + setLoading(true); + setError(null); + + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/system-config'), { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '', + }, + body: JSON.stringify({ + routing: { + [key]: value, + }, + }), + }); + + if (!response.ok) { + throw new Error(`HTTP error! Status: ${response.status}`); + } + + const data = await response.json(); + + if (data.success) { + setRoutingConfig({ + ...routingConfig, + [key]: value, + }); + showToast(t('settings.systemConfigUpdated')); + return true; + } else { + showToast(t('errors.failedToUpdateRouteConfig')); + return false; + } + } catch (error) { + console.error('Failed to update routing config:', error); + setError(error instanceof Error ? 
error.message : 'Failed to update routing config'); + showToast(t('errors.failedToUpdateRouteConfig')); + return false; + } finally { + setLoading(false); + } + }; + + // Update install configuration + const updateInstallConfig = async (key: keyof InstallConfig, value: string) => { + setLoading(true); + setError(null); + + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/system-config'), { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '', + }, + body: JSON.stringify({ + install: { + [key]: value, + }, + }), + }); + + if (!response.ok) { + throw new Error(`HTTP error! Status: ${response.status}`); + } + + const data = await response.json(); + + if (data.success) { + setInstallConfig({ + ...installConfig, + [key]: value, + }); + showToast(t('settings.systemConfigUpdated')); + return true; + } else { + showToast(t('errors.failedToUpdateSystemConfig')); + return false; + } + } catch (error) { + console.error('Failed to update system config:', error); + setError(error instanceof Error ? error.message : 'Failed to update system config'); + showToast(t('errors.failedToUpdateSystemConfig')); + return false; + } finally { + setLoading(false); + } + }; + + // Update smart routing configuration + const updateSmartRoutingConfig = async ( + key: T, + value: SmartRoutingConfig[T], + ) => { + setLoading(true); + setError(null); + + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/system-config'), { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '', + }, + body: JSON.stringify({ + smartRouting: { + [key]: value, + }, + }), + }); + + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.message || `HTTP error! 
Status: ${response.status}`); + } + + const data = await response.json(); + + if (data.success) { + setSmartRoutingConfig({ + ...smartRoutingConfig, + [key]: value, + }); + showToast(t('settings.systemConfigUpdated')); + return true; + } else { + showToast(data.message || t('errors.failedToUpdateSmartRoutingConfig')); + return false; + } + } catch (error) { + console.error('Failed to update smart routing config:', error); + const errorMessage = + error instanceof Error ? error.message : 'Failed to update smart routing config'; + setError(errorMessage); + showToast(errorMessage); + return false; + } finally { + setLoading(false); + } + }; + + // Update multiple smart routing configuration fields at once + const updateSmartRoutingConfigBatch = async (updates: Partial) => { + setLoading(true); + setError(null); + + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/system-config'), { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '', + }, + body: JSON.stringify({ + smartRouting: updates, + }), + }); + + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.message || `HTTP error! Status: ${response.status}`); + } + + const data = await response.json(); + + if (data.success) { + setSmartRoutingConfig({ + ...smartRoutingConfig, + ...updates, + }); + showToast(t('settings.systemConfigUpdated')); + return true; + } else { + showToast(data.message || t('errors.failedToUpdateSmartRoutingConfig')); + return false; + } + } catch (error) { + console.error('Failed to update smart routing config:', error); + const errorMessage = + error instanceof Error ? 
error.message : 'Failed to update smart routing config'; + setError(errorMessage); + showToast(errorMessage); + return false; + } finally { + setLoading(false); + } + }; + + // Update multiple routing configuration fields at once + const updateRoutingConfigBatch = async (updates: Partial) => { + setLoading(true); + setError(null); + + try { + const token = localStorage.getItem('mcphub_token'); + const response = await fetch(getApiUrl('/system-config'), { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token || '', + }, + body: JSON.stringify({ + routing: updates, + }), + }); + + if (!response.ok) { + throw new Error(`HTTP error! Status: ${response.status}`); + } + + const data = await response.json(); + + if (data.success) { + setRoutingConfig({ + ...routingConfig, + ...updates, + }); + showToast(t('settings.systemConfigUpdated')); + return true; + } else { + showToast(t('errors.failedToUpdateRouteConfig')); + return false; + } + } catch (error) { + console.error('Failed to update routing config:', error); + setError(error instanceof Error ? 
error.message : 'Failed to update routing config'); + showToast(t('errors.failedToUpdateRouteConfig')); + return false; + } finally { + setLoading(false); + } + }; + + // Fetch settings when the component mounts or refreshKey changes + useEffect(() => { + fetchSettings(); + }, [fetchSettings, refreshKey]); + + useEffect(() => { + if (routingConfig) { + setTempRoutingConfig({ + bearerAuthKey: routingConfig.bearerAuthKey, + }); + } + }, [routingConfig]); + + return { + routingConfig, + tempRoutingConfig, + setTempRoutingConfig, + installConfig, + smartRoutingConfig, + loading, + error, + setError, + triggerRefresh, + fetchSettings, + updateRoutingConfig, + updateInstallConfig, + updateSmartRoutingConfig, + updateSmartRoutingConfigBatch, + updateRoutingConfigBatch, + }; +}; diff --git a/frontend/src/i18n.ts b/frontend/src/i18n.ts new file mode 100644 index 0000000000000000000000000000000000000000..5a542ee9922a1863cbe4bb41769ec5339066a774 --- /dev/null +++ b/frontend/src/i18n.ts @@ -0,0 +1,42 @@ +import i18n from 'i18next'; +import { initReactI18next } from 'react-i18next'; +import LanguageDetector from 'i18next-browser-languagedetector'; + +// Import translations +import enTranslation from './locales/en.json'; +import zhTranslation from './locales/zh.json'; + +i18n + // Detect user language + .use(LanguageDetector) + // Pass the i18n instance to react-i18next + .use(initReactI18next) + // Initialize i18next + .init({ + resources: { + en: { + translation: enTranslation + }, + zh: { + translation: zhTranslation + } + }, + fallbackLng: 'en', + debug: process.env.NODE_ENV === 'development', + + // Common namespace used for all translations + defaultNS: 'translation', + + interpolation: { + escapeValue: false, // React already safe from XSS + }, + + detection: { + // Order of detection; prioritize localStorage to respect user language choice + order: ['localStorage', 'cookie', 'htmlTag', 'navigator'], + // Cache the language in localStorage + caches: ['localStorage', 
'cookie'], + } + }); + +export default i18n; \ No newline at end of file diff --git a/frontend/src/index.css b/frontend/src/index.css new file mode 100644 index 0000000000000000000000000000000000000000..d804cd4eb9207aa6af302e93d6f6dc6155218459 --- /dev/null +++ b/frontend/src/index.css @@ -0,0 +1,66 @@ +/* Use project's custom Tailwind import */ +@import "tailwindcss"; + +/* Add some custom styles to verify CSS is working correctly */ +body { + margin: 0; + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', + 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', + sans-serif; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +/* Dark mode override styles - these will apply when dark class is on html element */ +.dark body { + background-color: #111827; + color: #e5e7eb; +} + +.dark .bg-white { + background-color: #1f2937 !important; +} + +.dark .text-gray-900 { + color: #f9fafb !important; +} + +.dark .text-gray-800 { + color: #f3f4f6 !important; +} + +.dark .text-gray-700 { + color: #e5e7eb !important; +} + +.dark .text-gray-600 { + color: #d1d5db !important; +} + +.dark .text-gray-500 { + color: #9ca3af !important; +} + +.dark .border-gray-300 { + border-color: #4b5563 !important; +} + +.dark .bg-gray-100 { + background-color: #374151 !important; +} + +.dark .bg-gray-50 { + background-color: #1f2937 !important; +} + +.dark .shadow { + box-shadow: 0 1px 3px 0 rgba(0, 0, 0, 0.4), 0 1px 2px 0 rgba(0, 0, 0, 0.24) !important; +} + +.bg-custom-blue { + background-color: #4a90e2; +} + +.text-custom-white { + color: #ffffff; +} \ No newline at end of file diff --git a/frontend/src/layouts/MainLayout.tsx b/frontend/src/layouts/MainLayout.tsx new file mode 100644 index 0000000000000000000000000000000000000000..e0a3204eb49c6bec91bd12a8f997392920929eae --- /dev/null +++ b/frontend/src/layouts/MainLayout.tsx @@ -0,0 +1,33 @@ +import React from 'react'; +import { Outlet } from 'react-router-dom'; 
+import Header from '@/components/layout/Header'; +import Sidebar from '@/components/layout/Sidebar'; +import Content from '@/components/layout/Content'; + +const MainLayout: React.FC = () => { + // 控制侧边栏展开/折叠状态 + const [sidebarCollapsed, setSidebarCollapsed] = React.useState(false); + + const toggleSidebar = () => { + setSidebarCollapsed(!sidebarCollapsed); + }; + + return ( +
+ {/* 顶部导航 */} +
+ +
+ {/* 侧边导航 */} + + + {/* 主内容区域 */} + + + +
+
+ ); +}; + +export default MainLayout; \ No newline at end of file diff --git a/frontend/src/locales/en.json b/frontend/src/locales/en.json new file mode 100644 index 0000000000000000000000000000000000000000..b7ed2943d26fa92bf8563629cb24c001996dc764 --- /dev/null +++ b/frontend/src/locales/en.json @@ -0,0 +1,321 @@ +{ + "app": { + "title": "MCP Hub Dashboard", + "error": "Error", + "closeButton": "Close", + "noServers": "No MCP servers available", + "loading": "Loading...", + "logout": "Logout", + "profile": "Profile", + "changePassword": "Change Password", + "toggleSidebar": "Toggle Sidebar", + "welcomeUser": "Welcome, {{username}}", + "name": "MCP Hub" + }, + "about": { + "title": "About", + "versionInfo": "MCP Hub Version: {{version}}", + "newVersion": "New version available!", + "currentVersion": "Current version", + "newVersionAvailable": "New version {{version}} is available", + "viewOnGitHub": "View on GitHub", + "checkForUpdates": "Check for Updates", + "checking": "Checking for updates..." + }, + "profile": { + "viewProfile": "View profile", + "userCenter": "User Center" + }, + "sponsor": { + "label": "Sponsor", + "title": "Support the Project", + "rewardAlt": "Reward QR Code", + "supportMessage": "Support the development of MCP Hub by buying me a coffee!", + "supportButton": "Support on Ko-fi" + }, + "wechat": { + "label": "WeChat", + "title": "Connect via WeChat", + "qrCodeAlt": "WeChat QR Code", + "scanMessage": "Scan this QR code to connect with us on WeChat" + }, + "discord": { + "label": "Discord", + "title": "Join our Discord server", + "community": "Join our growing community on Discord for support, discussions, and updates!" 
+ }, + "theme": { + "title": "Theme", + "light": "Light", + "dark": "Dark", + "system": "System" + }, + "auth": { + "login": "Login", + "loginTitle": "Login to MCP Hub", + "username": "Username", + "password": "Password", + "loggingIn": "Logging in...", + "emptyFields": "Username and password cannot be empty", + "loginFailed": "Login failed, please check your username and password", + "loginError": "An error occurred during login", + "currentPassword": "Current Password", + "newPassword": "New Password", + "confirmPassword": "Confirm Password", + "passwordsNotMatch": "New password and confirmation do not match", + "changePasswordSuccess": "Password changed successfully", + "changePasswordError": "Failed to change password", + "changePassword": "Change Password", + "passwordChanged": "Password changed successfully", + "passwordChangeError": "Failed to change password" + }, + "server": { + "addServer": "Add Server", + "add": "Add", + "edit": "Edit", + "delete": "Delete", + "confirmDelete": "Are you sure you want to delete this server?", + "deleteWarning": "Deleting server '{{name}}' will remove it and all its data. 
This action cannot be undone.", + "status": "Status", + "tools": "Tools", + "name": "Server Name", + "url": "Server URL", + "apiKey": "API Key", + "save": "Save", + "cancel": "Cancel", + "invalidConfig": "Could not find configuration data for {{serverName}}", + "addError": "Failed to add server", + "editError": "Failed to edit server {{serverName}}", + "deleteError": "Failed to delete server {{serverName}}", + "updateError": "Failed to update server", + "editTitle": "Edit Server: {{serverName}}", + "type": "Server Type", + "command": "Command", + "arguments": "Arguments", + "envVars": "Environment Variables", + "headers": "HTTP Headers", + "key": "key", + "value": "value", + "enabled": "Enabled", + "enable": "Enable", + "disable": "Disable", + "remove": "Remove", + "toggleError": "Failed to toggle server {{serverName}}", + "alreadyExists": "Server {{serverName}} already exists", + "invalidData": "Invalid server data provided", + "notFound": "Server {{serverName}} not found", + "namePlaceholder": "Enter server name", + "urlPlaceholder": "Enter server URL", + "commandPlaceholder": "Enter command", + "argumentsPlaceholder": "Enter arguments", + "errorDetails": "Error Details", + "viewErrorDetails": "View error details" + }, + "status": { + "online": "Online", + "offline": "Offline", + "connecting": "Connecting" + }, + "errors": { + "general": "Something went wrong", + "network": "Network connection error. Please check your internet connection", + "serverConnection": "Unable to connect to the server. Please check if the server is running", + "serverAdd": "Failed to add server. Please check the server status", + "serverUpdate": "Failed to edit server {{serverName}}. Please check the server status", + "serverFetch": "Failed to retrieve server data. Please try again later", + "initialStartup": "The server might be starting up. 
Please wait a moment as this process can take some time on first launch...", + "serverInstall": "Failed to install server", + "failedToFetchSettings": "Failed to fetch settings", + "failedToUpdateRouteConfig": "Failed to update route configuration", + "failedToUpdateSmartRoutingConfig": "Failed to update smart routing configuration" + }, + "common": { + "processing": "Processing...", + "save": "Save", + "cancel": "Cancel", + "refresh": "Refresh", + "create": "Create", + "submitting": "Submitting...", + "delete": "Delete", + "copy": "Copy", + "copySuccess": "Copied to clipboard", + "copyFailed": "Copy failed", + "close": "Close" + }, + "nav": { + "dashboard": "Dashboard", + "servers": "Servers", + "groups": "Groups", + "settings": "Settings", + "changePassword": "Change Password", + "market": "Market", + "logs": "Logs" + }, + "pages": { + "dashboard": { + "title": "Dashboard", + "totalServers": "Total", + "onlineServers": "Online", + "offlineServers": "Offline", + "connectingServers": "Connecting", + "recentServers": "Recent Servers" + }, + "servers": { + "title": "Servers Management" + }, + "groups": { + "title": "Group Management" + }, + "settings": { + "title": "Settings", + "language": "Language", + "account": "Account Settings", + "password": "Change Password", + "appearance": "Appearance", + "routeConfig": "Security", + "installConfig": "Installation", + "smartRouting": "Smart Routing" + }, + "market": { + "title": "Server Market - (Data from mcpm.sh)" + }, + "logs": { + "title": "System Logs" + } + }, + "logs": { + "filters": "Filters", + "search": "Search logs...", + "autoScroll": "Auto-scroll", + "clearLogs": "Clear logs", + "loading": "Loading logs...", + "noLogs": "No logs available.", + "noMatch": "No logs match the current filters.", + "mainProcess": "Main Process", + "childProcess": "Child Process", + "main": "Main", + "child": "Child" + }, + "groups": { + "add": "Add", + "addNew": "Add New Group", + "edit": "Edit Group", + "delete": "Delete", + 
"confirmDelete": "Are you sure you want to delete this group?", + "deleteWarning": "Deleting group '{{name}}' will remove it and all its server associations. This action cannot be undone.", + "name": "Group Name", + "namePlaceholder": "Enter group name", + "nameRequired": "Group name is required", + "description": "Description", + "descriptionPlaceholder": "Enter group description (optional)", + "createError": "Failed to create group", + "updateError": "Failed to update group", + "deleteError": "Failed to delete group", + "serverAddError": "Failed to add server to group", + "serverRemoveError": "Failed to remove server from group", + "addServer": "Add Server to Group", + "selectServer": "Select a server to add", + "servers": "Servers in Group", + "remove": "Remove", + "noGroups": "No groups available. Create a new group to get started.", + "noServers": "No servers in this group.", + "noServerOptions": "No servers available", + "serverCount": "{{count}} Servers" + }, + "market": { + "title": "Server Market", + "official": "Official", + "by": "By", + "unknown": "Unknown", + "tools": "tools", + "search": "Search", + "searchPlaceholder": "Search for servers by name, category, or tags", + "clearFilters": "Clear", + "clearCategoryFilter": "", + "clearTagFilter": "", + "categories": "Categories", + "tags": "Tags", + "showTags": "Show tags", + "hideTags": "Hide tags", + "moreTags": "", + "noServers": "No servers found matching your search", + "backToList": "Back to list", + "install": "Install", + "installing": "Installing...", + "installed": "Installed", + "installServer": "Install Server: {{name}}", + "installSuccess": "Server {{serverName}} installed successfully", + "author": "Author", + "license": "License", + "repository": "Repository", + "examples": "Examples", + "arguments": "Arguments", + "argumentName": "Name", + "description": "Description", + "required": "Required", + "example": "Example", + "viewSchema": "View schema", + "fetchError": "Error fetching market 
servers", + "serverNotFound": "Server not found", + "searchError": "Error searching servers", + "filterError": "Error filtering servers by category", + "tagFilterError": "Error filtering servers by tag", + "noInstallationMethod": "No installation method available for this server", + "showing": "Showing {{from}}-{{to}} of {{total}} servers", + "perPage": "Per page" + }, + "tool": { + "run": "Run", + "running": "Running...", + "runTool": "Run Tool", + "cancel": "Cancel", + "noDescription": "No description available", + "inputSchema": "Input Schema:", + "runToolWithName": "Run Tool: {{name}}", + "execution": "Tool Execution", + "successful": "Successful", + "failed": "Failed", + "result": "Result:", + "error": "Error", + "errorDetails": "Error Details:", + "noContent": "Tool executed successfully but returned no content.", + "unknownError": "Unknown error occurred", + "jsonResponse": "JSON Response:", + "toolResult": "Tool result", + "noParameters": "This tool does not require any parameters.", + "selectOption": "Select an option", + "enterValue": "Enter {{type}} value" + }, + "settings": { + "enableGlobalRoute": "Enable Global Route", + "enableGlobalRouteDescription": "Allow connections to /sse endpoint without specifying a group ID", + "enableGroupNameRoute": "Enable Group Name Route", + "enableGroupNameRouteDescription": "Allow connections to /sse endpoint using group names instead of just group IDs", + "enableBearerAuth": "Enable Bearer Authentication", + "enableBearerAuthDescription": "Require bearer token authentication for MCP requests", + "bearerAuthKey": "Bearer Authentication Key", + "bearerAuthKeyDescription": "The authentication key that will be required in the Bearer token", + "bearerAuthKeyPlaceholder": "Enter bearer authentication key", + "pythonIndexUrl": "Python Package Repository URL", + "pythonIndexUrlDescription": "Set UV_DEFAULT_INDEX environment variable for Python package installation", + "pythonIndexUrlPlaceholder": "e.g. 
https://pypi.org/simple", + "npmRegistry": "NPM Registry URL", + "npmRegistryDescription": "Set npm_config_registry environment variable for NPM package installation", + "npmRegistryPlaceholder": "e.g. https://registry.npmjs.org/", + "installConfig": "Installation", + "systemConfigUpdated": "System configuration updated successfully", + "enableSmartRouting": "Enable Smart Routing", + "enableSmartRoutingDescription": "Enable smart routing feature to search the most suitable tool based on input (using $smart group name)", + "dbUrl": "PostgreSQL URL (requires pgvector support)", + "dbUrlPlaceholder": "e.g. postgresql://user:password@localhost:5432/dbname", + "openaiApiBaseUrl": "OpenAI API Base URL", + "openaiApiBaseUrlPlaceholder": "https://api.openai.com/v1", + "openaiApiKey": "OpenAI API Key", + "openaiApiKeyPlaceholder": "Enter OpenAI API key", + "openaiApiEmbeddingModel": "OpenAI Embedding Model", + "openaiApiEmbeddingModelPlaceholder": "text-embedding-3-small", + "smartRoutingConfigUpdated": "Smart routing configuration updated successfully", + "smartRoutingRequiredFields": "Database URL and OpenAI API Key are required to enable smart routing", + "smartRoutingValidationError": "Please fill in the required fields before enabling Smart Routing: {{fields}}" + } +} \ No newline at end of file diff --git a/frontend/src/locales/zh.json b/frontend/src/locales/zh.json new file mode 100644 index 0000000000000000000000000000000000000000..eafb0059833c96c299feed55d22c355fae53b0f3 --- /dev/null +++ b/frontend/src/locales/zh.json @@ -0,0 +1,323 @@ +{ + "app": { + "title": "MCP Hub 控制面板", + "error": "错误", + "closeButton": "关闭", + "noServers": "没有可用的 MCP 服务器", + "loading": "加载中...", + "logout": "退出登录", + "profile": "个人资料", + "changePassword": "修改密码", + "toggleSidebar": "切换侧边栏", + "welcomeUser": "欢迎, {{username}}", + "name": "MCP Hub" + }, + "about": { + "title": "关于", + "versionInfo": "MCP Hub 版本: {{version}}", + "newVersion": "有新版本可用!", + "currentVersion": "当前版本", + 
"newVersionAvailable": "新版本 {{version}} 已发布", + "viewOnGitHub": "在 GitHub 上查看", + "checkForUpdates": "检查更新", + "checking": "检查更新中..." + }, + "profile": { + "viewProfile": "查看个人中心", + "userCenter": "个人中心" + }, + "sponsor": { + "label": "赞助", + "title": "支持项目", + "rewardAlt": "赞赏码", + "supportMessage": "通过捐赠支持 MCP Hub 的开发!", + "supportButton": "在 Ko-fi 上支持" + }, + "wechat": { + "label": "微信", + "title": "微信联系", + "qrCodeAlt": "微信二维码", + "scanMessage": "扫描二维码添加微信" + }, + "discord": { + "label": "Discord", + "title": "加入我们的 Discord 服务器", + "community": "加入我们不断壮大的 Discord 社区,获取支持、参与讨论并了解最新动态!" + }, + "theme": { + "title": "主题", + "light": "浅色", + "dark": "深色", + "system": "系统" + }, + "auth": { + "login": "登录", + "loginTitle": "登录 MCP Hub", + "username": "用户名", + "password": "密码", + "loggingIn": "登录中...", + "emptyFields": "用户名和密码不能为空", + "loginFailed": "登录失败,请检查用户名和密码", + "loginError": "登录过程中出现错误", + "currentPassword": "当前密码", + "newPassword": "新密码", + "confirmPassword": "确认密码", + "passwordsNotMatch": "新密码与确认密码不一致", + "changePasswordSuccess": "密码修改成功", + "changePasswordError": "修改密码失败", + "changePassword": "修改密码", + "passwordChanged": "密码修改成功", + "passwordChangeError": "修改密码失败" + }, + "server": { + "addServer": "添加服务器", + "add": "添加", + "edit": "编辑", + "delete": "删除", + "confirmDelete": "您确定要删除此服务器吗?", + "deleteWarning": "删除服务器 '{{name}}' 将会移除该服务器及其所有数据。此操作无法撤销。", + "status": "状态", + "tools": "工具", + "name": "服务器名称", + "url": "服务器 URL", + "apiKey": "API 密钥", + "save": "保存", + "cancel": "取消", + "addError": "添加服务器失败", + "editError": "编辑服务器 {{serverName}} 失败", + "invalidConfig": "无法找到 {{serverName}} 的配置数据", + "deleteError": "删除服务器 {{serverName}} 失败", + "updateError": "更新服务器失败", + "editTitle": "编辑服务器: {{serverName}}", + "type": "服务器类型", + "command": "命令", + "arguments": "参数", + "envVars": "环境变量", + "headers": "HTTP 请求头", + "key": "键", + "value": "值", + "enabled": "已启用", + "enable": "启用", + "disable": "禁用", + "remove": "移除", + "toggleError": "切换服务器 {{serverName}} 状态失败", + 
"alreadyExists": "服务器 {{serverName}} 已经存在", + "invalidData": "提供的服务器数据无效", + "notFound": "找不到服务器 {{serverName}}", + "namePlaceholder": "请输入服务器名称", + "urlPlaceholder": "请输入服务器URL", + "commandPlaceholder": "请输入命令", + "argumentsPlaceholder": "请输入参数", + "errorDetails": "错误详情", + "viewErrorDetails": "查看错误详情" + }, + "status": { + "online": "在线", + "offline": "离线", + "connecting": "连接中" + }, + "errors": { + "general": "发生错误", + "network": "网络连接错误,请检查您的互联网连接", + "serverConnection": "无法连接到服务器,请检查服务器是否正在运行", + "serverAdd": "添加服务器失败,请检查服务器状态", + "serverUpdate": "编辑服务器 {{serverName}} 失败,请检查服务器状态", + "serverFetch": "获取服务器数据失败,请稍后重试", + "initialStartup": "服务器可能正在启动中。首次启动可能需要一些时间,请耐心等候...", + "serverInstall": "安装服务器失败", + "failedToFetchSettings": "获取设置失败", + "failedToUpdateSystemConfig": "更新系统配置失败", + "failedToUpdateRouteConfig": "更新路由配置失败", + "failedToUpdateSmartRoutingConfig": "更新智能路由配置失败" + }, + "common": { + "processing": "处理中...", + "save": "保存", + "cancel": "取消", + "refresh": "刷新", + "create": "创建", + "submitting": "提交中...", + "delete": "删除", + "copy": "复制", + "copySuccess": "已复制到剪贴板", + "copyFailed": "复制失败", + "close": "关闭" + }, + "nav": { + "dashboard": "仪表盘", + "servers": "服务器", + "settings": "设置", + "changePassword": "修改密码", + "groups": "分组", + "market": "市场", + "logs": "日志" + }, + "pages": { + "dashboard": { + "title": "仪表盘", + "totalServers": "总数", + "onlineServers": "在线", + "offlineServers": "离线", + "connectingServers": "连接中", + "recentServers": "最近的服务器" + }, + "servers": { + "title": "服务器管理" + }, + "settings": { + "title": "设置", + "language": "语言", + "account": "账户设置", + "password": "修改密码", + "appearance": "外观", + "routeConfig": "安全配置", + "installConfig": "安装", + "smartRouting": "智能路由" + }, + "groups": { + "title": "分组管理" + }, + "market": { + "title": "服务器市场 - (数据来源于 mcpm.sh)" + }, + "logs": { + "title": "系统日志" + } + }, + "logs": { + "filters": "筛选", + "search": "搜索日志...", + "autoScroll": "自动滚动", + "clearLogs": "清除日志", + "loading": "加载日志中...", + "noLogs": "暂无日志。", + 
"noMatch": "没有匹配当前筛选条件的日志。", + "mainProcess": "主进程", + "childProcess": "子进程", + "main": "主", + "child": "子" + }, + "groups": { + "add": "添加", + "addNew": "添加新分组", + "edit": "编辑分组", + "delete": "删除", + "confirmDelete": "您确定要删除此分组吗?", + "deleteWarning": "删除分组 '{{name}}' 将会移除该分组及其所有服务器关联。此操作无法撤销。", + "name": "分组名称", + "namePlaceholder": "请输入分组名称", + "nameRequired": "分组名称不能为空", + "description": "描述", + "descriptionPlaceholder": "请输入分组描述(可选)", + "createError": "创建分组失败", + "updateError": "更新分组失败", + "deleteError": "删除分组失败", + "serverAddError": "向分组添加服务器失败", + "serverRemoveError": "从分组移除服务器失败", + "addServer": "添加服务器到分组", + "selectServer": "选择要添加的服务器", + "servers": "分组中的服务器", + "remove": "移除", + "noGroups": "暂无可用分组。创建一个新分组以开始使用。", + "noServers": "此分组中没有服务器。", + "noServerOptions": "没有可用的服务器", + "serverCount": "{{count}} 台服务器" + }, + "market": { + "title": "服务器市场", + "official": "官方", + "by": "作者", + "unknown": "未知", + "tools": "工具", + "search": "搜索", + "searchPlaceholder": "搜索服务器名称、分类或标签", + "clearFilters": "清除", + "clearCategoryFilter": "", + "clearTagFilter": "", + "categories": "分类", + "tags": "标签", + "showTags": "显示标签", + "hideTags": "隐藏标签", + "moreTags": "", + "noServers": "未找到匹配的服务器", + "backToList": "返回列表", + "install": "安装", + "installing": "安装中...", + "installed": "已安装", + "installServer": "安装服务器: {{name}}", + "installSuccess": "服务器 {{serverName}} 安装成功", + "author": "作者", + "license": "许可证", + "repository": "代码仓库", + "examples": "示例", + "arguments": "参数", + "argumentName": "名称", + "description": "描述", + "required": "必填", + "example": "示例", + "viewSchema": "查看结构", + "fetchError": "获取服务器市场数据失败", + "serverNotFound": "未找到服务器", + "searchError": "搜索服务器失败", + "filterError": "按分类筛选服务器失败", + "tagFilterError": "按标签筛选服务器失败", + "noInstallationMethod": "该服务器没有可用的安装方法", + "showing": "显示 {{from}}-{{to}}/{{total}} 个服务器", + "perPage": "每页显示" + }, + "tool": { + "run": "运行", + "running": "运行中...", + "runTool": "运行工具", + "cancel": "取消", + "noDescription": "无描述信息", + "inputSchema": 
"输入模式:", + "runToolWithName": "运行工具:{{name}}", + "execution": "工具执行", + "successful": "成功", + "failed": "失败", + "result": "结果:", + "error": "错误", + "errorDetails": "错误详情:", + "noContent": "工具执行成功但未返回内容。", + "unknownError": "发生未知错误", + "jsonResponse": "JSON 响应:", + "toolResult": "工具结果", + "noParameters": "此工具不需要任何参数。", + "selectOption": "选择一个选项", + "enterValue": "输入{{type}}值" + }, + "settings": { + "enableGlobalRoute": "启用全局路由", + "enableGlobalRouteDescription": "允许不指定组 ID 就连接到 /sse 端点", + "enableGroupNameRoute": "启用组名路由", + "enableGroupNameRouteDescription": "允许使用组名而不仅仅是组 ID 连接到 /sse 端点", + "enableBearerAuth": "启用 Bearer 认证", + "enableBearerAuthDescription": "对 MCP 请求启用 Bearer 令牌认证", + "bearerAuthKey": "Bearer 认证密钥", + "bearerAuthKeyDescription": "Bearer 令牌中需要携带的认证密钥", + "bearerAuthKeyPlaceholder": "请输入 Bearer 认证密钥", + "pythonIndexUrl": "Python 包仓库地址", + "pythonIndexUrlDescription": "设置 UV_DEFAULT_INDEX 环境变量,用于 Python 包安装", + "pythonIndexUrlPlaceholder": "例如: https://mirrors.aliyun.com/pypi/simple", + "npmRegistry": "NPM 仓库地址", + "npmRegistryDescription": "设置 npm_config_registry 环境变量,用于 NPM 包安装", + "npmRegistryPlaceholder": "例如: https://registry.npmmirror.com/", + "installConfig": "安装配置", + "systemConfigUpdated": "系统配置更新成功", + "enableSmartRouting": "启用智能路由", + "enableSmartRoutingDescription": "开启智能路由功能,根据输入自动搜索最合适的工具(使用 $smart 分组)", + "dbUrl": "PostgreSQL 连接地址(必须支持 pgvector)", + "dbUrlPlaceholder": "例如: postgresql://user:password@localhost:5432/dbname", + "openaiApiBaseUrl": "OpenAI API 基础地址", + "openaiApiBaseUrlPlaceholder": "https://api.openai.com/v1", + "openaiApiKey": "OpenAI API 密钥", + "openaiApiKeyDescription": "用于访问 OpenAI API 的密钥", + "openaiApiKeyPlaceholder": "请输入 OpenAI API 密钥", + "openaiApiEmbeddingModel": "OpenAI 嵌入模型", + "openaiApiEmbeddingModelPlaceholder": "text-embedding-3-small", + "smartRoutingConfigUpdated": "智能路由配置更新成功", + "smartRoutingRequiredFields": "启用智能路由需要填写数据库连接地址和 OpenAI API 密钥", + "smartRoutingValidationError": 
"启用智能路由前请先填写必要字段:{{fields}}" + } +} \ No newline at end of file diff --git a/frontend/src/main.tsx b/frontend/src/main.tsx new file mode 100644 index 0000000000000000000000000000000000000000..f1cd5794dd16ee8c78387d38f38a1c252997d78a --- /dev/null +++ b/frontend/src/main.tsx @@ -0,0 +1,45 @@ +import React from 'react'; +import ReactDOM from 'react-dom/client'; +import App from './App'; +import './index.css'; +// Import the i18n configuration +import './i18n'; +import { loadRuntimeConfig } from './utils/runtime'; + +// Load runtime configuration before starting the app +async function initializeApp() { + try { + console.log('Loading runtime configuration...'); + const config = await loadRuntimeConfig(); + console.log('Runtime configuration loaded:', config); + + // Store config in window object + window.__MCPHUB_CONFIG__ = config; + + // Start React app + ReactDOM.createRoot(document.getElementById('root')!).render( + + + , + ); + } catch (error) { + console.error('Failed to initialize app:', error); + + // Fallback: start app with default config + console.log('Starting app with default configuration...'); + window.__MCPHUB_CONFIG__ = { + basePath: '', + version: 'dev', + name: 'mcphub', + }; + + ReactDOM.createRoot(document.getElementById('root')!).render( + + + , + ); + } +} + +// Initialize the app +initializeApp(); \ No newline at end of file diff --git a/frontend/src/pages/Dashboard.tsx b/frontend/src/pages/Dashboard.tsx new file mode 100644 index 0000000000000000000000000000000000000000..9691548d1b6c25d77189a5cd2a18905479d7c2b4 --- /dev/null +++ b/frontend/src/pages/Dashboard.tsx @@ -0,0 +1,206 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { useServerData } from '@/hooks/useServerData'; +import { ServerStatus } from '@/types'; + +const DashboardPage: React.FC = () => { + const { t } = useTranslation(); + const { servers, error, setError, isLoading } = useServerData(); + + // Calculate server statistics + const 
serverStats = { + total: servers.length, + online: servers.filter(server => server.status === 'connected').length, + offline: servers.filter(server => server.status === 'disconnected').length, + connecting: servers.filter(server => server.status === 'connecting').length + }; + + // Map status to translation keys + const statusTranslations = { + connected: 'status.online', + disconnected: 'status.offline', + connecting: 'status.connecting' + } + + // Calculate percentage for each status (for dashboard display) + const getStatusPercentage = (status: ServerStatus) => { + if (servers.length === 0) return 0; + return Math.round((servers.filter(server => server.status === status).length / servers.length) * 100); + }; + + return ( +
+

{t('pages.dashboard.title')}

+ + {error && ( +
+
+
+

{t('app.error')}

+

{error}

+
+ +
+
+ )} + + {isLoading ? ( +
+
+ + + + +

{t('app.loading')}

+
+
+ ) : ( +
+ {/* Total servers */} +
+
+
+ + + +
+
+

{t('pages.dashboard.totalServers')}

+

{serverStats.total}

+
+
+
+ + {/* Online servers */} +
+
+
+ + + +
+
+

{t('pages.dashboard.onlineServers')}

+

{serverStats.online}

+
+
+
+
+
+
+ + {/* Offline servers */} +
+
+
+ + + +
+
+

{t('pages.dashboard.offlineServers')}

+

{serverStats.offline}

+
+
+
+
+
+
+ + {/* Connecting servers */} +
+
+
+ + + +
+
+

{t('pages.dashboard.connectingServers')}

+

{serverStats.connecting}

+
+
+
+
+
+
+
+ )} + + {/* Recent activity list */} + {servers.length > 0 && !isLoading && ( +
+

{t('pages.dashboard.recentServers')}

+
+ + + + + + + + + + + {servers.slice(0, 5).map((server, index) => ( + + + + + + + ))} + +
+ {t('server.name')} + + {t('server.status')} + + {t('server.tools')} + + {t('server.enabled')} +
+ {server.name} + + + {t(statusTranslations[server.status] || server.status)} + + + {server.tools?.length || 0} + + {server.enabled !== false ? ( + + ) : ( + + )} +
+
+
+ )} +
+ ); +}; + +export default DashboardPage; \ No newline at end of file diff --git a/frontend/src/pages/GroupsPage.tsx b/frontend/src/pages/GroupsPage.tsx new file mode 100644 index 0000000000000000000000000000000000000000..40d451373866b640eaff0cec1e82ee18966d6674 --- /dev/null +++ b/frontend/src/pages/GroupsPage.tsx @@ -0,0 +1,116 @@ +import React, { useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Group } from '@/types'; +import { useGroupData } from '@/hooks/useGroupData'; +import { useServerData } from '@/hooks/useServerData'; +import AddGroupForm from '@/components/AddGroupForm'; +import EditGroupForm from '@/components/EditGroupForm'; +import GroupCard from '@/components/GroupCard'; + +const GroupsPage: React.FC = () => { + const { t } = useTranslation(); + const { + groups, + loading: groupsLoading, + error: groupError, + setError: setGroupError, + deleteGroup, + triggerRefresh + } = useGroupData(); + const { servers } = useServerData(); + + const [editingGroup, setEditingGroup] = useState(null); + const [showAddForm, setShowAddForm] = useState(false); + + const handleEditClick = (group: Group) => { + setEditingGroup(group); + }; + + const handleEditComplete = () => { + setEditingGroup(null); + triggerRefresh(); // Refresh the groups list after editing + }; + + const handleDeleteGroup = async (groupId: string) => { + const success = await deleteGroup(groupId); + if (!success) { + setGroupError(t('groups.deleteError')); + } + }; + + const handleAddGroup = () => { + setShowAddForm(true); + }; + + const handleAddComplete = () => { + setShowAddForm(false); + triggerRefresh(); // Refresh the groups list after adding + }; + + return ( +
+
+

{t('pages.groups.title')}

+
+ +
+
+ + {groupError && ( +
+

{groupError}

+
+ )} + + {groupsLoading ? ( +
+
+ + + + +

{t('app.loading')}

+
+
+ ) : groups.length === 0 ? ( +
+

{t('groups.noGroups')}

+
+ ) : ( +
+ {groups.map((group) => ( + + ))} +
+ )} + + {showAddForm && ( + + )} + + {editingGroup && ( + setEditingGroup(null)} + /> + )} +
+ ); +}; + +export default GroupsPage; \ No newline at end of file diff --git a/frontend/src/pages/LoginPage.tsx b/frontend/src/pages/LoginPage.tsx new file mode 100644 index 0000000000000000000000000000000000000000..0e6cfd6d2aa505782bd5e8265320149a455799c6 --- /dev/null +++ b/frontend/src/pages/LoginPage.tsx @@ -0,0 +1,108 @@ +import React, { useState } from 'react'; +import { useNavigate } from 'react-router-dom'; +import { useTranslation } from 'react-i18next'; +import { useAuth } from '../contexts/AuthContext'; +import ThemeSwitch from '@/components/ui/ThemeSwitch'; + +const LoginPage: React.FC = () => { + const { t } = useTranslation(); + const [username, setUsername] = useState(''); + const [password, setPassword] = useState(''); + const [error, setError] = useState(null); + const [loading, setLoading] = useState(false); + const { login } = useAuth(); + const navigate = useNavigate(); + + const handleSubmit = async (e: React.FormEvent) => { + e.preventDefault(); + setError(null); + setLoading(true); + + try { + if (!username || !password) { + setError(t('auth.emptyFields')); + setLoading(false); + return; + } + + const success = await login(username, password); + + if (success) { + navigate('/'); + } else { + setError(t('auth.loginFailed')); + } + } catch (err) { + setError(t('auth.loginError')); + } finally { + setLoading(false); + } + }; + + return ( +
+
+ +
+
+
+

+ {t('auth.loginTitle')} +

+
+
+
+
+ + setUsername(e.target.value)} + /> +
+
+ + setPassword(e.target.value)} + /> +
+
+ + {error && ( +
{error}
+ )} + +
+ +
+
+
+
+ ); +}; + +export default LoginPage; \ No newline at end of file diff --git a/frontend/src/pages/LogsPage.tsx b/frontend/src/pages/LogsPage.tsx new file mode 100644 index 0000000000000000000000000000000000000000..d52e7b56bc79e7f97c677c800b88a63012dc47ca --- /dev/null +++ b/frontend/src/pages/LogsPage.tsx @@ -0,0 +1,28 @@ +// filepath: /Users/sunmeng/code/github/mcphub/frontend/src/pages/LogsPage.tsx +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import LogViewer from '../components/LogViewer'; +import { useLogs } from '../services/logService'; + +const LogsPage: React.FC = () => { + const { t } = useTranslation(); + const { logs, loading, error, clearLogs } = useLogs(); + + return ( +
+
+

{t('pages.logs.title')}

+
+
+ +
+
+ ); +}; + +export default LogsPage; \ No newline at end of file diff --git a/frontend/src/pages/MarketPage.tsx b/frontend/src/pages/MarketPage.tsx new file mode 100644 index 0000000000000000000000000000000000000000..53caa9cac2884f9dd45fa9923dc970beb5bd1baf --- /dev/null +++ b/frontend/src/pages/MarketPage.tsx @@ -0,0 +1,356 @@ +import React, { useState, useEffect } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useNavigate, useParams, useLocation } from 'react-router-dom'; +import { MarketServer } from '@/types'; +import { useMarketData } from '@/hooks/useMarketData'; +import { useToast } from '@/contexts/ToastContext'; +import MarketServerCard from '@/components/MarketServerCard'; +import MarketServerDetail from '@/components/MarketServerDetail'; +import Pagination from '@/components/ui/Pagination'; + +const MarketPage: React.FC = () => { + const { t } = useTranslation(); + const navigate = useNavigate(); + const location = useLocation(); + const { serverName } = useParams<{ serverName?: string }>(); + const { showToast } = useToast(); + + const { + servers, + allServers, + categories, + tags, + loading, + error, + setError, + searchServers, + filterByCategory, + filterByTag, + selectedCategory, + selectedTag, + installServer, + fetchServerByName, + isServerInstalled, + // Pagination + currentPage, + totalPages, + changePage, + serversPerPage, + changeServersPerPage + } = useMarketData(); + + const [selectedServer, setSelectedServer] = useState(null); + const [searchQuery, setSearchQuery] = useState(''); + const [installing, setInstalling] = useState(false); + const [showTags, setShowTags] = useState(false); + + // Load server details if a server name is in the URL + useEffect(() => { + const loadServerDetails = async () => { + if (serverName) { + const server = await fetchServerByName(serverName); + if (server) { + setSelectedServer(server); + } else { + // If server not found, navigate back to market page + navigate('/market'); + } + } 
else { + setSelectedServer(null); + } + }; + + loadServerDetails(); + }, [serverName, fetchServerByName, navigate]); + + const handleSearch = (e: React.FormEvent) => { + e.preventDefault(); + searchServers(searchQuery); + }; + + const handleCategoryClick = (category: string) => { + filterByCategory(category); + }; + + const handleTagClick = (tag: string) => { + filterByTag(tag); + }; + + const handleClearFilters = () => { + setSearchQuery(''); + filterByCategory(''); + filterByTag(''); + }; + + const handleServerClick = (server: MarketServer) => { + navigate(`/market/${server.name}`); + }; + + const handleBackToList = () => { + navigate('/market'); + }; + + const handleInstall = async (server: MarketServer) => { + try { + setInstalling(true); + const success = await installServer(server); + if (success) { + // Show success message using toast instead of alert + showToast(t('market.installSuccess', { serverName: server.display_name }), 'success'); + } + } finally { + setInstalling(false); + } + }; + + const handlePageChange = (page: number) => { + changePage(page); + // Scroll to top of page when changing pages + window.scrollTo({ top: 0, behavior: 'smooth' }); + }; + + const handleChangeItemsPerPage = (e: React.ChangeEvent) => { + const newValue = parseInt(e.target.value, 10); + changeServersPerPage(newValue); + }; + + const toggleTagsVisibility = () => { + setShowTags(!showTags); + }; + + // Render detailed view if a server is selected + if (selectedServer) { + return ( + + ); + } + + return ( +
+
+
+

+ {t('market.title')} + {t('pages.market.title').split(' - ')[1]} +

+
+
+ + {error && ( +
+
+

{error}

+ +
+
+ )} + + {/* Search bar at the top */} +
+
+
+ setSearchQuery(e.target.value)} + placeholder={t('market.searchPlaceholder')} + className="shadow appearance-none border rounded w-full py-2 px-3 text-gray-700 leading-tight focus:outline-none focus:shadow-outline" + /> +
+ + {(searchQuery || selectedCategory || selectedTag) && ( + + )} +
+
+ +
+ {/* Left sidebar for filters (without search) */} +
+
+ {/* Categories */} + {categories.length > 0 ? ( +
+
+

{t('market.categories')}

+ {selectedCategory && ( + filterByCategory('')}> + {t('market.clearCategoryFilter')} + + )} +
+
+ {categories.map((category) => ( + + ))} +
+
+ ) : loading ? ( +
+
+

{t('market.categories')}

+
+
+ + + + +

{t('app.loading')}

+
+
+ ) : ( +
+
+

{t('market.categories')}

+
+

{t('market.noCategories')}

+
+ )} + + {/* Tags */} + {/* {tags.length > 0 && ( +
+
+
+

{t('market.tags')}

+ +
+ {selectedTag && ( + filterByTag('')}> + {t('market.clearTagFilter')} + + )} +
+ {showTags && ( +
+ {tags.map((tag) => ( + + ))} +
+ )} +
+ )} */} +
+
+ + {/* Main content area */} +
+ {loading ? ( +
+
+ + + + +

{t('app.loading')}

+
+
+ ) : servers.length === 0 ? ( +
+

{t('market.noServers')}

+
+ ) : ( + <> +
+ {servers.map((server, index) => ( + + ))} +
+ +
+
+ {t('market.showing', { + from: (currentPage - 1) * serversPerPage + 1, + to: Math.min(currentPage * serversPerPage, allServers.length), + total: allServers.length + })} +
+ +
+ + +
+
+ +
+ +
+ + )} +
+
+
+ ); +}; + +export default MarketPage; \ No newline at end of file diff --git a/frontend/src/pages/ServersPage.tsx b/frontend/src/pages/ServersPage.tsx new file mode 100644 index 0000000000000000000000000000000000000000..293f9dca0ed9b91b3ffdbec4dd4b56c6f3189c3c --- /dev/null +++ b/frontend/src/pages/ServersPage.tsx @@ -0,0 +1,144 @@ +import React, { useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useNavigate } from 'react-router-dom'; +import { Server } from '@/types'; +import ServerCard from '@/components/ServerCard'; +import AddServerForm from '@/components/AddServerForm'; +import EditServerForm from '@/components/EditServerForm'; +import { useServerData } from '@/hooks/useServerData'; + +const ServersPage: React.FC = () => { + const { t } = useTranslation(); + const navigate = useNavigate(); + const { + servers, + error, + setError, + isLoading, + handleServerAdd, + handleServerEdit, + handleServerRemove, + handleServerToggle, + triggerRefresh + } = useServerData(); + const [editingServer, setEditingServer] = useState(null); + const [isRefreshing, setIsRefreshing] = useState(false); + + const handleEditClick = async (server: Server) => { + const fullServerData = await handleServerEdit(server); + if (fullServerData) { + setEditingServer(fullServerData); + } + }; + + const handleEditComplete = () => { + setEditingServer(null); + triggerRefresh(); + }; + + const handleRefresh = async () => { + setIsRefreshing(true); + try { + triggerRefresh(); + // Add a slight delay to make the spinner visible + await new Promise(resolve => setTimeout(resolve, 500)); + } finally { + setIsRefreshing(false); + } + }; + + return ( +
+
+

{t('pages.servers.title')}

+
+ + + +
+
+ + {error && ( +
+
+
+

{t('app.error')}

+

{error}

+
+ +
+
+ )} + + {isLoading ? ( +
+
+ + + + +

{t('app.loading')}

+
+
+ ) : servers.length === 0 ? ( +
+

{t('app.noServers')}

+
+ ) : ( +
+ {servers.map((server, index) => ( + + ))} +
+ )} + + {editingServer && ( + setEditingServer(null)} + /> + )} +
+ ); +}; + +export default ServersPage; \ No newline at end of file diff --git a/frontend/src/pages/SettingsPage.tsx b/frontend/src/pages/SettingsPage.tsx new file mode 100644 index 0000000000000000000000000000000000000000..43500a26fa50e2e044a52f71e6a77de60917f01f --- /dev/null +++ b/frontend/src/pages/SettingsPage.tsx @@ -0,0 +1,524 @@ +import React, { useState, useEffect } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useNavigate } from 'react-router-dom'; +import ChangePasswordForm from '@/components/ChangePasswordForm'; +import { Switch } from '@/components/ui/ToggleGroup'; +import { useSettingsData } from '@/hooks/useSettingsData'; +import { useToast } from '@/contexts/ToastContext'; +import { generateRandomKey } from '@/utils/key'; + +const SettingsPage: React.FC = () => { + const { t, i18n } = useTranslation(); + const navigate = useNavigate(); + const { showToast } = useToast(); + const [currentLanguage, setCurrentLanguage] = useState(i18n.language); + + // Update current language when it changes + useEffect(() => { + setCurrentLanguage(i18n.language); + }, [i18n.language]); + + const [installConfig, setInstallConfig] = useState<{ + pythonIndexUrl: string; + npmRegistry: string; + }>({ + pythonIndexUrl: '', + npmRegistry: '', + }); + + const [tempSmartRoutingConfig, setTempSmartRoutingConfig] = useState<{ + dbUrl: string; + openaiApiBaseUrl: string; + openaiApiKey: string; + openaiApiEmbeddingModel: string; + }>({ + dbUrl: '', + openaiApiBaseUrl: '', + openaiApiKey: '', + openaiApiEmbeddingModel: '', + }); + + const { + routingConfig, + tempRoutingConfig, + setTempRoutingConfig, + installConfig: savedInstallConfig, + smartRoutingConfig, + loading, + updateRoutingConfig, + updateRoutingConfigBatch, + updateInstallConfig, + updateSmartRoutingConfig, + updateSmartRoutingConfigBatch + } = useSettingsData(); + + // Update local installConfig when savedInstallConfig changes + useEffect(() => { + if (savedInstallConfig) { + 
setInstallConfig(savedInstallConfig); + } + }, [savedInstallConfig]); + + // Update local tempSmartRoutingConfig when smartRoutingConfig changes + useEffect(() => { + if (smartRoutingConfig) { + setTempSmartRoutingConfig({ + dbUrl: smartRoutingConfig.dbUrl || '', + openaiApiBaseUrl: smartRoutingConfig.openaiApiBaseUrl || '', + openaiApiKey: smartRoutingConfig.openaiApiKey || '', + openaiApiEmbeddingModel: smartRoutingConfig.openaiApiEmbeddingModel || '', + }); + } + }, [smartRoutingConfig]); + + const [sectionsVisible, setSectionsVisible] = useState({ + routingConfig: false, + installConfig: false, + smartRoutingConfig: false, + password: false + }); + + const toggleSection = (section: 'routingConfig' | 'installConfig' | 'smartRoutingConfig' | 'password') => { + setSectionsVisible(prev => ({ + ...prev, + [section]: !prev[section] + })); + }; + + const handleRoutingConfigChange = async (key: 'enableGlobalRoute' | 'enableGroupNameRoute' | 'enableBearerAuth' | 'bearerAuthKey', value: boolean | string) => { + // If enableBearerAuth is turned on and there's no key, generate one first + if (key === 'enableBearerAuth' && value === true) { + if (!tempRoutingConfig.bearerAuthKey && !routingConfig.bearerAuthKey) { + const newKey = generateRandomKey(); + handleBearerAuthKeyChange(newKey); + + // Update both enableBearerAuth and bearerAuthKey in a single call + const success = await updateRoutingConfigBatch({ + enableBearerAuth: true, + bearerAuthKey: newKey + }); + + if (success) { + // Update tempRoutingConfig to reflect the saved values + setTempRoutingConfig(prev => ({ + ...prev, + bearerAuthKey: newKey + })); + } + return; + } + } + + await updateRoutingConfig(key, value); + }; + + const handleBearerAuthKeyChange = (value: string) => { + setTempRoutingConfig(prev => ({ + ...prev, + bearerAuthKey: value + })); + }; + + const saveBearerAuthKey = async () => { + await updateRoutingConfig('bearerAuthKey', tempRoutingConfig.bearerAuthKey); + }; + + const 
handleInstallConfigChange = (key: 'pythonIndexUrl' | 'npmRegistry', value: string) => { + setInstallConfig({ + ...installConfig, + [key]: value + }); + }; + + const saveInstallConfig = async (key: 'pythonIndexUrl' | 'npmRegistry') => { + await updateInstallConfig(key, installConfig[key]); + }; + + const handleSmartRoutingConfigChange = (key: 'dbUrl' | 'openaiApiBaseUrl' | 'openaiApiKey' | 'openaiApiEmbeddingModel', value: string) => { + setTempSmartRoutingConfig({ + ...tempSmartRoutingConfig, + [key]: value + }); + }; + + const saveSmartRoutingConfig = async (key: 'dbUrl' | 'openaiApiBaseUrl' | 'openaiApiKey' | 'openaiApiEmbeddingModel') => { + await updateSmartRoutingConfig(key, tempSmartRoutingConfig[key]); + }; + + const handleSmartRoutingEnabledChange = async (value: boolean) => { + // If enabling Smart Routing, validate required fields and save any unsaved changes + if (value) { + const currentDbUrl = tempSmartRoutingConfig.dbUrl || smartRoutingConfig.dbUrl; + const currentOpenaiApiKey = tempSmartRoutingConfig.openaiApiKey || smartRoutingConfig.openaiApiKey; + + if (!currentDbUrl || !currentOpenaiApiKey) { + const missingFields = []; + if (!currentDbUrl) missingFields.push(t('settings.dbUrl')); + if (!currentOpenaiApiKey) missingFields.push(t('settings.openaiApiKey')); + + showToast(t('settings.smartRoutingValidationError', { + fields: missingFields.join(', ') + })); + return; + } + + // Prepare updates object with unsaved changes and enabled status + const updates: any = { enabled: value }; + + // Check for unsaved changes and include them in the batch update + if (tempSmartRoutingConfig.dbUrl !== smartRoutingConfig.dbUrl) { + updates.dbUrl = tempSmartRoutingConfig.dbUrl; + } + if (tempSmartRoutingConfig.openaiApiBaseUrl !== smartRoutingConfig.openaiApiBaseUrl) { + updates.openaiApiBaseUrl = tempSmartRoutingConfig.openaiApiBaseUrl; + } + if (tempSmartRoutingConfig.openaiApiKey !== smartRoutingConfig.openaiApiKey) { + updates.openaiApiKey = 
tempSmartRoutingConfig.openaiApiKey; + } + if (tempSmartRoutingConfig.openaiApiEmbeddingModel !== smartRoutingConfig.openaiApiEmbeddingModel) { + updates.openaiApiEmbeddingModel = tempSmartRoutingConfig.openaiApiEmbeddingModel; + } + + // Save all changes in a single batch update + await updateSmartRoutingConfigBatch(updates); + } else { + // If disabling, just update the enabled status + await updateSmartRoutingConfig('enabled', value); + } + }; + + const handlePasswordChangeSuccess = () => { + setTimeout(() => { + navigate('/'); + }, 2000); + }; + + const handleLanguageChange = (lang: string) => { + localStorage.setItem('i18nextLng', lang); + window.location.reload(); + }; + + return ( +
+

{t('pages.settings.title')}

+ + {/* Language Settings */} +
+
+

{t('pages.settings.language')}

+
+ + +
+
+
+ + {/* Smart Routing Configuration Settings */} +
+
toggleSection('smartRoutingConfig')} + > +

{t('pages.settings.smartRouting')}

+ + {sectionsVisible.smartRoutingConfig ? '▼' : '►'} + +
+ + {sectionsVisible.smartRoutingConfig && ( +
+
+
+

{t('settings.enableSmartRouting')}

+

{t('settings.enableSmartRoutingDescription')}

+
+ handleSmartRoutingEnabledChange(checked)} + /> +
+ +
+
+

+ *{t('settings.dbUrl')} +

+
+
+ handleSmartRoutingConfigChange('dbUrl', e.target.value)} + placeholder={t('settings.dbUrlPlaceholder')} + className="flex-1 mt-1 block w-full py-2 px-3 border rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 sm:text-sm border-gray-300" + disabled={loading} + /> + +
+
+ +
+
+

+ *{t('settings.openaiApiKey')} +

+
+
+ handleSmartRoutingConfigChange('openaiApiKey', e.target.value)} + placeholder={t('settings.openaiApiKeyPlaceholder')} + className="flex-1 mt-1 block w-full py-2 px-3 border rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 sm:text-sm border-gray-300" + disabled={loading} + /> + +
+
+ +
+
+

{t('settings.openaiApiBaseUrl')}

+
+
+ handleSmartRoutingConfigChange('openaiApiBaseUrl', e.target.value)} + placeholder={t('settings.openaiApiBaseUrlPlaceholder')} + className="flex-1 mt-1 block w-full py-2 px-3 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 sm:text-sm" + disabled={loading} + /> + +
+
+ +
+
+

{t('settings.openaiApiEmbeddingModel')}

+
+
+ handleSmartRoutingConfigChange('openaiApiEmbeddingModel', e.target.value)} + placeholder={t('settings.openaiApiEmbeddingModelPlaceholder')} + className="flex-1 mt-1 block w-full py-2 px-3 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 sm:text-sm" + disabled={loading} + /> + +
+
+
+ )} +
+ + {/* Route Configuration Settings */} +
+
toggleSection('routingConfig')} + > +

{t('pages.settings.routeConfig')}

+ + {sectionsVisible.routingConfig ? '▼' : '►'} + +
+ + {sectionsVisible.routingConfig && ( +
+
+
+

{t('settings.enableBearerAuth')}

+

{t('settings.enableBearerAuthDescription')}

+
+ handleRoutingConfigChange('enableBearerAuth', checked)} + /> +
+ + {routingConfig.enableBearerAuth && ( +
+
+

{t('settings.bearerAuthKey')}

+

{t('settings.bearerAuthKeyDescription')}

+
+
+ handleBearerAuthKeyChange(e.target.value)} + placeholder={t('settings.bearerAuthKeyPlaceholder')} + className="flex-1 mt-1 block w-full py-2 px-3 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 sm:text-sm" + disabled={loading || !routingConfig.enableBearerAuth} + /> + +
+
+ )} + +
+
+

{t('settings.enableGlobalRoute')}

+

{t('settings.enableGlobalRouteDescription')}

+
+ handleRoutingConfigChange('enableGlobalRoute', checked)} + /> +
+ +
+
+

{t('settings.enableGroupNameRoute')}

+

{t('settings.enableGroupNameRouteDescription')}

+
+ handleRoutingConfigChange('enableGroupNameRoute', checked)} + /> +
+ +
+ )} +
+ + {/* Installation Configuration Settings */} +
+
toggleSection('installConfig')} + > +

{t('settings.installConfig')}

+ + {sectionsVisible.installConfig ? '▼' : '►'} + +
+ + {sectionsVisible.installConfig && ( +
+
+
+

{t('settings.pythonIndexUrl')}

+

{t('settings.pythonIndexUrlDescription')}

+
+
+ handleInstallConfigChange('pythonIndexUrl', e.target.value)} + placeholder={t('settings.pythonIndexUrlPlaceholder')} + className="flex-1 mt-1 block w-full py-2 px-3 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 sm:text-sm" + disabled={loading} + /> + +
+
+ +
+
+

{t('settings.npmRegistry')}

+

{t('settings.npmRegistryDescription')}

+
+
+ handleInstallConfigChange('npmRegistry', e.target.value)} + placeholder={t('settings.npmRegistryPlaceholder')} + className="flex-1 mt-1 block w-full py-2 px-3 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 sm:text-sm" + disabled={loading} + /> + +
+
+
+ )} +
+ + {/* Change Password */} +
+
toggleSection('password')} + > +

{t('auth.changePassword')}

+ + {sectionsVisible.password ? '▼' : '►'} + +
+ + {sectionsVisible.password && ( +
+ +
+ )} +
+
+ ); +}; + +export default SettingsPage; \ No newline at end of file diff --git a/frontend/src/services/authService.ts b/frontend/src/services/authService.ts new file mode 100644 index 0000000000000000000000000000000000000000..0efa3408d7d80afb3368e7e87d28826a12005d4b --- /dev/null +++ b/frontend/src/services/authService.ts @@ -0,0 +1,147 @@ +import { + AuthResponse, + LoginCredentials, + RegisterCredentials, + ChangePasswordCredentials, +} from '../types'; +import { getApiUrl } from '../utils/runtime'; + +// Token key in localStorage +const TOKEN_KEY = 'mcphub_token'; + +// Get token from localStorage +export const getToken = (): string | null => { + return localStorage.getItem(TOKEN_KEY); +}; + +// Set token in localStorage +export const setToken = (token: string): void => { + localStorage.setItem(TOKEN_KEY, token); +}; + +// Remove token from localStorage +export const removeToken = (): void => { + localStorage.removeItem(TOKEN_KEY); +}; + +// Login user +export const login = async (credentials: LoginCredentials): Promise => { + try { + console.log(getApiUrl('/auth/login')); + const response = await fetch(getApiUrl('/auth/login'), { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(credentials), + }); + + const data: AuthResponse = await response.json(); + + if (data.success && data.token) { + setToken(data.token); + } + + return data; + } catch (error) { + console.error('Login error:', error); + return { + success: false, + message: 'An error occurred during login', + }; + } +}; + +// Register user +export const register = async (credentials: RegisterCredentials): Promise => { + try { + const response = await fetch(getApiUrl('/auth/register'), { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(credentials), + }); + + const data: AuthResponse = await response.json(); + + if (data.success && data.token) { + setToken(data.token); + } + + return data; + } catch (error) { + 
console.error('Register error:', error); + return { + success: false, + message: 'An error occurred during registration', + }; + } +}; + +// Get current user +export const getCurrentUser = async (): Promise => { + const token = getToken(); + + if (!token) { + return { + success: false, + message: 'No authentication token', + }; + } + + try { + const response = await fetch(getApiUrl('/auth/user'), { + method: 'GET', + headers: { + 'x-auth-token': token, + }, + }); + + return await response.json(); + } catch (error) { + console.error('Get current user error:', error); + return { + success: false, + message: 'An error occurred while fetching user data', + }; + } +}; + +// Change password +export const changePassword = async ( + credentials: ChangePasswordCredentials, +): Promise => { + const token = getToken(); + + if (!token) { + return { + success: false, + message: 'No authentication token', + }; + } + + try { + const response = await fetch(getApiUrl('/auth/change-password'), { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token, + }, + body: JSON.stringify(credentials), + }); + + return await response.json(); + } catch (error) { + console.error('Change password error:', error); + return { + success: false, + message: 'An error occurred while changing password', + }; + } +}; + +// Logout user +export const logout = (): void => { + removeToken(); +}; diff --git a/frontend/src/services/logService.ts b/frontend/src/services/logService.ts new file mode 100644 index 0000000000000000000000000000000000000000..e4912272528ba3dacbf1b9e90f2e76e40c92df83 --- /dev/null +++ b/frontend/src/services/logService.ts @@ -0,0 +1,153 @@ +import { useEffect, useState } from 'react'; +import { getToken } from './authService'; // Import getToken function +import { getApiUrl } from '../utils/runtime'; + +export interface LogEntry { + timestamp: number; + type: 'info' | 'error' | 'warn' | 'debug'; + source: string; + message: string; + processId?: 
string; +} + +// Fetch all logs +export const fetchLogs = async (): Promise => { + try { + // Get authentication token + const token = getToken(); + if (!token) { + throw new Error('Authentication token not found. Please log in.'); + } + + const response = await fetch(getApiUrl('/logs'), { + headers: { + 'x-auth-token': token, + }, + }); + + const result = await response.json(); + + if (!result.success) { + throw new Error(result.error || 'Failed to fetch logs'); + } + + return result.data; + } catch (error) { + console.error('Error fetching logs:', error); + throw error; + } +}; + +// Clear all logs +export const clearLogs = async (): Promise => { + try { + // Get authentication token + const token = getToken(); + if (!token) { + throw new Error('Authentication token not found. Please log in.'); + } + + const response = await fetch(getApiUrl('/logs'), { + method: 'DELETE', + headers: { + 'x-auth-token': token, + }, + }); + + const result = await response.json(); + + if (!result.success) { + throw new Error(result.error || 'Failed to clear logs'); + } + } catch (error) { + console.error('Error clearing logs:', error); + throw error; + } +}; + +// Hook to use logs with SSE streaming +export const useLogs = () => { + const [logs, setLogs] = useState([]); + const [loading, setLoading] = useState(true); + const [error, setError] = useState(null); + + useEffect(() => { + let eventSource: EventSource | null = null; + let isMounted = true; + + const connectToLogStream = () => { + try { + // Close existing connection if any + if (eventSource) { + eventSource.close(); + } + + // Get the authentication token + const token = getToken(); + if (!token) { + setError(new Error('Authentication token not found. 
Please log in.')); + setLoading(false); + return; + } + + // Connect to SSE endpoint with auth token in URL + eventSource = new EventSource(getApiUrl(`/logs/stream?token=${token}`)); + + eventSource.onmessage = (event) => { + if (!isMounted) return; + + try { + const data = JSON.parse(event.data); + + if (data.type === 'initial') { + setLogs(data.logs); + setLoading(false); + } else if (data.type === 'log') { + setLogs((prevLogs) => [...prevLogs, data.log]); + } + } catch (err) { + console.error('Error parsing SSE message:', err); + } + }; + + eventSource.onerror = () => { + if (!isMounted) return; + + if (eventSource) { + eventSource.close(); + // Attempt to reconnect after a delay + setTimeout(connectToLogStream, 5000); + } + + setError(new Error('Connection to log stream lost, attempting to reconnect...')); + }; + } catch (err) { + if (!isMounted) return; + setError(err instanceof Error ? err : new Error('Failed to connect to log stream')); + setLoading(false); + } + }; + + // Initial connection + connectToLogStream(); + + // Cleanup on unmount + return () => { + isMounted = false; + if (eventSource) { + eventSource.close(); + } + }; + }, []); + + const clearAllLogs = async () => { + try { + await clearLogs(); + setLogs([]); + } catch (err) { + setError(err instanceof Error ? 
err : new Error('Failed to clear logs')); + } + }; + + return { logs, loading, error, clearLogs: clearAllLogs }; +}; diff --git a/frontend/src/services/toolService.ts b/frontend/src/services/toolService.ts new file mode 100644 index 0000000000000000000000000000000000000000..be0f7b97a10c889ec42bd4e6ff88be0bb202f962 --- /dev/null +++ b/frontend/src/services/toolService.ts @@ -0,0 +1,72 @@ +import { getApiUrl } from '../utils/runtime'; +import { getToken } from './authService'; + +export interface ToolCallRequest { + toolName: string; + arguments?: Record; +} + +export interface ToolCallResult { + success: boolean; + content?: Array<{ + type: string; + text?: string; + [key: string]: any; + }>; + error?: string; + message?: string; +} + +/** + * Call a MCP tool via the call_tool API + */ +export const callTool = async ( + request: ToolCallRequest, + server?: string, +): Promise => { + try { + const token = getToken(); + if (!token) { + throw new Error('Authentication token not found. Please log in.'); + } + + // Construct the URL with optional server parameter + const url = server ? `/tools/call/${server}` : '/tools/call'; + + const response = await fetch(getApiUrl(url), { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-auth-token': token, + Authorization: `Bearer ${token}`, // Add bearer auth for MCP routing + }, + body: JSON.stringify({ + toolName: request.toolName, + arguments: request.arguments, + }), + }); + + if (!response.ok) { + throw new Error(`HTTP ${response.status}: ${response.statusText}`); + } + + const data = await response.json(); + if (!data.success) { + return { + success: false, + error: data.message || 'Tool call failed', + }; + } + + return { + success: true, + content: data.data.content || [], + }; + } catch (error) { + console.error('Error calling tool:', error); + return { + success: false, + error: error instanceof Error ? 
error.message : 'Unknown error occurred', + }; + } +}; diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..a23901eb616bba854c6c9775fdc798deeed0aa43 --- /dev/null +++ b/frontend/src/types/index.ts @@ -0,0 +1,165 @@ +// Server status types +export type ServerStatus = 'connecting' | 'connected' | 'disconnected'; + +// Market server types +export interface MarketServerRepository { + type: string; + url: string; +} + +export interface MarketServerAuthor { + name: string; +} + +export interface MarketServerInstallation { + type: string; + command: string; + args: string[]; + env?: Record; +} + +export interface MarketServerArgument { + description: string; + required: boolean; + example: string; +} + +export interface MarketServerExample { + title: string; + description: string; + prompt: string; +} + +export interface MarketServerTool { + name: string; + description: string; + inputSchema: Record; +} + +export interface MarketServer { + name: string; + display_name: string; + description: string; + repository: MarketServerRepository; + homepage: string; + author: MarketServerAuthor; + license: string; + categories: string[]; + tags: string[]; + examples: MarketServerExample[]; + installations: { + [key: string]: MarketServerInstallation; + }; + arguments: Record; + tools: MarketServerTool[]; + is_official?: boolean; +} + +// Tool input schema types +export interface ToolInputSchema { + type: string; + properties?: Record; + required?: string[]; +} + +// Tool types +export interface Tool { + name: string; + description: string; + inputSchema: ToolInputSchema; +} + +// Server config types +export interface ServerConfig { + type?: 'stdio' | 'sse' | 'streamable-http'; + url?: string; + command?: string; + args?: string[]; + env?: Record; + headers?: Record; + enabled?: boolean; +} + +// Server types +export interface Server { + name: string; + status: ServerStatus; + error?: string; + 
tools?: Tool[]; + config?: ServerConfig; + enabled?: boolean; +} + +// Group types +export interface Group { + id: string; + name: string; + description?: string; + servers: string[]; +} + +// Environment variable types +export interface EnvVar { + key: string; + value: string; +} + +// Form data types +export interface ServerFormData { + name: string; + url: string; + command: string; + arguments: string; + args?: string[]; // Added explicit args field + type?: 'stdio' | 'sse' | 'streamable-http'; // Added type field + env: EnvVar[]; + headers: EnvVar[]; +} + +// Group form data types +export interface GroupFormData { + name: string; + description: string; + servers: string[]; // Added servers array to include in form data +} + +// API response types +export interface ApiResponse { + success: boolean; + message?: string; + data?: T; +} + +// Auth types +export interface IUser { + username: string; + isAdmin?: boolean; +} + +export interface AuthState { + isAuthenticated: boolean; + user: IUser | null; + loading: boolean; + error: string | null; +} + +export interface LoginCredentials { + username: string; + password: string; +} + +export interface RegisterCredentials extends LoginCredentials { + isAdmin?: boolean; +} + +export interface ChangePasswordCredentials { + currentPassword: string; + newPassword: string; +} + +export interface AuthResponse { + success: boolean; + token?: string; + user?: IUser; + message?: string; +} diff --git a/frontend/src/types/runtime.ts b/frontend/src/types/runtime.ts new file mode 100644 index 0000000000000000000000000000000000000000..2d8685710e62a08882fe0ee82dea0a1a845f55ae --- /dev/null +++ b/frontend/src/types/runtime.ts @@ -0,0 +1,15 @@ +// Global runtime configuration interface +export interface RuntimeConfig { + basePath: string; + version: string; + name: string; +} + +// Extend Window interface to include runtime config +declare global { + interface Window { + __MCPHUB_CONFIG__?: RuntimeConfig; + } +} + +export {}; diff 
--git a/frontend/src/utils/api.ts b/frontend/src/utils/api.ts new file mode 100644 index 0000000000000000000000000000000000000000..e111cb10d4e7fe00f51085c9999f4699d1e98794 --- /dev/null +++ b/frontend/src/utils/api.ts @@ -0,0 +1,28 @@ +/** + * API utility functions for constructing URLs with proper base path support + * + * @deprecated Use functions from utils/runtime.ts instead for runtime configuration support + */ + +import { getApiBaseUrl as getRuntimeApiBaseUrl, getApiUrl as getRuntimeApiUrl } from './runtime'; + +/** + * Get the API base URL including base path and /api prefix + * @returns The complete API base URL + * @deprecated Use getApiBaseUrl from utils/runtime.ts instead + */ +export const getApiBaseUrl = (): string => { + console.warn('getApiBaseUrl from utils/api.ts is deprecated, use utils/runtime.ts instead'); + return getRuntimeApiBaseUrl(); +}; + +/** + * Construct a full API URL with the given endpoint + * @param endpoint - The API endpoint (should start with /, e.g., '/auth/login') + * @returns The complete API URL + * @deprecated Use getApiUrl from utils/runtime.ts instead + */ +export const getApiUrl = (endpoint: string): string => { + console.warn('getApiUrl from utils/api.ts is deprecated, use utils/runtime.ts instead'); + return getRuntimeApiUrl(endpoint); +}; diff --git a/frontend/src/utils/cn.ts b/frontend/src/utils/cn.ts new file mode 100644 index 0000000000000000000000000000000000000000..dcb15b8158d1d199466c492b34f6e3f3f35f6dbb --- /dev/null +++ b/frontend/src/utils/cn.ts @@ -0,0 +1,10 @@ +import { ClassValue, clsx } from 'clsx'; +import { twMerge } from 'tailwind-merge'; + +/** + * Combines multiple class names and deduplicates Tailwind CSS classes + * This is a utility function for conditionally joining class names together + */ +export function cn(...inputs: ClassValue[]) { + return twMerge(clsx(inputs)); +} \ No newline at end of file diff --git a/frontend/src/utils/key.ts b/frontend/src/utils/key.ts new file mode 100644 index 
0000000000000000000000000000000000000000..4284863319fce652427faec12fdf7168e89745aa --- /dev/null +++ b/frontend/src/utils/key.ts @@ -0,0 +1,8 @@ +export function generateRandomKey(length: number = 32): string { + const characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; + const array = new Uint8Array(length); + crypto.getRandomValues(array); + return Array.from(array) + .map((x) => characters.charAt(x % characters.length)) + .join(''); +} diff --git a/frontend/src/utils/runtime.ts b/frontend/src/utils/runtime.ts new file mode 100644 index 0000000000000000000000000000000000000000..c3ea4e742b99283864c998d137d0d0167fd29498 --- /dev/null +++ b/frontend/src/utils/runtime.ts @@ -0,0 +1,105 @@ +import type { RuntimeConfig } from '../types/runtime'; + +/** + * Get runtime configuration from window object + */ +export const getRuntimeConfig = (): RuntimeConfig => { + return ( + window.__MCPHUB_CONFIG__ || { + basePath: '', + version: 'dev', + name: 'mcphub', + } + ); +}; + +/** + * Get the base path from runtime configuration + */ +export const getBasePath = (): string => { + const config = getRuntimeConfig(); + const basePath = config.basePath || ''; + + // Ensure the path starts with / if it's not empty and doesn't already start with / + if (basePath && !basePath.startsWith('/')) { + return '/' + basePath; + } + return basePath; +}; + +/** + * Get the API base URL including base path and /api prefix + */ +export const getApiBaseUrl = (): string => { + const basePath = getBasePath(); + // Always append /api to the base path for API endpoints + return basePath + '/api'; +}; + +/** + * Construct a full API URL with the given endpoint + */ +export const getApiUrl = (endpoint: string): string => { + const baseUrl = getApiBaseUrl(); + // Ensure endpoint starts with / + const normalizedEndpoint = endpoint.startsWith('/') ? 
endpoint : '/' + endpoint; + return baseUrl + normalizedEndpoint; +}; + +/** + * Load runtime configuration from server + */ +export const loadRuntimeConfig = async (): Promise => { + try { + // For initial config load, we need to determine the correct path + // Try different possible paths based on current location + const currentPath = window.location.pathname; + const possibleConfigPaths = [ + // If we're already on a subpath, try to use it + currentPath.replace(/\/[^\/]*$/, '') + '/config', + // Try root config + '/config', + // Try with potential base paths + ...(currentPath.includes('/') + ? [currentPath.split('/')[1] ? `/${currentPath.split('/')[1]}/config` : '/config'] + : ['/config']), + ]; + + for (const configPath of possibleConfigPaths) { + try { + const response = await fetch(configPath, { + method: 'GET', + headers: { + Accept: 'application/json', + 'Cache-Control': 'no-cache', + }, + }); + + if (response.ok) { + const data = await response.json(); + if (data.success && data.data) { + return data.data; + } + } + } catch (error) { + // Continue to next path + console.debug(`Failed to load config from ${configPath}:`, error); + } + } + + // Fallback to default config + console.warn('Could not load runtime config from server, using defaults'); + return { + basePath: '', + version: 'dev', + name: 'mcphub', + }; + } catch (error) { + console.error('Error loading runtime config:', error); + return { + basePath: '', + version: 'dev', + name: 'mcphub', + }; + } +}; diff --git a/frontend/src/utils/version.ts b/frontend/src/utils/version.ts new file mode 100644 index 0000000000000000000000000000000000000000..2e4959e1901a0e27999256a0e309af1098f21518 --- /dev/null +++ b/frontend/src/utils/version.ts @@ -0,0 +1,32 @@ +const NPM_REGISTRY = 'https://registry.npmjs.org'; +const PACKAGE_NAME = '@samanhappy/mcphub'; + +export const checkLatestVersion = async (): Promise => { + try { + const response = await fetch(`${NPM_REGISTRY}/${PACKAGE_NAME}/latest`); + if 
(!response.ok) { + throw new Error(`Failed to fetch latest version: ${response.status}`); + } + const data = await response.json(); + return data.version || null; + } catch (error) { + console.error('Error checking for latest version:', error); + return null; + } +}; + +export const compareVersions = (current: string, latest: string): number => { + if (current === 'dev') return -1; + const currentParts = current.split('.').map(Number); + const latestParts = latest.split('.').map(Number); + + for (let i = 0; i < 3; i++) { + const currentPart = currentParts[i] || 0; + const latestPart = latestParts[i] || 0; + + if (currentPart > latestPart) return -1; + if (currentPart < latestPart) return 1; + } + + return 0; +}; diff --git a/frontend/src/vite-env.d.ts b/frontend/src/vite-env.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..07e677cb527b9776a1c7506eb844c00ad7cb3bfe --- /dev/null +++ b/frontend/src/vite-env.d.ts @@ -0,0 +1,9 @@ +/// + +interface ImportMeta { + readonly env: { + readonly PACKAGE_VERSION: string; + // Add other custom env variables here if needed + [key: string]: any; + }; +} diff --git a/frontend/tailwind.config.js b/frontend/tailwind.config.js new file mode 100644 index 0000000000000000000000000000000000000000..9b004be811c54e929a33735e209e7a39fff40e7f --- /dev/null +++ b/frontend/tailwind.config.js @@ -0,0 +1,12 @@ +/** @type {import('tailwindcss').Config} */ +export default { + content: [ + "./index.html", + "./src/**/*.{js,ts,jsx,tsx}", + ], + darkMode: 'class', // Use class strategy for dark mode + theme: { + extend: {}, + }, + plugins: [], +} \ No newline at end of file diff --git a/frontend/tsconfig.json b/frontend/tsconfig.json new file mode 100644 index 0000000000000000000000000000000000000000..e91ebaa8c9fe67a7d2d4610386d40a56b3c9a561 --- /dev/null +++ b/frontend/tsconfig.json @@ -0,0 +1,31 @@ +{ + "compilerOptions": { + "target": "ES2020", + "useDefineForClassFields": true, + "lib": ["ES2020", "DOM", "DOM.Iterable"], + 
"module": "ESNext", + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx", + + /* Linting */ + "strict": true, + "noUnusedLocals": false, + "noUnusedParameters": false, + "noFallthroughCasesInSwitch": true, + + /* Paths */ + "baseUrl": ".", + "paths": { + "@/*": ["./src/*"] + } + }, + "include": ["src"], + "references": [{ "path": "./tsconfig.node.json" }] +} \ No newline at end of file diff --git a/frontend/tsconfig.node.json b/frontend/tsconfig.node.json new file mode 100644 index 0000000000000000000000000000000000000000..099658cf3d29c0c21bc9b61d0a8b02652ddb92a9 --- /dev/null +++ b/frontend/tsconfig.node.json @@ -0,0 +1,10 @@ +{ + "compilerOptions": { + "composite": true, + "skipLibCheck": true, + "module": "ESNext", + "moduleResolution": "bundler", + "allowSyntheticDefaultImports": true + }, + "include": ["vite.config.ts"] +} \ No newline at end of file diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts new file mode 100644 index 0000000000000000000000000000000000000000..55720f96802e7c024714a19f979838d80c4b1752 --- /dev/null +++ b/frontend/vite.config.ts @@ -0,0 +1,44 @@ +import { defineConfig } from 'vite'; +import react from '@vitejs/plugin-react'; +import path from 'path'; +import tailwindcss from '@tailwindcss/vite'; +// Import the package.json to get the version +import { readFileSync } from 'fs'; + +// Get package.json version +const packageJson = JSON.parse(readFileSync(path.resolve(__dirname, '../package.json'), 'utf-8')); + +// For runtime configuration, we'll always use relative paths +// BASE_PATH will be determined at runtime +const basePath = ''; + +// https://vitejs.dev/config/ +export default defineConfig({ + base: './', // Always use relative paths for runtime configuration + plugins: [react(), tailwindcss()], + resolve: { + alias: { + '@': path.resolve(__dirname, './src'), + }, 
+ }, + define: { + // Make package version available as global variable + // BASE_PATH will be loaded at runtime + 'import.meta.env.PACKAGE_VERSION': JSON.stringify(packageJson.version), + }, + build: { + sourcemap: true, // Enable source maps for production build + }, + server: { + proxy: { + [`${basePath}/api`]: { + target: 'http://localhost:3000', + changeOrigin: true, + }, + [`${basePath}/auth`]: { + target: 'http://localhost:3000', + changeOrigin: true, + }, + }, + }, +}); diff --git a/googled76ca578b6543fbc.html b/googled76ca578b6543fbc.html new file mode 100644 index 0000000000000000000000000000000000000000..ec9ceaa9d9f164cf7d82ac46b2bf9b764961bcc5 --- /dev/null +++ b/googled76ca578b6543fbc.html @@ -0,0 +1 @@ +google-site-verification: googled76ca578b6543fbc.html diff --git a/jest.config.js b/jest.config.js new file mode 100644 index 0000000000000000000000000000000000000000..df902983ea14c2c65f71d800d8472ea05b644ce1 --- /dev/null +++ b/jest.config.js @@ -0,0 +1,10 @@ +module.exports = { + preset: 'ts-jest', + testEnvironment: 'node', + roots: ['/src'], + transform: { + '^.+\\.tsx?$': 'ts-jest', + }, + testRegex: '(/__tests__/.*|(\\.|/)(test|spec))\\.tsx?$', + moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'], +}; diff --git a/mcp_settings.json b/mcp_settings.json new file mode 100644 index 0000000000000000000000000000000000000000..c7c15b1a742623041e25e5b36136980f1af711a9 --- /dev/null +++ b/mcp_settings.json @@ -0,0 +1,45 @@ +{ + "mcpServers": { + "amap": { + "command": "npx", + "args": [ + "-y", + "@amap/amap-maps-mcp-server" + ], + "env": { + "AMAP_MAPS_API_KEY": "your-api-key" + } + }, + "playwright": { + "command": "npx", + "args": [ + "@playwright/mcp@latest", + "--headless" + ] + }, + "fetch": { + "command": "uvx", + "args": [ + "mcp-server-fetch" + ] + }, + "slack": { + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-slack" + ], + "env": { + "SLACK_BOT_TOKEN": "your-bot-token", + "SLACK_TEAM_ID": "your-team-id" + } 
+ } + }, + "users": [ + { + "username": "admin", + "password": "$2b$10$Vt7krIvjNgyN67LXqly0uOcTpN0LI55cYRbcKC71pUDAP0nJ7RPa.", + "isAdmin": true + } + ] +} \ No newline at end of file diff --git a/nginx.conf.example b/nginx.conf.example new file mode 100644 index 0000000000000000000000000000000000000000..780a7f8fa953cd65cdb54d177b10e4a47ed8912a --- /dev/null +++ b/nginx.conf.example @@ -0,0 +1,72 @@ +# Nginx configuration example for MCPHub with subpath routing +# This example shows how to deploy MCPHub under a subpath like /mcphub + +server { + listen 80; + server_name your-domain.com; + + # MCPHub under /mcphub subpath + location /mcphub/ { + # Remove the subpath prefix before forwarding to MCPHub + rewrite ^/mcphub/(.*)$ /$1 break; + + proxy_pass http://mcphub:3000/; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + + # Important: Disable buffering for SSE connections + proxy_buffering off; + proxy_cache off; + + # Support for Server-Sent Events (SSE) + proxy_read_timeout 24h; + proxy_send_timeout 24h; + } + + # Alternative configuration if you want to keep the subpath in the backend + # In this case, set BASE_PATH=/mcphub + # location /mcphub/ { + # proxy_pass http://mcphub:3000/mcphub/; + # proxy_http_version 1.1; + # proxy_set_header Upgrade $http_upgrade; + # proxy_set_header Connection 'upgrade'; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # proxy_cache_bypass $http_upgrade; + # + # # Important: Disable buffering for SSE connections + # proxy_buffering off; + # proxy_cache off; + # + # # Support for Server-Sent Events (SSE) + # 
proxy_read_timeout 24h; + # proxy_send_timeout 24h; + # } +} + +# Docker Compose example with subpath +# version: '3.8' +# services: +# mcphub: +# image: samanhappy/mcphub +# environment: +# - BASE_PATH=/mcphub +# volumes: +# - ./mcp_settings.json:/app/mcp_settings.json +# +# nginx: +# image: nginx:alpine +# ports: +# - "80:80" +# volumes: +# - ./nginx.conf:/etc/nginx/conf.d/default.conf +# depends_on: +# - mcphub diff --git a/package.json b/package.json new file mode 100644 index 0000000000000000000000000000000000000000..0c5db387d535f3deb8e02f5b17a29625cdd2524e --- /dev/null +++ b/package.json @@ -0,0 +1,107 @@ +{ + "name": "@samanhappy/mcphub", + "version": "dev", + "description": "A hub server for mcp servers", + "main": "dist/index.js", + "type": "module", + "bin": { + "mcphub": "bin/cli.js" + }, + "files": [ + "dist", + "bin", + "mcp_settings.json", + "servers.json", + "frontend/dist", + "README.md", + "LICENSE" + ], + "scripts": { + "build": "pnpm backend:build && pnpm frontend:build", + "backend:build": "tsc", + "start": "node dist/index.js", + "backend:dev": "tsx watch src/index.ts", + "backend:debug": "tsx watch src/index.ts --inspect", + "lint": "eslint . 
--ext .ts", + "format": "prettier --write \"src/**/*.ts\"", + "test": "jest", + "frontend:dev": "cd frontend && vite", + "frontend:build": "cd frontend && vite build", + "frontend:preview": "cd frontend && vite preview", + "dev": "concurrently \"pnpm backend:dev\" \"pnpm frontend:dev\"", + "debug": "concurrently \"pnpm backend:debug\" \"pnpm frontend:dev\"", + "prepublishOnly": "npm run build && node scripts/verify-dist.js" + }, + "keywords": [ + "typescript", + "server", + "mcp", + "model context protocol" + ], + "author": "", + "license": "ISC", + "dependencies": { + "@modelcontextprotocol/sdk": "^1.11.1", + "@types/pg": "^8.15.2", + "bcryptjs": "^3.0.2", + "dotenv": "^16.3.1", + "dotenv-expand": "^12.0.2", + "express": "^4.21.2", + "express-validator": "^7.2.1", + "jsonwebtoken": "^9.0.2", + "openai": "^4.103.0", + "pg": "^8.16.0", + "pgvector": "^0.2.1", + "postgres": "^3.4.7", + "reflect-metadata": "^0.2.2", + "typeorm": "^0.3.24", + "uuid": "^11.1.0" + }, + "devDependencies": { + "@radix-ui/react-accordion": "^1.2.3", + "@radix-ui/react-slot": "^1.1.2", + "@shadcn/ui": "^0.0.4", + "@tailwindcss/postcss": "^4.1.3", + "@tailwindcss/vite": "^4.1.7", + "@types/bcryptjs": "^3.0.0", + "@types/express": "^4.17.21", + "@types/jest": "^29.5.5", + "@types/jsonwebtoken": "^9.0.9", + "@types/node": "^22.15.21", + "@types/react": "^19.0.12", + "@types/react-dom": "^19.0.4", + "@types/uuid": "^10.0.0", + "@typescript-eslint/eslint-plugin": "^6.7.4", + "@typescript-eslint/parser": "^6.7.4", + "@vitejs/plugin-react": "^4.4.1", + "autoprefixer": "^10.4.21", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "concurrently": "^9.1.2", + "eslint": "^8.50.0", + "i18next": "^24.2.3", + "i18next-browser-languagedetector": "^8.0.4", + "jest": "^29.7.0", + "lucide-react": "^0.486.0", + "next": "^15.2.4", + "postcss": "^8.5.3", + "prettier": "^3.0.3", + "react": "^19.1.0", + "react-dom": "^19.1.0", + "react-i18next": "^15.4.1", + "react-router-dom": "^7.6.0", + 
"tailwind-merge": "^3.1.0", + "tailwind-scrollbar-hide": "^2.0.0", + "tailwindcss": "^4.0.17", + "ts-jest": "^29.1.1", + "ts-node-dev": "^2.0.0", + "tsx": "^4.7.0", + "typescript": "^5.2.2", + "vite": "^6.3.5", + "zod": "^3.24.2" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "packageManager": "pnpm@10.11.0+sha256.a69e9cb077da419d47d18f1dd52e207245b29cac6e076acedbeb8be3b1a67bd7" +} \ No newline at end of file diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml new file mode 100644 index 0000000000000000000000000000000000000000..40409e8b331d646011061c4f356ad607b8a9cc1e --- /dev/null +++ b/pnpm-lock.yaml @@ -0,0 +1,7721 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + '@modelcontextprotocol/sdk': + specifier: ^1.11.1 + version: 1.12.1 + '@types/pg': + specifier: ^8.15.2 + version: 8.15.4 + bcryptjs: + specifier: ^3.0.2 + version: 3.0.2 + dotenv: + specifier: ^16.3.1 + version: 16.5.0 + dotenv-expand: + specifier: ^12.0.2 + version: 12.0.2 + express: + specifier: ^4.21.2 + version: 4.21.2 + express-validator: + specifier: ^7.2.1 + version: 7.2.1 + jsonwebtoken: + specifier: ^9.0.2 + version: 9.0.2 + openai: + specifier: ^4.103.0 + version: 4.104.0(zod@3.25.48) + pg: + specifier: ^8.16.0 + version: 8.16.0 + pgvector: + specifier: ^0.2.1 + version: 0.2.1 + postgres: + specifier: ^3.4.7 + version: 3.4.7 + reflect-metadata: + specifier: ^0.2.2 + version: 0.2.2 + typeorm: + specifier: ^0.3.24 + version: 0.3.24(pg@8.16.0)(reflect-metadata@0.2.2)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)) + uuid: + specifier: ^11.1.0 + version: 11.1.0 + devDependencies: + '@radix-ui/react-accordion': + specifier: ^1.2.3 + version: 1.2.11(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-slot': + specifier: ^1.1.2 + version: 1.2.3(@types/react@19.1.6)(react@19.1.0) + '@shadcn/ui': + specifier: ^0.0.4 + 
version: 0.0.4 + '@tailwindcss/postcss': + specifier: ^4.1.3 + version: 4.1.8 + '@tailwindcss/vite': + specifier: ^4.1.7 + version: 4.1.8(vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)) + '@types/bcryptjs': + specifier: ^3.0.0 + version: 3.0.0 + '@types/express': + specifier: ^4.17.21 + version: 4.17.22 + '@types/jest': + specifier: ^29.5.5 + version: 29.5.14 + '@types/jsonwebtoken': + specifier: ^9.0.9 + version: 9.0.9 + '@types/node': + specifier: ^22.15.21 + version: 22.15.29 + '@types/react': + specifier: ^19.0.12 + version: 19.1.6 + '@types/react-dom': + specifier: ^19.0.4 + version: 19.1.5(@types/react@19.1.6) + '@types/uuid': + specifier: ^10.0.0 + version: 10.0.0 + '@typescript-eslint/eslint-plugin': + specifier: ^6.7.4 + version: 6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.8.3))(eslint@8.57.1)(typescript@5.8.3) + '@typescript-eslint/parser': + specifier: ^6.7.4 + version: 6.21.0(eslint@8.57.1)(typescript@5.8.3) + '@vitejs/plugin-react': + specifier: ^4.4.1 + version: 4.5.0(vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)) + autoprefixer: + specifier: ^10.4.21 + version: 10.4.21(postcss@8.5.4) + class-variance-authority: + specifier: ^0.7.1 + version: 0.7.1 + clsx: + specifier: ^2.1.1 + version: 2.1.1 + concurrently: + specifier: ^9.1.2 + version: 9.1.2 + eslint: + specifier: ^8.50.0 + version: 8.57.1 + i18next: + specifier: ^24.2.3 + version: 24.2.3(typescript@5.8.3) + i18next-browser-languagedetector: + specifier: ^8.0.4 + version: 8.1.0 + jest: + specifier: ^29.7.0 + version: 29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)) + lucide-react: + specifier: ^0.486.0 + version: 0.486.0(react@19.1.0) + next: + specifier: ^15.2.4 + version: 15.3.3(@babel/core@7.27.4)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + postcss: + specifier: ^8.5.3 + version: 8.5.4 + prettier: + specifier: ^3.0.3 + version: 3.5.3 + react: + specifier: ^19.1.0 + 
version: 19.1.0 + react-dom: + specifier: ^19.1.0 + version: 19.1.0(react@19.1.0) + react-i18next: + specifier: ^15.4.1 + version: 15.5.2(i18next@24.2.3(typescript@5.8.3))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.8.3) + react-router-dom: + specifier: ^7.6.0 + version: 7.6.1(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + tailwind-merge: + specifier: ^3.1.0 + version: 3.3.0 + tailwind-scrollbar-hide: + specifier: ^2.0.0 + version: 2.0.0(tailwindcss@4.1.8) + tailwindcss: + specifier: ^4.0.17 + version: 4.1.8 + ts-jest: + specifier: ^29.1.1 + version: 29.3.4(@babel/core@7.27.4)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.27.4))(jest@29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)))(typescript@5.8.3) + ts-node-dev: + specifier: ^2.0.0 + version: 2.0.0(@types/node@22.15.29)(typescript@5.8.3) + tsx: + specifier: ^4.7.0 + version: 4.19.4 + typescript: + specifier: ^5.2.2 + version: 5.8.3 + vite: + specifier: ^6.3.5 + version: 6.3.5(@types/node@22.15.29)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4) + zod: + specifier: ^3.24.2 + version: 3.25.48 + +packages: + + '@alloc/quick-lru@5.2.0': + resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} + engines: {node: '>=10'} + + '@ampproject/remapping@2.3.0': + resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} + engines: {node: '>=6.0.0'} + + '@babel/code-frame@7.27.1': + resolution: {integrity: sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==} + engines: {node: '>=6.9.0'} + + '@babel/compat-data@7.27.3': + resolution: {integrity: sha512-V42wFfx1ymFte+ecf6iXghnnP8kWTO+ZLXIyZq+1LAXHHvTZdVxicn4yiVYdYMGaCO3tmqub11AorKkv+iodqw==} + engines: {node: '>=6.9.0'} + + '@babel/core@7.27.4': + resolution: {integrity: 
sha512-bXYxrXFubeYdvB0NhD/NBB3Qi6aZeV20GOWVI47t2dkecCEoneR4NPVcb7abpXDEvejgrUfFtG6vG/zxAKmg+g==} + engines: {node: '>=6.9.0'} + + '@babel/generator@7.27.3': + resolution: {integrity: sha512-xnlJYj5zepml8NXtjkG0WquFUv8RskFqyFcVgTBp5k+NaA/8uw/K+OSVf8AMGw5e9HKP2ETd5xpK5MLZQD6b4Q==} + engines: {node: '>=6.9.0'} + + '@babel/helper-compilation-targets@7.27.2': + resolution: {integrity: sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-imports@7.27.1': + resolution: {integrity: sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-transforms@7.27.3': + resolution: {integrity: sha512-dSOvYwvyLsWBeIRyOeHXp5vPj5l1I011r52FM1+r1jCERv+aFXYk4whgQccYEGYxK2H3ZAIA8nuPkQ0HaUo3qg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/helper-plugin-utils@7.27.1': + resolution: {integrity: sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-string-parser@7.27.1': + resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.27.1': + resolution: {integrity: sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-option@7.27.1': + resolution: {integrity: sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==} + engines: {node: '>=6.9.0'} + + '@babel/helpers@7.27.4': + resolution: {integrity: sha512-Y+bO6U+I7ZKaM5G5rDUZiYfUvQPUibYmAFe7EnKdnKBbVXDZxvp+MWOH5gYciY0EPk4EScsuFMQBbEfpdRKSCQ==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.27.4': + resolution: {integrity: 
sha512-BRmLHGwpUqLFR2jzx9orBuX/ABDkj2jLKOXrHDTN2aOKL+jFDDKaRNo9nyYsIl9h/UE/7lMKdDjKQQyxKKDZ7g==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/plugin-syntax-async-generators@7.8.4': + resolution: {integrity: sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-bigint@7.8.3': + resolution: {integrity: sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-class-properties@7.12.13': + resolution: {integrity: sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-class-static-block@7.14.5': + resolution: {integrity: sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-import-attributes@7.26.0': + resolution: {integrity: sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-import-meta@7.10.4': + resolution: {integrity: sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-json-strings@7.8.3': + resolution: {integrity: sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-jsx@7.25.9': + resolution: {integrity: sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + 
'@babel/plugin-syntax-logical-assignment-operators@7.10.4': + resolution: {integrity: sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-nullish-coalescing-operator@7.8.3': + resolution: {integrity: sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-numeric-separator@7.10.4': + resolution: {integrity: sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-object-rest-spread@7.8.3': + resolution: {integrity: sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-optional-catch-binding@7.8.3': + resolution: {integrity: sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-optional-chaining@7.8.3': + resolution: {integrity: sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-private-property-in-object@7.14.5': + resolution: {integrity: sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-top-level-await@7.14.5': + resolution: {integrity: sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-typescript@7.25.9': + resolution: {integrity: 
sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-jsx-self@7.27.1': + resolution: {integrity: sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-jsx-source@7.27.1': + resolution: {integrity: sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/runtime@7.27.0': + resolution: {integrity: sha512-VtPOkrdPHZsKc/clNqyi9WUA8TINkZ4cGk63UUE3u4pmB2k+ZMQRDuIOagv8UVd6j7k0T3+RRIb7beKTebNbcw==} + engines: {node: '>=6.9.0'} + + '@babel/template@7.27.2': + resolution: {integrity: sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==} + engines: {node: '>=6.9.0'} + + '@babel/traverse@7.27.4': + resolution: {integrity: sha512-oNcu2QbHqts9BtOWJosOVJapWjBDSxGCpFvikNR5TGDYDQf3JwpIoMzIKrvfoti93cLfPJEG4tH9SPVeyCGgdA==} + engines: {node: '>=6.9.0'} + + '@babel/types@7.27.3': + resolution: {integrity: sha512-Y1GkI4ktrtvmawoSq+4FCVHNryea6uR+qUQy0AGxLSsjCX0nVmkYQMBLHDkXZuo5hGx7eYdnIaslsdBFm7zbUw==} + engines: {node: '>=6.9.0'} + + '@bcoe/v8-coverage@0.2.3': + resolution: {integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==} + + '@cspotcode/source-map-support@0.8.1': + resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} + engines: {node: '>=12'} + + '@emnapi/runtime@1.4.3': + resolution: {integrity: sha512-pBPWdu6MLKROBX05wSNKcNb++m5Er+KQ9QkB+WVM+pW2Kx9hoSrVTnu3BdkI5eBLZoKu/J6mW/B6i6bJB2ytXQ==} + + '@esbuild/aix-ppc64@0.25.2': + resolution: {integrity: 
sha512-wCIboOL2yXZym2cgm6mlA742s9QeJ8DjGVaL39dLN4rRwrOgOyYSnOaFPhKZGLb2ngj4EyfAFjsNJwPXZvseag==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.25.2': + resolution: {integrity: sha512-5ZAX5xOmTligeBaeNEPnPaeEuah53Id2tX4c2CVP3JaROTH+j4fnfHCkr1PjXMd78hMst+TlkfKcW/DlTq0i4w==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.25.2': + resolution: {integrity: sha512-NQhH7jFstVY5x8CKbcfa166GoV0EFkaPkCKBQkdPJFvo5u+nGXLEH/ooniLb3QI8Fk58YAx7nsPLozUWfCBOJA==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.25.2': + resolution: {integrity: sha512-Ffcx+nnma8Sge4jzddPHCZVRvIfQ0kMsUsCMcJRHkGJ1cDmhe4SsrYIjLUKn1xpHZybmOqCWwB0zQvsjdEHtkg==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.25.2': + resolution: {integrity: sha512-MpM6LUVTXAzOvN4KbjzU/q5smzryuoNjlriAIx+06RpecwCkL9JpenNzpKd2YMzLJFOdPqBpuub6eVRP5IgiSA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.25.2': + resolution: {integrity: sha512-5eRPrTX7wFyuWe8FqEFPG2cU0+butQQVNcT4sVipqjLYQjjh8a8+vUTfgBKM88ObB85ahsnTwF7PSIt6PG+QkA==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.25.2': + resolution: {integrity: sha512-mLwm4vXKiQ2UTSX4+ImyiPdiHjiZhIaE9QvC7sw0tZ6HoNMjYAqQpGyui5VRIi5sGd+uWq940gdCbY3VLvsO1w==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.25.2': + resolution: {integrity: sha512-6qyyn6TjayJSwGpm8J9QYYGQcRgc90nmfdUb0O7pp1s4lTY+9D0H9O02v5JqGApUyiHOtkz6+1hZNvNtEhbwRQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.25.2': + resolution: {integrity: sha512-gq/sjLsOyMT19I8obBISvhoYiZIAaGF8JpeXu1u8yPv8BE5HlWYobmlsfijFIZ9hIVGYkbdFhEqC0NvM4kNO0g==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.25.2': + resolution: {integrity: 
sha512-UHBRgJcmjJv5oeQF8EpTRZs/1knq6loLxTsjc3nxO9eXAPDLcWW55flrMVc97qFPbmZP31ta1AZVUKQzKTzb0g==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.25.2': + resolution: {integrity: sha512-bBYCv9obgW2cBP+2ZWfjYTU+f5cxRoGGQ5SeDbYdFCAZpYWrfjjfYwvUpP8MlKbP0nwZ5gyOU/0aUzZ5HWPuvQ==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.25.2': + resolution: {integrity: sha512-SHNGiKtvnU2dBlM5D8CXRFdd+6etgZ9dXfaPCeJtz+37PIUlixvlIhI23L5khKXs3DIzAn9V8v+qb1TRKrgT5w==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.25.2': + resolution: {integrity: sha512-hDDRlzE6rPeoj+5fsADqdUZl1OzqDYow4TB4Y/3PlKBD0ph1e6uPHzIQcv2Z65u2K0kpeByIyAjCmjn1hJgG0Q==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.25.2': + resolution: {integrity: sha512-tsHu2RRSWzipmUi9UBDEzc0nLc4HtpZEI5Ba+Omms5456x5WaNuiG3u7xh5AO6sipnJ9r4cRWQB2tUjPyIkc6g==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.25.2': + resolution: {integrity: sha512-k4LtpgV7NJQOml/10uPU0s4SAXGnowi5qBSjaLWMojNCUICNu7TshqHLAEbkBdAszL5TabfvQ48kK84hyFzjnw==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.25.2': + resolution: {integrity: sha512-GRa4IshOdvKY7M/rDpRR3gkiTNp34M0eLTaC1a08gNrh4u488aPhuZOCpkF6+2wl3zAN7L7XIpOFBhnaE3/Q8Q==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.25.2': + resolution: {integrity: sha512-QInHERlqpTTZ4FRB0fROQWXcYRD64lAoiegezDunLpalZMjcUcld3YzZmVJ2H/Cp0wJRZ8Xtjtj0cEHhYc/uUg==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.25.2': + resolution: {integrity: sha512-talAIBoY5M8vHc6EeI2WW9d/CkiO9MQJ0IOWX8hrLhxGbro/vBXJvaQXefW2cP0z0nQVTdQ/eNyGFV1GSKrxfw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.25.2': + resolution: {integrity: 
sha512-voZT9Z+tpOxrvfKFyfDYPc4DO4rk06qamv1a/fkuzHpiVBMOhpjK+vBmWM8J1eiB3OLSMFYNaOaBNLXGChf5tg==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.25.2': + resolution: {integrity: sha512-dcXYOC6NXOqcykeDlwId9kB6OkPUxOEqU+rkrYVqJbK2hagWOMrsTGsMr8+rW02M+d5Op5NNlgMmjzecaRf7Tg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.25.2': + resolution: {integrity: sha512-t/TkWwahkH0Tsgoq1Ju7QfgGhArkGLkF1uYz8nQS/PPFlXbP5YgRpqQR3ARRiC2iXoLTWFxc6DJMSK10dVXluw==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/sunos-x64@0.25.2': + resolution: {integrity: sha512-cfZH1co2+imVdWCjd+D1gf9NjkchVhhdpgb1q5y6Hcv9TP6Zi9ZG/beI3ig8TvwT9lH9dlxLq5MQBBgwuj4xvA==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.25.2': + resolution: {integrity: sha512-7Loyjh+D/Nx/sOTzV8vfbB3GJuHdOQyrOryFdZvPHLf42Tk9ivBU5Aedi7iyX+x6rbn2Mh68T4qq1SDqJBQO5Q==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.25.2': + resolution: {integrity: sha512-WRJgsz9un0nqZJ4MfhabxaD9Ft8KioqU3JMinOTvobbX6MOSUigSBlogP8QB3uxpJDsFS6yN+3FDBdqE5lg9kg==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.25.2': + resolution: {integrity: sha512-kM3HKb16VIXZyIeVrM1ygYmZBKybX8N4p754bw390wGO3Tf2j4L2/WYL+4suWujpgf6GBYs3jv7TyUivdd05JA==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + + '@eslint-community/eslint-utils@4.5.1': + resolution: {integrity: sha512-soEIOALTfTK6EjmKMMoLugwaP0rzkad90iIWd1hMO9ARkSAyjfMfkRRhLvD5qH7vvM0Cg72pieUfR6yh6XxC4w==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.12.1': + resolution: {integrity: sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + + '@eslint/eslintrc@2.1.4': + resolution: {integrity: 
sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@eslint/js@8.57.1': + resolution: {integrity: sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@humanwhocodes/config-array@0.13.0': + resolution: {integrity: sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==} + engines: {node: '>=10.10.0'} + deprecated: Use @eslint/config-array instead + + '@humanwhocodes/module-importer@1.0.1': + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + + '@humanwhocodes/object-schema@2.0.3': + resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==} + deprecated: Use @eslint/object-schema instead + + '@img/sharp-darwin-arm64@0.34.2': + resolution: {integrity: sha512-OfXHZPppddivUJnqyKoi5YVeHRkkNE2zUFT2gbpKxp/JZCFYEYubnMg+gOp6lWfasPrTS+KPosKqdI+ELYVDtg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [darwin] + + '@img/sharp-darwin-x64@0.34.2': + resolution: {integrity: sha512-dYvWqmjU9VxqXmjEtjmvHnGqF8GrVjM2Epj9rJ6BUIXvk8slvNDJbhGFvIoXzkDhrJC2jUxNLz/GUjjvSzfw+g==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [darwin] + + '@img/sharp-libvips-darwin-arm64@1.1.0': + resolution: {integrity: sha512-HZ/JUmPwrJSoM4DIQPv/BfNh9yrOA8tlBbqbLz4JZ5uew2+o22Ik+tHQJcih7QJuSa0zo5coHTfD5J8inqj9DA==} + cpu: [arm64] + os: [darwin] + + '@img/sharp-libvips-darwin-x64@1.1.0': + resolution: {integrity: sha512-Xzc2ToEmHN+hfvsl9wja0RlnXEgpKNmftriQp6XzY/RaSfwD9th+MSh0WQKzUreLKKINb3afirxW7A0fz2YWuQ==} + cpu: [x64] + os: [darwin] + + '@img/sharp-libvips-linux-arm64@1.1.0': + resolution: {integrity: 
sha512-IVfGJa7gjChDET1dK9SekxFFdflarnUB8PwW8aGwEoF3oAsSDuNUTYS+SKDOyOJxQyDC1aPFMuRYLoDInyV9Ew==} + cpu: [arm64] + os: [linux] + + '@img/sharp-libvips-linux-arm@1.1.0': + resolution: {integrity: sha512-s8BAd0lwUIvYCJyRdFqvsj+BJIpDBSxs6ivrOPm/R7piTs5UIwY5OjXrP2bqXC9/moGsyRa37eYWYCOGVXxVrA==} + cpu: [arm] + os: [linux] + + '@img/sharp-libvips-linux-ppc64@1.1.0': + resolution: {integrity: sha512-tiXxFZFbhnkWE2LA8oQj7KYR+bWBkiV2nilRldT7bqoEZ4HiDOcePr9wVDAZPi/Id5fT1oY9iGnDq20cwUz8lQ==} + cpu: [ppc64] + os: [linux] + + '@img/sharp-libvips-linux-s390x@1.1.0': + resolution: {integrity: sha512-xukSwvhguw7COyzvmjydRb3x/09+21HykyapcZchiCUkTThEQEOMtBj9UhkaBRLuBrgLFzQ2wbxdeCCJW/jgJA==} + cpu: [s390x] + os: [linux] + + '@img/sharp-libvips-linux-x64@1.1.0': + resolution: {integrity: sha512-yRj2+reB8iMg9W5sULM3S74jVS7zqSzHG3Ol/twnAAkAhnGQnpjj6e4ayUz7V+FpKypwgs82xbRdYtchTTUB+Q==} + cpu: [x64] + os: [linux] + + '@img/sharp-libvips-linuxmusl-arm64@1.1.0': + resolution: {integrity: sha512-jYZdG+whg0MDK+q2COKbYidaqW/WTz0cc1E+tMAusiDygrM4ypmSCjOJPmFTvHHJ8j/6cAGyeDWZOsK06tP33w==} + cpu: [arm64] + os: [linux] + + '@img/sharp-libvips-linuxmusl-x64@1.1.0': + resolution: {integrity: sha512-wK7SBdwrAiycjXdkPnGCPLjYb9lD4l6Ze2gSdAGVZrEL05AOUJESWU2lhlC+Ffn5/G+VKuSm6zzbQSzFX/P65A==} + cpu: [x64] + os: [linux] + + '@img/sharp-linux-arm64@0.34.2': + resolution: {integrity: sha512-D8n8wgWmPDakc83LORcfJepdOSN6MvWNzzz2ux0MnIbOqdieRZwVYY32zxVx+IFUT8er5KPcyU3XXsn+GzG/0Q==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + + '@img/sharp-linux-arm@0.34.2': + resolution: {integrity: sha512-0DZzkvuEOqQUP9mo2kjjKNok5AmnOr1jB2XYjkaoNRwpAYMDzRmAqUIa1nRi58S2WswqSfPOWLNOr0FDT3H5RQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm] + os: [linux] + + '@img/sharp-linux-s390x@0.34.2': + resolution: {integrity: sha512-EGZ1xwhBI7dNISwxjChqBGELCWMGDvmxZXKjQRuqMrakhO8QoMgqCrdjnAqJq/CScxfRn+Bb7suXBElKQpPDiw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [s390x] 
+ os: [linux] + + '@img/sharp-linux-x64@0.34.2': + resolution: {integrity: sha512-sD7J+h5nFLMMmOXYH4DD9UtSNBD05tWSSdWAcEyzqW8Cn5UxXvsHAxmxSesYUsTOBmUnjtxghKDl15EvfqLFbQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + + '@img/sharp-linuxmusl-arm64@0.34.2': + resolution: {integrity: sha512-NEE2vQ6wcxYav1/A22OOxoSOGiKnNmDzCYFOZ949xFmrWZOVII1Bp3NqVVpvj+3UeHMFyN5eP/V5hzViQ5CZNA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + + '@img/sharp-linuxmusl-x64@0.34.2': + resolution: {integrity: sha512-DOYMrDm5E6/8bm/yQLCWyuDJwUnlevR8xtF8bs+gjZ7cyUNYXiSf/E8Kp0Ss5xasIaXSHzb888V1BE4i1hFhAA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + + '@img/sharp-wasm32@0.34.2': + resolution: {integrity: sha512-/VI4mdlJ9zkaq53MbIG6rZY+QRN3MLbR6usYlgITEzi4Rpx5S6LFKsycOQjkOGmqTNmkIdLjEvooFKwww6OpdQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [wasm32] + + '@img/sharp-win32-arm64@0.34.2': + resolution: {integrity: sha512-cfP/r9FdS63VA5k0xiqaNaEoGxBg9k7uE+RQGzuK9fHt7jib4zAVVseR9LsE4gJcNWgT6APKMNnCcnyOtmSEUQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [win32] + + '@img/sharp-win32-ia32@0.34.2': + resolution: {integrity: sha512-QLjGGvAbj0X/FXl8n1WbtQ6iVBpWU7JO94u/P2M4a8CFYsvQi4GW2mRy/JqkRx0qpBzaOdKJKw8uc930EX2AHw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [ia32] + os: [win32] + + '@img/sharp-win32-x64@0.34.2': + resolution: {integrity: sha512-aUdT6zEYtDKCaxkofmmJDJYGCf0+pJg3eU9/oBuqvEeoB9dKI6ZLc/1iLJCTuJQDO4ptntAlkUmHgGjyuobZbw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [win32] + + '@isaacs/cliui@8.0.2': + resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==} + engines: {node: '>=12'} + + '@isaacs/fs-minipass@4.0.1': + resolution: {integrity: 
sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==} + engines: {node: '>=18.0.0'} + + '@istanbuljs/load-nyc-config@1.1.0': + resolution: {integrity: sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==} + engines: {node: '>=8'} + + '@istanbuljs/schema@0.1.3': + resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==} + engines: {node: '>=8'} + + '@jest/console@29.7.0': + resolution: {integrity: sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/core@29.7.0': + resolution: {integrity: sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + '@jest/environment@29.7.0': + resolution: {integrity: sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/expect-utils@29.7.0': + resolution: {integrity: sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/expect@29.7.0': + resolution: {integrity: sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/fake-timers@29.7.0': + resolution: {integrity: sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/globals@29.7.0': + resolution: {integrity: sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==} + 
engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/reporters@29.7.0': + resolution: {integrity: sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + '@jest/schemas@29.6.3': + resolution: {integrity: sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/source-map@29.6.3': + resolution: {integrity: sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/test-result@29.7.0': + resolution: {integrity: sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/test-sequencer@29.7.0': + resolution: {integrity: sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/transform@29.7.0': + resolution: {integrity: sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/types@29.6.3': + resolution: {integrity: sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jridgewell/gen-mapping@0.3.8': + resolution: {integrity: sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==} + engines: {node: '>=6.0.0'} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + 
'@jridgewell/set-array@1.2.1': + resolution: {integrity: sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.0': + resolution: {integrity: sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==} + + '@jridgewell/trace-mapping@0.3.25': + resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} + + '@jridgewell/trace-mapping@0.3.9': + resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + + '@modelcontextprotocol/sdk@1.12.1': + resolution: {integrity: sha512-KG1CZhZfWg+u8pxeM/mByJDScJSrjjxLc8fwQqbsS8xCjBmQfMNEBTotYdNanKekepnfRI85GtgQlctLFpcYPw==} + engines: {node: '>=18'} + + '@next/env@15.3.3': + resolution: {integrity: sha512-OdiMrzCl2Xi0VTjiQQUK0Xh7bJHnOuET2s+3V+Y40WJBAXrJeGA3f+I8MZJ/YQ3mVGi5XGR1L66oFlgqXhQ4Vw==} + + '@next/swc-darwin-arm64@15.3.3': + resolution: {integrity: sha512-WRJERLuH+O3oYB4yZNVahSVFmtxRNjNF1I1c34tYMoJb0Pve+7/RaLAJJizyYiFhjYNGHRAE1Ri2Fd23zgDqhg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + + '@next/swc-darwin-x64@15.3.3': + resolution: {integrity: sha512-XHdzH/yBc55lu78k/XwtuFR/ZXUTcflpRXcsu0nKmF45U96jt1tsOZhVrn5YH+paw66zOANpOnFQ9i6/j+UYvw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + + '@next/swc-linux-arm64-gnu@15.3.3': + resolution: {integrity: sha512-VZ3sYL2LXB8znNGcjhocikEkag/8xiLgnvQts41tq6i+wql63SMS1Q6N8RVXHw5pEUjiof+II3HkDd7GFcgkzw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@next/swc-linux-arm64-musl@15.3.3': + resolution: {integrity: sha512-h6Y1fLU4RWAp1HPNJWDYBQ+e3G7sLckyBXhmH9ajn8l/RSMnhbuPBV/fXmy3muMcVwoJdHL+UtzRzs0nXOf9SA==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@next/swc-linux-x64-gnu@15.3.3': + resolution: {integrity: 
sha512-jJ8HRiF3N8Zw6hGlytCj5BiHyG/K+fnTKVDEKvUCyiQ/0r5tgwO7OgaRiOjjRoIx2vwLR+Rz8hQoPrnmFbJdfw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@next/swc-linux-x64-musl@15.3.3': + resolution: {integrity: sha512-HrUcTr4N+RgiiGn3jjeT6Oo208UT/7BuTr7K0mdKRBtTbT4v9zJqCDKO97DUqqoBK1qyzP1RwvrWTvU6EPh/Cw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@next/swc-win32-arm64-msvc@15.3.3': + resolution: {integrity: sha512-SxorONgi6K7ZUysMtRF3mIeHC5aA3IQLmKFQzU0OuhuUYwpOBc1ypaLJLP5Bf3M9k53KUUUj4vTPwzGvl/NwlQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + + '@next/swc-win32-x64-msvc@15.3.3': + resolution: {integrity: sha512-4QZG6F8enl9/S2+yIiOiju0iCTFd93d8VC1q9LZS4p/Xuk81W2QDjCFeoogmrWWkAD59z8ZxepBQap2dKS5ruw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + + '@nodelib/fs.scandir@2.1.5': + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + + '@nodelib/fs.stat@2.0.5': + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + + '@nodelib/fs.walk@1.2.8': + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + + '@pkgjs/parseargs@0.11.0': + resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} + engines: {node: '>=14'} + + '@radix-ui/primitive@1.1.2': + resolution: {integrity: sha512-XnbHrrprsNqZKQhStrSwgRUQzoCI1glLzdw79xiZPoofhGICeZRSQ3dIxAKH1gb3OHfNf4d6f+vAv3kil2eggA==} + + '@radix-ui/react-accordion@1.2.11': + resolution: {integrity: sha512-l3W5D54emV2ues7jjeG1xcyN7S3jnK3zE2zHqgn0CmMsy9lNJwmgcrmaxS+7ipw15FAivzKNzH3d5EcGoFKw0A==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || 
^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-collapsible@1.1.11': + resolution: {integrity: sha512-2qrRsVGSCYasSz1RFOorXwl0H7g7J1frQtgpQgYrt+MOidtPAINHn9CPovQXb83r8ahapdx3Tu0fa/pdFFSdPg==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-collection@1.1.7': + resolution: {integrity: sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-compose-refs@1.1.2': + resolution: {integrity: sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-context@1.1.2': + resolution: {integrity: sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-direction@1.1.1': + resolution: {integrity: sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + 
'@radix-ui/react-id@1.1.1': + resolution: {integrity: sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-presence@1.1.4': + resolution: {integrity: sha512-ueDqRbdc4/bkaQT3GIpLQssRlFgWaL/U2z/S31qRwwLWoxHLgry3SIfCwhxeQNbirEUXFa+lq3RL3oBYXtcmIA==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-primitive@2.1.3': + resolution: {integrity: sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-slot@1.2.3': + resolution: {integrity: sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-controllable-state@1.2.2': + resolution: {integrity: sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-effect-event@0.0.2': + resolution: {integrity: 
sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-layout-effect@1.1.1': + resolution: {integrity: sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@rolldown/pluginutils@1.0.0-beta.9': + resolution: {integrity: sha512-e9MeMtVWo186sgvFFJOPGy7/d2j2mZhLJIdVW0C/xDluuOvymEATqz6zKsP0ZmXGzQtqlyjz5sC1sYQUoJG98w==} + + '@rollup/rollup-android-arm-eabi@4.40.1': + resolution: {integrity: sha512-kxz0YeeCrRUHz3zyqvd7n+TVRlNyTifBsmnmNPtk3hQURUyG9eAB+usz6DAwagMusjx/zb3AjvDUvhFGDAexGw==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.40.1': + resolution: {integrity: sha512-PPkxTOisoNC6TpnDKatjKkjRMsdaWIhyuMkA4UsBXT9WEZY4uHezBTjs6Vl4PbqQQeu6oION1w2voYZv9yquCw==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.40.1': + resolution: {integrity: sha512-VWXGISWFY18v/0JyNUy4A46KCFCb9NVsH+1100XP31lud+TzlezBbz24CYzbnA4x6w4hx+NYCXDfnvDVO6lcAA==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.40.1': + resolution: {integrity: sha512-nIwkXafAI1/QCS7pxSpv/ZtFW6TXcNUEHAIA9EIyw5OzxJZQ1YDrX+CL6JAIQgZ33CInl1R6mHet9Y/UZTg2Bw==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.40.1': + resolution: {integrity: sha512-BdrLJ2mHTrIYdaS2I99mriyJfGGenSaP+UwGi1kB9BLOCu9SR8ZpbkmmalKIALnRw24kM7qCN0IOm6L0S44iWw==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.40.1': + resolution: {integrity: sha512-VXeo/puqvCG8JBPNZXZf5Dqq7BzElNJzHRRw3vjBE27WujdzuOPecDPc/+1DcdcTptNBep3861jNq0mYkT8Z6Q==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.40.1': + resolution: {integrity: 
sha512-ehSKrewwsESPt1TgSE/na9nIhWCosfGSFqv7vwEtjyAqZcvbGIg4JAcV7ZEh2tfj/IlfBeZjgOXm35iOOjadcg==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm-musleabihf@4.40.1': + resolution: {integrity: sha512-m39iO/aaurh5FVIu/F4/Zsl8xppd76S4qoID8E+dSRQvTyZTOI2gVk3T4oqzfq1PtcvOfAVlwLMK3KRQMaR8lg==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm64-gnu@4.40.1': + resolution: {integrity: sha512-Y+GHnGaku4aVLSgrT0uWe2o2Rq8te9hi+MwqGF9r9ORgXhmHK5Q71N757u0F8yU1OIwUIFy6YiJtKjtyktk5hg==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-arm64-musl@4.40.1': + resolution: {integrity: sha512-jEwjn3jCA+tQGswK3aEWcD09/7M5wGwc6+flhva7dsQNRZZTe30vkalgIzV4tjkopsTS9Jd7Y1Bsj6a4lzz8gQ==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-loongarch64-gnu@4.40.1': + resolution: {integrity: sha512-ySyWikVhNzv+BV/IDCsrraOAZ3UaC8SZB67FZlqVwXwnFhPihOso9rPOxzZbjp81suB1O2Topw+6Ug3JNegejQ==} + cpu: [loong64] + os: [linux] + + '@rollup/rollup-linux-powerpc64le-gnu@4.40.1': + resolution: {integrity: sha512-BvvA64QxZlh7WZWqDPPdt0GH4bznuL6uOO1pmgPnnv86rpUpc8ZxgZwcEgXvo02GRIZX1hQ0j0pAnhwkhwPqWg==} + cpu: [ppc64] + os: [linux] + + '@rollup/rollup-linux-riscv64-gnu@4.40.1': + resolution: {integrity: sha512-EQSP+8+1VuSulm9RKSMKitTav89fKbHymTf25n5+Yr6gAPZxYWpj3DzAsQqoaHAk9YX2lwEyAf9S4W8F4l3VBQ==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-riscv64-musl@4.40.1': + resolution: {integrity: sha512-n/vQ4xRZXKuIpqukkMXZt9RWdl+2zgGNx7Uda8NtmLJ06NL8jiHxUawbwC+hdSq1rrw/9CghCpEONor+l1e2gA==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-s390x-gnu@4.40.1': + resolution: {integrity: sha512-h8d28xzYb98fMQKUz0w2fMc1XuGzLLjdyxVIbhbil4ELfk5/orZlSTpF/xdI9C8K0I8lCkq+1En2RJsawZekkg==} + cpu: [s390x] + os: [linux] + + '@rollup/rollup-linux-x64-gnu@4.40.1': + resolution: {integrity: sha512-XiK5z70PEFEFqcNj3/zRSz/qX4bp4QIraTy9QjwJAb/Z8GM7kVUsD0Uk8maIPeTyPCP03ChdI+VVmJriKYbRHQ==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-linux-x64-musl@4.40.1': + resolution: {integrity: 
sha512-2BRORitq5rQ4Da9blVovzNCMaUlyKrzMSvkVR0D4qPuOy/+pMCrh1d7o01RATwVy+6Fa1WBw+da7QPeLWU/1mQ==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-win32-arm64-msvc@4.40.1': + resolution: {integrity: sha512-b2bcNm9Kbde03H+q+Jjw9tSfhYkzrDUf2d5MAd1bOJuVplXvFhWz7tRtWvD8/ORZi7qSCy0idW6tf2HgxSXQSg==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.40.1': + resolution: {integrity: sha512-DfcogW8N7Zg7llVEfpqWMZcaErKfsj9VvmfSyRjCyo4BI3wPEfrzTtJkZG6gKP/Z92wFm6rz2aDO7/JfiR/whA==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.40.1': + resolution: {integrity: sha512-ECyOuDeH3C1I8jH2MK1RtBJW+YPMvSfT0a5NN0nHfQYnDSJ6tUiZH3gzwVP5/Kfh/+Tt7tpWVF9LXNTnhTJ3kA==} + cpu: [x64] + os: [win32] + + '@shadcn/ui@0.0.4': + resolution: {integrity: sha512-0dtu/5ApsOZ24qgaZwtif8jVwqol7a4m1x5AxPuM1k5wxhqU7t/qEfBGtaSki1R8VlbTQfCj5PAlO45NKCa7Gg==} + hasBin: true + + '@sinclair/typebox@0.27.8': + resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} + + '@sinonjs/commons@3.0.1': + resolution: {integrity: sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==} + + '@sinonjs/fake-timers@10.3.0': + resolution: {integrity: sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==} + + '@sqltools/formatter@1.2.5': + resolution: {integrity: sha512-Uy0+khmZqUrUGm5dmMqVlnvufZRSK0FbYzVgp0UMstm+F5+W2/jnEEQyc9vo1ZR/E5ZI/B1WjjoTqBqwJL6Krw==} + + '@swc/counter@0.1.3': + resolution: {integrity: sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==} + + '@swc/helpers@0.5.15': + resolution: {integrity: sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==} + + '@tailwindcss/node@4.1.8': + resolution: {integrity: sha512-OWwBsbC9BFAJelmnNcrKuf+bka2ZxCE2A4Ft53Tkg4uoiE67r/PMEYwCsourC26E+kmxfwE0hVzMdxqeW+xu7Q==} + + '@tailwindcss/oxide-android-arm64@4.1.8': + 
resolution: {integrity: sha512-Fbz7qni62uKYceWYvUjRqhGfZKwhZDQhlrJKGtnZfuNtHFqa8wmr+Wn74CTWERiW2hn3mN5gTpOoxWKk0jRxjg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [android] + + '@tailwindcss/oxide-darwin-arm64@4.1.8': + resolution: {integrity: sha512-RdRvedGsT0vwVVDztvyXhKpsU2ark/BjgG0huo4+2BluxdXo8NDgzl77qh0T1nUxmM11eXwR8jA39ibvSTbi7A==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + + '@tailwindcss/oxide-darwin-x64@4.1.8': + resolution: {integrity: sha512-t6PgxjEMLp5Ovf7uMb2OFmb3kqzVTPPakWpBIFzppk4JE4ix0yEtbtSjPbU8+PZETpaYMtXvss2Sdkx8Vs4XRw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + + '@tailwindcss/oxide-freebsd-x64@4.1.8': + resolution: {integrity: sha512-g8C8eGEyhHTqwPStSwZNSrOlyx0bhK/V/+zX0Y+n7DoRUzyS8eMbVshVOLJTDDC+Qn9IJnilYbIKzpB9n4aBsg==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.8': + resolution: {integrity: sha512-Jmzr3FA4S2tHhaC6yCjac3rGf7hG9R6Gf2z9i9JFcuyy0u79HfQsh/thifbYTF2ic82KJovKKkIB6Z9TdNhCXQ==} + engines: {node: '>= 10'} + cpu: [arm] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-gnu@4.1.8': + resolution: {integrity: sha512-qq7jXtO1+UEtCmCeBBIRDrPFIVI4ilEQ97qgBGdwXAARrUqSn/L9fUrkb1XP/mvVtoVeR2bt/0L77xx53bPZ/Q==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-musl@4.1.8': + resolution: {integrity: sha512-O6b8QesPbJCRshsNApsOIpzKt3ztG35gfX9tEf4arD7mwNinsoCKxkj8TgEE0YRjmjtO3r9FlJnT/ENd9EVefQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-gnu@4.1.8': + resolution: {integrity: sha512-32iEXX/pXwikshNOGnERAFwFSfiltmijMIAbUhnNyjFr3tmWmMJWQKU2vNcFX0DACSXJ3ZWcSkzNbaKTdngH6g==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-musl@4.1.8': + resolution: {integrity: sha512-s+VSSD+TfZeMEsCaFaHTaY5YNj3Dri8rST09gMvYQKwPphacRG7wbuQ5ZJMIJXN/puxPcg/nU+ucvWguPpvBDg==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + 
'@tailwindcss/oxide-wasm32-wasi@4.1.8': + resolution: {integrity: sha512-CXBPVFkpDjM67sS1psWohZ6g/2/cd+cq56vPxK4JeawelxwK4YECgl9Y9TjkE2qfF+9/s1tHHJqrC4SS6cVvSg==} + engines: {node: '>=14.0.0'} + cpu: [wasm32] + bundledDependencies: + - '@napi-rs/wasm-runtime' + - '@emnapi/core' + - '@emnapi/runtime' + - '@tybys/wasm-util' + - '@emnapi/wasi-threads' + - tslib + + '@tailwindcss/oxide-win32-arm64-msvc@4.1.8': + resolution: {integrity: sha512-7GmYk1n28teDHUjPlIx4Z6Z4hHEgvP5ZW2QS9ygnDAdI/myh3HTHjDqtSqgu1BpRoI4OiLx+fThAyA1JePoENA==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + + '@tailwindcss/oxide-win32-x64-msvc@4.1.8': + resolution: {integrity: sha512-fou+U20j+Jl0EHwK92spoWISON2OBnCazIc038Xj2TdweYV33ZRkS9nwqiUi2d/Wba5xg5UoHfvynnb/UB49cQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + + '@tailwindcss/oxide@4.1.8': + resolution: {integrity: sha512-d7qvv9PsM5N3VNKhwVUhpK6r4h9wtLkJ6lz9ZY9aeZgrUWk1Z8VPyqyDT9MZlem7GTGseRQHkeB1j3tC7W1P+A==} + engines: {node: '>= 10'} + + '@tailwindcss/postcss@4.1.8': + resolution: {integrity: sha512-vB/vlf7rIky+w94aWMw34bWW1ka6g6C3xIOdICKX2GC0VcLtL6fhlLiafF0DVIwa9V6EHz8kbWMkS2s2QvvNlw==} + + '@tailwindcss/vite@4.1.8': + resolution: {integrity: sha512-CQ+I8yxNV5/6uGaJjiuymgw0kEQiNKRinYbZXPdx1fk5WgiyReG0VaUx/Xq6aVNSUNJFzxm6o8FNKS5aMaim5A==} + peerDependencies: + vite: ^5.2.0 || ^6 + + '@tsconfig/node10@1.0.11': + resolution: {integrity: sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==} + + '@tsconfig/node12@1.0.11': + resolution: {integrity: sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==} + + '@tsconfig/node14@1.0.3': + resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==} + + '@tsconfig/node16@1.0.4': + resolution: {integrity: sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==} + + '@types/babel__core@7.20.5': + 
resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} + + '@types/babel__generator@7.27.0': + resolution: {integrity: sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==} + + '@types/babel__template@7.4.4': + resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==} + + '@types/babel__traverse@7.20.7': + resolution: {integrity: sha512-dkO5fhS7+/oos4ciWxyEyjWe48zmG6wbCheo/G2ZnHx4fs3EU6YC6UM8rk56gAjNJ9P3MTH2jo5jb92/K6wbng==} + + '@types/bcryptjs@3.0.0': + resolution: {integrity: sha512-WRZOuCuaz8UcZZE4R5HXTco2goQSI2XxjGY3hbM/xDvwmqFWd4ivooImsMx65OKM6CtNKbnZ5YL+YwAwK7c1dg==} + deprecated: This is a stub types definition. bcryptjs provides its own type definitions, so you do not need this installed. + + '@types/body-parser@1.19.5': + resolution: {integrity: sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==} + + '@types/connect@3.4.38': + resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} + + '@types/estree@1.0.7': + resolution: {integrity: sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==} + + '@types/express-serve-static-core@4.19.6': + resolution: {integrity: sha512-N4LZ2xG7DatVqhCZzOGb1Yi5lMbXSZcmdLDe9EzSndPV2HpWYWzRbaerl2n27irrm94EPpprqa8KpskPT085+A==} + + '@types/express@4.17.22': + resolution: {integrity: sha512-eZUmSnhRX9YRSkplpz0N+k6NljUUn5l3EWZIKZvYzhvMphEuNiyyy1viH/ejgt66JWgALwC/gtSUAeQKtSwW/w==} + + '@types/graceful-fs@4.1.9': + resolution: {integrity: sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==} + + '@types/http-errors@2.0.4': + resolution: {integrity: sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==} + + 
'@types/istanbul-lib-coverage@2.0.6': + resolution: {integrity: sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==} + + '@types/istanbul-lib-report@3.0.3': + resolution: {integrity: sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==} + + '@types/istanbul-reports@3.0.4': + resolution: {integrity: sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==} + + '@types/jest@29.5.14': + resolution: {integrity: sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==} + + '@types/json-schema@7.0.15': + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + + '@types/jsonwebtoken@9.0.9': + resolution: {integrity: sha512-uoe+GxEuHbvy12OUQct2X9JenKM3qAscquYymuQN4fMWG9DBQtykrQEFcAbVACF7qaLw9BePSodUL0kquqBJpQ==} + + '@types/mime@1.3.5': + resolution: {integrity: sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==} + + '@types/ms@2.1.0': + resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} + + '@types/node-fetch@2.6.12': + resolution: {integrity: sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==} + + '@types/node@18.19.110': + resolution: {integrity: sha512-WW2o4gTmREtSnqKty9nhqF/vA0GKd0V/rbC0OyjSk9Bz6bzlsXKT+i7WDdS/a0z74rfT2PO4dArVCSnapNLA5Q==} + + '@types/node@22.15.29': + resolution: {integrity: sha512-LNdjOkUDlU1RZb8e1kOIUpN1qQUlzGkEtbVNo53vbrwDg5om6oduhm4SiUaPW5ASTXhAiP0jInWG8Qx9fVlOeQ==} + + '@types/pg@8.15.4': + resolution: {integrity: sha512-I6UNVBAoYbvuWkkU3oosC8yxqH21f4/Jc4DK71JLG3dT2mdlGe1z+ep/LQGXaKaOgcvUrsQoPRqfgtMcvZiJhg==} + + '@types/qs@6.14.0': + resolution: {integrity: sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ==} + + 
'@types/range-parser@1.2.7': + resolution: {integrity: sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==} + + '@types/react-dom@19.1.5': + resolution: {integrity: sha512-CMCjrWucUBZvohgZxkjd6S9h0nZxXjzus6yDfUb+xLxYM7VvjKNH1tQrE9GWLql1XoOP4/Ds3bwFqShHUYraGg==} + peerDependencies: + '@types/react': ^19.0.0 + + '@types/react@19.1.6': + resolution: {integrity: sha512-JeG0rEWak0N6Itr6QUx+X60uQmN+5t3j9r/OVDtWzFXKaj6kD1BwJzOksD0FF6iWxZlbE1kB0q9vtnU2ekqa1Q==} + + '@types/semver@7.7.0': + resolution: {integrity: sha512-k107IF4+Xr7UHjwDc7Cfd6PRQfbdkiRabXGRjo07b4WyPahFBZCZ1sE+BNxYIJPPg73UkfOsVOLwqVc/6ETrIA==} + + '@types/send@0.17.4': + resolution: {integrity: sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==} + + '@types/serve-static@1.15.7': + resolution: {integrity: sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==} + + '@types/stack-utils@2.0.3': + resolution: {integrity: sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==} + + '@types/strip-bom@3.0.0': + resolution: {integrity: sha512-xevGOReSYGM7g/kUBZzPqCrR/KYAo+F0yiPc85WFTJa0MSLtyFTVTU6cJu/aV4mid7IffDIWqo69THF2o4JiEQ==} + + '@types/strip-json-comments@0.0.30': + resolution: {integrity: sha512-7NQmHra/JILCd1QqpSzl8+mJRc8ZHz3uDm8YV1Ks9IhK0epEiTw8aIErbvH9PI+6XbqhyIQy3462nEsn7UVzjQ==} + + '@types/uuid@10.0.0': + resolution: {integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==} + + '@types/yargs-parser@21.0.3': + resolution: {integrity: sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==} + + '@types/yargs@17.0.33': + resolution: {integrity: sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==} + + '@typescript-eslint/eslint-plugin@6.21.0': + resolution: {integrity: 
sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + '@typescript-eslint/parser': ^6.0.0 || ^6.0.0-alpha + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/parser@6.21.0': + resolution: {integrity: sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/scope-manager@6.21.0': + resolution: {integrity: sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==} + engines: {node: ^16.0.0 || >=18.0.0} + + '@typescript-eslint/type-utils@6.21.0': + resolution: {integrity: sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/types@6.21.0': + resolution: {integrity: sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==} + engines: {node: ^16.0.0 || >=18.0.0} + + '@typescript-eslint/typescript-estree@6.21.0': + resolution: {integrity: sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/utils@6.21.0': + resolution: {integrity: sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + + '@typescript-eslint/visitor-keys@6.21.0': + resolution: {integrity: 
sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==} + engines: {node: ^16.0.0 || >=18.0.0} + + '@ungap/structured-clone@1.3.0': + resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==} + + '@vitejs/plugin-react@4.5.0': + resolution: {integrity: sha512-JuLWaEqypaJmOJPLWwO335Ig6jSgC1FTONCWAxnqcQthLTK/Yc9aH6hr9z/87xciejbQcnP3GnA1FWUSWeXaeg==} + engines: {node: ^14.18.0 || >=16.0.0} + peerDependencies: + vite: ^4.2.0 || ^5.0.0 || ^6.0.0 + + abort-controller@3.0.0: + resolution: {integrity: sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==} + engines: {node: '>=6.5'} + + accepts@1.3.8: + resolution: {integrity: sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==} + engines: {node: '>= 0.6'} + + accepts@2.0.0: + resolution: {integrity: sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==} + engines: {node: '>= 0.6'} + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn-walk@8.3.4: + resolution: {integrity: sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==} + engines: {node: '>=0.4.0'} + + acorn@8.14.1: + resolution: {integrity: sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==} + engines: {node: '>=0.4.0'} + hasBin: true + + agentkeepalive@4.6.0: + resolution: {integrity: sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==} + engines: {node: '>= 8.0.0'} + + ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + + ansi-escapes@4.3.2: + resolution: {integrity: 
sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==} + engines: {node: '>=8'} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-regex@6.1.0: + resolution: {integrity: sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==} + engines: {node: '>=12'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + ansi-styles@5.2.0: + resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} + engines: {node: '>=10'} + + ansi-styles@6.2.1: + resolution: {integrity: sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==} + engines: {node: '>=12'} + + ansis@3.17.0: + resolution: {integrity: sha512-0qWUglt9JEqLFr3w1I1pbrChn1grhaiAR2ocX1PP/flRmxgtwTzPFFFnfIlD6aMOLQZgSuCRlidD70lvx8yhzg==} + engines: {node: '>=14'} + + anymatch@3.1.3: + resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + + app-root-path@3.1.0: + resolution: {integrity: sha512-biN3PwB2gUtjaYy/isrU3aNWI5w+fAfvHkSvCKeQGxhmYpwKFUxudR3Yya+KqVRHBmEDYh+/lTozYCFbmzX4nA==} + engines: {node: '>= 6.0.0'} + + arg@4.1.3: + resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==} + + argparse@1.0.10: + resolution: {integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + array-flatten@1.1.1: + resolution: {integrity: 
sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==} + + array-union@2.1.0: + resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} + engines: {node: '>=8'} + + async@3.2.6: + resolution: {integrity: sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==} + + asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + autoprefixer@10.4.21: + resolution: {integrity: sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ==} + engines: {node: ^10 || ^12 || >=14} + hasBin: true + peerDependencies: + postcss: ^8.1.0 + + babel-jest@29.7.0: + resolution: {integrity: sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + '@babel/core': ^7.8.0 + + babel-plugin-istanbul@6.1.1: + resolution: {integrity: sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==} + engines: {node: '>=8'} + + babel-plugin-jest-hoist@29.6.3: + resolution: {integrity: sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + babel-preset-current-node-syntax@1.1.0: + resolution: {integrity: sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw==} + peerDependencies: + '@babel/core': ^7.0.0 + + babel-preset-jest@29.6.3: + resolution: {integrity: sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + '@babel/core': ^7.0.0 + + balanced-match@1.0.2: + resolution: {integrity: 
sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + base64-js@1.5.1: + resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + + bcryptjs@3.0.2: + resolution: {integrity: sha512-k38b3XOZKv60C4E2hVsXTolJWfkGRMbILBIe2IBITXciy5bOsTKot5kDrf3ZfufQtQOUN5mXceUEpU1rTl9Uog==} + hasBin: true + + binary-extensions@2.3.0: + resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} + engines: {node: '>=8'} + + bl@5.1.0: + resolution: {integrity: sha512-tv1ZJHLfTDnXE6tMHv73YgSJaWR2AFuPwMntBe7XL/GBFHnT0CLnsHMogfk5+GzCDC5ZWarSCYaIGATZt9dNsQ==} + + body-parser@1.20.3: + resolution: {integrity: sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==} + engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} + + body-parser@2.2.0: + resolution: {integrity: sha512-02qvAaxv8tp7fBa/mw1ga98OGm+eCbqzJOKoRt70sLmfEEi+jyBYVTDGfCL/k06/4EMk/z01gCe7HoCH/f2LTg==} + engines: {node: '>=18'} + + brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + + brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + + braces@3.0.3: + resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} + engines: {node: '>=8'} + + browserslist@4.24.4: + resolution: {integrity: sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + + browserslist@4.25.0: + resolution: {integrity: sha512-PJ8gYKeS5e/whHBh8xrwYK+dAvEj7JXtz6uTucnMRB8OiGTsKccFekoRrjajPBHV8oOY+2tI4uxeceSimKwMFA==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || 
>=13.7} + hasBin: true + + bs-logger@0.2.6: + resolution: {integrity: sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==} + engines: {node: '>= 6'} + + bser@2.1.1: + resolution: {integrity: sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==} + + buffer-equal-constant-time@1.0.1: + resolution: {integrity: sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==} + + buffer-from@1.1.2: + resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} + + buffer@6.0.3: + resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + + busboy@1.6.0: + resolution: {integrity: sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==} + engines: {node: '>=10.16.0'} + + bytes@3.1.2: + resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} + engines: {node: '>= 0.8'} + + call-bind-apply-helpers@1.0.2: + resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} + engines: {node: '>= 0.4'} + + call-bound@1.0.4: + resolution: {integrity: sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==} + engines: {node: '>= 0.4'} + + callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + camelcase@5.3.1: + resolution: {integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==} + engines: {node: '>=6'} + + camelcase@6.3.0: + resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} + engines: {node: '>=10'} + + 
caniuse-lite@1.0.30001707: + resolution: {integrity: sha512-3qtRjw/HQSMlDWf+X79N206fepf4SOOU6SQLMaq/0KkZLmSjPxAkBOQQ+FxbHKfHmYLZFfdWsO3KA90ceHPSnw==} + + caniuse-lite@1.0.30001720: + resolution: {integrity: sha512-Ec/2yV2nNPwb4DnTANEV99ZWwm3ZWfdlfkQbWSDDt+PsXEVYwlhPH8tdMaPunYTKKmz7AnHi2oNEi1GcmKCD8g==} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + chalk@5.2.0: + resolution: {integrity: sha512-ree3Gqw/nazQAPuJJEy+avdl7QfZMcUvmHIKgEZkGL+xOBzRvup5Hxo6LHuMceSxOabuJLJm5Yp/92R9eMmMvA==} + engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} + + char-regex@1.0.2: + resolution: {integrity: sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==} + engines: {node: '>=10'} + + chokidar@3.6.0: + resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} + engines: {node: '>= 8.10.0'} + + chownr@3.0.0: + resolution: {integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==} + engines: {node: '>=18'} + + ci-info@3.9.0: + resolution: {integrity: sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==} + engines: {node: '>=8'} + + cjs-module-lexer@1.4.3: + resolution: {integrity: sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==} + + class-variance-authority@0.7.1: + resolution: {integrity: sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==} + + cli-cursor@4.0.0: + resolution: {integrity: sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + cli-spinners@2.9.2: + resolution: {integrity: sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==} + 
engines: {node: '>=6'} + + client-only@0.0.1: + resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==} + + cliui@8.0.1: + resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} + engines: {node: '>=12'} + + clone@1.0.4: + resolution: {integrity: sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==} + engines: {node: '>=0.8'} + + clsx@2.1.1: + resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==} + engines: {node: '>=6'} + + co@4.6.0: + resolution: {integrity: sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==} + engines: {iojs: '>= 1.0.0', node: '>= 0.12.0'} + + collect-v8-coverage@1.0.2: + resolution: {integrity: sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + color-string@1.9.1: + resolution: {integrity: sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==} + + color@4.2.3: + resolution: {integrity: sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==} + engines: {node: '>=12.5.0'} + + combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + + commander@10.0.1: + resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} + engines: {node: '>=14'} + + 
concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + concurrently@9.1.2: + resolution: {integrity: sha512-H9MWcoPsYddwbOGM6difjVwVZHl63nwMEwDJG/L7VGtuaJhb12h2caPG2tVPWs7emuYix252iGfqOyrz1GczTQ==} + engines: {node: '>=18'} + hasBin: true + + content-disposition@0.5.4: + resolution: {integrity: sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==} + engines: {node: '>= 0.6'} + + content-disposition@1.0.0: + resolution: {integrity: sha512-Au9nRL8VNUut/XSzbQA38+M78dzP4D+eqg3gfJHMIHHYa3bg067xj1KxMUWj+VULbiZMowKngFFbKczUrNJ1mg==} + engines: {node: '>= 0.6'} + + content-type@1.0.5: + resolution: {integrity: sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==} + engines: {node: '>= 0.6'} + + convert-source-map@2.0.0: + resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} + + cookie-signature@1.0.6: + resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} + + cookie-signature@1.2.2: + resolution: {integrity: sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==} + engines: {node: '>=6.6.0'} + + cookie@0.7.1: + resolution: {integrity: sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==} + engines: {node: '>= 0.6'} + + cookie@0.7.2: + resolution: {integrity: sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==} + engines: {node: '>= 0.6'} + + cookie@1.0.2: + resolution: {integrity: sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==} + engines: {node: '>=18'} + + cors@2.8.5: + resolution: {integrity: sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==} + 
engines: {node: '>= 0.10'} + + create-jest@29.7.0: + resolution: {integrity: sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + + create-require@1.1.1: + resolution: {integrity: sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==} + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + csstype@3.1.3: + resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} + + data-uri-to-buffer@4.0.1: + resolution: {integrity: sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==} + engines: {node: '>= 12'} + + dayjs@1.11.13: + resolution: {integrity: sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==} + + debug@2.6.9: + resolution: {integrity: sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + debug@4.4.0: + resolution: {integrity: sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + debug@4.4.1: + resolution: {integrity: sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + dedent@1.5.3: + resolution: {integrity: sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ==} + peerDependencies: + babel-plugin-macros: ^3.1.0 + 
peerDependenciesMeta: + babel-plugin-macros: + optional: true + + dedent@1.6.0: + resolution: {integrity: sha512-F1Z+5UCFpmQUzJa11agbyPVMbpgT/qA3/SKyJ1jyBgm7dUcUEa8v9JwDkerSQXfakBwFljIxhOJqGkjUwZ9FSA==} + peerDependencies: + babel-plugin-macros: ^3.1.0 + peerDependenciesMeta: + babel-plugin-macros: + optional: true + + deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + deepmerge@4.3.1: + resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} + engines: {node: '>=0.10.0'} + + defaults@1.0.4: + resolution: {integrity: sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==} + + delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + depd@2.0.0: + resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} + engines: {node: '>= 0.8'} + + destroy@1.2.0: + resolution: {integrity: sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==} + engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} + + detect-libc@2.0.4: + resolution: {integrity: sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==} + engines: {node: '>=8'} + + detect-newline@3.1.0: + resolution: {integrity: sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==} + engines: {node: '>=8'} + + diff-sequences@29.6.3: + resolution: {integrity: sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + diff@4.0.2: + resolution: {integrity: 
sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} + engines: {node: '>=0.3.1'} + + dir-glob@3.0.1: + resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} + engines: {node: '>=8'} + + doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + + dotenv-expand@12.0.2: + resolution: {integrity: sha512-lXpXz2ZE1cea1gL4sz2Ipj8y4PiVjytYr3Ij0SWoms1PGxIv7m2CRKuRuCRtHdVuvM/hNJPMxt5PbhboNC4dPQ==} + engines: {node: '>=12'} + + dotenv@16.5.0: + resolution: {integrity: sha512-m/C+AwOAr9/W1UOIZUo232ejMNnJAJtYQjUbHoNTBNTJSvqzzDh7vnrei3o3r3m9blf6ZoDkvcw0VmozNRFJxg==} + engines: {node: '>=12'} + + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} + engines: {node: '>= 0.4'} + + dynamic-dedupe@0.3.0: + resolution: {integrity: sha512-ssuANeD+z97meYOqd50e04Ze5qp4bPqo8cCkI4TRjZkzAUgIDTrXV1R8QCdINpiI+hw14+rYazvTRdQrz0/rFQ==} + + eastasianwidth@0.2.0: + resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} + + ecdsa-sig-formatter@1.0.11: + resolution: {integrity: sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==} + + ee-first@1.1.1: + resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} + + ejs@3.1.10: + resolution: {integrity: sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==} + engines: {node: '>=0.10.0'} + hasBin: true + + electron-to-chromium@1.5.128: + resolution: {integrity: sha512-bo1A4HH/NS522Ws0QNFIzyPcyUUNV/yyy70Ho1xqfGYzPUme2F/xr4tlEOuM6/A538U1vDA7a4XfCd1CKRegKQ==} + + electron-to-chromium@1.5.161: + resolution: {integrity: 
sha512-hwtetwfKNZo/UlwHIVBlKZVdy7o8bIZxxKs0Mv/ROPiQQQmDgdm5a+KvKtBsxM8ZjFzTaCeLoodZ8jiBE3o9rA==} + + emittery@0.13.1: + resolution: {integrity: sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==} + engines: {node: '>=12'} + + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + emoji-regex@9.2.2: + resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==} + + encodeurl@1.0.2: + resolution: {integrity: sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==} + engines: {node: '>= 0.8'} + + encodeurl@2.0.0: + resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} + engines: {node: '>= 0.8'} + + enhanced-resolve@5.18.1: + resolution: {integrity: sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg==} + engines: {node: '>=10.13.0'} + + error-ex@1.3.2: + resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} + + es-define-property@1.0.1: + resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-object-atoms@1.1.1: + resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} + engines: {node: '>= 0.4'} + + es-set-tostringtag@2.1.0: + resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} + engines: {node: '>= 0.4'} + + esbuild@0.25.2: + resolution: {integrity: 
sha512-16854zccKPnC+toMywC+uKNeYSv+/eXkevRAfwRD/G9Cleq66m8XFIrigkbvauLLlCfDL45Q2cWegSg53gGBnQ==} + engines: {node: '>=18'} + hasBin: true + + escalade@3.2.0: + resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} + engines: {node: '>=6'} + + escape-html@1.0.3: + resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} + + escape-string-regexp@2.0.0: + resolution: {integrity: sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==} + engines: {node: '>=8'} + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + eslint-scope@7.2.2: + resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint@8.57.1: + resolution: {integrity: sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + deprecated: This version is no longer supported. Please see https://eslint.org/version-support for other options. 
+ hasBin: true + + espree@9.6.1: + resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + esprima@4.0.1: + resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} + engines: {node: '>=4'} + hasBin: true + + esquery@1.6.0: + resolution: {integrity: sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==} + engines: {node: '>=0.10'} + + esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + + estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + etag@1.8.1: + resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} + engines: {node: '>= 0.6'} + + event-target-shim@5.0.1: + resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==} + engines: {node: '>=6'} + + eventsource-parser@3.0.1: + resolution: {integrity: sha512-VARTJ9CYeuQYb0pZEPbzi740OWFgpHe7AYJ2WFZVnUDUQp5Dk2yJUgF36YsZ81cOyxT0QxmXD2EQpapAouzWVA==} + engines: {node: '>=18.0.0'} + + eventsource@3.0.6: + resolution: {integrity: sha512-l19WpE2m9hSuyP06+FbuUUf1G+R0SFLrtQfbRb9PRr+oimOfxQhgGCbVaXg5IvZyyTThJsxh6L/srkMiCeBPDA==} + engines: {node: '>=18.0.0'} + + execa@5.1.1: + resolution: {integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==} + engines: {node: '>=10'} + + execa@7.2.0: + resolution: {integrity: 
sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA==} + engines: {node: ^14.18.0 || ^16.14.0 || >=18.0.0} + + exit@0.1.2: + resolution: {integrity: sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==} + engines: {node: '>= 0.8.0'} + + expect@29.7.0: + resolution: {integrity: sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + express-rate-limit@7.5.0: + resolution: {integrity: sha512-eB5zbQh5h+VenMPM3fh+nw1YExi5nMr6HUCR62ELSP11huvxm/Uir1H1QEyTkk5QX6A58pX6NmaTMceKZ0Eodg==} + engines: {node: '>= 16'} + peerDependencies: + express: ^4.11 || 5 || ^5.0.0-beta.1 + + express-validator@7.2.1: + resolution: {integrity: sha512-CjNE6aakfpuwGaHQZ3m8ltCG2Qvivd7RHtVMS/6nVxOM7xVGqr4bhflsm4+N5FP5zI7Zxp+Hae+9RE+o8e3ZOQ==} + engines: {node: '>= 8.0.0'} + + express@4.21.2: + resolution: {integrity: sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==} + engines: {node: '>= 0.10.0'} + + express@5.1.0: + resolution: {integrity: sha512-DT9ck5YIRU+8GYzzU5kT3eHGA5iL+1Zd0EutOmTE9Dtk+Tvuzd23VBU+ec7HPNSTxXYO55gPV/hq4pSBJDjFpA==} + engines: {node: '>= 18'} + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + fast-glob@3.3.3: + resolution: {integrity: sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==} + engines: {node: '>=8.6.0'} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fastq@1.19.1: + resolution: {integrity: 
sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==} + + fb-watchman@2.0.2: + resolution: {integrity: sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==} + + fdir@6.4.4: + resolution: {integrity: sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==} + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + + fetch-blob@3.2.0: + resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==} + engines: {node: ^12.20 || >= 14.13} + + file-entry-cache@6.0.1: + resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + + filelist@1.0.4: + resolution: {integrity: sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==} + + fill-range@7.1.1: + resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} + engines: {node: '>=8'} + + finalhandler@1.3.1: + resolution: {integrity: sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==} + engines: {node: '>= 0.8'} + + finalhandler@2.1.0: + resolution: {integrity: sha512-/t88Ty3d5JWQbWYgaOGCCYfXRwV1+be02WqYYlL6h0lEiUAMPM8o8qKGO01YIkOHzka2up08wvgYD0mDiI+q3Q==} + engines: {node: '>= 0.8'} + + find-up@4.1.0: + resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==} + engines: {node: '>=8'} + + find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat-cache@3.2.0: + resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==} + 
engines: {node: ^10.12.0 || >=12.0.0} + + flatted@3.3.3: + resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==} + + foreground-child@3.3.1: + resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==} + engines: {node: '>=14'} + + form-data-encoder@1.7.2: + resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==} + + form-data@4.0.2: + resolution: {integrity: sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==} + engines: {node: '>= 6'} + + formdata-node@4.4.1: + resolution: {integrity: sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==} + engines: {node: '>= 12.20'} + + formdata-polyfill@4.0.10: + resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} + engines: {node: '>=12.20.0'} + + forwarded@0.2.0: + resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==} + engines: {node: '>= 0.6'} + + fraction.js@4.3.7: + resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==} + + fresh@0.5.2: + resolution: {integrity: sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==} + engines: {node: '>= 0.6'} + + fresh@2.0.0: + resolution: {integrity: sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==} + engines: {node: '>= 0.8'} + + fs-extra@11.3.0: + resolution: {integrity: sha512-Z4XaCL6dUDHfP/jT25jJKMmtxvuwbkrD1vNSMFlo9lNLY2c5FHYSQgHPRZUjAB26TpDEoW9HCOgplrdbaPV/ew==} + engines: {node: '>=14.14'} + + fs.realpath@1.0.0: + resolution: {integrity: 
sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + gensync@1.0.0-beta.2: + resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} + engines: {node: '>=6.9.0'} + + get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + + get-intrinsic@1.3.0: + resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} + engines: {node: '>= 0.4'} + + get-package-type@0.1.0: + resolution: {integrity: sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==} + engines: {node: '>=8.0.0'} + + get-proto@1.0.1: + resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} + engines: {node: '>= 0.4'} + + get-stream@6.0.1: + resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==} + engines: {node: '>=10'} + + get-tsconfig@4.10.0: + resolution: {integrity: sha512-kGzZ3LWWQcGIAmg6iWvXn0ei6WDtV26wzHRMwDSzmAbcXrTEXxHy6IehI6/4eT6VRKyMP1eF1VqwrVUmE/LR7A==} + + glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + + glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: 
{node: '>=10.13.0'} + + glob@10.4.5: + resolution: {integrity: sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==} + hasBin: true + + glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + deprecated: Glob versions prior to v9 are no longer supported + + globals@11.12.0: + resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} + engines: {node: '>=4'} + + globals@13.24.0: + resolution: {integrity: sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==} + engines: {node: '>=8'} + + globby@11.1.0: + resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} + engines: {node: '>=10'} + + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} + + graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + + graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + has-symbols@1.1.0: + resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} + engines: {node: '>= 0.4'} + + has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} + engines: {node: '>= 0.4'} + + hasown@2.0.2: + resolution: {integrity: 
sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + html-escaper@2.0.2: + resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} + + html-parse-stringify@3.0.1: + resolution: {integrity: sha512-KknJ50kTInJ7qIScF3jeaFRpMpE8/lfiTdzf/twXyPBLAGrLRTmkz3AdTnKeh40X8k9L2fdYwEp/42WGXIRGcg==} + + http-errors@2.0.0: + resolution: {integrity: sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==} + engines: {node: '>= 0.8'} + + human-signals@2.1.0: + resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} + engines: {node: '>=10.17.0'} + + human-signals@4.3.1: + resolution: {integrity: sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ==} + engines: {node: '>=14.18.0'} + + humanize-ms@1.2.1: + resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} + + i18next-browser-languagedetector@8.1.0: + resolution: {integrity: sha512-mHZxNx1Lq09xt5kCauZ/4bsXOEA2pfpwSoU11/QTJB+pD94iONFwp+ohqi///PwiFvjFOxe1akYCdHyFo1ng5Q==} + + i18next@24.2.3: + resolution: {integrity: sha512-lfbf80OzkocvX7nmZtu7nSTNbrTYR52sLWxPtlXX1zAhVw8WEnFk4puUkCR4B1dNQwbSpEHHHemcZu//7EcB7A==} + peerDependencies: + typescript: ^5 + peerDependenciesMeta: + typescript: + optional: true + + iconv-lite@0.4.24: + resolution: {integrity: sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==} + engines: {node: '>=0.10.0'} + + iconv-lite@0.6.3: + resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} + engines: {node: '>=0.10.0'} + + ieee754@1.2.1: + resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} 
+ + ignore@5.3.2: + resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} + engines: {node: '>= 4'} + + import-fresh@3.3.1: + resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} + engines: {node: '>=6'} + + import-local@3.2.0: + resolution: {integrity: sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==} + engines: {node: '>=8'} + hasBin: true + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. 
+ + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + ipaddr.js@1.9.1: + resolution: {integrity: sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==} + engines: {node: '>= 0.10'} + + is-arrayish@0.2.1: + resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} + + is-arrayish@0.3.2: + resolution: {integrity: sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==} + + is-binary-path@2.1.0: + resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} + engines: {node: '>=8'} + + is-core-module@2.16.1: + resolution: {integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==} + engines: {node: '>= 0.4'} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + is-generator-fn@2.1.0: + resolution: {integrity: sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==} + engines: {node: '>=6'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + is-interactive@2.0.0: + resolution: {integrity: sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ==} + engines: {node: '>=12'} + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: 
{node: '>=0.12.0'} + + is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: '>=8'} + + is-promise@4.0.0: + resolution: {integrity: sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==} + + is-stream@2.0.1: + resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} + engines: {node: '>=8'} + + is-stream@3.0.0: + resolution: {integrity: sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + is-unicode-supported@1.3.0: + resolution: {integrity: sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==} + engines: {node: '>=12'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + istanbul-lib-coverage@3.2.2: + resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==} + engines: {node: '>=8'} + + istanbul-lib-instrument@5.2.1: + resolution: {integrity: sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==} + engines: {node: '>=8'} + + istanbul-lib-instrument@6.0.3: + resolution: {integrity: sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==} + engines: {node: '>=10'} + + istanbul-lib-report@3.0.1: + resolution: {integrity: sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==} + engines: {node: '>=10'} + + istanbul-lib-source-maps@4.0.1: + resolution: {integrity: sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==} + engines: {node: '>=10'} + + istanbul-reports@3.1.7: + resolution: {integrity: 
sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==} + engines: {node: '>=8'} + + jackspeak@3.4.3: + resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==} + + jake@10.9.2: + resolution: {integrity: sha512-2P4SQ0HrLQ+fw6llpLnOaGAvN2Zu6778SJMrCUwns4fOoG9ayrTiZk3VV8sCPkVZF8ab0zksVpS8FDY5pRCNBA==} + engines: {node: '>=10'} + hasBin: true + + jest-changed-files@29.7.0: + resolution: {integrity: sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-circus@29.7.0: + resolution: {integrity: sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-cli@29.7.0: + resolution: {integrity: sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + jest-config@29.7.0: + resolution: {integrity: sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + '@types/node': '*' + ts-node: '>=9.0.0' + peerDependenciesMeta: + '@types/node': + optional: true + ts-node: + optional: true + + jest-diff@29.7.0: + resolution: {integrity: sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-docblock@29.7.0: + resolution: {integrity: sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-each@29.7.0: + resolution: {integrity: 
sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-environment-node@29.7.0: + resolution: {integrity: sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-get-type@29.6.3: + resolution: {integrity: sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-haste-map@29.7.0: + resolution: {integrity: sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-leak-detector@29.7.0: + resolution: {integrity: sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-matcher-utils@29.7.0: + resolution: {integrity: sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-message-util@29.7.0: + resolution: {integrity: sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-mock@29.7.0: + resolution: {integrity: sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-pnp-resolver@1.2.3: + resolution: {integrity: sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==} + engines: {node: '>=6'} + peerDependencies: + jest-resolve: '*' + peerDependenciesMeta: + jest-resolve: + optional: true + + jest-regex-util@29.6.3: + resolution: {integrity: sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==} 
+ engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-resolve-dependencies@29.7.0: + resolution: {integrity: sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-resolve@29.7.0: + resolution: {integrity: sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-runner@29.7.0: + resolution: {integrity: sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-runtime@29.7.0: + resolution: {integrity: sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-snapshot@29.7.0: + resolution: {integrity: sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-util@29.7.0: + resolution: {integrity: sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-validate@29.7.0: + resolution: {integrity: sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-watcher@29.7.0: + resolution: {integrity: sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-worker@29.7.0: + resolution: {integrity: sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest@29.7.0: + resolution: {integrity: 
sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + jiti@2.4.2: + resolution: {integrity: sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==} + hasBin: true + + js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + + js-yaml@3.14.1: + resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} + hasBin: true + + js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + + jsesc@3.1.0: + resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} + engines: {node: '>=6'} + hasBin: true + + json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + + json-parse-even-better-errors@2.3.1: + resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + + json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + json5@2.2.3: + resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==} + engines: {node: '>=6'} + hasBin: true + + jsonfile@6.1.0: + resolution: {integrity: 
sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==} + + jsonwebtoken@9.0.2: + resolution: {integrity: sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ==} + engines: {node: '>=12', npm: '>=6'} + + jwa@1.4.1: + resolution: {integrity: sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==} + + jws@3.2.2: + resolution: {integrity: sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==} + + keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + + kleur@3.0.3: + resolution: {integrity: sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==} + engines: {node: '>=6'} + + leven@3.1.0: + resolution: {integrity: sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==} + engines: {node: '>=6'} + + levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + + lightningcss-darwin-arm64@1.30.1: + resolution: {integrity: sha512-c8JK7hyE65X1MHMN+Viq9n11RRC7hgin3HhYKhrMyaXflk5GVplZ60IxyoVtzILeKr+xAJwg6zK6sjTBJ0FKYQ==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [darwin] + + lightningcss-darwin-x64@1.30.1: + resolution: {integrity: sha512-k1EvjakfumAQoTfcXUcHQZhSpLlkAuEkdMBsI/ivWw9hL+7FtilQc0Cy3hrx0AAQrVtQAbMI7YjCgYgvn37PzA==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [darwin] + + lightningcss-freebsd-x64@1.30.1: + resolution: {integrity: sha512-kmW6UGCGg2PcyUE59K5r0kWfKPAVy4SltVeut+umLCFoJ53RdCUWxcRDzO1eTaxf/7Q2H7LTquFHPL5R+Gjyig==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [freebsd] + + lightningcss-linux-arm-gnueabihf@1.30.1: + resolution: {integrity: 
sha512-MjxUShl1v8pit+6D/zSPq9S9dQ2NPFSQwGvxBCYaBYLPlCWuPh9/t1MRS8iUaR8i+a6w7aps+B4N0S1TYP/R+Q==} + engines: {node: '>= 12.0.0'} + cpu: [arm] + os: [linux] + + lightningcss-linux-arm64-gnu@1.30.1: + resolution: {integrity: sha512-gB72maP8rmrKsnKYy8XUuXi/4OctJiuQjcuqWNlJQ6jZiWqtPvqFziskH3hnajfvKB27ynbVCucKSm2rkQp4Bw==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [linux] + + lightningcss-linux-arm64-musl@1.30.1: + resolution: {integrity: sha512-jmUQVx4331m6LIX+0wUhBbmMX7TCfjF5FoOH6SD1CttzuYlGNVpA7QnrmLxrsub43ClTINfGSYyHe2HWeLl5CQ==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [linux] + + lightningcss-linux-x64-gnu@1.30.1: + resolution: {integrity: sha512-piWx3z4wN8J8z3+O5kO74+yr6ze/dKmPnI7vLqfSqI8bccaTGY5xiSGVIJBDd5K5BHlvVLpUB3S2YCfelyJ1bw==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-linux-x64-musl@1.30.1: + resolution: {integrity: sha512-rRomAK7eIkL+tHY0YPxbc5Dra2gXlI63HL+v1Pdi1a3sC+tJTcFrHX+E86sulgAXeI7rSzDYhPSeHHjqFhqfeQ==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-win32-arm64-msvc@1.30.1: + resolution: {integrity: sha512-mSL4rqPi4iXq5YVqzSsJgMVFENoa4nGTT/GjO2c0Yl9OuQfPsIfncvLrEW6RbbB24WtZ3xP/2CCmI3tNkNV4oA==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [win32] + + lightningcss-win32-x64-msvc@1.30.1: + resolution: {integrity: sha512-PVqXh48wh4T53F/1CCu8PIPCxLzWyCnn/9T5W1Jpmdy5h9Cwd+0YQS6/LwhHXSafuc61/xg9Lv5OrCby6a++jg==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [win32] + + lightningcss@1.30.1: + resolution: {integrity: sha512-xi6IyHML+c9+Q3W0S4fCQJOym42pyurFiJUHEcEyHS0CeKzia4yZDEsLlqOFykxOdHpNy0NmvVO31vcSqAxJCg==} + engines: {node: '>= 12.0.0'} + + lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + + locate-path@5.0.0: + resolution: {integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==} + engines: {node: '>=8'} + + 
locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + lodash.includes@4.3.0: + resolution: {integrity: sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==} + + lodash.isboolean@3.0.3: + resolution: {integrity: sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==} + + lodash.isinteger@4.0.4: + resolution: {integrity: sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==} + + lodash.isnumber@3.0.3: + resolution: {integrity: sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==} + + lodash.isplainobject@4.0.6: + resolution: {integrity: sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==} + + lodash.isstring@4.0.1: + resolution: {integrity: sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==} + + lodash.memoize@4.1.2: + resolution: {integrity: sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==} + + lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + + lodash.once@4.1.1: + resolution: {integrity: sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==} + + lodash@4.17.21: + resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} + + log-symbols@5.1.0: + resolution: {integrity: sha512-l0x2DvrW294C9uDCoQe1VSU4gf529FkSZ6leBl4TiqZH/e+0R7hSfHQBNut2mNygDgHwvYHfFLn6Oxb3VWj2rA==} + engines: {node: '>=12'} + + lru-cache@10.4.3: + resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} + + lru-cache@5.1.1: + 
resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + + lucide-react@0.486.0: + resolution: {integrity: sha512-xWop/wMsC1ikiEVLZrxXjPKw4vU/eAip33G2mZHgbWnr4Nr5Rt4Vx4s/q1D3B/rQVbxjOuqASkEZcUxDEKzecw==} + peerDependencies: + react: ^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + magic-string@0.30.17: + resolution: {integrity: sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==} + + make-dir@4.0.0: + resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==} + engines: {node: '>=10'} + + make-error@1.3.6: + resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} + + makeerror@1.0.12: + resolution: {integrity: sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==} + + math-intrinsics@1.1.0: + resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} + engines: {node: '>= 0.4'} + + media-typer@0.3.0: + resolution: {integrity: sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==} + engines: {node: '>= 0.6'} + + media-typer@1.1.0: + resolution: {integrity: sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==} + engines: {node: '>= 0.8'} + + merge-descriptors@1.0.3: + resolution: {integrity: sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==} + + merge-descriptors@2.0.0: + resolution: {integrity: sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==} + engines: {node: '>=18'} + + merge-stream@2.0.0: + resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} + + merge2@1.4.1: + resolution: {integrity: 
sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + + methods@1.1.2: + resolution: {integrity: sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==} + engines: {node: '>= 0.6'} + + micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} + engines: {node: '>=8.6'} + + mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + + mime-db@1.54.0: + resolution: {integrity: sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==} + engines: {node: '>= 0.6'} + + mime-types@2.1.35: + resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + + mime-types@3.0.1: + resolution: {integrity: sha512-xRc4oEhT6eaBpU1XF7AjpOFD+xQmXNB5OVKwp4tqCuBpHLS/ZbBDrc07mYTDqVMg6PfxUjjNp85O6Cd2Z/5HWA==} + engines: {node: '>= 0.6'} + + mime@1.6.0: + resolution: {integrity: sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==} + engines: {node: '>=4'} + hasBin: true + + mimic-fn@2.1.0: + resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==} + engines: {node: '>=6'} + + mimic-fn@4.0.0: + resolution: {integrity: sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==} + engines: {node: '>=12'} + + minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + + minimatch@5.1.6: + resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==} + engines: {node: '>=10'} + + 
minimatch@9.0.3: + resolution: {integrity: sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==} + engines: {node: '>=16 || 14 >=14.17'} + + minimatch@9.0.5: + resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} + engines: {node: '>=16 || 14 >=14.17'} + + minimist@1.2.8: + resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + + minipass@7.1.2: + resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} + engines: {node: '>=16 || 14 >=14.17'} + + minizlib@3.0.2: + resolution: {integrity: sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==} + engines: {node: '>= 18'} + + mkdirp@1.0.4: + resolution: {integrity: sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==} + engines: {node: '>=10'} + hasBin: true + + mkdirp@3.0.1: + resolution: {integrity: sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==} + engines: {node: '>=10'} + hasBin: true + + ms@2.0.0: + resolution: {integrity: sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + negotiator@0.6.3: + resolution: {integrity: 
sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==} + engines: {node: '>= 0.6'} + + negotiator@1.0.0: + resolution: {integrity: sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==} + engines: {node: '>= 0.6'} + + next@15.3.3: + resolution: {integrity: sha512-JqNj29hHNmCLtNvd090SyRbXJiivQ+58XjCcrC50Crb5g5u2zi7Y2YivbsEfzk6AtVI80akdOQbaMZwWB1Hthw==} + engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0} + hasBin: true + peerDependencies: + '@opentelemetry/api': ^1.1.0 + '@playwright/test': ^1.41.2 + babel-plugin-react-compiler: '*' + react: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 + react-dom: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 + sass: ^1.3.0 + peerDependenciesMeta: + '@opentelemetry/api': + optional: true + '@playwright/test': + optional: true + babel-plugin-react-compiler: + optional: true + sass: + optional: true + + node-domexception@1.0.0: + resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} + engines: {node: '>=10.5.0'} + deprecated: Use your platform's native DOMException instead + + node-fetch@2.7.0: + resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} + engines: {node: 4.x || >=6.0.0} + peerDependencies: + encoding: ^0.1.0 + peerDependenciesMeta: + encoding: + optional: true + + node-fetch@3.3.2: + resolution: {integrity: sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + node-int64@0.4.0: + resolution: {integrity: sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==} + + node-releases@2.0.19: + resolution: {integrity: sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==} + + normalize-path@3.0.0: + resolution: {integrity: 
sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + + normalize-range@0.1.2: + resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==} + engines: {node: '>=0.10.0'} + + npm-run-path@4.0.1: + resolution: {integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==} + engines: {node: '>=8'} + + npm-run-path@5.3.0: + resolution: {integrity: sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + + object-inspect@1.13.4: + resolution: {integrity: sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==} + engines: {node: '>= 0.4'} + + on-finished@2.4.1: + resolution: {integrity: sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==} + engines: {node: '>= 0.8'} + + once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + onetime@5.1.2: + resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} + engines: {node: '>=6'} + + onetime@6.0.0: + resolution: {integrity: sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==} + engines: {node: '>=12'} + + openai@4.104.0: + resolution: {integrity: sha512-p99EFNsA/yX6UhVO93f5kJsDRLAg+CTA2RBqdHK4RtK8u5IJw32Hyb2dTGKbnnFmnuoBv5r7Z2CURI9sGZpSuA==} + hasBin: true + peerDependencies: + ws: ^8.18.0 + zod: ^3.23.8 + peerDependenciesMeta: + ws: + optional: true + zod: + optional: true + + optionator@0.9.4: + resolution: 
{integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} + engines: {node: '>= 0.8.0'} + + ora@6.3.1: + resolution: {integrity: sha512-ERAyNnZOfqM+Ao3RAvIXkYh5joP220yf59gVe2X/cI6SiCxIdi4c9HZKZD8R6q/RDXEje1THBju6iExiSsgJaQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + p-limit@2.3.0: + resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==} + engines: {node: '>=6'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@4.1.0: + resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==} + engines: {node: '>=8'} + + p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + p-try@2.2.0: + resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} + engines: {node: '>=6'} + + package-json-from-dist@1.0.1: + resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==} + + parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + + parse-json@5.2.0: + resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} + engines: {node: '>=8'} + + parseurl@1.3.3: + resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==} + engines: {node: '>= 0.8'} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: 
{node: '>=8'} + + path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + path-key@4.0.0: + resolution: {integrity: sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==} + engines: {node: '>=12'} + + path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + + path-scurry@1.11.1: + resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==} + engines: {node: '>=16 || 14 >=14.18'} + + path-to-regexp@0.1.12: + resolution: {integrity: sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==} + + path-to-regexp@8.2.0: + resolution: {integrity: sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ==} + engines: {node: '>=16'} + + path-type@4.0.0: + resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} + engines: {node: '>=8'} + + pg-cloudflare@1.2.5: + resolution: {integrity: sha512-OOX22Vt0vOSRrdoUPKJ8Wi2OpE/o/h9T8X1s4qSkCedbNah9ei2W2765be8iMVxQUsvgT7zIAT2eIa9fs5+vtg==} + + pg-connection-string@2.9.0: + resolution: {integrity: sha512-P2DEBKuvh5RClafLngkAuGe9OUlFV7ebu8w1kmaaOgPcpJd1RIFh7otETfI6hAR8YupOLFTY7nuvvIn7PLciUQ==} + + pg-int8@1.0.1: + resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==} + engines: {node: '>=4.0.0'} + + pg-pool@3.10.0: + resolution: {integrity: sha512-DzZ26On4sQ0KmqnO34muPcmKbhrjmyiO4lCCR0VwEd7MjmiKf5NTg/6+apUEu0NF7ESa37CGzFxH513CoUmWnA==} + peerDependencies: + 
pg: '>=8.0' + + pg-protocol@1.10.0: + resolution: {integrity: sha512-IpdytjudNuLv8nhlHs/UrVBhU0e78J0oIS/0AVdTbWxSOkFUVdsHC/NrorO6nXsQNDTT1kzDSOMJubBQviX18Q==} + + pg-types@2.2.0: + resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==} + engines: {node: '>=4'} + + pg@8.16.0: + resolution: {integrity: sha512-7SKfdvP8CTNXjMUzfcVTaI+TDzBEeaUnVwiVGZQD1Hh33Kpev7liQba9uLd4CfN8r9mCVsD0JIpq03+Unpz+kg==} + engines: {node: '>= 8.0.0'} + peerDependencies: + pg-native: '>=3.0.1' + peerDependenciesMeta: + pg-native: + optional: true + + pgpass@1.0.5: + resolution: {integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==} + + pgvector@0.2.1: + resolution: {integrity: sha512-nKaQY9wtuiidwLMdVIce1O3kL0d+FxrigCVzsShnoqzOSaWWWOvuctb/sYwlai5cTwwzRSNa+a/NtN2kVZGNJw==} + engines: {node: '>= 18'} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + picomatch@4.0.2: + resolution: {integrity: sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==} + engines: {node: '>=12'} + + pirates@4.0.7: + resolution: {integrity: sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==} + engines: {node: '>= 6'} + + pkce-challenge@5.0.0: + resolution: {integrity: sha512-ueGLflrrnvwB3xuo/uGob5pd5FN7l0MsLf0Z87o/UQmRtwjvfylfc9MurIxRAWywCYTgrvpXBcqjV4OfCYGCIQ==} + engines: {node: '>=16.20.0'} + + pkg-dir@4.2.0: + resolution: {integrity: sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==} + engines: {node: '>=8'} + + postcss-value-parser@4.2.0: + resolution: {integrity: 
sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} + + postcss@8.4.31: + resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==} + engines: {node: ^10 || ^12 || >=14} + + postcss@8.5.4: + resolution: {integrity: sha512-QSa9EBe+uwlGTFmHsPKokv3B/oEMQZxfqW0QqNCyhpa6mB1afzulwn8hihglqAb2pOw+BJgNlmXQ8la2VeHB7w==} + engines: {node: ^10 || ^12 || >=14} + + postgres-array@2.0.0: + resolution: {integrity: sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==} + engines: {node: '>=4'} + + postgres-bytea@1.0.0: + resolution: {integrity: sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==} + engines: {node: '>=0.10.0'} + + postgres-date@1.0.7: + resolution: {integrity: sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==} + engines: {node: '>=0.10.0'} + + postgres-interval@1.2.0: + resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==} + engines: {node: '>=0.10.0'} + + postgres@3.4.7: + resolution: {integrity: sha512-Jtc2612XINuBjIl/QTWsV5UvE8UHuNblcO3vVADSrKsrc6RqGX6lOW1cEo3CM2v0XG4Nat8nI+YM7/f26VxXLw==} + engines: {node: '>=12'} + + prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + prettier@3.5.3: + resolution: {integrity: sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==} + engines: {node: '>=14'} + hasBin: true + + pretty-format@29.7.0: + resolution: {integrity: sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + prompts@2.4.2: + resolution: {integrity: 
sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==} + engines: {node: '>= 6'} + + proxy-addr@2.0.7: + resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} + engines: {node: '>= 0.10'} + + punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + pure-rand@6.1.0: + resolution: {integrity: sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==} + + qs@6.13.0: + resolution: {integrity: sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==} + engines: {node: '>=0.6'} + + qs@6.14.0: + resolution: {integrity: sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==} + engines: {node: '>=0.6'} + + queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + + range-parser@1.2.1: + resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} + engines: {node: '>= 0.6'} + + raw-body@2.5.2: + resolution: {integrity: sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==} + engines: {node: '>= 0.8'} + + raw-body@3.0.0: + resolution: {integrity: sha512-RmkhL8CAyCRPXCE28MMH0z2PNWQBNk2Q09ZdxM9IOOXwxwZbN+qbWaatPkdkWIKL2ZVDImrN/pK5HTRz2PcS4g==} + engines: {node: '>= 0.8'} + + react-dom@19.1.0: + resolution: {integrity: sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g==} + peerDependencies: + react: ^19.1.0 + + react-i18next@15.5.2: + resolution: {integrity: sha512-ePODyXgmZQAOYTbZXQn5rRsSBu3Gszo69jxW6aKmlSgxKAI1fOhDwSu6bT4EKHciWPKQ7v7lPrjeiadR6Gi+1A==} + peerDependencies: + i18next: '>= 23.2.3' + react: '>= 
16.8.0' + react-dom: '*' + react-native: '*' + typescript: ^5 + peerDependenciesMeta: + react-dom: + optional: true + react-native: + optional: true + typescript: + optional: true + + react-is@18.3.1: + resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} + + react-refresh@0.17.0: + resolution: {integrity: sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==} + engines: {node: '>=0.10.0'} + + react-router-dom@7.6.1: + resolution: {integrity: sha512-vxU7ei//UfPYQ3iZvHuO1D/5fX3/JOqhNTbRR+WjSBWxf9bIvpWK+ftjmdfJHzPOuMQKe2fiEdG+dZX6E8uUpA==} + engines: {node: '>=20.0.0'} + peerDependencies: + react: '>=18' + react-dom: '>=18' + + react-router@7.6.1: + resolution: {integrity: sha512-hPJXXxHJZEsPFNVbtATH7+MMX43UDeOauz+EAU4cgqTn7ojdI9qQORqS8Z0qmDlL1TclO/6jLRYUEtbWidtdHQ==} + engines: {node: '>=20.0.0'} + peerDependencies: + react: '>=18' + react-dom: '>=18' + peerDependenciesMeta: + react-dom: + optional: true + + react@19.1.0: + resolution: {integrity: sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg==} + engines: {node: '>=0.10.0'} + + readable-stream@3.6.2: + resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} + engines: {node: '>= 6'} + + readdirp@3.6.0: + resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} + engines: {node: '>=8.10.0'} + + reflect-metadata@0.2.2: + resolution: {integrity: sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==} + + regenerator-runtime@0.14.1: + resolution: {integrity: sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==} + + require-directory@2.1.1: + resolution: {integrity: 
sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + + resolve-cwd@3.0.0: + resolution: {integrity: sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==} + engines: {node: '>=8'} + + resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + + resolve-from@5.0.0: + resolution: {integrity: sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==} + engines: {node: '>=8'} + + resolve-pkg-maps@1.0.0: + resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==} + + resolve.exports@2.0.3: + resolution: {integrity: sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==} + engines: {node: '>=10'} + + resolve@1.22.10: + resolution: {integrity: sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==} + engines: {node: '>= 0.4'} + hasBin: true + + restore-cursor@4.0.0: + resolution: {integrity: sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + reusify@1.1.0: + resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + + rimraf@2.7.1: + resolution: {integrity: sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==} + deprecated: Rimraf versions prior to v4 are no longer supported + hasBin: true + + rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + deprecated: Rimraf versions prior to v4 are no longer supported + hasBin: true + + 
rollup@4.40.1: + resolution: {integrity: sha512-C5VvvgCCyfyotVITIAv+4efVytl5F7wt+/I2i9q9GZcEXW9BP52YYOXC58igUi+LFZVHukErIIqQSWwv/M3WRw==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + router@2.2.0: + resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==} + engines: {node: '>= 18'} + + run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + + rxjs@7.8.2: + resolution: {integrity: sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==} + + safe-buffer@5.2.1: + resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} + + safer-buffer@2.1.2: + resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + + scheduler@0.26.0: + resolution: {integrity: sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==} + + semver@6.3.1: + resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} + hasBin: true + + semver@7.7.1: + resolution: {integrity: sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==} + engines: {node: '>=10'} + hasBin: true + + semver@7.7.2: + resolution: {integrity: sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==} + engines: {node: '>=10'} + hasBin: true + + send@0.19.0: + resolution: {integrity: sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==} + engines: {node: '>= 0.8.0'} + + send@1.2.0: + resolution: {integrity: sha512-uaW0WwXKpL9blXE2o0bRhoL2EGXIrZxQ2ZQ4mgcfoBxdFmQold+qWsD2jLrfZ0trjKL6vOw0j//eAwcALFjKSw==} + engines: {node: '>= 18'} + + serve-static@1.16.2: + resolution: {integrity: 
sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==} + engines: {node: '>= 0.8.0'} + + serve-static@2.2.0: + resolution: {integrity: sha512-61g9pCh0Vnh7IutZjtLGGpTA355+OPn2TyDv/6ivP2h/AdAVX9azsoxmg2/M6nZeQZNYBEwIcsne1mJd9oQItQ==} + engines: {node: '>= 18'} + + set-cookie-parser@2.7.1: + resolution: {integrity: sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==} + + setprototypeof@1.2.0: + resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} + + sha.js@2.4.11: + resolution: {integrity: sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==} + hasBin: true + + sharp@0.34.2: + resolution: {integrity: sha512-lszvBmB9QURERtyKT2bNmsgxXK0ShJrL/fvqlonCo7e6xBF8nT8xU6pW+PMIbLsz0RxQk3rgH9kd8UmvOzlMJg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + shell-quote@1.8.3: + resolution: {integrity: sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==} + engines: {node: '>= 0.4'} + + side-channel-list@1.0.0: + resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==} + engines: {node: '>= 0.4'} + + side-channel-map@1.0.1: + resolution: {integrity: sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==} + engines: {node: '>= 0.4'} + + side-channel-weakmap@1.0.2: + resolution: {integrity: sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==} + engines: 
{node: '>= 0.4'} + + side-channel@1.1.0: + resolution: {integrity: sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==} + engines: {node: '>= 0.4'} + + signal-exit@3.0.7: + resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} + + signal-exit@4.1.0: + resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==} + engines: {node: '>=14'} + + simple-swizzle@0.2.2: + resolution: {integrity: sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==} + + sisteransi@1.0.5: + resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} + + slash@3.0.0: + resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} + engines: {node: '>=8'} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + source-map-support@0.5.13: + resolution: {integrity: sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==} + + source-map-support@0.5.21: + resolution: {integrity: sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==} + + source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + + split2@4.2.0: + resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==} + engines: {node: '>= 10.x'} + + sprintf-js@1.0.3: + resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} + + sql-highlight@6.0.0: + resolution: {integrity: 
sha512-+fLpbAbWkQ+d0JEchJT/NrRRXbYRNbG15gFpANx73EwxQB1PRjj+k/OI0GTU0J63g8ikGkJECQp9z8XEJZvPRw==} + engines: {node: '>=14'} + + stack-utils@2.0.6: + resolution: {integrity: sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==} + engines: {node: '>=10'} + + statuses@2.0.1: + resolution: {integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==} + engines: {node: '>= 0.8'} + + stdin-discarder@0.1.0: + resolution: {integrity: sha512-xhV7w8S+bUwlPTb4bAOUQhv8/cSS5offJuX8GQGq32ONF0ZtDWKfkdomM3HMRA+LhX6um/FZ0COqlwsjD53LeQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + streamsearch@1.1.0: + resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==} + engines: {node: '>=10.0.0'} + + string-length@4.0.2: + resolution: {integrity: sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==} + engines: {node: '>=10'} + + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + + string-width@5.1.2: + resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==} + engines: {node: '>=12'} + + string_decoder@1.3.0: + resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-ansi@7.1.0: + resolution: {integrity: sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==} + engines: {node: '>=12'} + + strip-bom@3.0.0: + resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==} + 
engines: {node: '>=4'} + + strip-bom@4.0.0: + resolution: {integrity: sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==} + engines: {node: '>=8'} + + strip-final-newline@2.0.0: + resolution: {integrity: sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==} + engines: {node: '>=6'} + + strip-final-newline@3.0.0: + resolution: {integrity: sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==} + engines: {node: '>=12'} + + strip-json-comments@2.0.1: + resolution: {integrity: sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==} + engines: {node: '>=0.10.0'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + styled-jsx@5.1.6: + resolution: {integrity: sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==} + engines: {node: '>= 12.0.0'} + peerDependencies: + '@babel/core': '*' + babel-plugin-macros: '*' + react: '>= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0' + peerDependenciesMeta: + '@babel/core': + optional: true + babel-plugin-macros: + optional: true + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + supports-color@8.1.1: + resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==} + engines: {node: '>=10'} + + supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + + tailwind-merge@3.3.0: + resolution: {integrity: 
sha512-fyW/pEfcQSiigd5SNn0nApUOxx0zB/dm6UDU/rEwc2c3sX2smWUNbapHv+QRqLGVp9GWX3THIa7MUGPo+YkDzQ==} + + tailwind-scrollbar-hide@2.0.0: + resolution: {integrity: sha512-lqiIutHliEiODwBRHy4G2+Tcayo2U7+3+4frBmoMETD72qtah+XhOk5XcPzC1nJvXhXUdfl2ajlMhUc2qC6CIg==} + peerDependencies: + tailwindcss: '>=3.0.0 || >= 4.0.0 || >= 4.0.0-beta.8 || >= 4.0.0-alpha.20' + + tailwindcss@4.1.8: + resolution: {integrity: sha512-kjeW8gjdxasbmFKpVGrGd5T4i40mV5J2Rasw48QARfYeQ8YS9x02ON9SFWax3Qf616rt4Cp3nVNIj6Hd1mP3og==} + + tapable@2.2.2: + resolution: {integrity: sha512-Re10+NauLTMCudc7T5WLFLAwDhQ0JWdrMK+9B2M8zR5hRExKmsRDCBA7/aV/pNJFltmBFO5BAMlQFi/vq3nKOg==} + engines: {node: '>=6'} + + tar@7.4.3: + resolution: {integrity: sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==} + engines: {node: '>=18'} + + test-exclude@6.0.0: + resolution: {integrity: sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==} + engines: {node: '>=8'} + + text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + + tinyglobby@0.2.13: + resolution: {integrity: sha512-mEwzpUgrLySlveBwEVDMKk5B57bhLPYovRfPAXD5gA/98Opn0rCDj3GtLwFvCvH5RK9uPCExUROW5NjDwvqkxw==} + engines: {node: '>=12.0.0'} + + tmpl@1.0.5: + resolution: {integrity: sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + + toidentifier@1.0.1: + resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} + engines: {node: '>=0.6'} + + tr46@0.0.3: + resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} + + tree-kill@1.2.2: + resolution: {integrity: 
sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==} + hasBin: true + + ts-api-utils@1.4.3: + resolution: {integrity: sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==} + engines: {node: '>=16'} + peerDependencies: + typescript: '>=4.2.0' + + ts-jest@29.3.4: + resolution: {integrity: sha512-Iqbrm8IXOmV+ggWHOTEbjwyCf2xZlUMv5npExksXohL+tk8va4Fjhb+X2+Rt9NBmgO7bJ8WpnMLOwih/DnMlFA==} + engines: {node: ^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0} + hasBin: true + peerDependencies: + '@babel/core': '>=7.0.0-beta.0 <8' + '@jest/transform': ^29.0.0 + '@jest/types': ^29.0.0 + babel-jest: ^29.0.0 + esbuild: '*' + jest: ^29.0.0 + typescript: '>=4.3 <6' + peerDependenciesMeta: + '@babel/core': + optional: true + '@jest/transform': + optional: true + '@jest/types': + optional: true + babel-jest: + optional: true + esbuild: + optional: true + + ts-node-dev@2.0.0: + resolution: {integrity: sha512-ywMrhCfH6M75yftYvrvNarLEY+SUXtUvU8/0Z6llrHQVBx12GiFk5sStF8UdfE/yfzk9IAq7O5EEbTQsxlBI8w==} + engines: {node: '>=0.8.0'} + hasBin: true + peerDependencies: + node-notifier: '*' + typescript: '*' + peerDependenciesMeta: + node-notifier: + optional: true + + ts-node@10.9.2: + resolution: {integrity: sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==} + hasBin: true + peerDependencies: + '@swc/core': '>=1.2.50' + '@swc/wasm': '>=1.2.50' + '@types/node': '*' + typescript: '>=2.7' + peerDependenciesMeta: + '@swc/core': + optional: true + '@swc/wasm': + optional: true + + tsconfig@7.0.0: + resolution: {integrity: sha512-vZXmzPrL+EmC4T/4rVlT2jNVMWCi/O4DIiSj3UHg1OE5kCKbk4mfrXc6dZksLgRM/TZlKnousKH9bbTazUWRRw==} + + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} + + tsx@4.19.4: + resolution: {integrity: 
sha512-gK5GVzDkJK1SI1zwHf32Mqxf2tSJkNx+eYcNly5+nHvWqXUJYUkWBQtKauoESz3ymezAI++ZwT855x5p5eop+Q==} + engines: {node: '>=18.0.0'} + hasBin: true + + type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + + type-detect@4.0.8: + resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} + engines: {node: '>=4'} + + type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: '>=10'} + + type-fest@0.21.3: + resolution: {integrity: sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==} + engines: {node: '>=10'} + + type-fest@4.41.0: + resolution: {integrity: sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==} + engines: {node: '>=16'} + + type-is@1.6.18: + resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==} + engines: {node: '>= 0.6'} + + type-is@2.0.1: + resolution: {integrity: sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==} + engines: {node: '>= 0.6'} + + typeorm@0.3.24: + resolution: {integrity: sha512-4IrHG7A0tY8l5gEGXfW56VOMfUVWEkWlH/h5wmcyZ+V8oCiLj7iTPp0lEjMEZVrxEkGSdP9ErgTKHKXQApl/oA==} + engines: {node: '>=16.13.0'} + hasBin: true + peerDependencies: + '@google-cloud/spanner': ^5.18.0 || ^6.0.0 || ^7.0.0 + '@sap/hana-client': ^2.12.25 + better-sqlite3: ^8.0.0 || ^9.0.0 || ^10.0.0 || ^11.0.0 + hdb-pool: ^0.1.6 + ioredis: ^5.0.4 + mongodb: ^5.8.0 || ^6.0.0 + mssql: ^9.1.1 || ^10.0.1 || ^11.0.1 + mysql2: ^2.2.5 || ^3.0.1 + oracledb: ^6.3.0 + pg: ^8.5.1 + pg-native: ^3.0.0 + pg-query-stream: ^4.0.0 + redis: ^3.1.1 || ^4.0.0 + reflect-metadata: ^0.1.14 || ^0.2.0 + sql.js: ^1.4.0 + sqlite3: 
^5.0.3 + ts-node: ^10.7.0 + typeorm-aurora-data-api-driver: ^2.0.0 || ^3.0.0 + peerDependenciesMeta: + '@google-cloud/spanner': + optional: true + '@sap/hana-client': + optional: true + better-sqlite3: + optional: true + hdb-pool: + optional: true + ioredis: + optional: true + mongodb: + optional: true + mssql: + optional: true + mysql2: + optional: true + oracledb: + optional: true + pg: + optional: true + pg-native: + optional: true + pg-query-stream: + optional: true + redis: + optional: true + sql.js: + optional: true + sqlite3: + optional: true + ts-node: + optional: true + typeorm-aurora-data-api-driver: + optional: true + + typescript@5.8.3: + resolution: {integrity: sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==} + engines: {node: '>=14.17'} + hasBin: true + + undici-types@5.26.5: + resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} + + undici-types@6.21.0: + resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + + universalify@2.0.1: + resolution: {integrity: sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==} + engines: {node: '>= 10.0.0'} + + unpipe@1.0.0: + resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} + engines: {node: '>= 0.8'} + + update-browserslist-db@1.1.3: + resolution: {integrity: sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + + uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + utils-merge@1.0.1: 
+ resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==} + engines: {node: '>= 0.4.0'} + + uuid@11.1.0: + resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} + hasBin: true + + v8-compile-cache-lib@3.0.1: + resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} + + v8-to-istanbul@9.3.0: + resolution: {integrity: sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==} + engines: {node: '>=10.12.0'} + + validator@13.12.0: + resolution: {integrity: sha512-c1Q0mCiPlgdTVVVIJIrBuxNicYE+t/7oKeI9MWLj3fh/uq2Pxh/3eeWbVZ4OcGW1TUf53At0njHw5SMdA3tmMg==} + engines: {node: '>= 0.10'} + + vary@1.1.2: + resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} + engines: {node: '>= 0.8'} + + vite@6.3.5: + resolution: {integrity: sha512-cZn6NDFE7wdTpINgs++ZJ4N49W2vRp8LCKrn3Ob1kYNtOo21vfDoaV5GzBfLU4MovSAB8uNRm4jgzVQZ+mBzPQ==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} + hasBin: true + peerDependencies: + '@types/node': ^18.0.0 || ^20.0.0 || >=22.0.0 + jiti: '>=1.21.0' + less: '*' + lightningcss: ^1.21.0 + sass: '*' + sass-embedded: '*' + stylus: '*' + sugarss: '*' + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + + void-elements@3.1.0: + resolution: {integrity: sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==} + engines: {node: '>=0.10.0'} + + walker@1.0.8: + resolution: {integrity: 
sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==} + + wcwidth@1.0.1: + resolution: {integrity: sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==} + + web-streams-polyfill@3.3.3: + resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==} + engines: {node: '>= 8'} + + web-streams-polyfill@4.0.0-beta.3: + resolution: {integrity: sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==} + engines: {node: '>= 14'} + + webidl-conversions@3.0.1: + resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} + + whatwg-url@5.0.0: + resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + word-wrap@1.2.5: + resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} + engines: {node: '>=0.10.0'} + + wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + + wrap-ansi@8.1.0: + resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==} + engines: {node: '>=12'} + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + write-file-atomic@4.0.2: + resolution: {integrity: sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==} + engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} + + xtend@4.0.2: + resolution: {integrity: 
sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==} + engines: {node: '>=0.4'} + + y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + + yallist@3.1.1: + resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + + yallist@5.0.0: + resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==} + engines: {node: '>=18'} + + yargs-parser@21.1.1: + resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: '>=12'} + + yargs@17.7.2: + resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==} + engines: {node: '>=12'} + + yn@3.1.1: + resolution: {integrity: sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==} + engines: {node: '>=6'} + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + + zod-to-json-schema@3.24.5: + resolution: {integrity: sha512-/AuWwMP+YqiPbsJx5D6TfgRTc4kTLjsh5SOcd4bLsfUg2RcEXrFMJl1DGgdHy2aCfsIA/cr/1JM0xcB2GZji8g==} + peerDependencies: + zod: ^3.24.1 + + zod@3.25.48: + resolution: {integrity: sha512-0X1mz8FtgEIvaxGjdIImYpZEaZMrund9pGXm3M6vM7Reba0e2eI71KPjSCGXBfwKDPwPoywf6waUKc3/tFvX2Q==} + +snapshots: + + '@alloc/quick-lru@5.2.0': {} + + '@ampproject/remapping@2.3.0': + dependencies: + '@jridgewell/gen-mapping': 0.3.8 + '@jridgewell/trace-mapping': 0.3.25 + + '@babel/code-frame@7.27.1': + dependencies: + '@babel/helper-validator-identifier': 7.27.1 + js-tokens: 4.0.0 + picocolors: 1.1.1 + + '@babel/compat-data@7.27.3': {} + + '@babel/core@7.27.4': + dependencies: + 
'@ampproject/remapping': 2.3.0 + '@babel/code-frame': 7.27.1 + '@babel/generator': 7.27.3 + '@babel/helper-compilation-targets': 7.27.2 + '@babel/helper-module-transforms': 7.27.3(@babel/core@7.27.4) + '@babel/helpers': 7.27.4 + '@babel/parser': 7.27.4 + '@babel/template': 7.27.2 + '@babel/traverse': 7.27.4 + '@babel/types': 7.27.3 + convert-source-map: 2.0.0 + debug: 4.4.1 + gensync: 1.0.0-beta.2 + json5: 2.2.3 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + '@babel/generator@7.27.3': + dependencies: + '@babel/parser': 7.27.4 + '@babel/types': 7.27.3 + '@jridgewell/gen-mapping': 0.3.8 + '@jridgewell/trace-mapping': 0.3.25 + jsesc: 3.1.0 + + '@babel/helper-compilation-targets@7.27.2': + dependencies: + '@babel/compat-data': 7.27.3 + '@babel/helper-validator-option': 7.27.1 + browserslist: 4.25.0 + lru-cache: 5.1.1 + semver: 6.3.1 + + '@babel/helper-module-imports@7.27.1': + dependencies: + '@babel/traverse': 7.27.4 + '@babel/types': 7.27.3 + transitivePeerDependencies: + - supports-color + + '@babel/helper-module-transforms@7.27.3(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-module-imports': 7.27.1 + '@babel/helper-validator-identifier': 7.27.1 + '@babel/traverse': 7.27.4 + transitivePeerDependencies: + - supports-color + + '@babel/helper-plugin-utils@7.27.1': {} + + '@babel/helper-string-parser@7.27.1': {} + + '@babel/helper-validator-identifier@7.27.1': {} + + '@babel/helper-validator-option@7.27.1': {} + + '@babel/helpers@7.27.4': + dependencies: + '@babel/template': 7.27.2 + '@babel/types': 7.27.3 + + '@babel/parser@7.27.4': + dependencies: + '@babel/types': 7.27.3 + + '@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-bigint@7.8.3(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + 
'@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-class-static-block@7.14.5(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-import-attributes@7.26.0(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-jsx@7.25.9(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-private-property-in-object@7.14.5(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + 
'@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-syntax-typescript@7.25.9(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-transform-react-jsx-self@7.27.1(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/plugin-transform-react-jsx-source@7.27.1(@babel/core@7.27.4)': + dependencies: + '@babel/core': 7.27.4 + '@babel/helper-plugin-utils': 7.27.1 + + '@babel/runtime@7.27.0': + dependencies: + regenerator-runtime: 0.14.1 + + '@babel/template@7.27.2': + dependencies: + '@babel/code-frame': 7.27.1 + '@babel/parser': 7.27.4 + '@babel/types': 7.27.3 + + '@babel/traverse@7.27.4': + dependencies: + '@babel/code-frame': 7.27.1 + '@babel/generator': 7.27.3 + '@babel/parser': 7.27.4 + '@babel/template': 7.27.2 + '@babel/types': 7.27.3 + debug: 4.4.1 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + + '@babel/types@7.27.3': + dependencies: + '@babel/helper-string-parser': 7.27.1 + '@babel/helper-validator-identifier': 7.27.1 + + '@bcoe/v8-coverage@0.2.3': {} + + '@cspotcode/source-map-support@0.8.1': + dependencies: + '@jridgewell/trace-mapping': 0.3.9 + + '@emnapi/runtime@1.4.3': + dependencies: + tslib: 2.8.1 + optional: true + + '@esbuild/aix-ppc64@0.25.2': + optional: true + + '@esbuild/android-arm64@0.25.2': + optional: true + + '@esbuild/android-arm@0.25.2': + optional: true + + '@esbuild/android-x64@0.25.2': + optional: true + + '@esbuild/darwin-arm64@0.25.2': + optional: true + + '@esbuild/darwin-x64@0.25.2': + optional: true + + '@esbuild/freebsd-arm64@0.25.2': + optional: true + + '@esbuild/freebsd-x64@0.25.2': + optional: true + + '@esbuild/linux-arm64@0.25.2': + optional: true + + '@esbuild/linux-arm@0.25.2': + optional: true + + '@esbuild/linux-ia32@0.25.2': + optional: true + + 
'@esbuild/linux-loong64@0.25.2': + optional: true + + '@esbuild/linux-mips64el@0.25.2': + optional: true + + '@esbuild/linux-ppc64@0.25.2': + optional: true + + '@esbuild/linux-riscv64@0.25.2': + optional: true + + '@esbuild/linux-s390x@0.25.2': + optional: true + + '@esbuild/linux-x64@0.25.2': + optional: true + + '@esbuild/netbsd-arm64@0.25.2': + optional: true + + '@esbuild/netbsd-x64@0.25.2': + optional: true + + '@esbuild/openbsd-arm64@0.25.2': + optional: true + + '@esbuild/openbsd-x64@0.25.2': + optional: true + + '@esbuild/sunos-x64@0.25.2': + optional: true + + '@esbuild/win32-arm64@0.25.2': + optional: true + + '@esbuild/win32-ia32@0.25.2': + optional: true + + '@esbuild/win32-x64@0.25.2': + optional: true + + '@eslint-community/eslint-utils@4.5.1(eslint@8.57.1)': + dependencies: + eslint: 8.57.1 + eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.12.1': {} + + '@eslint/eslintrc@2.1.4': + dependencies: + ajv: 6.12.6 + debug: 4.4.0 + espree: 9.6.1 + globals: 13.24.0 + ignore: 5.3.2 + import-fresh: 3.3.1 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@eslint/js@8.57.1': {} + + '@humanwhocodes/config-array@0.13.0': + dependencies: + '@humanwhocodes/object-schema': 2.0.3 + debug: 4.4.0 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/object-schema@2.0.3': {} + + '@img/sharp-darwin-arm64@0.34.2': + optionalDependencies: + '@img/sharp-libvips-darwin-arm64': 1.1.0 + optional: true + + '@img/sharp-darwin-x64@0.34.2': + optionalDependencies: + '@img/sharp-libvips-darwin-x64': 1.1.0 + optional: true + + '@img/sharp-libvips-darwin-arm64@1.1.0': + optional: true + + '@img/sharp-libvips-darwin-x64@1.1.0': + optional: true + + '@img/sharp-libvips-linux-arm64@1.1.0': + optional: true + + '@img/sharp-libvips-linux-arm@1.1.0': + optional: true + + '@img/sharp-libvips-linux-ppc64@1.1.0': + optional: true + + 
'@img/sharp-libvips-linux-s390x@1.1.0': + optional: true + + '@img/sharp-libvips-linux-x64@1.1.0': + optional: true + + '@img/sharp-libvips-linuxmusl-arm64@1.1.0': + optional: true + + '@img/sharp-libvips-linuxmusl-x64@1.1.0': + optional: true + + '@img/sharp-linux-arm64@0.34.2': + optionalDependencies: + '@img/sharp-libvips-linux-arm64': 1.1.0 + optional: true + + '@img/sharp-linux-arm@0.34.2': + optionalDependencies: + '@img/sharp-libvips-linux-arm': 1.1.0 + optional: true + + '@img/sharp-linux-s390x@0.34.2': + optionalDependencies: + '@img/sharp-libvips-linux-s390x': 1.1.0 + optional: true + + '@img/sharp-linux-x64@0.34.2': + optionalDependencies: + '@img/sharp-libvips-linux-x64': 1.1.0 + optional: true + + '@img/sharp-linuxmusl-arm64@0.34.2': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-arm64': 1.1.0 + optional: true + + '@img/sharp-linuxmusl-x64@0.34.2': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-x64': 1.1.0 + optional: true + + '@img/sharp-wasm32@0.34.2': + dependencies: + '@emnapi/runtime': 1.4.3 + optional: true + + '@img/sharp-win32-arm64@0.34.2': + optional: true + + '@img/sharp-win32-ia32@0.34.2': + optional: true + + '@img/sharp-win32-x64@0.34.2': + optional: true + + '@isaacs/cliui@8.0.2': + dependencies: + string-width: 5.1.2 + string-width-cjs: string-width@4.2.3 + strip-ansi: 7.1.0 + strip-ansi-cjs: strip-ansi@6.0.1 + wrap-ansi: 8.1.0 + wrap-ansi-cjs: wrap-ansi@7.0.0 + + '@isaacs/fs-minipass@4.0.1': + dependencies: + minipass: 7.1.2 + + '@istanbuljs/load-nyc-config@1.1.0': + dependencies: + camelcase: 5.3.1 + find-up: 4.1.0 + get-package-type: 0.1.0 + js-yaml: 3.14.1 + resolve-from: 5.0.0 + + '@istanbuljs/schema@0.1.3': {} + + '@jest/console@29.7.0': + dependencies: + '@jest/types': 29.6.3 + '@types/node': 22.15.29 + chalk: 4.1.2 + jest-message-util: 29.7.0 + jest-util: 29.7.0 + slash: 3.0.0 + + '@jest/core@29.7.0(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3))': + dependencies: + '@jest/console': 29.7.0 + 
'@jest/reporters': 29.7.0 + '@jest/test-result': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.15.29 + ansi-escapes: 4.3.2 + chalk: 4.1.2 + ci-info: 3.9.0 + exit: 0.1.2 + graceful-fs: 4.2.11 + jest-changed-files: 29.7.0 + jest-config: 29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)) + jest-haste-map: 29.7.0 + jest-message-util: 29.7.0 + jest-regex-util: 29.6.3 + jest-resolve: 29.7.0 + jest-resolve-dependencies: 29.7.0 + jest-runner: 29.7.0 + jest-runtime: 29.7.0 + jest-snapshot: 29.7.0 + jest-util: 29.7.0 + jest-validate: 29.7.0 + jest-watcher: 29.7.0 + micromatch: 4.0.8 + pretty-format: 29.7.0 + slash: 3.0.0 + strip-ansi: 6.0.1 + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + - ts-node + + '@jest/environment@29.7.0': + dependencies: + '@jest/fake-timers': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.15.29 + jest-mock: 29.7.0 + + '@jest/expect-utils@29.7.0': + dependencies: + jest-get-type: 29.6.3 + + '@jest/expect@29.7.0': + dependencies: + expect: 29.7.0 + jest-snapshot: 29.7.0 + transitivePeerDependencies: + - supports-color + + '@jest/fake-timers@29.7.0': + dependencies: + '@jest/types': 29.6.3 + '@sinonjs/fake-timers': 10.3.0 + '@types/node': 22.15.29 + jest-message-util: 29.7.0 + jest-mock: 29.7.0 + jest-util: 29.7.0 + + '@jest/globals@29.7.0': + dependencies: + '@jest/environment': 29.7.0 + '@jest/expect': 29.7.0 + '@jest/types': 29.6.3 + jest-mock: 29.7.0 + transitivePeerDependencies: + - supports-color + + '@jest/reporters@29.7.0': + dependencies: + '@bcoe/v8-coverage': 0.2.3 + '@jest/console': 29.7.0 + '@jest/test-result': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@jridgewell/trace-mapping': 0.3.25 + '@types/node': 22.15.29 + chalk: 4.1.2 + collect-v8-coverage: 1.0.2 + exit: 0.1.2 + glob: 7.2.3 + graceful-fs: 4.2.11 + istanbul-lib-coverage: 3.2.2 + istanbul-lib-instrument: 6.0.3 + istanbul-lib-report: 3.0.1 + istanbul-lib-source-maps: 
4.0.1 + istanbul-reports: 3.1.7 + jest-message-util: 29.7.0 + jest-util: 29.7.0 + jest-worker: 29.7.0 + slash: 3.0.0 + string-length: 4.0.2 + strip-ansi: 6.0.1 + v8-to-istanbul: 9.3.0 + transitivePeerDependencies: + - supports-color + + '@jest/schemas@29.6.3': + dependencies: + '@sinclair/typebox': 0.27.8 + + '@jest/source-map@29.6.3': + dependencies: + '@jridgewell/trace-mapping': 0.3.25 + callsites: 3.1.0 + graceful-fs: 4.2.11 + + '@jest/test-result@29.7.0': + dependencies: + '@jest/console': 29.7.0 + '@jest/types': 29.6.3 + '@types/istanbul-lib-coverage': 2.0.6 + collect-v8-coverage: 1.0.2 + + '@jest/test-sequencer@29.7.0': + dependencies: + '@jest/test-result': 29.7.0 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + slash: 3.0.0 + + '@jest/transform@29.7.0': + dependencies: + '@babel/core': 7.27.4 + '@jest/types': 29.6.3 + '@jridgewell/trace-mapping': 0.3.25 + babel-plugin-istanbul: 6.1.1 + chalk: 4.1.2 + convert-source-map: 2.0.0 + fast-json-stable-stringify: 2.1.0 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + jest-regex-util: 29.6.3 + jest-util: 29.7.0 + micromatch: 4.0.8 + pirates: 4.0.7 + slash: 3.0.0 + write-file-atomic: 4.0.2 + transitivePeerDependencies: + - supports-color + + '@jest/types@29.6.3': + dependencies: + '@jest/schemas': 29.6.3 + '@types/istanbul-lib-coverage': 2.0.6 + '@types/istanbul-reports': 3.0.4 + '@types/node': 22.15.29 + '@types/yargs': 17.0.33 + chalk: 4.1.2 + + '@jridgewell/gen-mapping@0.3.8': + dependencies: + '@jridgewell/set-array': 1.2.1 + '@jridgewell/sourcemap-codec': 1.5.0 + '@jridgewell/trace-mapping': 0.3.25 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/set-array@1.2.1': {} + + '@jridgewell/sourcemap-codec@1.5.0': {} + + '@jridgewell/trace-mapping@0.3.25': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.0 + + '@jridgewell/trace-mapping@0.3.9': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.0 + + '@modelcontextprotocol/sdk@1.12.1': 
+ dependencies: + ajv: 6.12.6 + content-type: 1.0.5 + cors: 2.8.5 + cross-spawn: 7.0.6 + eventsource: 3.0.6 + express: 5.1.0 + express-rate-limit: 7.5.0(express@5.1.0) + pkce-challenge: 5.0.0 + raw-body: 3.0.0 + zod: 3.25.48 + zod-to-json-schema: 3.24.5(zod@3.25.48) + transitivePeerDependencies: + - supports-color + + '@next/env@15.3.3': {} + + '@next/swc-darwin-arm64@15.3.3': + optional: true + + '@next/swc-darwin-x64@15.3.3': + optional: true + + '@next/swc-linux-arm64-gnu@15.3.3': + optional: true + + '@next/swc-linux-arm64-musl@15.3.3': + optional: true + + '@next/swc-linux-x64-gnu@15.3.3': + optional: true + + '@next/swc-linux-x64-musl@15.3.3': + optional: true + + '@next/swc-win32-arm64-msvc@15.3.3': + optional: true + + '@next/swc-win32-x64-msvc@15.3.3': + optional: true + + '@nodelib/fs.scandir@2.1.5': + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + + '@nodelib/fs.stat@2.0.5': {} + + '@nodelib/fs.walk@1.2.8': + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.19.1 + + '@pkgjs/parseargs@0.11.0': + optional: true + + '@radix-ui/primitive@1.1.2': {} + + '@radix-ui/react-accordion@1.2.11(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + dependencies: + '@radix-ui/primitive': 1.1.2 + '@radix-ui/react-collapsible': 1.1.11(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-collection': 1.1.7(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-direction': 1.1.1(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-id': 1.1.1(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-primitive': 
2.1.3(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.6)(react@19.1.0) + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + optionalDependencies: + '@types/react': 19.1.6 + '@types/react-dom': 19.1.5(@types/react@19.1.6) + + '@radix-ui/react-collapsible@1.1.11(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + dependencies: + '@radix-ui/primitive': 1.1.2 + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-id': 1.1.1(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-presence': 1.1.4(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.6)(react@19.1.0) + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + optionalDependencies: + '@types/react': 19.1.6 + '@types/react-dom': 19.1.5(@types/react@19.1.6) + + '@radix-ui/react-collection@1.1.7(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-context': 1.1.2(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@radix-ui/react-slot': 1.2.3(@types/react@19.1.6)(react@19.1.0) + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + optionalDependencies: + 
'@types/react': 19.1.6 + '@types/react-dom': 19.1.5(@types/react@19.1.6) + + '@radix-ui/react-compose-refs@1.1.2(@types/react@19.1.6)(react@19.1.0)': + dependencies: + react: 19.1.0 + optionalDependencies: + '@types/react': 19.1.6 + + '@radix-ui/react-context@1.1.2(@types/react@19.1.6)(react@19.1.0)': + dependencies: + react: 19.1.0 + optionalDependencies: + '@types/react': 19.1.6 + + '@radix-ui/react-direction@1.1.1(@types/react@19.1.6)(react@19.1.0)': + dependencies: + react: 19.1.0 + optionalDependencies: + '@types/react': 19.1.6 + + '@radix-ui/react-id@1.1.1(@types/react@19.1.6)(react@19.1.0)': + dependencies: + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.6)(react@19.1.0) + react: 19.1.0 + optionalDependencies: + '@types/react': 19.1.6 + + '@radix-ui/react-presence@1.1.4(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.6)(react@19.1.0) + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + optionalDependencies: + '@types/react': 19.1.6 + '@types/react-dom': 19.1.5(@types/react@19.1.6) + + '@radix-ui/react-primitive@2.1.3(@types/react-dom@19.1.5(@types/react@19.1.6))(@types/react@19.1.6)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + dependencies: + '@radix-ui/react-slot': 1.2.3(@types/react@19.1.6)(react@19.1.0) + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + optionalDependencies: + '@types/react': 19.1.6 + '@types/react-dom': 19.1.5(@types/react@19.1.6) + + '@radix-ui/react-slot@1.2.3(@types/react@19.1.6)(react@19.1.0)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.6)(react@19.1.0) + react: 19.1.0 + optionalDependencies: + '@types/react': 19.1.6 + + '@radix-ui/react-use-controllable-state@1.2.2(@types/react@19.1.6)(react@19.1.0)': + dependencies: + '@radix-ui/react-use-effect-event': 
0.0.2(@types/react@19.1.6)(react@19.1.0) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.6)(react@19.1.0) + react: 19.1.0 + optionalDependencies: + '@types/react': 19.1.6 + + '@radix-ui/react-use-effect-event@0.0.2(@types/react@19.1.6)(react@19.1.0)': + dependencies: + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.1.6)(react@19.1.0) + react: 19.1.0 + optionalDependencies: + '@types/react': 19.1.6 + + '@radix-ui/react-use-layout-effect@1.1.1(@types/react@19.1.6)(react@19.1.0)': + dependencies: + react: 19.1.0 + optionalDependencies: + '@types/react': 19.1.6 + + '@rolldown/pluginutils@1.0.0-beta.9': {} + + '@rollup/rollup-android-arm-eabi@4.40.1': + optional: true + + '@rollup/rollup-android-arm64@4.40.1': + optional: true + + '@rollup/rollup-darwin-arm64@4.40.1': + optional: true + + '@rollup/rollup-darwin-x64@4.40.1': + optional: true + + '@rollup/rollup-freebsd-arm64@4.40.1': + optional: true + + '@rollup/rollup-freebsd-x64@4.40.1': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.40.1': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.40.1': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.40.1': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.40.1': + optional: true + + '@rollup/rollup-linux-loongarch64-gnu@4.40.1': + optional: true + + '@rollup/rollup-linux-powerpc64le-gnu@4.40.1': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.40.1': + optional: true + + '@rollup/rollup-linux-riscv64-musl@4.40.1': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.40.1': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.40.1': + optional: true + + '@rollup/rollup-linux-x64-musl@4.40.1': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.40.1': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.40.1': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.40.1': + optional: true + + '@shadcn/ui@0.0.4': + dependencies: + chalk: 5.2.0 + commander: 10.0.1 + execa: 7.2.0 + fs-extra: 11.3.0 + 
node-fetch: 3.3.2 + ora: 6.3.1 + prompts: 2.4.2 + zod: 3.25.48 + + '@sinclair/typebox@0.27.8': {} + + '@sinonjs/commons@3.0.1': + dependencies: + type-detect: 4.0.8 + + '@sinonjs/fake-timers@10.3.0': + dependencies: + '@sinonjs/commons': 3.0.1 + + '@sqltools/formatter@1.2.5': {} + + '@swc/counter@0.1.3': {} + + '@swc/helpers@0.5.15': + dependencies: + tslib: 2.8.1 + + '@tailwindcss/node@4.1.8': + dependencies: + '@ampproject/remapping': 2.3.0 + enhanced-resolve: 5.18.1 + jiti: 2.4.2 + lightningcss: 1.30.1 + magic-string: 0.30.17 + source-map-js: 1.2.1 + tailwindcss: 4.1.8 + + '@tailwindcss/oxide-android-arm64@4.1.8': + optional: true + + '@tailwindcss/oxide-darwin-arm64@4.1.8': + optional: true + + '@tailwindcss/oxide-darwin-x64@4.1.8': + optional: true + + '@tailwindcss/oxide-freebsd-x64@4.1.8': + optional: true + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.8': + optional: true + + '@tailwindcss/oxide-linux-arm64-gnu@4.1.8': + optional: true + + '@tailwindcss/oxide-linux-arm64-musl@4.1.8': + optional: true + + '@tailwindcss/oxide-linux-x64-gnu@4.1.8': + optional: true + + '@tailwindcss/oxide-linux-x64-musl@4.1.8': + optional: true + + '@tailwindcss/oxide-wasm32-wasi@4.1.8': + optional: true + + '@tailwindcss/oxide-win32-arm64-msvc@4.1.8': + optional: true + + '@tailwindcss/oxide-win32-x64-msvc@4.1.8': + optional: true + + '@tailwindcss/oxide@4.1.8': + dependencies: + detect-libc: 2.0.4 + tar: 7.4.3 + optionalDependencies: + '@tailwindcss/oxide-android-arm64': 4.1.8 + '@tailwindcss/oxide-darwin-arm64': 4.1.8 + '@tailwindcss/oxide-darwin-x64': 4.1.8 + '@tailwindcss/oxide-freebsd-x64': 4.1.8 + '@tailwindcss/oxide-linux-arm-gnueabihf': 4.1.8 + '@tailwindcss/oxide-linux-arm64-gnu': 4.1.8 + '@tailwindcss/oxide-linux-arm64-musl': 4.1.8 + '@tailwindcss/oxide-linux-x64-gnu': 4.1.8 + '@tailwindcss/oxide-linux-x64-musl': 4.1.8 + '@tailwindcss/oxide-wasm32-wasi': 4.1.8 + '@tailwindcss/oxide-win32-arm64-msvc': 4.1.8 + '@tailwindcss/oxide-win32-x64-msvc': 4.1.8 + + 
'@tailwindcss/postcss@4.1.8': + dependencies: + '@alloc/quick-lru': 5.2.0 + '@tailwindcss/node': 4.1.8 + '@tailwindcss/oxide': 4.1.8 + postcss: 8.5.4 + tailwindcss: 4.1.8 + + '@tailwindcss/vite@4.1.8(vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4))': + dependencies: + '@tailwindcss/node': 4.1.8 + '@tailwindcss/oxide': 4.1.8 + tailwindcss: 4.1.8 + vite: 6.3.5(@types/node@22.15.29)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4) + + '@tsconfig/node10@1.0.11': {} + + '@tsconfig/node12@1.0.11': {} + + '@tsconfig/node14@1.0.3': {} + + '@tsconfig/node16@1.0.4': {} + + '@types/babel__core@7.20.5': + dependencies: + '@babel/parser': 7.27.4 + '@babel/types': 7.27.3 + '@types/babel__generator': 7.27.0 + '@types/babel__template': 7.4.4 + '@types/babel__traverse': 7.20.7 + + '@types/babel__generator@7.27.0': + dependencies: + '@babel/types': 7.27.3 + + '@types/babel__template@7.4.4': + dependencies: + '@babel/parser': 7.27.4 + '@babel/types': 7.27.3 + + '@types/babel__traverse@7.20.7': + dependencies: + '@babel/types': 7.27.3 + + '@types/bcryptjs@3.0.0': + dependencies: + bcryptjs: 3.0.2 + + '@types/body-parser@1.19.5': + dependencies: + '@types/connect': 3.4.38 + '@types/node': 22.15.29 + + '@types/connect@3.4.38': + dependencies: + '@types/node': 22.15.29 + + '@types/estree@1.0.7': {} + + '@types/express-serve-static-core@4.19.6': + dependencies: + '@types/node': 22.15.29 + '@types/qs': 6.14.0 + '@types/range-parser': 1.2.7 + '@types/send': 0.17.4 + + '@types/express@4.17.22': + dependencies: + '@types/body-parser': 1.19.5 + '@types/express-serve-static-core': 4.19.6 + '@types/qs': 6.14.0 + '@types/serve-static': 1.15.7 + + '@types/graceful-fs@4.1.9': + dependencies: + '@types/node': 22.15.29 + + '@types/http-errors@2.0.4': {} + + '@types/istanbul-lib-coverage@2.0.6': {} + + '@types/istanbul-lib-report@3.0.3': + dependencies: + '@types/istanbul-lib-coverage': 2.0.6 + + '@types/istanbul-reports@3.0.4': + dependencies: + '@types/istanbul-lib-report': 
3.0.3 + + '@types/jest@29.5.14': + dependencies: + expect: 29.7.0 + pretty-format: 29.7.0 + + '@types/json-schema@7.0.15': {} + + '@types/jsonwebtoken@9.0.9': + dependencies: + '@types/ms': 2.1.0 + '@types/node': 22.15.29 + + '@types/mime@1.3.5': {} + + '@types/ms@2.1.0': {} + + '@types/node-fetch@2.6.12': + dependencies: + '@types/node': 22.15.29 + form-data: 4.0.2 + + '@types/node@18.19.110': + dependencies: + undici-types: 5.26.5 + + '@types/node@22.15.29': + dependencies: + undici-types: 6.21.0 + + '@types/pg@8.15.4': + dependencies: + '@types/node': 22.15.29 + pg-protocol: 1.10.0 + pg-types: 2.2.0 + + '@types/qs@6.14.0': {} + + '@types/range-parser@1.2.7': {} + + '@types/react-dom@19.1.5(@types/react@19.1.6)': + dependencies: + '@types/react': 19.1.6 + + '@types/react@19.1.6': + dependencies: + csstype: 3.1.3 + + '@types/semver@7.7.0': {} + + '@types/send@0.17.4': + dependencies: + '@types/mime': 1.3.5 + '@types/node': 22.15.29 + + '@types/serve-static@1.15.7': + dependencies: + '@types/http-errors': 2.0.4 + '@types/node': 22.15.29 + '@types/send': 0.17.4 + + '@types/stack-utils@2.0.3': {} + + '@types/strip-bom@3.0.0': {} + + '@types/strip-json-comments@0.0.30': {} + + '@types/uuid@10.0.0': {} + + '@types/yargs-parser@21.0.3': {} + + '@types/yargs@17.0.33': + dependencies: + '@types/yargs-parser': 21.0.3 + + '@typescript-eslint/eslint-plugin@6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.8.3))(eslint@8.57.1)(typescript@5.8.3)': + dependencies: + '@eslint-community/regexpp': 4.12.1 + '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.8.3) + '@typescript-eslint/scope-manager': 6.21.0 + '@typescript-eslint/type-utils': 6.21.0(eslint@8.57.1)(typescript@5.8.3) + '@typescript-eslint/utils': 6.21.0(eslint@8.57.1)(typescript@5.8.3) + '@typescript-eslint/visitor-keys': 6.21.0 + debug: 4.4.0 + eslint: 8.57.1 + graphemer: 1.4.0 + ignore: 5.3.2 + natural-compare: 1.4.0 + semver: 7.7.1 + ts-api-utils: 1.4.3(typescript@5.8.3) + 
optionalDependencies: + typescript: 5.8.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.8.3)': + dependencies: + '@typescript-eslint/scope-manager': 6.21.0 + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.8.3) + '@typescript-eslint/visitor-keys': 6.21.0 + debug: 4.4.0 + eslint: 8.57.1 + optionalDependencies: + typescript: 5.8.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@6.21.0': + dependencies: + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/visitor-keys': 6.21.0 + + '@typescript-eslint/type-utils@6.21.0(eslint@8.57.1)(typescript@5.8.3)': + dependencies: + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.8.3) + '@typescript-eslint/utils': 6.21.0(eslint@8.57.1)(typescript@5.8.3) + debug: 4.4.0 + eslint: 8.57.1 + ts-api-utils: 1.4.3(typescript@5.8.3) + optionalDependencies: + typescript: 5.8.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/types@6.21.0': {} + + '@typescript-eslint/typescript-estree@6.21.0(typescript@5.8.3)': + dependencies: + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/visitor-keys': 6.21.0 + debug: 4.4.0 + globby: 11.1.0 + is-glob: 4.0.3 + minimatch: 9.0.3 + semver: 7.7.1 + ts-api-utils: 1.4.3(typescript@5.8.3) + optionalDependencies: + typescript: 5.8.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@6.21.0(eslint@8.57.1)(typescript@5.8.3)': + dependencies: + '@eslint-community/eslint-utils': 4.5.1(eslint@8.57.1) + '@types/json-schema': 7.0.15 + '@types/semver': 7.7.0 + '@typescript-eslint/scope-manager': 6.21.0 + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.8.3) + eslint: 8.57.1 + semver: 7.7.1 + transitivePeerDependencies: + - supports-color + - typescript + + '@typescript-eslint/visitor-keys@6.21.0': + dependencies: + 
'@typescript-eslint/types': 6.21.0 + eslint-visitor-keys: 3.4.3 + + '@ungap/structured-clone@1.3.0': {} + + '@vitejs/plugin-react@4.5.0(vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4))': + dependencies: + '@babel/core': 7.27.4 + '@babel/plugin-transform-react-jsx-self': 7.27.1(@babel/core@7.27.4) + '@babel/plugin-transform-react-jsx-source': 7.27.1(@babel/core@7.27.4) + '@rolldown/pluginutils': 1.0.0-beta.9 + '@types/babel__core': 7.20.5 + react-refresh: 0.17.0 + vite: 6.3.5(@types/node@22.15.29)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4) + transitivePeerDependencies: + - supports-color + + abort-controller@3.0.0: + dependencies: + event-target-shim: 5.0.1 + + accepts@1.3.8: + dependencies: + mime-types: 2.1.35 + negotiator: 0.6.3 + + accepts@2.0.0: + dependencies: + mime-types: 3.0.1 + negotiator: 1.0.0 + + acorn-jsx@5.3.2(acorn@8.14.1): + dependencies: + acorn: 8.14.1 + + acorn-walk@8.3.4: + dependencies: + acorn: 8.14.1 + + acorn@8.14.1: {} + + agentkeepalive@4.6.0: + dependencies: + humanize-ms: 1.2.1 + + ajv@6.12.6: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + ansi-escapes@4.3.2: + dependencies: + type-fest: 0.21.3 + + ansi-regex@5.0.1: {} + + ansi-regex@6.1.0: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + ansi-styles@5.2.0: {} + + ansi-styles@6.2.1: {} + + ansis@3.17.0: {} + + anymatch@3.1.3: + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + + app-root-path@3.1.0: {} + + arg@4.1.3: {} + + argparse@1.0.10: + dependencies: + sprintf-js: 1.0.3 + + argparse@2.0.1: {} + + array-flatten@1.1.1: {} + + array-union@2.1.0: {} + + async@3.2.6: {} + + asynckit@0.4.0: {} + + autoprefixer@10.4.21(postcss@8.5.4): + dependencies: + browserslist: 4.24.4 + caniuse-lite: 1.0.30001707 + fraction.js: 4.3.7 + normalize-range: 0.1.2 + picocolors: 1.1.1 + postcss: 8.5.4 + postcss-value-parser: 4.2.0 + + 
babel-jest@29.7.0(@babel/core@7.27.4): + dependencies: + '@babel/core': 7.27.4 + '@jest/transform': 29.7.0 + '@types/babel__core': 7.20.5 + babel-plugin-istanbul: 6.1.1 + babel-preset-jest: 29.6.3(@babel/core@7.27.4) + chalk: 4.1.2 + graceful-fs: 4.2.11 + slash: 3.0.0 + transitivePeerDependencies: + - supports-color + + babel-plugin-istanbul@6.1.1: + dependencies: + '@babel/helper-plugin-utils': 7.27.1 + '@istanbuljs/load-nyc-config': 1.1.0 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-instrument: 5.2.1 + test-exclude: 6.0.0 + transitivePeerDependencies: + - supports-color + + babel-plugin-jest-hoist@29.6.3: + dependencies: + '@babel/template': 7.27.2 + '@babel/types': 7.27.3 + '@types/babel__core': 7.20.5 + '@types/babel__traverse': 7.20.7 + + babel-preset-current-node-syntax@1.1.0(@babel/core@7.27.4): + dependencies: + '@babel/core': 7.27.4 + '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.27.4) + '@babel/plugin-syntax-bigint': 7.8.3(@babel/core@7.27.4) + '@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.27.4) + '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.27.4) + '@babel/plugin-syntax-import-attributes': 7.26.0(@babel/core@7.27.4) + '@babel/plugin-syntax-import-meta': 7.10.4(@babel/core@7.27.4) + '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.27.4) + '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.27.4) + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.27.4) + '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.27.4) + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.27.4) + '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.27.4) + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.27.4) + '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.27.4) + '@babel/plugin-syntax-top-level-await': 7.14.5(@babel/core@7.27.4) + + babel-preset-jest@29.6.3(@babel/core@7.27.4): + dependencies: + 
'@babel/core': 7.27.4 + babel-plugin-jest-hoist: 29.6.3 + babel-preset-current-node-syntax: 1.1.0(@babel/core@7.27.4) + + balanced-match@1.0.2: {} + + base64-js@1.5.1: {} + + bcryptjs@3.0.2: {} + + binary-extensions@2.3.0: {} + + bl@5.1.0: + dependencies: + buffer: 6.0.3 + inherits: 2.0.4 + readable-stream: 3.6.2 + + body-parser@1.20.3: + dependencies: + bytes: 3.1.2 + content-type: 1.0.5 + debug: 2.6.9 + depd: 2.0.0 + destroy: 1.2.0 + http-errors: 2.0.0 + iconv-lite: 0.4.24 + on-finished: 2.4.1 + qs: 6.13.0 + raw-body: 2.5.2 + type-is: 1.6.18 + unpipe: 1.0.0 + transitivePeerDependencies: + - supports-color + + body-parser@2.2.0: + dependencies: + bytes: 3.1.2 + content-type: 1.0.5 + debug: 4.4.1 + http-errors: 2.0.0 + iconv-lite: 0.6.3 + on-finished: 2.4.1 + qs: 6.14.0 + raw-body: 3.0.0 + type-is: 2.0.1 + transitivePeerDependencies: + - supports-color + + brace-expansion@1.1.11: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.1: + dependencies: + balanced-match: 1.0.2 + + braces@3.0.3: + dependencies: + fill-range: 7.1.1 + + browserslist@4.24.4: + dependencies: + caniuse-lite: 1.0.30001707 + electron-to-chromium: 1.5.128 + node-releases: 2.0.19 + update-browserslist-db: 1.1.3(browserslist@4.24.4) + + browserslist@4.25.0: + dependencies: + caniuse-lite: 1.0.30001720 + electron-to-chromium: 1.5.161 + node-releases: 2.0.19 + update-browserslist-db: 1.1.3(browserslist@4.25.0) + + bs-logger@0.2.6: + dependencies: + fast-json-stable-stringify: 2.1.0 + + bser@2.1.1: + dependencies: + node-int64: 0.4.0 + + buffer-equal-constant-time@1.0.1: {} + + buffer-from@1.1.2: {} + + buffer@6.0.3: + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + + busboy@1.6.0: + dependencies: + streamsearch: 1.1.0 + + bytes@3.1.2: {} + + call-bind-apply-helpers@1.0.2: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + + call-bound@1.0.4: + dependencies: + call-bind-apply-helpers: 1.0.2 + get-intrinsic: 1.3.0 + + callsites@3.1.0: {} + + 
camelcase@5.3.1: {} + + camelcase@6.3.0: {} + + caniuse-lite@1.0.30001707: {} + + caniuse-lite@1.0.30001720: {} + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + chalk@5.2.0: {} + + char-regex@1.0.2: {} + + chokidar@3.6.0: + dependencies: + anymatch: 3.1.3 + braces: 3.0.3 + glob-parent: 5.1.2 + is-binary-path: 2.1.0 + is-glob: 4.0.3 + normalize-path: 3.0.0 + readdirp: 3.6.0 + optionalDependencies: + fsevents: 2.3.3 + + chownr@3.0.0: {} + + ci-info@3.9.0: {} + + cjs-module-lexer@1.4.3: {} + + class-variance-authority@0.7.1: + dependencies: + clsx: 2.1.1 + + cli-cursor@4.0.0: + dependencies: + restore-cursor: 4.0.0 + + cli-spinners@2.9.2: {} + + client-only@0.0.1: {} + + cliui@8.0.1: + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + + clone@1.0.4: {} + + clsx@2.1.1: {} + + co@4.6.0: {} + + collect-v8-coverage@1.0.2: {} + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + color-string@1.9.1: + dependencies: + color-name: 1.1.4 + simple-swizzle: 0.2.2 + optional: true + + color@4.2.3: + dependencies: + color-convert: 2.0.1 + color-string: 1.9.1 + optional: true + + combined-stream@1.0.8: + dependencies: + delayed-stream: 1.0.0 + + commander@10.0.1: {} + + concat-map@0.0.1: {} + + concurrently@9.1.2: + dependencies: + chalk: 4.1.2 + lodash: 4.17.21 + rxjs: 7.8.2 + shell-quote: 1.8.3 + supports-color: 8.1.1 + tree-kill: 1.2.2 + yargs: 17.7.2 + + content-disposition@0.5.4: + dependencies: + safe-buffer: 5.2.1 + + content-disposition@1.0.0: + dependencies: + safe-buffer: 5.2.1 + + content-type@1.0.5: {} + + convert-source-map@2.0.0: {} + + cookie-signature@1.0.6: {} + + cookie-signature@1.2.2: {} + + cookie@0.7.1: {} + + cookie@0.7.2: {} + + cookie@1.0.2: {} + + cors@2.8.5: + dependencies: + object-assign: 4.1.1 + vary: 1.1.2 + + create-jest@29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)): + dependencies: + '@jest/types': 29.6.3 + chalk: 
4.1.2 + exit: 0.1.2 + graceful-fs: 4.2.11 + jest-config: 29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)) + jest-util: 29.7.0 + prompts: 2.4.2 + transitivePeerDependencies: + - '@types/node' + - babel-plugin-macros + - supports-color + - ts-node + + create-require@1.1.1: {} + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + csstype@3.1.3: {} + + data-uri-to-buffer@4.0.1: {} + + dayjs@1.11.13: {} + + debug@2.6.9: + dependencies: + ms: 2.0.0 + + debug@4.4.0: + dependencies: + ms: 2.1.3 + + debug@4.4.1: + dependencies: + ms: 2.1.3 + + dedent@1.5.3: {} + + dedent@1.6.0: {} + + deep-is@0.1.4: {} + + deepmerge@4.3.1: {} + + defaults@1.0.4: + dependencies: + clone: 1.0.4 + + delayed-stream@1.0.0: {} + + depd@2.0.0: {} + + destroy@1.2.0: {} + + detect-libc@2.0.4: {} + + detect-newline@3.1.0: {} + + diff-sequences@29.6.3: {} + + diff@4.0.2: {} + + dir-glob@3.0.1: + dependencies: + path-type: 4.0.0 + + doctrine@3.0.0: + dependencies: + esutils: 2.0.3 + + dotenv-expand@12.0.2: + dependencies: + dotenv: 16.5.0 + + dotenv@16.5.0: {} + + dunder-proto@1.0.1: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-errors: 1.3.0 + gopd: 1.2.0 + + dynamic-dedupe@0.3.0: + dependencies: + xtend: 4.0.2 + + eastasianwidth@0.2.0: {} + + ecdsa-sig-formatter@1.0.11: + dependencies: + safe-buffer: 5.2.1 + + ee-first@1.1.1: {} + + ejs@3.1.10: + dependencies: + jake: 10.9.2 + + electron-to-chromium@1.5.128: {} + + electron-to-chromium@1.5.161: {} + + emittery@0.13.1: {} + + emoji-regex@8.0.0: {} + + emoji-regex@9.2.2: {} + + encodeurl@1.0.2: {} + + encodeurl@2.0.0: {} + + enhanced-resolve@5.18.1: + dependencies: + graceful-fs: 4.2.11 + tapable: 2.2.2 + + error-ex@1.3.2: + dependencies: + is-arrayish: 0.2.1 + + es-define-property@1.0.1: {} + + es-errors@1.3.0: {} + + es-object-atoms@1.1.1: + dependencies: + es-errors: 1.3.0 + + es-set-tostringtag@2.1.0: + dependencies: + es-errors: 1.3.0 + get-intrinsic: 1.3.0 
+ has-tostringtag: 1.0.2 + hasown: 2.0.2 + + esbuild@0.25.2: + optionalDependencies: + '@esbuild/aix-ppc64': 0.25.2 + '@esbuild/android-arm': 0.25.2 + '@esbuild/android-arm64': 0.25.2 + '@esbuild/android-x64': 0.25.2 + '@esbuild/darwin-arm64': 0.25.2 + '@esbuild/darwin-x64': 0.25.2 + '@esbuild/freebsd-arm64': 0.25.2 + '@esbuild/freebsd-x64': 0.25.2 + '@esbuild/linux-arm': 0.25.2 + '@esbuild/linux-arm64': 0.25.2 + '@esbuild/linux-ia32': 0.25.2 + '@esbuild/linux-loong64': 0.25.2 + '@esbuild/linux-mips64el': 0.25.2 + '@esbuild/linux-ppc64': 0.25.2 + '@esbuild/linux-riscv64': 0.25.2 + '@esbuild/linux-s390x': 0.25.2 + '@esbuild/linux-x64': 0.25.2 + '@esbuild/netbsd-arm64': 0.25.2 + '@esbuild/netbsd-x64': 0.25.2 + '@esbuild/openbsd-arm64': 0.25.2 + '@esbuild/openbsd-x64': 0.25.2 + '@esbuild/sunos-x64': 0.25.2 + '@esbuild/win32-arm64': 0.25.2 + '@esbuild/win32-ia32': 0.25.2 + '@esbuild/win32-x64': 0.25.2 + + escalade@3.2.0: {} + + escape-html@1.0.3: {} + + escape-string-regexp@2.0.0: {} + + escape-string-regexp@4.0.0: {} + + eslint-scope@7.2.2: + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + + eslint-visitor-keys@3.4.3: {} + + eslint@8.57.1: + dependencies: + '@eslint-community/eslint-utils': 4.5.1(eslint@8.57.1) + '@eslint-community/regexpp': 4.12.1 + '@eslint/eslintrc': 2.1.4 + '@eslint/js': 8.57.1 + '@humanwhocodes/config-array': 0.13.0 + '@humanwhocodes/module-importer': 1.0.1 + '@nodelib/fs.walk': 1.2.8 + '@ungap/structured-clone': 1.3.0 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.6 + debug: 4.4.0 + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + esquery: 1.6.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.24.0 + graphemer: 1.4.0 + ignore: 5.3.2 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 3.0.3 + js-yaml: 4.1.0 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + 
minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.4 + strip-ansi: 6.0.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + + espree@9.6.1: + dependencies: + acorn: 8.14.1 + acorn-jsx: 5.3.2(acorn@8.14.1) + eslint-visitor-keys: 3.4.3 + + esprima@4.0.1: {} + + esquery@1.6.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@5.3.0: {} + + esutils@2.0.3: {} + + etag@1.8.1: {} + + event-target-shim@5.0.1: {} + + eventsource-parser@3.0.1: {} + + eventsource@3.0.6: + dependencies: + eventsource-parser: 3.0.1 + + execa@5.1.1: + dependencies: + cross-spawn: 7.0.6 + get-stream: 6.0.1 + human-signals: 2.1.0 + is-stream: 2.0.1 + merge-stream: 2.0.0 + npm-run-path: 4.0.1 + onetime: 5.1.2 + signal-exit: 3.0.7 + strip-final-newline: 2.0.0 + + execa@7.2.0: + dependencies: + cross-spawn: 7.0.6 + get-stream: 6.0.1 + human-signals: 4.3.1 + is-stream: 3.0.0 + merge-stream: 2.0.0 + npm-run-path: 5.3.0 + onetime: 6.0.0 + signal-exit: 3.0.7 + strip-final-newline: 3.0.0 + + exit@0.1.2: {} + + expect@29.7.0: + dependencies: + '@jest/expect-utils': 29.7.0 + jest-get-type: 29.6.3 + jest-matcher-utils: 29.7.0 + jest-message-util: 29.7.0 + jest-util: 29.7.0 + + express-rate-limit@7.5.0(express@5.1.0): + dependencies: + express: 5.1.0 + + express-validator@7.2.1: + dependencies: + lodash: 4.17.21 + validator: 13.12.0 + + express@4.21.2: + dependencies: + accepts: 1.3.8 + array-flatten: 1.1.1 + body-parser: 1.20.3 + content-disposition: 0.5.4 + content-type: 1.0.5 + cookie: 0.7.1 + cookie-signature: 1.0.6 + debug: 2.6.9 + depd: 2.0.0 + encodeurl: 2.0.0 + escape-html: 1.0.3 + etag: 1.8.1 + finalhandler: 1.3.1 + fresh: 0.5.2 + http-errors: 2.0.0 + merge-descriptors: 1.0.3 + methods: 1.1.2 + on-finished: 2.4.1 + parseurl: 1.3.3 + path-to-regexp: 0.1.12 + proxy-addr: 2.0.7 + qs: 6.13.0 + range-parser: 1.2.1 + safe-buffer: 5.2.1 + send: 0.19.0 + serve-static: 1.16.2 + setprototypeof: 1.2.0 + statuses: 2.0.1 + 
type-is: 1.6.18 + utils-merge: 1.0.1 + vary: 1.1.2 + transitivePeerDependencies: + - supports-color + + express@5.1.0: + dependencies: + accepts: 2.0.0 + body-parser: 2.2.0 + content-disposition: 1.0.0 + content-type: 1.0.5 + cookie: 0.7.2 + cookie-signature: 1.2.2 + debug: 4.4.1 + encodeurl: 2.0.0 + escape-html: 1.0.3 + etag: 1.8.1 + finalhandler: 2.1.0 + fresh: 2.0.0 + http-errors: 2.0.0 + merge-descriptors: 2.0.0 + mime-types: 3.0.1 + on-finished: 2.4.1 + once: 1.4.0 + parseurl: 1.3.3 + proxy-addr: 2.0.7 + qs: 6.14.0 + range-parser: 1.2.1 + router: 2.2.0 + send: 1.2.0 + serve-static: 2.2.0 + statuses: 2.0.1 + type-is: 2.0.1 + vary: 1.1.2 + transitivePeerDependencies: + - supports-color + + fast-deep-equal@3.1.3: {} + + fast-glob@3.3.3: + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.8 + + fast-json-stable-stringify@2.1.0: {} + + fast-levenshtein@2.0.6: {} + + fastq@1.19.1: + dependencies: + reusify: 1.1.0 + + fb-watchman@2.0.2: + dependencies: + bser: 2.1.1 + + fdir@6.4.4(picomatch@4.0.2): + optionalDependencies: + picomatch: 4.0.2 + + fetch-blob@3.2.0: + dependencies: + node-domexception: 1.0.0 + web-streams-polyfill: 3.3.3 + + file-entry-cache@6.0.1: + dependencies: + flat-cache: 3.2.0 + + filelist@1.0.4: + dependencies: + minimatch: 5.1.6 + + fill-range@7.1.1: + dependencies: + to-regex-range: 5.0.1 + + finalhandler@1.3.1: + dependencies: + debug: 2.6.9 + encodeurl: 2.0.0 + escape-html: 1.0.3 + on-finished: 2.4.1 + parseurl: 1.3.3 + statuses: 2.0.1 + unpipe: 1.0.0 + transitivePeerDependencies: + - supports-color + + finalhandler@2.1.0: + dependencies: + debug: 4.4.1 + encodeurl: 2.0.0 + escape-html: 1.0.3 + on-finished: 2.4.1 + parseurl: 1.3.3 + statuses: 2.0.1 + transitivePeerDependencies: + - supports-color + + find-up@4.1.0: + dependencies: + locate-path: 5.0.0 + path-exists: 4.0.0 + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@3.2.0: 
+ dependencies: + flatted: 3.3.3 + keyv: 4.5.4 + rimraf: 3.0.2 + + flatted@3.3.3: {} + + foreground-child@3.3.1: + dependencies: + cross-spawn: 7.0.6 + signal-exit: 4.1.0 + + form-data-encoder@1.7.2: {} + + form-data@4.0.2: + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + es-set-tostringtag: 2.1.0 + mime-types: 2.1.35 + + formdata-node@4.4.1: + dependencies: + node-domexception: 1.0.0 + web-streams-polyfill: 4.0.0-beta.3 + + formdata-polyfill@4.0.10: + dependencies: + fetch-blob: 3.2.0 + + forwarded@0.2.0: {} + + fraction.js@4.3.7: {} + + fresh@0.5.2: {} + + fresh@2.0.0: {} + + fs-extra@11.3.0: + dependencies: + graceful-fs: 4.2.11 + jsonfile: 6.1.0 + universalify: 2.0.1 + + fs.realpath@1.0.0: {} + + fsevents@2.3.3: + optional: true + + function-bind@1.1.2: {} + + gensync@1.0.0-beta.2: {} + + get-caller-file@2.0.5: {} + + get-intrinsic@1.3.0: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + es-errors: 1.3.0 + es-object-atoms: 1.1.1 + function-bind: 1.1.2 + get-proto: 1.0.1 + gopd: 1.2.0 + has-symbols: 1.1.0 + hasown: 2.0.2 + math-intrinsics: 1.1.0 + + get-package-type@0.1.0: {} + + get-proto@1.0.1: + dependencies: + dunder-proto: 1.0.1 + es-object-atoms: 1.1.1 + + get-stream@6.0.1: {} + + get-tsconfig@4.10.0: + dependencies: + resolve-pkg-maps: 1.0.0 + + glob-parent@5.1.2: + dependencies: + is-glob: 4.0.3 + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + glob@10.4.5: + dependencies: + foreground-child: 3.3.1 + jackspeak: 3.4.3 + minimatch: 9.0.5 + minipass: 7.1.2 + package-json-from-dist: 1.0.1 + path-scurry: 1.11.1 + + glob@7.2.3: + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + + globals@11.12.0: {} + + globals@13.24.0: + dependencies: + type-fest: 0.20.2 + + globby@11.1.0: + dependencies: + array-union: 2.1.0 + dir-glob: 3.0.1 + fast-glob: 3.3.3 + ignore: 5.3.2 + merge2: 1.4.1 + slash: 3.0.0 + + gopd@1.2.0: {} + + 
graceful-fs@4.2.11: {} + + graphemer@1.4.0: {} + + has-flag@4.0.0: {} + + has-symbols@1.1.0: {} + + has-tostringtag@1.0.2: + dependencies: + has-symbols: 1.1.0 + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + html-escaper@2.0.2: {} + + html-parse-stringify@3.0.1: + dependencies: + void-elements: 3.1.0 + + http-errors@2.0.0: + dependencies: + depd: 2.0.0 + inherits: 2.0.4 + setprototypeof: 1.2.0 + statuses: 2.0.1 + toidentifier: 1.0.1 + + human-signals@2.1.0: {} + + human-signals@4.3.1: {} + + humanize-ms@1.2.1: + dependencies: + ms: 2.1.3 + + i18next-browser-languagedetector@8.1.0: + dependencies: + '@babel/runtime': 7.27.0 + + i18next@24.2.3(typescript@5.8.3): + dependencies: + '@babel/runtime': 7.27.0 + optionalDependencies: + typescript: 5.8.3 + + iconv-lite@0.4.24: + dependencies: + safer-buffer: 2.1.2 + + iconv-lite@0.6.3: + dependencies: + safer-buffer: 2.1.2 + + ieee754@1.2.1: {} + + ignore@5.3.2: {} + + import-fresh@3.3.1: + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + + import-local@3.2.0: + dependencies: + pkg-dir: 4.2.0 + resolve-cwd: 3.0.0 + + imurmurhash@0.1.4: {} + + inflight@1.0.6: + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + inherits@2.0.4: {} + + ipaddr.js@1.9.1: {} + + is-arrayish@0.2.1: {} + + is-arrayish@0.3.2: + optional: true + + is-binary-path@2.1.0: + dependencies: + binary-extensions: 2.3.0 + + is-core-module@2.16.1: + dependencies: + hasown: 2.0.2 + + is-extglob@2.1.1: {} + + is-fullwidth-code-point@3.0.0: {} + + is-generator-fn@2.1.0: {} + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + is-interactive@2.0.0: {} + + is-number@7.0.0: {} + + is-path-inside@3.0.3: {} + + is-promise@4.0.0: {} + + is-stream@2.0.1: {} + + is-stream@3.0.0: {} + + is-unicode-supported@1.3.0: {} + + isexe@2.0.0: {} + + istanbul-lib-coverage@3.2.2: {} + + istanbul-lib-instrument@5.2.1: + dependencies: + '@babel/core': 7.27.4 + '@babel/parser': 7.27.4 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-coverage: 3.2.2 + semver: 
6.3.1 + transitivePeerDependencies: + - supports-color + + istanbul-lib-instrument@6.0.3: + dependencies: + '@babel/core': 7.27.4 + '@babel/parser': 7.27.4 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-coverage: 3.2.2 + semver: 7.7.2 + transitivePeerDependencies: + - supports-color + + istanbul-lib-report@3.0.1: + dependencies: + istanbul-lib-coverage: 3.2.2 + make-dir: 4.0.0 + supports-color: 7.2.0 + + istanbul-lib-source-maps@4.0.1: + dependencies: + debug: 4.4.1 + istanbul-lib-coverage: 3.2.2 + source-map: 0.6.1 + transitivePeerDependencies: + - supports-color + + istanbul-reports@3.1.7: + dependencies: + html-escaper: 2.0.2 + istanbul-lib-report: 3.0.1 + + jackspeak@3.4.3: + dependencies: + '@isaacs/cliui': 8.0.2 + optionalDependencies: + '@pkgjs/parseargs': 0.11.0 + + jake@10.9.2: + dependencies: + async: 3.2.6 + chalk: 4.1.2 + filelist: 1.0.4 + minimatch: 3.1.2 + + jest-changed-files@29.7.0: + dependencies: + execa: 5.1.1 + jest-util: 29.7.0 + p-limit: 3.1.0 + + jest-circus@29.7.0: + dependencies: + '@jest/environment': 29.7.0 + '@jest/expect': 29.7.0 + '@jest/test-result': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.15.29 + chalk: 4.1.2 + co: 4.6.0 + dedent: 1.5.3 + is-generator-fn: 2.1.0 + jest-each: 29.7.0 + jest-matcher-utils: 29.7.0 + jest-message-util: 29.7.0 + jest-runtime: 29.7.0 + jest-snapshot: 29.7.0 + jest-util: 29.7.0 + p-limit: 3.1.0 + pretty-format: 29.7.0 + pure-rand: 6.1.0 + slash: 3.0.0 + stack-utils: 2.0.6 + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + + jest-cli@29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)): + dependencies: + '@jest/core': 29.7.0(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)) + '@jest/test-result': 29.7.0 + '@jest/types': 29.6.3 + chalk: 4.1.2 + create-jest: 29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)) + exit: 0.1.2 + import-local: 3.2.0 + jest-config: 
29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)) + jest-util: 29.7.0 + jest-validate: 29.7.0 + yargs: 17.7.2 + transitivePeerDependencies: + - '@types/node' + - babel-plugin-macros + - supports-color + - ts-node + + jest-config@29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)): + dependencies: + '@babel/core': 7.27.4 + '@jest/test-sequencer': 29.7.0 + '@jest/types': 29.6.3 + babel-jest: 29.7.0(@babel/core@7.27.4) + chalk: 4.1.2 + ci-info: 3.9.0 + deepmerge: 4.3.1 + glob: 7.2.3 + graceful-fs: 4.2.11 + jest-circus: 29.7.0 + jest-environment-node: 29.7.0 + jest-get-type: 29.6.3 + jest-regex-util: 29.6.3 + jest-resolve: 29.7.0 + jest-runner: 29.7.0 + jest-util: 29.7.0 + jest-validate: 29.7.0 + micromatch: 4.0.8 + parse-json: 5.2.0 + pretty-format: 29.7.0 + slash: 3.0.0 + strip-json-comments: 3.1.1 + optionalDependencies: + '@types/node': 22.15.29 + ts-node: 10.9.2(@types/node@22.15.29)(typescript@5.8.3) + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + + jest-diff@29.7.0: + dependencies: + chalk: 4.1.2 + diff-sequences: 29.6.3 + jest-get-type: 29.6.3 + pretty-format: 29.7.0 + + jest-docblock@29.7.0: + dependencies: + detect-newline: 3.1.0 + + jest-each@29.7.0: + dependencies: + '@jest/types': 29.6.3 + chalk: 4.1.2 + jest-get-type: 29.6.3 + jest-util: 29.7.0 + pretty-format: 29.7.0 + + jest-environment-node@29.7.0: + dependencies: + '@jest/environment': 29.7.0 + '@jest/fake-timers': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.15.29 + jest-mock: 29.7.0 + jest-util: 29.7.0 + + jest-get-type@29.6.3: {} + + jest-haste-map@29.7.0: + dependencies: + '@jest/types': 29.6.3 + '@types/graceful-fs': 4.1.9 + '@types/node': 22.15.29 + anymatch: 3.1.3 + fb-watchman: 2.0.2 + graceful-fs: 4.2.11 + jest-regex-util: 29.6.3 + jest-util: 29.7.0 + jest-worker: 29.7.0 + micromatch: 4.0.8 + walker: 1.0.8 + optionalDependencies: + fsevents: 2.3.3 + + jest-leak-detector@29.7.0: + dependencies: 
+ jest-get-type: 29.6.3 + pretty-format: 29.7.0 + + jest-matcher-utils@29.7.0: + dependencies: + chalk: 4.1.2 + jest-diff: 29.7.0 + jest-get-type: 29.6.3 + pretty-format: 29.7.0 + + jest-message-util@29.7.0: + dependencies: + '@babel/code-frame': 7.27.1 + '@jest/types': 29.6.3 + '@types/stack-utils': 2.0.3 + chalk: 4.1.2 + graceful-fs: 4.2.11 + micromatch: 4.0.8 + pretty-format: 29.7.0 + slash: 3.0.0 + stack-utils: 2.0.6 + + jest-mock@29.7.0: + dependencies: + '@jest/types': 29.6.3 + '@types/node': 22.15.29 + jest-util: 29.7.0 + + jest-pnp-resolver@1.2.3(jest-resolve@29.7.0): + optionalDependencies: + jest-resolve: 29.7.0 + + jest-regex-util@29.6.3: {} + + jest-resolve-dependencies@29.7.0: + dependencies: + jest-regex-util: 29.6.3 + jest-snapshot: 29.7.0 + transitivePeerDependencies: + - supports-color + + jest-resolve@29.7.0: + dependencies: + chalk: 4.1.2 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + jest-pnp-resolver: 1.2.3(jest-resolve@29.7.0) + jest-util: 29.7.0 + jest-validate: 29.7.0 + resolve: 1.22.10 + resolve.exports: 2.0.3 + slash: 3.0.0 + + jest-runner@29.7.0: + dependencies: + '@jest/console': 29.7.0 + '@jest/environment': 29.7.0 + '@jest/test-result': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.15.29 + chalk: 4.1.2 + emittery: 0.13.1 + graceful-fs: 4.2.11 + jest-docblock: 29.7.0 + jest-environment-node: 29.7.0 + jest-haste-map: 29.7.0 + jest-leak-detector: 29.7.0 + jest-message-util: 29.7.0 + jest-resolve: 29.7.0 + jest-runtime: 29.7.0 + jest-util: 29.7.0 + jest-watcher: 29.7.0 + jest-worker: 29.7.0 + p-limit: 3.1.0 + source-map-support: 0.5.13 + transitivePeerDependencies: + - supports-color + + jest-runtime@29.7.0: + dependencies: + '@jest/environment': 29.7.0 + '@jest/fake-timers': 29.7.0 + '@jest/globals': 29.7.0 + '@jest/source-map': 29.6.3 + '@jest/test-result': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.15.29 + chalk: 4.1.2 + cjs-module-lexer: 1.4.3 + collect-v8-coverage: 
1.0.2 + glob: 7.2.3 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + jest-message-util: 29.7.0 + jest-mock: 29.7.0 + jest-regex-util: 29.6.3 + jest-resolve: 29.7.0 + jest-snapshot: 29.7.0 + jest-util: 29.7.0 + slash: 3.0.0 + strip-bom: 4.0.0 + transitivePeerDependencies: + - supports-color + + jest-snapshot@29.7.0: + dependencies: + '@babel/core': 7.27.4 + '@babel/generator': 7.27.3 + '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.27.4) + '@babel/plugin-syntax-typescript': 7.25.9(@babel/core@7.27.4) + '@babel/types': 7.27.3 + '@jest/expect-utils': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + babel-preset-current-node-syntax: 1.1.0(@babel/core@7.27.4) + chalk: 4.1.2 + expect: 29.7.0 + graceful-fs: 4.2.11 + jest-diff: 29.7.0 + jest-get-type: 29.6.3 + jest-matcher-utils: 29.7.0 + jest-message-util: 29.7.0 + jest-util: 29.7.0 + natural-compare: 1.4.0 + pretty-format: 29.7.0 + semver: 7.7.2 + transitivePeerDependencies: + - supports-color + + jest-util@29.7.0: + dependencies: + '@jest/types': 29.6.3 + '@types/node': 22.15.29 + chalk: 4.1.2 + ci-info: 3.9.0 + graceful-fs: 4.2.11 + picomatch: 2.3.1 + + jest-validate@29.7.0: + dependencies: + '@jest/types': 29.6.3 + camelcase: 6.3.0 + chalk: 4.1.2 + jest-get-type: 29.6.3 + leven: 3.1.0 + pretty-format: 29.7.0 + + jest-watcher@29.7.0: + dependencies: + '@jest/test-result': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.15.29 + ansi-escapes: 4.3.2 + chalk: 4.1.2 + emittery: 0.13.1 + jest-util: 29.7.0 + string-length: 4.0.2 + + jest-worker@29.7.0: + dependencies: + '@types/node': 22.15.29 + jest-util: 29.7.0 + merge-stream: 2.0.0 + supports-color: 8.1.1 + + jest@29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)): + dependencies: + '@jest/core': 29.7.0(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)) + '@jest/types': 29.6.3 + import-local: 3.2.0 + jest-cli: 29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)) + 
transitivePeerDependencies: + - '@types/node' + - babel-plugin-macros + - supports-color + - ts-node + + jiti@2.4.2: {} + + js-tokens@4.0.0: {} + + js-yaml@3.14.1: + dependencies: + argparse: 1.0.10 + esprima: 4.0.1 + + js-yaml@4.1.0: + dependencies: + argparse: 2.0.1 + + jsesc@3.1.0: {} + + json-buffer@3.0.1: {} + + json-parse-even-better-errors@2.3.1: {} + + json-schema-traverse@0.4.1: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + json5@2.2.3: {} + + jsonfile@6.1.0: + dependencies: + universalify: 2.0.1 + optionalDependencies: + graceful-fs: 4.2.11 + + jsonwebtoken@9.0.2: + dependencies: + jws: 3.2.2 + lodash.includes: 4.3.0 + lodash.isboolean: 3.0.3 + lodash.isinteger: 4.0.4 + lodash.isnumber: 3.0.3 + lodash.isplainobject: 4.0.6 + lodash.isstring: 4.0.1 + lodash.once: 4.1.1 + ms: 2.1.3 + semver: 7.7.1 + + jwa@1.4.1: + dependencies: + buffer-equal-constant-time: 1.0.1 + ecdsa-sig-formatter: 1.0.11 + safe-buffer: 5.2.1 + + jws@3.2.2: + dependencies: + jwa: 1.4.1 + safe-buffer: 5.2.1 + + keyv@4.5.4: + dependencies: + json-buffer: 3.0.1 + + kleur@3.0.3: {} + + leven@3.1.0: {} + + levn@0.4.1: + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + lightningcss-darwin-arm64@1.30.1: + optional: true + + lightningcss-darwin-x64@1.30.1: + optional: true + + lightningcss-freebsd-x64@1.30.1: + optional: true + + lightningcss-linux-arm-gnueabihf@1.30.1: + optional: true + + lightningcss-linux-arm64-gnu@1.30.1: + optional: true + + lightningcss-linux-arm64-musl@1.30.1: + optional: true + + lightningcss-linux-x64-gnu@1.30.1: + optional: true + + lightningcss-linux-x64-musl@1.30.1: + optional: true + + lightningcss-win32-arm64-msvc@1.30.1: + optional: true + + lightningcss-win32-x64-msvc@1.30.1: + optional: true + + lightningcss@1.30.1: + dependencies: + detect-libc: 2.0.4 + optionalDependencies: + lightningcss-darwin-arm64: 1.30.1 + lightningcss-darwin-x64: 1.30.1 + lightningcss-freebsd-x64: 1.30.1 + lightningcss-linux-arm-gnueabihf: 1.30.1 + 
lightningcss-linux-arm64-gnu: 1.30.1 + lightningcss-linux-arm64-musl: 1.30.1 + lightningcss-linux-x64-gnu: 1.30.1 + lightningcss-linux-x64-musl: 1.30.1 + lightningcss-win32-arm64-msvc: 1.30.1 + lightningcss-win32-x64-msvc: 1.30.1 + + lines-and-columns@1.2.4: {} + + locate-path@5.0.0: + dependencies: + p-locate: 4.1.0 + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + lodash.includes@4.3.0: {} + + lodash.isboolean@3.0.3: {} + + lodash.isinteger@4.0.4: {} + + lodash.isnumber@3.0.3: {} + + lodash.isplainobject@4.0.6: {} + + lodash.isstring@4.0.1: {} + + lodash.memoize@4.1.2: {} + + lodash.merge@4.6.2: {} + + lodash.once@4.1.1: {} + + lodash@4.17.21: {} + + log-symbols@5.1.0: + dependencies: + chalk: 5.2.0 + is-unicode-supported: 1.3.0 + + lru-cache@10.4.3: {} + + lru-cache@5.1.1: + dependencies: + yallist: 3.1.1 + + lucide-react@0.486.0(react@19.1.0): + dependencies: + react: 19.1.0 + + magic-string@0.30.17: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.0 + + make-dir@4.0.0: + dependencies: + semver: 7.7.2 + + make-error@1.3.6: {} + + makeerror@1.0.12: + dependencies: + tmpl: 1.0.5 + + math-intrinsics@1.1.0: {} + + media-typer@0.3.0: {} + + media-typer@1.1.0: {} + + merge-descriptors@1.0.3: {} + + merge-descriptors@2.0.0: {} + + merge-stream@2.0.0: {} + + merge2@1.4.1: {} + + methods@1.1.2: {} + + micromatch@4.0.8: + dependencies: + braces: 3.0.3 + picomatch: 2.3.1 + + mime-db@1.52.0: {} + + mime-db@1.54.0: {} + + mime-types@2.1.35: + dependencies: + mime-db: 1.52.0 + + mime-types@3.0.1: + dependencies: + mime-db: 1.54.0 + + mime@1.6.0: {} + + mimic-fn@2.1.0: {} + + mimic-fn@4.0.0: {} + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.11 + + minimatch@5.1.6: + dependencies: + brace-expansion: 2.0.1 + + minimatch@9.0.3: + dependencies: + brace-expansion: 2.0.1 + + minimatch@9.0.5: + dependencies: + brace-expansion: 2.0.1 + + minimist@1.2.8: {} + + minipass@7.1.2: {} + + minizlib@3.0.2: + dependencies: + minipass: 7.1.2 + + mkdirp@1.0.4: {} + + 
mkdirp@3.0.1: {} + + ms@2.0.0: {} + + ms@2.1.3: {} + + nanoid@3.3.11: {} + + natural-compare@1.4.0: {} + + negotiator@0.6.3: {} + + negotiator@1.0.0: {} + + next@15.3.3(@babel/core@7.27.4)(react-dom@19.1.0(react@19.1.0))(react@19.1.0): + dependencies: + '@next/env': 15.3.3 + '@swc/counter': 0.1.3 + '@swc/helpers': 0.5.15 + busboy: 1.6.0 + caniuse-lite: 1.0.30001707 + postcss: 8.4.31 + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + styled-jsx: 5.1.6(@babel/core@7.27.4)(react@19.1.0) + optionalDependencies: + '@next/swc-darwin-arm64': 15.3.3 + '@next/swc-darwin-x64': 15.3.3 + '@next/swc-linux-arm64-gnu': 15.3.3 + '@next/swc-linux-arm64-musl': 15.3.3 + '@next/swc-linux-x64-gnu': 15.3.3 + '@next/swc-linux-x64-musl': 15.3.3 + '@next/swc-win32-arm64-msvc': 15.3.3 + '@next/swc-win32-x64-msvc': 15.3.3 + sharp: 0.34.2 + transitivePeerDependencies: + - '@babel/core' + - babel-plugin-macros + + node-domexception@1.0.0: {} + + node-fetch@2.7.0: + dependencies: + whatwg-url: 5.0.0 + + node-fetch@3.3.2: + dependencies: + data-uri-to-buffer: 4.0.1 + fetch-blob: 3.2.0 + formdata-polyfill: 4.0.10 + + node-int64@0.4.0: {} + + node-releases@2.0.19: {} + + normalize-path@3.0.0: {} + + normalize-range@0.1.2: {} + + npm-run-path@4.0.1: + dependencies: + path-key: 3.1.1 + + npm-run-path@5.3.0: + dependencies: + path-key: 4.0.0 + + object-assign@4.1.1: {} + + object-inspect@1.13.4: {} + + on-finished@2.4.1: + dependencies: + ee-first: 1.1.1 + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + onetime@5.1.2: + dependencies: + mimic-fn: 2.1.0 + + onetime@6.0.0: + dependencies: + mimic-fn: 4.0.0 + + openai@4.104.0(zod@3.25.48): + dependencies: + '@types/node': 18.19.110 + '@types/node-fetch': 2.6.12 + abort-controller: 3.0.0 + agentkeepalive: 4.6.0 + form-data-encoder: 1.7.2 + formdata-node: 4.4.1 + node-fetch: 2.7.0 + optionalDependencies: + zod: 3.25.48 + transitivePeerDependencies: + - encoding + + optionator@0.9.4: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 
0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.5 + + ora@6.3.1: + dependencies: + chalk: 5.2.0 + cli-cursor: 4.0.0 + cli-spinners: 2.9.2 + is-interactive: 2.0.0 + is-unicode-supported: 1.3.0 + log-symbols: 5.1.0 + stdin-discarder: 0.1.0 + strip-ansi: 7.1.0 + wcwidth: 1.0.1 + + p-limit@2.3.0: + dependencies: + p-try: 2.2.0 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@4.1.0: + dependencies: + p-limit: 2.3.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + p-try@2.2.0: {} + + package-json-from-dist@1.0.1: {} + + parent-module@1.0.1: + dependencies: + callsites: 3.1.0 + + parse-json@5.2.0: + dependencies: + '@babel/code-frame': 7.27.1 + error-ex: 1.3.2 + json-parse-even-better-errors: 2.3.1 + lines-and-columns: 1.2.4 + + parseurl@1.3.3: {} + + path-exists@4.0.0: {} + + path-is-absolute@1.0.1: {} + + path-key@3.1.1: {} + + path-key@4.0.0: {} + + path-parse@1.0.7: {} + + path-scurry@1.11.1: + dependencies: + lru-cache: 10.4.3 + minipass: 7.1.2 + + path-to-regexp@0.1.12: {} + + path-to-regexp@8.2.0: {} + + path-type@4.0.0: {} + + pg-cloudflare@1.2.5: + optional: true + + pg-connection-string@2.9.0: {} + + pg-int8@1.0.1: {} + + pg-pool@3.10.0(pg@8.16.0): + dependencies: + pg: 8.16.0 + + pg-protocol@1.10.0: {} + + pg-types@2.2.0: + dependencies: + pg-int8: 1.0.1 + postgres-array: 2.0.0 + postgres-bytea: 1.0.0 + postgres-date: 1.0.7 + postgres-interval: 1.2.0 + + pg@8.16.0: + dependencies: + pg-connection-string: 2.9.0 + pg-pool: 3.10.0(pg@8.16.0) + pg-protocol: 1.10.0 + pg-types: 2.2.0 + pgpass: 1.0.5 + optionalDependencies: + pg-cloudflare: 1.2.5 + + pgpass@1.0.5: + dependencies: + split2: 4.2.0 + + pgvector@0.2.1: {} + + picocolors@1.1.1: {} + + picomatch@2.3.1: {} + + picomatch@4.0.2: {} + + pirates@4.0.7: {} + + pkce-challenge@5.0.0: {} + + pkg-dir@4.2.0: + dependencies: + find-up: 4.1.0 + + postcss-value-parser@4.2.0: {} + + postcss@8.4.31: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + 
postcss@8.5.4: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + postgres-array@2.0.0: {} + + postgres-bytea@1.0.0: {} + + postgres-date@1.0.7: {} + + postgres-interval@1.2.0: + dependencies: + xtend: 4.0.2 + + postgres@3.4.7: {} + + prelude-ls@1.2.1: {} + + prettier@3.5.3: {} + + pretty-format@29.7.0: + dependencies: + '@jest/schemas': 29.6.3 + ansi-styles: 5.2.0 + react-is: 18.3.1 + + prompts@2.4.2: + dependencies: + kleur: 3.0.3 + sisteransi: 1.0.5 + + proxy-addr@2.0.7: + dependencies: + forwarded: 0.2.0 + ipaddr.js: 1.9.1 + + punycode@2.3.1: {} + + pure-rand@6.1.0: {} + + qs@6.13.0: + dependencies: + side-channel: 1.1.0 + + qs@6.14.0: + dependencies: + side-channel: 1.1.0 + + queue-microtask@1.2.3: {} + + range-parser@1.2.1: {} + + raw-body@2.5.2: + dependencies: + bytes: 3.1.2 + http-errors: 2.0.0 + iconv-lite: 0.4.24 + unpipe: 1.0.0 + + raw-body@3.0.0: + dependencies: + bytes: 3.1.2 + http-errors: 2.0.0 + iconv-lite: 0.6.3 + unpipe: 1.0.0 + + react-dom@19.1.0(react@19.1.0): + dependencies: + react: 19.1.0 + scheduler: 0.26.0 + + react-i18next@15.5.2(i18next@24.2.3(typescript@5.8.3))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.8.3): + dependencies: + '@babel/runtime': 7.27.0 + html-parse-stringify: 3.0.1 + i18next: 24.2.3(typescript@5.8.3) + react: 19.1.0 + optionalDependencies: + react-dom: 19.1.0(react@19.1.0) + typescript: 5.8.3 + + react-is@18.3.1: {} + + react-refresh@0.17.0: {} + + react-router-dom@7.6.1(react-dom@19.1.0(react@19.1.0))(react@19.1.0): + dependencies: + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + react-router: 7.6.1(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + + react-router@7.6.1(react-dom@19.1.0(react@19.1.0))(react@19.1.0): + dependencies: + cookie: 1.0.2 + react: 19.1.0 + set-cookie-parser: 2.7.1 + optionalDependencies: + react-dom: 19.1.0(react@19.1.0) + + react@19.1.0: {} + + readable-stream@3.6.2: + dependencies: + inherits: 2.0.4 + string_decoder: 1.3.0 + util-deprecate: 1.0.2 + 
+ readdirp@3.6.0: + dependencies: + picomatch: 2.3.1 + + reflect-metadata@0.2.2: {} + + regenerator-runtime@0.14.1: {} + + require-directory@2.1.1: {} + + resolve-cwd@3.0.0: + dependencies: + resolve-from: 5.0.0 + + resolve-from@4.0.0: {} + + resolve-from@5.0.0: {} + + resolve-pkg-maps@1.0.0: {} + + resolve.exports@2.0.3: {} + + resolve@1.22.10: + dependencies: + is-core-module: 2.16.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + + restore-cursor@4.0.0: + dependencies: + onetime: 5.1.2 + signal-exit: 3.0.7 + + reusify@1.1.0: {} + + rimraf@2.7.1: + dependencies: + glob: 7.2.3 + + rimraf@3.0.2: + dependencies: + glob: 7.2.3 + + rollup@4.40.1: + dependencies: + '@types/estree': 1.0.7 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.40.1 + '@rollup/rollup-android-arm64': 4.40.1 + '@rollup/rollup-darwin-arm64': 4.40.1 + '@rollup/rollup-darwin-x64': 4.40.1 + '@rollup/rollup-freebsd-arm64': 4.40.1 + '@rollup/rollup-freebsd-x64': 4.40.1 + '@rollup/rollup-linux-arm-gnueabihf': 4.40.1 + '@rollup/rollup-linux-arm-musleabihf': 4.40.1 + '@rollup/rollup-linux-arm64-gnu': 4.40.1 + '@rollup/rollup-linux-arm64-musl': 4.40.1 + '@rollup/rollup-linux-loongarch64-gnu': 4.40.1 + '@rollup/rollup-linux-powerpc64le-gnu': 4.40.1 + '@rollup/rollup-linux-riscv64-gnu': 4.40.1 + '@rollup/rollup-linux-riscv64-musl': 4.40.1 + '@rollup/rollup-linux-s390x-gnu': 4.40.1 + '@rollup/rollup-linux-x64-gnu': 4.40.1 + '@rollup/rollup-linux-x64-musl': 4.40.1 + '@rollup/rollup-win32-arm64-msvc': 4.40.1 + '@rollup/rollup-win32-ia32-msvc': 4.40.1 + '@rollup/rollup-win32-x64-msvc': 4.40.1 + fsevents: 2.3.3 + + router@2.2.0: + dependencies: + debug: 4.4.1 + depd: 2.0.0 + is-promise: 4.0.0 + parseurl: 1.3.3 + path-to-regexp: 8.2.0 + transitivePeerDependencies: + - supports-color + + run-parallel@1.2.0: + dependencies: + queue-microtask: 1.2.3 + + rxjs@7.8.2: + dependencies: + tslib: 2.8.1 + + safe-buffer@5.2.1: {} + + safer-buffer@2.1.2: {} + + scheduler@0.26.0: {} + + 
semver@6.3.1: {} + + semver@7.7.1: {} + + semver@7.7.2: {} + + send@0.19.0: + dependencies: + debug: 2.6.9 + depd: 2.0.0 + destroy: 1.2.0 + encodeurl: 1.0.2 + escape-html: 1.0.3 + etag: 1.8.1 + fresh: 0.5.2 + http-errors: 2.0.0 + mime: 1.6.0 + ms: 2.1.3 + on-finished: 2.4.1 + range-parser: 1.2.1 + statuses: 2.0.1 + transitivePeerDependencies: + - supports-color + + send@1.2.0: + dependencies: + debug: 4.4.1 + encodeurl: 2.0.0 + escape-html: 1.0.3 + etag: 1.8.1 + fresh: 2.0.0 + http-errors: 2.0.0 + mime-types: 3.0.1 + ms: 2.1.3 + on-finished: 2.4.1 + range-parser: 1.2.1 + statuses: 2.0.1 + transitivePeerDependencies: + - supports-color + + serve-static@1.16.2: + dependencies: + encodeurl: 2.0.0 + escape-html: 1.0.3 + parseurl: 1.3.3 + send: 0.19.0 + transitivePeerDependencies: + - supports-color + + serve-static@2.2.0: + dependencies: + encodeurl: 2.0.0 + escape-html: 1.0.3 + parseurl: 1.3.3 + send: 1.2.0 + transitivePeerDependencies: + - supports-color + + set-cookie-parser@2.7.1: {} + + setprototypeof@1.2.0: {} + + sha.js@2.4.11: + dependencies: + inherits: 2.0.4 + safe-buffer: 5.2.1 + + sharp@0.34.2: + dependencies: + color: 4.2.3 + detect-libc: 2.0.4 + semver: 7.7.2 + optionalDependencies: + '@img/sharp-darwin-arm64': 0.34.2 + '@img/sharp-darwin-x64': 0.34.2 + '@img/sharp-libvips-darwin-arm64': 1.1.0 + '@img/sharp-libvips-darwin-x64': 1.1.0 + '@img/sharp-libvips-linux-arm': 1.1.0 + '@img/sharp-libvips-linux-arm64': 1.1.0 + '@img/sharp-libvips-linux-ppc64': 1.1.0 + '@img/sharp-libvips-linux-s390x': 1.1.0 + '@img/sharp-libvips-linux-x64': 1.1.0 + '@img/sharp-libvips-linuxmusl-arm64': 1.1.0 + '@img/sharp-libvips-linuxmusl-x64': 1.1.0 + '@img/sharp-linux-arm': 0.34.2 + '@img/sharp-linux-arm64': 0.34.2 + '@img/sharp-linux-s390x': 0.34.2 + '@img/sharp-linux-x64': 0.34.2 + '@img/sharp-linuxmusl-arm64': 0.34.2 + '@img/sharp-linuxmusl-x64': 0.34.2 + '@img/sharp-wasm32': 0.34.2 + '@img/sharp-win32-arm64': 0.34.2 + '@img/sharp-win32-ia32': 0.34.2 + '@img/sharp-win32-x64': 
0.34.2 + optional: true + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + shell-quote@1.8.3: {} + + side-channel-list@1.0.0: + dependencies: + es-errors: 1.3.0 + object-inspect: 1.13.4 + + side-channel-map@1.0.1: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + object-inspect: 1.13.4 + + side-channel-weakmap@1.0.2: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + object-inspect: 1.13.4 + side-channel-map: 1.0.1 + + side-channel@1.1.0: + dependencies: + es-errors: 1.3.0 + object-inspect: 1.13.4 + side-channel-list: 1.0.0 + side-channel-map: 1.0.1 + side-channel-weakmap: 1.0.2 + + signal-exit@3.0.7: {} + + signal-exit@4.1.0: {} + + simple-swizzle@0.2.2: + dependencies: + is-arrayish: 0.3.2 + optional: true + + sisteransi@1.0.5: {} + + slash@3.0.0: {} + + source-map-js@1.2.1: {} + + source-map-support@0.5.13: + dependencies: + buffer-from: 1.1.2 + source-map: 0.6.1 + + source-map-support@0.5.21: + dependencies: + buffer-from: 1.1.2 + source-map: 0.6.1 + + source-map@0.6.1: {} + + split2@4.2.0: {} + + sprintf-js@1.0.3: {} + + sql-highlight@6.0.0: {} + + stack-utils@2.0.6: + dependencies: + escape-string-regexp: 2.0.0 + + statuses@2.0.1: {} + + stdin-discarder@0.1.0: + dependencies: + bl: 5.1.0 + + streamsearch@1.1.0: {} + + string-length@4.0.2: + dependencies: + char-regex: 1.0.2 + strip-ansi: 6.0.1 + + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + string-width@5.1.2: + dependencies: + eastasianwidth: 0.2.0 + emoji-regex: 9.2.2 + strip-ansi: 7.1.0 + + string_decoder@1.3.0: + dependencies: + safe-buffer: 5.2.1 + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-ansi@7.1.0: + dependencies: + ansi-regex: 6.1.0 + + strip-bom@3.0.0: {} + + strip-bom@4.0.0: {} + + strip-final-newline@2.0.0: {} + + strip-final-newline@3.0.0: {} + + strip-json-comments@2.0.1: {} + + 
strip-json-comments@3.1.1: {} + + styled-jsx@5.1.6(@babel/core@7.27.4)(react@19.1.0): + dependencies: + client-only: 0.0.1 + react: 19.1.0 + optionalDependencies: + '@babel/core': 7.27.4 + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + supports-color@8.1.1: + dependencies: + has-flag: 4.0.0 + + supports-preserve-symlinks-flag@1.0.0: {} + + tailwind-merge@3.3.0: {} + + tailwind-scrollbar-hide@2.0.0(tailwindcss@4.1.8): + dependencies: + tailwindcss: 4.1.8 + + tailwindcss@4.1.8: {} + + tapable@2.2.2: {} + + tar@7.4.3: + dependencies: + '@isaacs/fs-minipass': 4.0.1 + chownr: 3.0.0 + minipass: 7.1.2 + minizlib: 3.0.2 + mkdirp: 3.0.1 + yallist: 5.0.0 + + test-exclude@6.0.0: + dependencies: + '@istanbuljs/schema': 0.1.3 + glob: 7.2.3 + minimatch: 3.1.2 + + text-table@0.2.0: {} + + tinyglobby@0.2.13: + dependencies: + fdir: 6.4.4(picomatch@4.0.2) + picomatch: 4.0.2 + + tmpl@1.0.5: {} + + to-regex-range@5.0.1: + dependencies: + is-number: 7.0.0 + + toidentifier@1.0.1: {} + + tr46@0.0.3: {} + + tree-kill@1.2.2: {} + + ts-api-utils@1.4.3(typescript@5.8.3): + dependencies: + typescript: 5.8.3 + + ts-jest@29.3.4(@babel/core@7.27.4)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.27.4))(jest@29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)))(typescript@5.8.3): + dependencies: + bs-logger: 0.2.6 + ejs: 3.1.10 + fast-json-stable-stringify: 2.1.0 + jest: 29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)) + jest-util: 29.7.0 + json5: 2.2.3 + lodash.memoize: 4.1.2 + make-error: 1.3.6 + semver: 7.7.2 + type-fest: 4.41.0 + typescript: 5.8.3 + yargs-parser: 21.1.1 + optionalDependencies: + '@babel/core': 7.27.4 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + babel-jest: 29.7.0(@babel/core@7.27.4) + + ts-node-dev@2.0.0(@types/node@22.15.29)(typescript@5.8.3): + dependencies: + chokidar: 3.6.0 + dynamic-dedupe: 0.3.0 + minimist: 1.2.8 + mkdirp: 1.0.4 + resolve: 1.22.10 + 
rimraf: 2.7.1 + source-map-support: 0.5.21 + tree-kill: 1.2.2 + ts-node: 10.9.2(@types/node@22.15.29)(typescript@5.8.3) + tsconfig: 7.0.0 + typescript: 5.8.3 + transitivePeerDependencies: + - '@swc/core' + - '@swc/wasm' + - '@types/node' + + ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3): + dependencies: + '@cspotcode/source-map-support': 0.8.1 + '@tsconfig/node10': 1.0.11 + '@tsconfig/node12': 1.0.11 + '@tsconfig/node14': 1.0.3 + '@tsconfig/node16': 1.0.4 + '@types/node': 22.15.29 + acorn: 8.14.1 + acorn-walk: 8.3.4 + arg: 4.1.3 + create-require: 1.1.1 + diff: 4.0.2 + make-error: 1.3.6 + typescript: 5.8.3 + v8-compile-cache-lib: 3.0.1 + yn: 3.1.1 + + tsconfig@7.0.0: + dependencies: + '@types/strip-bom': 3.0.0 + '@types/strip-json-comments': 0.0.30 + strip-bom: 3.0.0 + strip-json-comments: 2.0.1 + + tslib@2.8.1: {} + + tsx@4.19.4: + dependencies: + esbuild: 0.25.2 + get-tsconfig: 4.10.0 + optionalDependencies: + fsevents: 2.3.3 + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + type-detect@4.0.8: {} + + type-fest@0.20.2: {} + + type-fest@0.21.3: {} + + type-fest@4.41.0: {} + + type-is@1.6.18: + dependencies: + media-typer: 0.3.0 + mime-types: 2.1.35 + + type-is@2.0.1: + dependencies: + content-type: 1.0.5 + media-typer: 1.1.0 + mime-types: 3.0.1 + + typeorm@0.3.24(pg@8.16.0)(reflect-metadata@0.2.2)(ts-node@10.9.2(@types/node@22.15.29)(typescript@5.8.3)): + dependencies: + '@sqltools/formatter': 1.2.5 + ansis: 3.17.0 + app-root-path: 3.1.0 + buffer: 6.0.3 + dayjs: 1.11.13 + debug: 4.4.1 + dedent: 1.6.0 + dotenv: 16.5.0 + glob: 10.4.5 + reflect-metadata: 0.2.2 + sha.js: 2.4.11 + sql-highlight: 6.0.0 + tslib: 2.8.1 + uuid: 11.1.0 + yargs: 17.7.2 + optionalDependencies: + pg: 8.16.0 + ts-node: 10.9.2(@types/node@22.15.29)(typescript@5.8.3) + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + + typescript@5.8.3: {} + + undici-types@5.26.5: {} + + undici-types@6.21.0: {} + + universalify@2.0.1: {} + + unpipe@1.0.0: {} + + 
update-browserslist-db@1.1.3(browserslist@4.24.4): + dependencies: + browserslist: 4.24.4 + escalade: 3.2.0 + picocolors: 1.1.1 + + update-browserslist-db@1.1.3(browserslist@4.25.0): + dependencies: + browserslist: 4.25.0 + escalade: 3.2.0 + picocolors: 1.1.1 + + uri-js@4.4.1: + dependencies: + punycode: 2.3.1 + + util-deprecate@1.0.2: {} + + utils-merge@1.0.1: {} + + uuid@11.1.0: {} + + v8-compile-cache-lib@3.0.1: {} + + v8-to-istanbul@9.3.0: + dependencies: + '@jridgewell/trace-mapping': 0.3.25 + '@types/istanbul-lib-coverage': 2.0.6 + convert-source-map: 2.0.0 + + validator@13.12.0: {} + + vary@1.1.2: {} + + vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4): + dependencies: + esbuild: 0.25.2 + fdir: 6.4.4(picomatch@4.0.2) + picomatch: 4.0.2 + postcss: 8.5.4 + rollup: 4.40.1 + tinyglobby: 0.2.13 + optionalDependencies: + '@types/node': 22.15.29 + fsevents: 2.3.3 + jiti: 2.4.2 + lightningcss: 1.30.1 + tsx: 4.19.4 + + void-elements@3.1.0: {} + + walker@1.0.8: + dependencies: + makeerror: 1.0.12 + + wcwidth@1.0.1: + dependencies: + defaults: 1.0.4 + + web-streams-polyfill@3.3.3: {} + + web-streams-polyfill@4.0.0-beta.3: {} + + webidl-conversions@3.0.1: {} + + whatwg-url@5.0.0: + dependencies: + tr46: 0.0.3 + webidl-conversions: 3.0.1 + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + word-wrap@1.2.5: {} + + wrap-ansi@7.0.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + + wrap-ansi@8.1.0: + dependencies: + ansi-styles: 6.2.1 + string-width: 5.1.2 + strip-ansi: 7.1.0 + + wrappy@1.0.2: {} + + write-file-atomic@4.0.2: + dependencies: + imurmurhash: 0.1.4 + signal-exit: 3.0.7 + + xtend@4.0.2: {} + + y18n@5.0.8: {} + + yallist@3.1.1: {} + + yallist@5.0.0: {} + + yargs-parser@21.1.1: {} + + yargs@17.7.2: + dependencies: + cliui: 8.0.1 + escalade: 3.2.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + + yn@3.1.1: {} + + yocto-queue@0.1.0: {} + + 
zod-to-json-schema@3.24.5(zod@3.25.48): + dependencies: + zod: 3.25.48 + + zod@3.25.48: {} diff --git a/scripts/verify-dist.js b/scripts/verify-dist.js new file mode 100644 index 0000000000000000000000000000000000000000..05b88b4e4d858aa9f205432d2bcb69a649f335c0 --- /dev/null +++ b/scripts/verify-dist.js @@ -0,0 +1,44 @@ +// scripts/verify-dist.js +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); +const projectRoot = path.resolve(__dirname, '..'); + +// Check if frontend dist exists +const frontendDistPath = path.join(projectRoot, 'frontend', 'dist'); +const frontendIndexPath = path.join(frontendDistPath, 'index.html'); + +if (!fs.existsSync(frontendDistPath)) { + console.error('❌ Error: frontend/dist directory does not exist!'); + console.error('Run "npm run frontend:build" to generate the frontend dist files.'); + process.exit(1); +} + +if (!fs.existsSync(frontendIndexPath)) { + console.error('❌ Error: frontend/dist/index.html does not exist!'); + console.error('Frontend build may be incomplete. Run "npm run frontend:build" again.'); + process.exit(1); +} + +// Check if backend dist exists +const backendDistPath = path.join(projectRoot, 'dist'); +const serverJsPath = path.join(backendDistPath, 'server.js'); + +if (!fs.existsSync(backendDistPath)) { + console.error('❌ Error: dist directory does not exist!'); + console.error('Run "npm run backend:build" to generate the backend dist files.'); + process.exit(1); +} + +if (!fs.existsSync(serverJsPath)) { + console.error('❌ Error: dist/server.js does not exist!'); + console.error('Backend build may be incomplete. Run "npm run backend:build" again.'); + process.exit(1); +} + +// All checks passed +console.log('✅ Verification passed! 
Frontend and backend dist files are present.'); +console.log('📦 Package is ready for publishing.'); \ No newline at end of file diff --git a/servers.json b/servers.json new file mode 100644 index 0000000000000000000000000000000000000000..2512913a6518f367cb432ee63f7a54b952baf188 --- /dev/null +++ b/servers.json @@ -0,0 +1,74722 @@ +{ + "firecrawl": { + "name": "firecrawl", + "display_name": "Firecrawl", + "description": "Advanced web scraping with JavaScript rendering, PDF support, and smart rate limiting", + "repository": { + "type": "git", + "url": "https://github.com/mendableai/firecrawl-mcp-server" + }, + "homepage": "https://github.com/mendableai/firecrawl-mcp-server", + "author": { + "name": "mendableai" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "firecrawl", + "scraping", + "web", + "api", + "automation" + ], + "examples": [ + { + "title": "Basic Scraping Example", + "description": "Scrape content from a single URL", + "prompt": "firecrawl_scrape with url 'https://example.com'" + }, + { + "title": "Batch Scraping", + "description": "Scrape multiple URLs", + "prompt": "firecrawl_batch_scrape with urls ['https://example1.com', 'https://example2.com']" + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "firecrawl-mcp" + ], + "env": { + "FIRECRAWL_API_KEY": "${FIRECRAWL_API_KEY}" + } + } + }, + "arguments": { + "FIRECRAWL_API_KEY": { + "description": "Your FireCrawl API key. Required for using the cloud API (default) and optional for self-hosted instances.", + "required": true, + "example": "fc-YOUR_API_KEY" + } + }, + "tools": [ + { + "name": "firecrawl_scrape", + "description": "Scrape a single webpage with advanced options for content extraction. Supports various formats including markdown, HTML, and screenshots. 
Can execute custom actions like clicking or scrolling before scraping.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "The URL to scrape" + }, + "formats": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "markdown", + "html", + "rawHtml", + "screenshot", + "links", + "screenshot@fullPage", + "extract" + ] + }, + "description": "Content formats to extract (default: ['markdown'])" + }, + "onlyMainContent": { + "type": "boolean", + "description": "Extract only the main content, filtering out navigation, footers, etc." + }, + "includeTags": { + "type": "array", + "items": { + "type": "string" + }, + "description": "HTML tags to specifically include in extraction" + }, + "excludeTags": { + "type": "array", + "items": { + "type": "string" + }, + "description": "HTML tags to exclude from extraction" + }, + "waitFor": { + "type": "number", + "description": "Time in milliseconds to wait for dynamic content to load" + }, + "timeout": { + "type": "number", + "description": "Maximum time in milliseconds to wait for the page to load" + }, + "actions": { + "type": "array", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "wait", + "click", + "screenshot", + "write", + "press", + "scroll", + "scrape", + "executeJavascript" + ], + "description": "Type of action to perform" + }, + "selector": { + "type": "string", + "description": "CSS selector for the target element" + }, + "milliseconds": { + "type": "number", + "description": "Time to wait in milliseconds (for wait action)" + }, + "text": { + "type": "string", + "description": "Text to write (for write action)" + }, + "key": { + "type": "string", + "description": "Key to press (for press action)" + }, + "direction": { + "type": "string", + "enum": [ + "up", + "down" + ], + "description": "Scroll direction" + }, + "script": { + "type": "string", + "description": "JavaScript code to execute" + }, + 
"fullPage": { + "type": "boolean", + "description": "Take full page screenshot" + } + }, + "required": [ + "type" + ] + }, + "description": "List of actions to perform before scraping" + }, + "extract": { + "type": "object", + "properties": { + "schema": { + "type": "object", + "description": "Schema for structured data extraction" + }, + "systemPrompt": { + "type": "string", + "description": "System prompt for LLM extraction" + }, + "prompt": { + "type": "string", + "description": "User prompt for LLM extraction" + } + }, + "description": "Configuration for structured data extraction" + }, + "mobile": { + "type": "boolean", + "description": "Use mobile viewport" + }, + "skipTlsVerification": { + "type": "boolean", + "description": "Skip TLS certificate verification" + }, + "removeBase64Images": { + "type": "boolean", + "description": "Remove base64 encoded images from output" + }, + "location": { + "type": "object", + "properties": { + "country": { + "type": "string", + "description": "Country code for geolocation" + }, + "languages": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Language codes for content" + } + }, + "description": "Location settings for scraping" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "firecrawl_map", + "description": "Discover URLs from a starting point. 
Can use both sitemap.xml and HTML link discovery.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "Starting URL for URL discovery" + }, + "search": { + "type": "string", + "description": "Optional search term to filter URLs" + }, + "ignoreSitemap": { + "type": "boolean", + "description": "Skip sitemap.xml discovery and only use HTML links" + }, + "sitemapOnly": { + "type": "boolean", + "description": "Only use sitemap.xml for discovery, ignore HTML links" + }, + "includeSubdomains": { + "type": "boolean", + "description": "Include URLs from subdomains in results" + }, + "limit": { + "type": "number", + "description": "Maximum number of URLs to return" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "firecrawl_crawl", + "description": "Start an asynchronous crawl of multiple pages from a starting URL. Supports depth control, path filtering, and webhook notifications.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "Starting URL for the crawl" + }, + "excludePaths": { + "type": "array", + "items": { + "type": "string" + }, + "description": "URL paths to exclude from crawling" + }, + "includePaths": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Only crawl these URL paths" + }, + "maxDepth": { + "type": "number", + "description": "Maximum link depth to crawl" + }, + "ignoreSitemap": { + "type": "boolean", + "description": "Skip sitemap.xml discovery" + }, + "limit": { + "type": "number", + "description": "Maximum number of pages to crawl" + }, + "allowBackwardLinks": { + "type": "boolean", + "description": "Allow crawling links that point to parent directories" + }, + "allowExternalLinks": { + "type": "boolean", + "description": "Allow crawling links to external domains" + }, + "webhook": { + "oneOf": [ + { + "type": "string", + "description": "Webhook URL to notify when crawl is complete" + }, + { + "type": 
"object", + "properties": { + "url": { + "type": "string", + "description": "Webhook URL" + }, + "headers": { + "type": "object", + "description": "Custom headers for webhook requests" + } + }, + "required": [ + "url" + ] + } + ] + }, + "deduplicateSimilarURLs": { + "type": "boolean", + "description": "Remove similar URLs during crawl" + }, + "ignoreQueryParameters": { + "type": "boolean", + "description": "Ignore query parameters when comparing URLs" + }, + "scrapeOptions": { + "type": "object", + "properties": { + "formats": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "markdown", + "html", + "rawHtml", + "screenshot", + "links", + "screenshot@fullPage", + "extract" + ] + } + }, + "onlyMainContent": { + "type": "boolean" + }, + "includeTags": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludeTags": { + "type": "array", + "items": { + "type": "string" + } + }, + "waitFor": { + "type": "number" + } + }, + "description": "Options for scraping each page" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "firecrawl_batch_scrape", + "description": "Scrape multiple URLs in batch mode. 
Returns a job ID that can be used to check status.", + "inputSchema": { + "type": "object", + "properties": { + "urls": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of URLs to scrape" + }, + "options": { + "type": "object", + "properties": { + "formats": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "markdown", + "html", + "rawHtml", + "screenshot", + "links", + "screenshot@fullPage", + "extract" + ] + } + }, + "onlyMainContent": { + "type": "boolean" + }, + "includeTags": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludeTags": { + "type": "array", + "items": { + "type": "string" + } + }, + "waitFor": { + "type": "number" + } + } + } + }, + "required": [ + "urls" + ] + } + }, + { + "name": "firecrawl_check_batch_status", + "description": "Check the status of a batch scraping job.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Batch job ID to check" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "firecrawl_check_crawl_status", + "description": "Check the status of a crawl job.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Crawl job ID to check" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "firecrawl_search", + "description": "Search and retrieve content from web pages with optional scraping. 
Returns SERP results by default (url, title, description) or full page content when scrapeOptions are provided.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query string" + }, + "limit": { + "type": "number", + "description": "Maximum number of results to return (default: 5)" + }, + "lang": { + "type": "string", + "description": "Language code for search results (default: en)" + }, + "country": { + "type": "string", + "description": "Country code for search results (default: us)" + }, + "tbs": { + "type": "string", + "description": "Time-based search filter" + }, + "filter": { + "type": "string", + "description": "Search filter" + }, + "location": { + "type": "object", + "properties": { + "country": { + "type": "string", + "description": "Country code for geolocation" + }, + "languages": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Language codes for content" + } + }, + "description": "Location settings for search" + }, + "scrapeOptions": { + "type": "object", + "properties": { + "formats": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "markdown", + "html", + "rawHtml" + ] + }, + "description": "Content formats to extract from search results" + }, + "onlyMainContent": { + "type": "boolean", + "description": "Extract only the main content from results" + }, + "waitFor": { + "type": "number", + "description": "Time in milliseconds to wait for dynamic content" + } + }, + "description": "Options for scraping search results" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "firecrawl_extract", + "description": "Extract structured information from web pages using LLM. 
Supports both cloud AI and self-hosted LLM extraction.", + "inputSchema": { + "type": "object", + "properties": { + "urls": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of URLs to extract information from" + }, + "prompt": { + "type": "string", + "description": "Prompt for the LLM extraction" + }, + "systemPrompt": { + "type": "string", + "description": "System prompt for LLM extraction" + }, + "schema": { + "type": "object", + "description": "JSON schema for structured data extraction" + }, + "allowExternalLinks": { + "type": "boolean", + "description": "Allow extraction from external links" + }, + "enableWebSearch": { + "type": "boolean", + "description": "Enable web search for additional context" + }, + "includeSubdomains": { + "type": "boolean", + "description": "Include subdomains in extraction" + } + }, + "required": [ + "urls" + ] + } + }, + { + "name": "firecrawl_deep_research", + "description": "Conduct deep research on a query using web crawling, search, and AI analysis.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The query to research" + }, + "maxDepth": { + "type": "number", + "description": "Maximum depth of research iterations (1-10)" + }, + "timeLimit": { + "type": "number", + "description": "Time limit in seconds (30-300)" + }, + "maxUrls": { + "type": "number", + "description": "Maximum number of URLs to analyze (1-1000)" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "firecrawl_generate_llmstxt", + "description": "Generate standardized LLMs.txt file for a given URL, which provides context about how LLMs should interact with the website.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "The URL to generate LLMs.txt from" + }, + "maxUrls": { + "type": "number", + "description": "Maximum number of URLs to process (1-100, default: 10)" + }, + "showFullText": { + "type": 
"boolean", + "description": "Whether to show the full LLMs-full.txt in the response" + } + }, + "required": [ + "url" + ] + } + } + ], + "is_official": true + }, + "rabbitmq": { + "name": "rabbitmq", + "display_name": "RabbitMQ", + "description": "The MCP server that interacts with RabbitMQ to publish and consume messages.", + "repository": { + "type": "git", + "url": "https://github.com/kenliao94/mcp-server-rabbitmq" + }, + "homepage": "https://github.com/kenliao94/mcp-server-rabbitmq", + "author": { + "name": "kenliao94" + }, + "license": "MIT", + "categories": [ + "Messaging" + ], + "tags": [ + "rabbitmq", + "server", + "messaging" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/kenliao94/mcp-server-rabbitmq", + "mcp-server-rabbitmq", + "--rabbitmq-host", + "${RABBITMQ_HOST}", + "--port", + "${RABBITMQ_PORT}", + "--username", + "${RABBITMQ_USERNAME}", + "--password", + "${RABBITMQ_PASSWORD}", + "--use-tls", + "${USE_TLS}" + ] + } + }, + "examples": [ + { + "title": "Publish Message", + "description": "Ask Claude to publish a message to a queue.", + "prompt": "Please publish a message to the queue." 
+ } + ], + "arguments": { + "RABBITMQ_HOST": { + "description": "The hostname of the RabbitMQ server (e.g., test.rabbit.com, localhost).", + "required": true, + "example": "test.rabbit.com" + }, + "RABBITMQ_PORT": { + "description": "The port number to connect to the RabbitMQ server (e.g., 5672).", + "required": true, + "example": "5672" + }, + "RABBITMQ_USERNAME": { + "description": "The username to authenticate with the RabbitMQ server.", + "required": true, + "example": "guest" + }, + "RABBITMQ_PASSWORD": { + "description": "The password for the RabbitMQ username provided.", + "required": true, + "example": "guest" + }, + "USE_TLS": { + "description": "Set to true if using TLS (AMQPS), otherwise false.", + "required": false, + "example": "true or false" + } + }, + "tools": [ + { + "name": "enqueue", + "description": "Enqueue a message to a queue hosted on RabbitMQ", + "inputSchema": { + "properties": { + "message": { + "description": "The message to publish", + "title": "Message", + "type": "string" + }, + "queue": { + "description": "The name of the queue", + "title": "Queue", + "type": "string" + } + }, + "required": [ + "message", + "queue" + ], + "title": "Enqueue", + "type": "object" + } + }, + { + "name": "fanout", + "description": "Publish a message to an exchange with fanout type", + "inputSchema": { + "properties": { + "message": { + "description": "The message to publish", + "title": "Message", + "type": "string" + }, + "exchange": { + "description": "The name of the exchange", + "title": "Exchange", + "type": "string" + } + }, + "required": [ + "message", + "exchange" + ], + "title": "Fanout", + "type": "object" + } + }, + { + "name": "list_queues", + "description": "List all the queues in the broker", + "inputSchema": { + "properties": {}, + "title": "ListQueues", + "type": "object" + } + }, + { + "name": "list_exchanges", + "description": "List all the exchanges in the broker", + "inputSchema": { + "properties": {}, + "title": "ListExchanges", + 
"type": "object" + } + }, + { + "name": "get_queue_info", + "description": "Get detailed information about a specific queue", + "inputSchema": { + "properties": { + "queue": { + "description": "The name of the queue to get info about", + "title": "Queue", + "type": "string" + }, + "vhost": { + "default": "/", + "description": "The virtual host where the queue exists", + "title": "Vhost", + "type": "string" + } + }, + "required": [ + "queue" + ], + "title": "GetQueueInfo", + "type": "object" + } + }, + { + "name": "delete_queue", + "description": "Delete a specific queue", + "inputSchema": { + "properties": { + "queue": { + "description": "The name of the queue to delete", + "title": "Queue", + "type": "string" + }, + "vhost": { + "default": "/", + "description": "The virtual host where the queue exists", + "title": "Vhost", + "type": "string" + } + }, + "required": [ + "queue" + ], + "title": "DeleteQueue", + "type": "object" + } + }, + { + "name": "purge_queue", + "description": "Remove all messages from a specific queue", + "inputSchema": { + "properties": { + "queue": { + "description": "The name of the queue to purge", + "title": "Queue", + "type": "string" + }, + "vhost": { + "default": "/", + "description": "The virtual host where the queue exists", + "title": "Vhost", + "type": "string" + } + }, + "required": [ + "queue" + ], + "title": "PurgeQueue", + "type": "object" + } + }, + { + "name": "delete_exchange", + "description": "Delete a specific exchange", + "inputSchema": { + "properties": { + "exchange": { + "description": "The name of the exchange to delete", + "title": "Exchange", + "type": "string" + }, + "vhost": { + "default": "/", + "description": "The virtual host where the exchange exists", + "title": "Vhost", + "type": "string" + } + }, + "required": [ + "exchange" + ], + "title": "DeleteExchange", + "type": "object" + } + }, + { + "name": "get_exchange_info", + "description": "Get detailed information about a specific exchange", + "inputSchema": 
{ + "properties": { + "exchange": { + "description": "The name of the exchange to get info about", + "title": "Exchange", + "type": "string" + }, + "vhost": { + "default": "/", + "description": "The virtual host where the exchange exists", + "title": "Vhost", + "type": "string" + } + }, + "required": [ + "exchange" + ], + "title": "GetExchangeInfo", + "type": "object" + } + } + ] + }, + "mcp-server-axiom": { + "display_name": "Axiom MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/axiomhq/mcp-server-axiom" + }, + "homepage": "https://axiom.co", + "author": { + "name": "axiomhq" + }, + "license": "MIT", + "tags": [ + "axiom", + "apl", + "data", + "query" + ], + "arguments": { + "token": { + "description": "Axiom API token", + "required": true, + "example": "xaat-your-token" + }, + "url": { + "description": "Axiom API URL", + "required": true, + "example": "https://api.axiom.co" + }, + "query-rate": { + "description": "Rate limit for queries", + "required": false, + "example": "1" + }, + "query-burst": { + "description": "Burst limit for queries", + "required": false, + "example": "1" + }, + "datasets-rate": { + "description": "Rate limit for dataset listing", + "required": false, + "example": "1" + }, + "datasets-burst": { + "description": "Burst limit for dataset listing", + "required": false, + "example": "1" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "axiom-mcp", + "args": [ + "--config", + "config.txt" + ], + "description": "Run using pre-built binary", + "recommended": true + } + }, + "examples": [ + { + "title": "Basic Configuration", + "description": "Configure the MCP server with a token", + "prompt": "echo \"token xaat-your-token\" > config.txt" + }, + { + "title": "Claude Desktop Integration", + "description": "Configure Claude desktop app to use the MCP server", + "prompt": "code ~/Library/Application\\ Support/Claude/claude_desktop_config.json" + } + ], + "name": "mcp-server-axiom", + 
"description": "A [Model Context Protocol](https://modelcontextprotocol.io/) server implementation for [Axiom](https://axiom.co) that enables AI agents to query your data using Axiom Processing Language (APL).", + "categories": [ + "Analytics" + ], + "is_official": true + }, + "mcp-clickhouse": { + "display_name": "ClickHouse MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/ClickHouse/mcp-clickhouse" + }, + "homepage": "https://glama.ai/mcp/servers/yvjy4csvo1", + "author": { + "name": "ClickHouse" + }, + "license": "[NOT GIVEN]", + "tags": [ + "clickhouse", + "database", + "sql" + ], + "arguments": { + "CLICKHOUSE_HOST": { + "description": "The hostname of your ClickHouse server", + "required": true, + "example": "sql-clickhouse.clickhouse.com" + }, + "CLICKHOUSE_USER": { + "description": "The username for authentication", + "required": true, + "example": "demo" + }, + "CLICKHOUSE_PASSWORD": { + "description": "The password for authentication", + "required": true, + "example": "" + }, + "CLICKHOUSE_PORT": { + "description": "The port number of your ClickHouse server", + "required": false, + "example": "8443" + }, + "CLICKHOUSE_SECURE": { + "description": "Enable/disable HTTPS connection", + "required": false, + "example": "true" + }, + "CLICKHOUSE_VERIFY": { + "description": "Enable/disable SSL certificate verification", + "required": false, + "example": "true" + }, + "CLICKHOUSE_CONNECT_TIMEOUT": { + "description": "Connection timeout in seconds", + "required": false, + "example": "30" + }, + "CLICKHOUSE_SEND_RECEIVE_TIMEOUT": { + "description": "Send/receive timeout in seconds", + "required": false, + "example": "300" + }, + "CLICKHOUSE_DATABASE": { + "description": "Default database to use", + "required": false, + "example": "your_database" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uv", + "args": [ + "run", + "--with", + "mcp-clickhouse", + "--python", + "3.13", + "mcp-clickhouse" + ], + "package": 
"mcp-clickhouse", + "env": { + "CLICKHOUSE_HOST": "", + "CLICKHOUSE_PORT": "", + "CLICKHOUSE_USER": "", + "CLICKHOUSE_PASSWORD": "", + "CLICKHOUSE_SECURE": "true", + "CLICKHOUSE_VERIFY": "true", + "CLICKHOUSE_CONNECT_TIMEOUT": "30", + "CLICKHOUSE_SEND_RECEIVE_TIMEOUT": "30" + }, + "description": "Install and run using uv package manager", + "recommended": true + }, + "python": { + "type": "python", + "command": "pip", + "args": [ + "install", + "mcp-clickhouse" + ], + "package": "mcp-clickhouse", + "description": "Install using pip", + "recommended": false + } + }, + "examples": [ + { + "title": "Run a SQL query", + "description": "Execute a SQL query on your ClickHouse cluster", + "prompt": "Run this SQL query: SELECT * FROM system.databases LIMIT 5" + }, + { + "title": "List databases", + "description": "List all databases on your ClickHouse cluster", + "prompt": "List all databases in my ClickHouse instance" + }, + { + "title": "List tables", + "description": "List all tables in a specific database", + "prompt": "Show me all tables in the system database" + } + ], + "name": "mcp-clickhouse", + "description": "An MCP server for ClickHouse.", + "categories": [ + "Databases" + ], + "tools": [ + { + "name": "list_databases", + "description": "List available ClickHouse databases", + "inputSchema": { + "properties": {}, + "title": "list_databasesArguments", + "type": "object" + } + }, + { + "name": "list_tables", + "description": "List available ClickHouse tables in a database", + "inputSchema": { + "properties": { + "database": { + "title": "Database", + "type": "string" + }, + "like": { + "default": null, + "title": "Like", + "type": "string" + } + }, + "required": [ + "database" + ], + "title": "list_tablesArguments", + "type": "object" + } + }, + { + "name": "run_select_query", + "description": "Run a SELECT query in a ClickHouse database", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + } + }, + "required": [ + "query" + 
], + "title": "run_select_queryArguments", + "type": "object" + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "aws-cost-explorer": { + "name": "aws-cost-explorer", + "display_name": "AWS Cost Explorer", + "description": "Optimize your AWS spend (including Amazon Bedrock spend) with this MCP server by examining spend across regions, services, instance types and foundation models ([demo video](https://www.youtube.com/watch?v=WuVOmYLRFmI&feature=youtu.be)).", + "repository": { + "type": "git", + "url": "https://github.com/aarora79/aws-cost-explorer-mcp-server" + }, + "homepage": "https://github.com/aarora79/aws-cost-explorer-mcp-server", + "author": { + "name": "aarora79" + }, + "license": "MIT", + "categories": [ + "Analytics" + ], + "tags": [ + "Cost Explorer", + "Amazon Bedrock", + "AWS" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--directory", + "/path/to/aws-cost-explorer-mcp-server", + "run", + "server.py" + ], + "env": { + "AWS_ACCESS_KEY_ID": "${AWS_ACCESS_KEY_ID}", + "AWS_SECRET_ACCESS_KEY": "${AWS_SECRET_ACCESS_KEY}", + "AWS_REGION": "${AWS_REGION}", + "BEDROCK_LOG_GROUP_NAME": "${BEDROCK_LOG_GROUP_NAME}", + "MCP_TRANSPORT": "stdio" + } + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "AWS_ACCESS_KEY_ID", + "-e", + "AWS_SECRET_ACCESS_KEY", + "-e", + "AWS_REGION", + "-e", + "BEDROCK_LOG_GROUP_NAME", + "-e", + "MCP_TRANSPORT", + "aws-cost-explorer-mcp:latest" + ], + "env": { + "AWS_ACCESS_KEY_ID": "${AWS_ACCESS_KEY_ID}", + "AWS_SECRET_ACCESS_KEY": "${AWS_SECRET_ACCESS_KEY}", + "AWS_REGION": "${AWS_REGION}", + "BEDROCK_LOG_GROUP_NAME": "${BEDROCK_LOG_GROUP_NAME}", + "MCP_TRANSPORT": "stdio" + } + } + }, + "examples": [ + { + "title": "Get EC2 Spending", + "description": "Retrieve the EC2 spending data for the previous day.", + "prompt": "What was my EC2 spend yesterday?" 
+ }, + { + "title": "Analyze Spending", + "description": "Analyze spending by region for the past 14 days.", + "prompt": "Analyze my spending by region for the past 14 days." + }, + { + "title": "Show Top Services", + "description": "Show me my top 5 AWS services by cost for the last month.", + "prompt": "Show me my top 5 AWS services by cost for the last month." + } + ], + "arguments": { + "AWS_ACCESS_KEY_ID": { + "description": "Your AWS Access Key ID required for authenticating API calls to AWS services.", + "required": true, + "example": "AKIAIOSFODNN7EXAMPLE" + }, + "AWS_SECRET_ACCESS_KEY": { + "description": "Your AWS Secret Access Key required alongside the Access Key ID for authentication.", + "required": true, + "example": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + }, + "AWS_REGION": { + "description": "The AWS region where your resources are located. Examples include 'us-east-1', 'eu-west-1'.", + "required": true, + "example": "us-east-1" + }, + "BEDROCK_LOG_GROUP_NAME": { + "description": "The name of the CloudWatch log group where Amazon Bedrock model invocation logs are stored.", + "required": true, + "example": "my-bedrock-log-group-name" + } + } + }, + "meilisearch-mcp": { + "display_name": "Meilisearch MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/meilisearch/meilisearch-mcp" + }, + "homepage": "https://github.com/meilisearch/meilisearch-mcp", + "author": { + "name": "meilisearch" + }, + "license": "MIT", + "tags": [ + "search", + "meilisearch", + "indexing", + "document management" + ], + "arguments": { + "url": { + "description": "Meilisearch instance URL", + "required": false, + "example": "http://localhost:7700" + }, + "api_key": { + "description": "Meilisearch API key", + "required": false, + "example": "your_master_key" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "-n", + "meilisearch-mcp" + ], + "description": "Run the Meilisearch MCP server using uvx (for Claude 
Desktop)", + "recommended": false + }, + "python": { + "type": "python", + "command": "python", + "args": [ + "-m", + "src.meilisearch_mcp" + ], + "env": { + "MEILI_HTTP_ADDR": "http://localhost:7700", + "MEILI_MASTER_KEY": "your_master_key" + }, + "description": "Run the Meilisearch MCP server using Python", + "recommended": true + } + }, + "examples": [ + { + "title": "Search in a specific index", + "description": "Search for a term in a specific Meilisearch index", + "prompt": "{\"name\": \"search\", \"arguments\": {\"query\": \"search term\", \"indexUid\": \"movies\", \"limit\": 10}}" + }, + { + "title": "Search across all indices", + "description": "Search for a term across all Meilisearch indices", + "prompt": "{\"name\": \"search\", \"arguments\": {\"query\": \"search term\", \"limit\": 5, \"sort\": [\"releaseDate:desc\"]}}" + }, + { + "title": "Update connection settings", + "description": "Update the Meilisearch connection URL and API key", + "prompt": "{\"name\": \"update-connection-settings\", \"arguments\": {\"url\": \"http://new-host:7700\", \"api_key\": \"new-api-key\"}}" + } + ], + "name": "meilisearch-mcp", + "description": "A Model Context Protocol (MCP) server for interacting with Meilisearch through LLM interfaces like Claude.", + "categories": [ + "Databases" + ], + "tools": [ + { + "name": "get-connection-settings", + "description": "Get current Meilisearch connection settings", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "update-connection-settings", + "description": "Update Meilisearch connection settings", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "optional": true + }, + "api_key": { + "type": "string", + "optional": true + } + } + } + }, + { + "name": "health-check", + "description": "Check Meilisearch server health", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get-version", + "description": "Get Meilisearch version 
information", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get-stats", + "description": "Get database statistics", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "create-index", + "description": "Create a new Meilisearch index", + "inputSchema": { + "type": "object", + "properties": { + "uid": { + "type": "string" + }, + "primaryKey": { + "type": "string", + "optional": true + } + }, + "required": [ + "uid" + ] + } + }, + { + "name": "list-indexes", + "description": "List all Meilisearch indexes", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get-documents", + "description": "Get documents from an index", + "inputSchema": { + "type": "object", + "properties": { + "indexUid": { + "type": "string" + }, + "offset": { + "type": "integer", + "optional": true + }, + "limit": { + "type": "integer", + "optional": true + } + }, + "required": [ + "indexUid" + ] + } + }, + { + "name": "add-documents", + "description": "Add documents to an index", + "inputSchema": { + "type": "object", + "properties": { + "indexUid": { + "type": "string" + }, + "documents": { + "type": "array" + }, + "primaryKey": { + "type": "string", + "optional": true + } + }, + "required": [ + "indexUid", + "documents" + ] + } + }, + { + "name": "get-settings", + "description": "Get current settings for an index", + "inputSchema": { + "type": "object", + "properties": { + "indexUid": { + "type": "string" + } + }, + "required": [ + "indexUid" + ] + } + }, + { + "name": "update-settings", + "description": "Update settings for an index", + "inputSchema": { + "type": "object", + "properties": { + "indexUid": { + "type": "string" + }, + "settings": { + "type": "object" + } + }, + "required": [ + "indexUid", + "settings" + ] + } + }, + { + "name": "search", + "description": "Search through Meilisearch indices. 
If indexUid is not provided, it will search across all indices.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string" + }, + "indexUid": { + "type": "string", + "optional": true + }, + "limit": { + "type": "integer", + "optional": true + }, + "offset": { + "type": "integer", + "optional": true + }, + "filter": { + "type": "string", + "optional": true + }, + "sort": { + "type": "array", + "items": { + "type": "string" + }, + "optional": true + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "get-task", + "description": "Get information about a specific task", + "inputSchema": { + "type": "object", + "properties": { + "taskUid": { + "type": "integer" + } + }, + "required": [ + "taskUid" + ] + } + }, + { + "name": "get-tasks", + "description": "Get list of tasks with optional filters", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "optional": true + }, + "from": { + "type": "integer", + "optional": true + }, + "reverse": { + "type": "boolean", + "optional": true + }, + "batchUids": { + "type": "array", + "items": { + "type": "string" + }, + "optional": true + }, + "uids": { + "type": "array", + "items": { + "type": "integer" + }, + "optional": true + }, + "canceledBy": { + "type": "array", + "items": { + "type": "string" + }, + "optional": true + }, + "types": { + "type": "array", + "items": { + "type": "string" + }, + "optional": true + }, + "statuses": { + "type": "array", + "items": { + "type": "string" + }, + "optional": true + }, + "indexUids": { + "type": "array", + "items": { + "type": "string" + }, + "optional": true + }, + "afterEnqueuedAt": { + "type": "string", + "optional": true + }, + "beforeEnqueuedAt": { + "type": "string", + "optional": true + }, + "afterStartedAt": { + "type": "string", + "optional": true + }, + "beforeStartedAt": { + "type": "string", + "optional": true + }, + "afterFinishedAt": { + "type": "string", + "optional": true + }, + 
"beforeFinishedAt": { + "type": "string", + "optional": true + } + } + } + }, + { + "name": "cancel-tasks", + "description": "Cancel tasks based on filters", + "inputSchema": { + "type": "object", + "properties": { + "uids": { + "type": "string", + "optional": true + }, + "indexUids": { + "type": "string", + "optional": true + }, + "types": { + "type": "string", + "optional": true + }, + "statuses": { + "type": "string", + "optional": true + } + } + } + }, + { + "name": "get-keys", + "description": "Get list of API keys", + "inputSchema": { + "type": "object", + "properties": { + "offset": { + "type": "integer", + "optional": true + }, + "limit": { + "type": "integer", + "optional": true + } + } + } + }, + { + "name": "create-key", + "description": "Create a new API key", + "inputSchema": { + "type": "object", + "properties": { + "description": { + "type": "string", + "optional": true + }, + "actions": { + "type": "array" + }, + "indexes": { + "type": "array" + }, + "expiresAt": { + "type": "string", + "optional": true + } + }, + "required": [ + "actions", + "indexes" + ] + } + }, + { + "name": "delete-key", + "description": "Delete an API key", + "inputSchema": { + "type": "object", + "properties": { + "key": { + "type": "string" + } + }, + "required": [ + "key" + ] + } + }, + { + "name": "get-health-status", + "description": "Get comprehensive health status of Meilisearch", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get-index-metrics", + "description": "Get detailed metrics for an index", + "inputSchema": { + "type": "object", + "properties": { + "indexUid": { + "type": "string" + } + }, + "required": [ + "indexUid" + ] + } + }, + { + "name": "get-system-info", + "description": "Get system-level information", + "inputSchema": { + "type": "object", + "properties": {} + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "actors-mcp-server": { + "display_name": "Apify Model Context Protocol (MCP) 
Server", + "repository": { + "type": "git", + "url": "https://github.com/apify/actors-mcp-server" + }, + "homepage": "https://apify.com/apify/actors-mcp-server", + "author": { + "name": "apify" + }, + "license": "MIT", + "tags": [ + "mcp", + "model-context-protocol", + "apify", + "actors", + "ai-agents" + ], + "arguments": { + "APIFY_TOKEN": { + "description": "Your Apify API token for authentication", + "required": true, + "example": "your-apify-token" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@apify/actors-mcp-server" + ], + "env": { + "APIFY_TOKEN": "your-apify-token" + }, + "description": "Install and run using NPM" + } + }, + "examples": [ + { + "title": "Search for restaurants", + "description": "Find top restaurants in San Francisco", + "prompt": "Find top 10 best Italian restaurants in San Francisco." + }, + { + "title": "Instagram profile analysis", + "description": "Analyze an Instagram profile", + "prompt": "Find and analyze Instagram profile of The Rock." + }, + { + "title": "Web search and summarization", + "description": "Search the web and summarize information", + "prompt": "Search web and summarize recent trends about AI Agents." 
+ } + ], + "name": "actors-mcp-server", + "description": "Implementation of an MCP server for all [Apify Actors](https://apify.com/store).", + "categories": [ + "Web Services" + ], + "is_official": true + }, + "cfbd-api": { + "name": "cfbd-api", + "display_name": "College Football Data API", + "description": "An MCP server for the [College Football Data API](https://collegefootballdata.com/).", + "repository": { + "type": "git", + "url": "https://github.com/lenwood/cfbd-mcp-server" + }, + "homepage": "https://github.com/lenwood/cfbd-mcp-server", + "author": { + "name": "lenwood" + }, + "license": "MIT", + "categories": [ + "Analytics" + ], + "tags": [ + "football", + "college", + "API", + "statistics" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/lenwood/cfbd-mcp-server", + "cfbd-mcp-server" + ], + "env": { + "CFB_API_KEY": "${CFB_API_KEY}", + "PATH": "${PATH}" + } + } + }, + "examples": [ + { + "title": "Get the largest upset among FCS games during the 2014 season", + "description": "Query the server for significant game upsets in the 2014 college football season.", + "prompt": "What was the largest upset among FCS games during the 2014 season?" + } + ], + "arguments": { + "CFB_API_KEY": { + "description": "The API key required to authenticate requests to the College Football Data API.", + "required": true, + "example": "your_api_key_here" + }, + "PATH": { + "description": "Environment variable that specifies the path to the Python executable being used by the server.", + "required": false, + "example": "/full/path/to/python" + } + }, + "tools": [ + { + "name": "get-games", + "description": "Get college football game data. Required: year. 
Optional: week, season_type, team, conference, category, game_id.", + "inputSchema": { + "type": "object", + "properties": { + "year": { + "type": "integer", + "description": "Year of the games" + }, + "week": { + "type": "integer", + "description": "Week of the games" + }, + "season_type": { + "type": "string", + "description": "Type of season (e.g., regular, postseason)" + }, + "team": { + "type": "string", + "description": "Name of the team" + }, + "conference": { + "type": "string", + "description": "Name of the conference" + }, + "category": { + "type": "string", + "description": "Category of games" + }, + "game_id": { + "type": "integer", + "description": "ID of the game" + } + }, + "required": [ + "year" + ] + } + }, + { + "name": "get-records", + "description": "Get college football team record data. Optional: year, team, conference.", + "inputSchema": { + "type": "object", + "properties": { + "year": { + "type": "integer", + "description": "Year of the records" + }, + "team": { + "type": "string", + "description": "Name of the team" + }, + "conference": { + "type": "string", + "description": "Name of the conference" + } + }, + "required": [] + } + }, + { + "name": "get-games-teams", + "description": "Get college football team game data. Required: year plus at least one of: week, team or conference.", + "inputSchema": { + "type": "object", + "properties": { + "year": { + "type": "integer", + "description": "Year of the games" + }, + "week": { + "type": "integer", + "description": "Week of the games" + }, + "team": { + "type": "string", + "description": "Name of the team" + }, + "conference": { + "type": "string", + "description": "Name of the conference" + } + }, + "required": [ + "year" + ] + } + }, + { + "name": "get-plays", + "description": "Get college football play-by-play data. Required: year AND week. 
Optional: season_type, team, offense, defense, conference, offense_conference, defense_conference, play_type, classification.", + "inputSchema": { + "type": "object", + "properties": { + "year": { + "type": "integer", + "description": "Year of the plays" + }, + "week": { + "type": "integer", + "description": "Week of the plays" + }, + "season_type": { + "type": "string", + "description": "Type of season (e.g., regular, postseason)" + }, + "team": { + "type": "string", + "description": "Name of the team" + }, + "offense": { + "type": "string", + "description": "Name of the offense team" + }, + "defense": { + "type": "string", + "description": "Name of the defense team" + }, + "conference": { + "type": "string", + "description": "Name of the conference" + }, + "offense_conference": { + "type": "string", + "description": "Conference of the offense team" + }, + "defense_conference": { + "type": "string", + "description": "Conference of the defense team" + }, + "play_type": { + "type": "string", + "description": "Type of play" + }, + "classification": { + "type": "string", + "description": "Classification of the play" + } + }, + "required": [ + "year", + "week" + ] + } + }, + { + "name": "get-drives", + "description": "Get college football drive data. Required: year. 
Optional: season_type, week, team, offense, defense, conference, offense_conference, defense_conference, classification.", + "inputSchema": { + "type": "object", + "properties": { + "year": { + "type": "integer", + "description": "Year of the drives" + }, + "season_type": { + "type": "string", + "description": "Type of season (e.g., regular, postseason)" + }, + "week": { + "type": "integer", + "description": "Week of the drives" + }, + "team": { + "type": "string", + "description": "Name of the team" + }, + "offense": { + "type": "string", + "description": "Name of the offense team" + }, + "defense": { + "type": "string", + "description": "Name of the defense team" + }, + "conference": { + "type": "string", + "description": "Name of the conference" + }, + "offense_conference": { + "type": "string", + "description": "Conference of the offense team" + }, + "defense_conference": { + "type": "string", + "description": "Conference of the defense team" + }, + "classification": { + "type": "string", + "description": "Classification of the drive" + } + }, + "required": [ + "year" + ] + } + }, + { + "name": "get-play-stats", + "description": "Get college football play statistic data. Optional: year, week, team, game_id, athlete_id, stat_type_id, season_type, conference. 
At least one parameter is required.", + "inputSchema": { + "type": "object", + "properties": { + "year": { + "type": "integer", + "description": "Year of the statistics" + }, + "week": { + "type": "integer", + "description": "Week of the statistics" + }, + "team": { + "type": "string", + "description": "Name of the team" + }, + "game_id": { + "type": "integer", + "description": "ID of the game" + }, + "athlete_id": { + "type": "integer", + "description": "ID of the athlete" + }, + "stat_type_id": { + "type": "integer", + "description": "ID of the statistic type" + }, + "season_type": { + "type": "string", + "description": "Type of season (e.g., regular, postseason)" + }, + "conference": { + "type": "string", + "description": "Name of the conference" + } + }, + "required": [] + } + }, + { + "name": "get-rankings", + "description": "Get college football rankings data. Required: year. Optional: week, season_type.", + "inputSchema": { + "type": "object", + "properties": { + "year": { + "type": "integer", + "description": "Year of the rankings" + }, + "week": { + "type": "integer", + "description": "Week of the rankings" + }, + "season_type": { + "type": "string", + "description": "Type of season (e.g., regular, postseason)" + } + }, + "required": [ + "year" + ] + } + }, + { + "name": "get-pregame-win-probability", + "description": "Get college football pregame win probability data. Optional: year, week, team, season_type. 
At least one parameter is required.", + "inputSchema": { + "type": "object", + "properties": { + "year": { + "type": "integer", + "description": "Year of the probabilities" + }, + "week": { + "type": "integer", + "description": "Week of the probabilities" + }, + "team": { + "type": "string", + "description": "Name of the team" + }, + "season_type": { + "type": "string", + "description": "Type of season (e.g., regular, postseason)" + } + }, + "required": [] + } + }, + { + "name": "get-advanced-box-score", + "description": "Get advanced box score data for college football games. Required: gameId.", + "inputSchema": { + "type": "object", + "properties": { + "gameId": { + "type": "integer", + "description": "ID of the game" + } + }, + "required": [ + "gameId" + ] + } + } + ] + }, + "redis": { + "name": "redis", + "display_name": "Redis", + "description": "MCP server to interact with Redis Server, AWS Memory DB, etc for caching or other use-cases where in-memory and key-value based storage is appropriate", + "repository": { + "type": "git", + "url": "https://github.com/prajwalnayak7/mcp-server-redis" + }, + "homepage": "https://github.com/prajwalnayak7/mcp-server-redis", + "author": { + "name": "prajwalnayak7" + }, + "license": "MIT", + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/prajwalnayak7/mcp-server-redis", + "src/server.py" + ] + } + }, + "examples": [ + { + "title": "Check Redis Connection Status", + "description": "User requests the current Redis connection status.", + "prompt": "What's the current Redis connection status?" + }, + { + "title": "Store Name in Redis", + "description": "User wants to store their name in Redis.", + "prompt": "Can you store my name \"Alice\" in Redis?" 
+ }, + { + "title": "Verify Stored Name in Redis", + "description": "User wants to verify the value stored in Redis.", + "prompt": "Yes please verify it" + } + ], + "categories": [ + "Databases" + ] + }, + "iterm-mcp": { + "name": "iterm-mcp", + "display_name": "iTerm", + "description": "Integration with iTerm2 terminal emulator for macOS, enabling LLMs to execute and monitor terminal commands.", + "repository": { + "type": "git", + "url": "https://github.com/ferrislucas/iterm-mcp" + }, + "homepage": "https://github.com/ferrislucas/iterm-mcp", + "author": { + "name": "ferrislucas" + }, + "license": "MIT", + "categories": [ + "System Tools" + ], + "tags": [ + "iTerm", + "server", + "automation" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "iterm-mcp" + ] + } + }, + "tools": [ + { + "name": "write_to_terminal", + "description": "Writes text to the active iTerm terminal - often used to run a command in the terminal", + "inputSchema": { + "type": "object", + "properties": { + "command": { + "type": "string", + "description": "The command to run or text to write to the terminal" + } + }, + "required": [ + "command" + ] + } + }, + { + "name": "read_terminal_output", + "description": "Reads the output from the active iTerm terminal", + "inputSchema": { + "type": "object", + "properties": { + "linesOfOutput": { + "type": "number", + "description": "The number of lines of output to read." 
+ } + }, + "required": [ + "linesOfOutput" + ] + } + }, + { + "name": "send_control_character", + "description": "Sends a control character to the active iTerm terminal (e.g., Control-C, or special sequences like ']' for telnet escape)", + "inputSchema": { + "type": "object", + "properties": { + "letter": { + "type": "string", + "description": "The letter corresponding to the control character (e.g., 'C' for Control-C, ']' for telnet escape)" + } + }, + "required": [ + "letter" + ] + } + } + ] + }, + "everything-search": { + "name": "everything-search", + "display_name": "Everything Search", + "description": "Fast file searching capabilities across Windows (using [Everything SDK](https://www.voidtools.com/support/everything/sdk/)), macOS (using mdfind command), and Linux (using locate/plocate command).", + "repository": { + "type": "git", + "url": "https://github.com/mamertofabian/mcp-everything-search" + }, + "homepage": "https://github.com/mamertofabian/mcp-everything-search", + "author": { + "name": "mamertofabian" + }, + "license": "MIT", + "categories": [ + "System Tools" + ], + "tags": [ + "search", + "everything" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-everything-search" + ], + "env": { + "EVERYTHING_SDK_PATH": "${EVERYTHING_SDK_PATH}" + } + }, + "python": { + "type": "python", + "command": "python", + "args": [ + "-m", + "mcp_server_everything_search" + ], + "env": { + "EVERYTHING_SDK_PATH": "${EVERYTHING_SDK_PATH}" + } + } + }, + "examples": [ + { + "title": "Search Python files", + "description": "Search for all Python files in the system.", + "prompt": "{\"query\": \"*.py\",\"max_results\": 50,\"sort_by\": 6}" + }, + { + "title": "Search files modified today", + "description": "Find files with the .py extension that were modified today.", + "prompt": "{\"query\": \"ext:py datemodified:today\",\"max_results\": 10}" + } + ], + "arguments": { + "EVERYTHING_SDK_PATH": { + "description": "Environment 
variable that specifies the path to the Everything SDK DLL required for the server to function properly.", + "required": true, + "example": "path/to/Everything-SDK/dll/Everything64.dll" + } + }, + "tools": [ + { + "name": "search", + "description": "Universal file search tool for Darwin\n\nCurrent Implementation:\nUsing mdfind (Spotlight) with native macOS search capabilities\n\nSearch Syntax Guide:\nmacOS Spotlight (mdfind) Search Syntax:\n \nBasic Usage:\n- Simple text search: Just type the words you're looking for\n- Phrase search: Use quotes (\"exact phrase\")\n- Filename search: -name \"filename\"\n- Directory scope: -onlyin /path/to/dir\n\nSpecial Parameters:\n- Live updates: -live\n- Literal search: -literal\n- Interpreted search: -interpret\n\nMetadata Attributes:\n- kMDItemDisplayName\n- kMDItemTextContent\n- kMDItemKind\n- kMDItemFSSize\n- And many more OS X metadata attributes\n", + "inputSchema": { + "type": "object", + "properties": { + "base": { + "description": "Base search parameters common to all platforms.", + "properties": { + "query": { + "description": "Search query string. 
See platform-specific documentation for syntax details.", + "title": "Query", + "type": "string" + }, + "max_results": { + "default": 100, + "description": "Maximum number of results to return (1-1000)", + "maximum": 1000, + "minimum": 1, + "title": "Max Results", + "type": "integer" + } + }, + "required": [ + "query" + ], + "title": "BaseSearchQuery", + "type": "object" + }, + "mac_params": { + "description": "macOS-specific search parameters for mdfind.", + "properties": { + "live_updates": { + "default": false, + "description": "Provide live updates to search results", + "title": "Live Updates", + "type": "boolean" + }, + "search_directory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Limit search to specific directory (-onlyin parameter)", + "title": "Search Directory" + }, + "literal_query": { + "default": false, + "description": "Treat query as literal string without interpretation", + "title": "Literal Query", + "type": "boolean" + }, + "interpret_query": { + "default": false, + "description": "Interpret query as if typed in Spotlight menu", + "title": "Interpret Query", + "type": "boolean" + } + }, + "title": "MacSpecificParams", + "type": "object" + } + }, + "required": [ + "base" + ] + } + } + ] + }, + "playwright-mcp": { + "name": "mcp-playwright", + "display_name": "Playwright", + "description": "This MCP Server will help you run browser automation and webscraping using Playwright", + "repository": { + "type": "git", + "url": "https://github.com/executeautomation/mcp-playwright" + }, + "homepage": "https://github.com/executeautomation/mcp-playwright", + "author": { + "name": "executeautomation" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "Playwright", + "Browser Automation" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@executeautomation/playwright-mcp-server" + ] + } + }, + "tools": [ + { + "name": 
"start_codegen_session", + "description": "Start a new code generation session to record Playwright actions", + "inputSchema": { + "type": "object", + "properties": { + "options": { + "type": "object", + "description": "Code generation options", + "properties": { + "outputPath": { + "type": "string", + "description": "Directory path where generated tests will be saved (use absolute path)" + }, + "testNamePrefix": { + "type": "string", + "description": "Prefix to use for generated test names (default: 'GeneratedTest')" + }, + "includeComments": { + "type": "boolean", + "description": "Whether to include descriptive comments in generated tests" + } + }, + "required": [ + "outputPath" + ] + } + }, + "required": [ + "options" + ] + } + }, + { + "name": "end_codegen_session", + "description": "End a code generation session and generate the test file", + "inputSchema": { + "type": "object", + "properties": { + "sessionId": { + "type": "string", + "description": "ID of the session to end" + } + }, + "required": [ + "sessionId" + ] + } + }, + { + "name": "get_codegen_session", + "description": "Get information about a code generation session", + "inputSchema": { + "type": "object", + "properties": { + "sessionId": { + "type": "string", + "description": "ID of the session to retrieve" + } + }, + "required": [ + "sessionId" + ] + } + }, + { + "name": "clear_codegen_session", + "description": "Clear a code generation session without generating a test", + "inputSchema": { + "type": "object", + "properties": { + "sessionId": { + "type": "string", + "description": "ID of the session to clear" + } + }, + "required": [ + "sessionId" + ] + } + }, + { + "name": "playwright_navigate", + "description": "Navigate to a URL", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL to navigate to the website specified" + }, + "browserType": { + "type": "string", + "description": "Browser type to use (chromium, firefox, webkit). 
Defaults to chromium", + "enum": [ + "chromium", + "firefox", + "webkit" + ] + }, + "width": { + "type": "number", + "description": "Viewport width in pixels (default: 1280)" + }, + "height": { + "type": "number", + "description": "Viewport height in pixels (default: 720)" + }, + "timeout": { + "type": "number", + "description": "Navigation timeout in milliseconds" + }, + "waitUntil": { + "type": "string", + "description": "Navigation wait condition" + }, + "headless": { + "type": "boolean", + "description": "Run browser in headless mode (default: false)" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "playwright_screenshot", + "description": "Take a screenshot of the current page or a specific element", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name for the screenshot" + }, + "selector": { + "type": "string", + "description": "CSS selector for element to screenshot" + }, + "width": { + "type": "number", + "description": "Width in pixels (default: 800)" + }, + "height": { + "type": "number", + "description": "Height in pixels (default: 600)" + }, + "storeBase64": { + "type": "boolean", + "description": "Store screenshot in base64 format (default: true)" + }, + "fullPage": { + "type": "boolean", + "description": "Store screenshot of the entire page (default: false)" + }, + "savePng": { + "type": "boolean", + "description": "Save screenshot as PNG file (default: false)" + }, + "downloadsDir": { + "type": "string", + "description": "Custom downloads directory path (default: user's Downloads folder)" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "playwright_click", + "description": "Click an element on the page", + "inputSchema": { + "type": "object", + "properties": { + "selector": { + "type": "string", + "description": "CSS selector for the element to click" + } + }, + "required": [ + "selector" + ] + } + }, + { + "name": "playwright_iframe_click", + "description": "Click an 
element in an iframe on the page", + "inputSchema": { + "type": "object", + "properties": { + "iframeSelector": { + "type": "string", + "description": "CSS selector for the iframe containing the element to click" + }, + "selector": { + "type": "string", + "description": "CSS selector for the element to click" + } + }, + "required": [ + "iframeSelector", + "selector" + ] + } + }, + { + "name": "playwright_fill", + "description": "fill out an input field", + "inputSchema": { + "type": "object", + "properties": { + "selector": { + "type": "string", + "description": "CSS selector for input field" + }, + "value": { + "type": "string", + "description": "Value to fill" + } + }, + "required": [ + "selector", + "value" + ] + } + }, + { + "name": "playwright_select", + "description": "Select an element on the page with Select tag", + "inputSchema": { + "type": "object", + "properties": { + "selector": { + "type": "string", + "description": "CSS selector for element to select" + }, + "value": { + "type": "string", + "description": "Value to select" + } + }, + "required": [ + "selector", + "value" + ] + } + }, + { + "name": "playwright_hover", + "description": "Hover an element on the page", + "inputSchema": { + "type": "object", + "properties": { + "selector": { + "type": "string", + "description": "CSS selector for element to hover" + } + }, + "required": [ + "selector" + ] + } + }, + { + "name": "playwright_evaluate", + "description": "Execute JavaScript in the browser console", + "inputSchema": { + "type": "object", + "properties": { + "script": { + "type": "string", + "description": "JavaScript code to execute" + } + }, + "required": [ + "script" + ] + } + }, + { + "name": "playwright_console_logs", + "description": "Retrieve console logs from the browser with filtering options", + "inputSchema": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "Type of logs to retrieve (all, error, warning, log, info, debug)", + "enum": [ + "all", 
+ "error", + "warning", + "log", + "info", + "debug" + ] + }, + "search": { + "type": "string", + "description": "Text to search for in logs (handles text with square brackets)" + }, + "limit": { + "type": "number", + "description": "Maximum number of logs to return" + }, + "clear": { + "type": "boolean", + "description": "Whether to clear logs after retrieval (default: false)" + } + }, + "required": [] + } + }, + { + "name": "playwright_close", + "description": "Close the browser and release all resources", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "playwright_get", + "description": "Perform an HTTP GET request", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL to perform GET operation" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "playwright_post", + "description": "Perform an HTTP POST request", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL to perform POST operation" + }, + "value": { + "type": "string", + "description": "Data to post in the body" + }, + "token": { + "type": "string", + "description": "Bearer token for authorization" + }, + "headers": { + "type": "object", + "description": "Additional headers to include in the request", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "url", + "value" + ] + } + }, + { + "name": "playwright_put", + "description": "Perform an HTTP PUT request", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL to perform PUT operation" + }, + "value": { + "type": "string", + "description": "Data to PUT in the body" + } + }, + "required": [ + "url", + "value" + ] + } + }, + { + "name": "playwright_patch", + "description": "Perform an HTTP PATCH request", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + 
"description": "URL to perform PUT operation" + }, + "value": { + "type": "string", + "description": "Data to PATCH in the body" + } + }, + "required": [ + "url", + "value" + ] + } + }, + { + "name": "playwright_delete", + "description": "Perform an HTTP DELETE request", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL to perform DELETE operation" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "playwright_expect_response", + "description": "Ask Playwright to start waiting for a HTTP response. This tool initiates the wait operation but does not wait for its completion.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique & arbitrary identifier to be used for retrieving this response later with `Playwright_assert_response`." + }, + "url": { + "type": "string", + "description": "URL pattern to match in the response." + } + }, + "required": [ + "id", + "url" + ] + } + }, + { + "name": "playwright_assert_response", + "description": "Wait for and validate a previously initiated HTTP response wait operation.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Identifier of the HTTP response initially expected using `Playwright_expect_response`." + }, + "value": { + "type": "string", + "description": "Data to expect in the body of the HTTP response. If provided, the assertion will fail if this value is not found in the response body." 
+ } + }, + "required": [ + "id" + ] + } + }, + { + "name": "playwright_custom_user_agent", + "description": "Set a custom User Agent for the browser", + "inputSchema": { + "type": "object", + "properties": { + "userAgent": { + "type": "string", + "description": "Custom User Agent for the Playwright browser instance" + } + }, + "required": [ + "userAgent" + ] + } + }, + { + "name": "playwright_get_visible_text", + "description": "Get the visible text content of the current page", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "playwright_get_visible_html", + "description": "Get the HTML content of the current page", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "playwright_go_back", + "description": "Navigate back in browser history", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "playwright_go_forward", + "description": "Navigate forward in browser history", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "playwright_drag", + "description": "Drag an element to a target location", + "inputSchema": { + "type": "object", + "properties": { + "sourceSelector": { + "type": "string", + "description": "CSS selector for the element to drag" + }, + "targetSelector": { + "type": "string", + "description": "CSS selector for the target location" + } + }, + "required": [ + "sourceSelector", + "targetSelector" + ] + } + }, + { + "name": "playwright_press_key", + "description": "Press a keyboard key", + "inputSchema": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Key to press (e.g. 
'Enter', 'ArrowDown', 'a')" + }, + "selector": { + "type": "string", + "description": "Optional CSS selector to focus before pressing key" + } + }, + "required": [ + "key" + ] + } + }, + { + "name": "playwright_save_as_pdf", + "description": "Save the current page as a PDF file", + "inputSchema": { + "type": "object", + "properties": { + "outputPath": { + "type": "string", + "description": "Directory path where PDF will be saved" + }, + "filename": { + "type": "string", + "description": "Name of the PDF file (default: page.pdf)" + }, + "format": { + "type": "string", + "description": "Page format (e.g. 'A4', 'Letter')" + }, + "printBackground": { + "type": "boolean", + "description": "Whether to print background graphics" + }, + "margin": { + "type": "object", + "description": "Page margins", + "properties": { + "top": { + "type": "string" + }, + "right": { + "type": "string" + }, + "bottom": { + "type": "string" + }, + "left": { + "type": "string" + } + } + } + }, + "required": [ + "outputPath" + ] + } + } + ] + }, + "chroma": { + "name": "chroma", + "display_name": "Chroma", + "description": "Vector database server for semantic document search and metadata filtering, built on Chroma", + "repository": { + "type": "git", + "url": "https://github.com/privetin/chroma" + }, + "homepage": "https://github.com/privetin/chroma", + "author": { + "name": "privetin" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "vector database", + "semantic search" + ], + "examples": [ + { + "title": "Create a document", + "description": "Creates a new document with specified content and metadata.", + "prompt": "create_document({\"document_id\": \"ml_paper1\", \"content\": \"Convolutional neural networks improve image recognition accuracy.\", \"metadata\": {\"year\": 2020, \"field\": \"computer vision\", \"complexity\": \"advanced\"}})" + }, + { + "title": "Search similar documents", + "description": "Finds documents semantically similar to a given query.", + 
"prompt": "search_similar({\"query\": \"machine learning models\", \"num_results\": 2, \"metadata_filter\": {\"year\": 2020, \"field\": \"computer vision\"}})" + } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/privetin/chroma", + "chroma" + ] + } + }, + "tools": [ + { + "name": "create_document", + "description": "Create a new document in the Chroma vector database", + "inputSchema": { + "type": "object", + "properties": { + "document_id": { + "type": "string" + }, + "content": { + "type": "string" + }, + "metadata": { + "type": "object", + "additionalProperties": true + } + }, + "required": [ + "document_id", + "content" + ] + } + }, + { + "name": "read_document", + "description": "Retrieve a document from the Chroma vector database by its ID", + "inputSchema": { + "type": "object", + "properties": { + "document_id": { + "type": "string" + } + }, + "required": [ + "document_id" + ] + } + }, + { + "name": "update_document", + "description": "Update an existing document in the Chroma vector database", + "inputSchema": { + "type": "object", + "properties": { + "document_id": { + "type": "string" + }, + "content": { + "type": "string" + }, + "metadata": { + "type": "object", + "additionalProperties": true + } + }, + "required": [ + "document_id", + "content" + ] + } + }, + { + "name": "delete_document", + "description": "Delete a document from the Chroma vector database by its ID", + "inputSchema": { + "type": "object", + "properties": { + "document_id": { + "type": "string" + } + }, + "required": [ + "document_id" + ] + } + }, + { + "name": "list_documents", + "description": "List all documents stored in the Chroma vector database with pagination", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "minimum": 1, + "default": 10 + }, + "offset": { + "type": "integer", + "minimum": 0, + "default": 0 + } + } + } + }, + { + "name": "search_similar", + 
"description": "Search for semantically similar documents in the Chroma vector database", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string" + }, + "num_results": { + "type": "integer", + "minimum": 1, + "default": 5 + }, + "metadata_filter": { + "type": "object", + "additionalProperties": true + }, + "content_filter": { + "type": "string" + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "fetch-mcp": { + "name": "fetch-mcp", + "display_name": "Fetch", + "description": "A server that flexibly fetches HTML, JSON, Markdown, or plaintext.", + "repository": { + "type": "git", + "url": "https://github.com/zcaceres/fetch-mcp" + }, + "homepage": "https://github.com/zcaceres/fetch-mcp", + "author": { + "name": "zcaceres" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "fetch", + "web", + "api", + "html", + "json", + "markdown", + "plain text" + ], + "examples": [ + { + "title": "Fetch HTML", + "description": "Fetch a website and return the content as HTML", + "prompt": "fetch_html(url: string, headers?: object) -> string" + }, + { + "title": "Fetch JSON", + "description": "Fetch a JSON file from a URL", + "prompt": "fetch_json(url: string, headers?: object) -> object" + }, + { + "title": "Fetch Plain Text", + "description": "Fetch a website and return the content as plain text", + "prompt": "fetch_txt(url: string, headers?: object) -> string" + }, + { + "title": "Fetch Markdown", + "description": "Fetch a website and return the content as Markdown", + "prompt": "fetch_markdown(url: string, headers?: object) -> string" + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/zcaceres/fetch-mcp" + ] + } + }, + "arguments": { + "url": { + "description": "URL of the website to fetch", + "required": true, + "example": "https://example.com" + }, + "headers": { + "description": "Custom headers to include in the request", + "required": false, 
+ "example": "{\"Authorization\": \"Bearer token\"}" + } + }, + "tools": [ + { + "name": "fetch_html", + "description": "Fetch a website and return the content as HTML", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL of the website to fetch" + }, + "headers": { + "type": "object", + "description": "Optional headers to include in the request" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "fetch_markdown", + "description": "Fetch a website and return the content as Markdown", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL of the website to fetch" + }, + "headers": { + "type": "object", + "description": "Optional headers to include in the request" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "fetch_txt", + "description": "Fetch a website, return the content as plain text (no HTML)", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL of the website to fetch" + }, + "headers": { + "type": "object", + "description": "Optional headers to include in the request" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "fetch_json", + "description": "Fetch a JSON file from a URL", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL of the JSON to fetch" + }, + "headers": { + "type": "object", + "description": "Optional headers to include in the request" + } + }, + "required": [ + "url" + ] + } + } + ] + }, + "kubernetes": { + "name": "kubernetes", + "display_name": "Kubernetes", + "description": "Connect to Kubernetes cluster and manage pods, deployments, and services.", + "repository": { + "type": "git", + "url": "https://github.com/Flux159/mcp-server-kubernetes" + }, + "homepage": "https://github.com/Flux159/mcp-server-kubernetes", + "author": { + "name": "Flux159" + }, + "license": "[NOT FOUND]", + 
"categories": [ + "Dev Tools" + ], + "tags": [ + "kubernetes", + "server", + "management" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "mcp-server-kubernetes" + ] + } + }, + "tools": [ + { + "name": "cleanup", + "description": "Cleanup all managed resources", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "create_deployment", + "description": "Create a new Kubernetes deployment", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "template": { + "type": "string", + "enum": [ + "ubuntu", + "nginx", + "busybox", + "alpine", + "custom" + ] + }, + "replicas": { + "type": "number", + "default": 1 + }, + "ports": { + "type": "array", + "items": { + "type": "number" + }, + "optional": true + }, + "customConfig": { + "type": "object", + "optional": true, + "properties": { + "image": { + "type": "string" + }, + "command": { + "type": "array", + "items": { + "type": "string" + } + }, + "args": { + "type": "array", + "items": { + "type": "string" + } + }, + "ports": { + "type": "array", + "items": { + "type": "object", + "properties": { + "containerPort": { + "type": "number" + }, + "name": { + "type": "string" + }, + "protocol": { + "type": "string" + } + } + } + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "requests": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "env": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "type": "object" + } + } + } + }, + "volumeMounts": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "mountPath": { + "type": "string" + }, + "readOnly": { + "type": "boolean" 
+ } + } + } + } + } + } + }, + "required": [ + "name", + "namespace", + "template" + ] + } + }, + { + "name": "create_namespace", + "description": "Create a new Kubernetes namespace", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "create_pod", + "description": "Create a new Kubernetes pod", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "template": { + "type": "string", + "enum": [ + "ubuntu", + "nginx", + "busybox", + "alpine", + "custom" + ] + }, + "command": { + "type": "array", + "items": { + "type": "string" + }, + "optional": true + }, + "customConfig": { + "type": "object", + "optional": true, + "properties": { + "image": { + "type": "string" + }, + "command": { + "type": "array", + "items": { + "type": "string" + } + }, + "args": { + "type": "array", + "items": { + "type": "string" + } + }, + "ports": { + "type": "array", + "items": { + "type": "object", + "properties": { + "containerPort": { + "type": "number" + }, + "name": { + "type": "string" + }, + "protocol": { + "type": "string" + } + } + } + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "requests": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "env": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "valueFrom": { + "type": "object" + } + } + } + }, + "volumeMounts": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "mountPath": { + "type": "string" + }, + "readOnly": { + "type": "boolean" + } + } + } + } + } + } + }, + "required": [ + "name", + "namespace", + "template" + ] + } + }, + { + "name": 
"create_cronjob", + "description": "Create a new Kubernetes CronJob", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "schedule": { + "type": "string" + }, + "image": { + "type": "string" + }, + "command": { + "type": "array", + "items": { + "type": "string" + }, + "optional": true + }, + "suspend": { + "type": "boolean", + "optional": true + } + }, + "required": [ + "name", + "namespace", + "schedule", + "image" + ] + } + }, + { + "name": "delete_pod", + "description": "Delete a Kubernetes pod", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "ignoreNotFound": { + "type": "boolean", + "default": false + } + }, + "required": [ + "name", + "namespace" + ] + } + }, + { + "name": "describe_cronjob", + "description": "Get detailed information about a Kubernetes CronJob including recent job history", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string", + "default": "default" + } + }, + "required": [ + "name", + "namespace" + ] + } + }, + { + "name": "describe_pod", + "description": "Describe a Kubernetes pod (read details like status, containers, etc.)", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + }, + "required": [ + "name", + "namespace" + ] + } + }, + { + "name": "describe_deployment", + "description": "Get details about a Kubernetes deployment", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + }, + "required": [ + "name", + "namespace" + ] + } + }, + { + "name": "explain_resource", + "description": "Get documentation for a Kubernetes resource or field", + "inputSchema": { + "type": "object", + "properties": { + "resource": { + "type": "string", 
+ "description": "Resource name or field path (e.g. 'pods' or 'pods.spec.containers')" + }, + "apiVersion": { + "type": "string", + "description": "API version to use (e.g. 'apps/v1')" + }, + "recursive": { + "type": "boolean", + "description": "Print the fields of fields recursively", + "default": false + }, + "output": { + "type": "string", + "description": "Output format (plaintext or plaintext-openapiv2)", + "enum": [ + "plaintext", + "plaintext-openapiv2" + ], + "default": "plaintext" + } + }, + "required": [ + "resource" + ] + } + }, + { + "name": "get_events", + "description": "Get Kubernetes events from the cluster", + "inputSchema": { + "type": "object", + "properties": { + "namespace": { + "type": "string", + "description": "Namespace to get events from. If not specified, gets events from all namespaces" + }, + "fieldSelector": { + "type": "string", + "description": "Field selector to filter events" + } + }, + "required": [] + } + }, + { + "name": "get_job_logs", + "description": "Get logs from Pods created by a specific Job", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the Job to get logs from" + }, + "namespace": { + "type": "string", + "default": "default" + }, + "tail": { + "type": "number", + "description": "Number of lines to return from the end of the logs", + "optional": true + }, + "timestamps": { + "type": "boolean", + "description": "Include timestamps in the logs", + "optional": true + } + }, + "required": [ + "name", + "namespace" + ] + } + }, + { + "name": "get_logs", + "description": "Get logs from pods, deployments, jobs, or resources matching a label selector", + "inputSchema": { + "type": "object", + "properties": { + "resourceType": { + "type": "string", + "enum": [ + "pod", + "deployment", + "job" + ], + "description": "Type of resource to get logs from" + }, + "name": { + "type": "string", + "description": "Name of the resource" + }, + "namespace": { + "type": 
"string", + "description": "Namespace of the resource", + "default": "default" + }, + "labelSelector": { + "type": "string", + "description": "Label selector to filter resources", + "optional": true + }, + "container": { + "type": "string", + "description": "Container name (required when pod has multiple containers)", + "optional": true + }, + "tail": { + "type": "number", + "description": "Number of lines to show from end of logs", + "optional": true + }, + "since": { + "type": "number", + "description": "Get logs since relative time in seconds", + "optional": true + }, + "timestamps": { + "type": "boolean", + "description": "Include timestamps in logs", + "default": false + } + }, + "required": [ + "resourceType" + ] + } + }, + { + "name": "install_helm_chart", + "description": "Install a Helm chart", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Release name" + }, + "chart": { + "type": "string", + "description": "Chart name" + }, + "repo": { + "type": "string", + "description": "Chart repository URL" + }, + "namespace": { + "type": "string", + "description": "Kubernetes namespace" + }, + "values": { + "type": "object", + "description": "Chart values", + "additionalProperties": true + } + }, + "required": [ + "name", + "chart", + "repo", + "namespace" + ] + } + }, + { + "name": "list_api_resources", + "description": "List the API resources available in the cluster", + "inputSchema": { + "type": "object", + "properties": { + "apiGroup": { + "type": "string", + "description": "API group to filter by" + }, + "namespaced": { + "type": "boolean", + "description": "If true, only show namespaced resources" + }, + "verbs": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of verbs to filter by" + }, + "output": { + "type": "string", + "description": "Output format (wide, name, or no-headers)", + "enum": [ + "wide", + "name", + "no-headers" + ], + "default": "wide" + } + } + } + 
}, + { + "name": "list_cronjobs", + "description": "List CronJobs in a namespace", + "inputSchema": { + "type": "object", + "properties": { + "namespace": { + "type": "string", + "default": "default" + } + }, + "required": [ + "namespace" + ] + } + }, + { + "name": "list_deployments", + "description": "List deployments in a namespace", + "inputSchema": { + "type": "object", + "properties": { + "namespace": { + "type": "string", + "default": "default" + } + }, + "required": [ + "namespace" + ] + } + }, + { + "name": "list_jobs", + "description": "List Jobs in a namespace, optionally filtered by a CronJob parent", + "inputSchema": { + "type": "object", + "properties": { + "namespace": { + "type": "string", + "default": "default" + }, + "cronJobName": { + "type": "string", + "description": "Optional: Filter jobs created by a specific CronJob", + "optional": true + } + }, + "required": [ + "namespace" + ] + } + }, + { + "name": "list_namespaces", + "description": "List all namespaces", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list_nodes", + "description": "List all nodes in the cluster", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list_pods", + "description": "List pods in a namespace", + "inputSchema": { + "type": "object", + "properties": { + "namespace": { + "type": "string", + "default": "default" + } + }, + "required": [ + "namespace" + ] + } + }, + { + "name": "list_services", + "description": "List services in a namespace", + "inputSchema": { + "type": "object", + "properties": { + "namespace": { + "type": "string", + "default": "default" + } + }, + "required": [ + "namespace" + ] + } + }, + { + "name": "uninstall_helm_chart", + "description": "Uninstall a Helm release", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Release name" + }, + "namespace": { + "type": "string", + "description": "Kubernetes namespace" + } + }, + 
"required": [ + "name", + "namespace" + ] + } + }, + { + "name": "upgrade_helm_chart", + "description": "Upgrade a Helm release", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Release name" + }, + "chart": { + "type": "string", + "description": "Chart name" + }, + "repo": { + "type": "string", + "description": "Chart repository URL" + }, + "namespace": { + "type": "string", + "description": "Kubernetes namespace" + }, + "values": { + "type": "object", + "description": "Chart values", + "additionalProperties": true + } + }, + "required": [ + "name", + "chart", + "repo", + "namespace" + ] + } + }, + { + "name": "port_forward", + "description": "Forward a local port to a port on a Kubernetes resource", + "inputSchema": { + "type": "object", + "properties": { + "resourceType": { + "type": "string" + }, + "resourceName": { + "type": "string" + }, + "localPort": { + "type": "number" + }, + "targetPort": { + "type": "number" + }, + "namespace": { + "type": "string" + } + }, + "required": [ + "resourceType", + "resourceName", + "localPort", + "targetPort" + ] + } + }, + { + "name": "stop_port_forward", + "description": "Stop a port-forward process", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "scale_deployment", + "description": "Scale a Kubernetes deployment", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "replicas": { + "type": "number" + } + }, + "required": [ + "name", + "namespace", + "replicas" + ] + } + } + ] + }, + "fibery-mcp-server": { + "display_name": "Fibery MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/Fibery-inc/fibery-mcp-server" + }, + "homepage": "[NOT GIVEN]", + "author": { + "name": "Fibery-inc" + }, + "license": "[NOT GIVEN]", + "tags": [ + "fibery", + "mcp", + "llm" + ], + 
"arguments": { + "fibery-host": { + "description": "Your Fibery domain (e.g., your-domain.fibery.io)", + "required": true, + "example": "your-domain.fibery.io" + }, + "fibery-api-token": { + "description": "Your Fibery API token", + "required": true, + "example": "your-api-token" + } + }, + "installations": { + "uv": { + "type": "uvx", + "command": "uv", + "args": [ + "tool", + "run", + "fibery-mcp-server", + "--fibery-host", + "your-domain.fibery.io", + "--fibery-api-token", + "your-api-token" + ], + "package": "fibery-mcp-server", + "recommended": true + } + }, + "examples": [ + { + "title": "List Databases", + "description": "Retrieves a list of all databases available in your Fibery workspace.", + "prompt": "What databases do I have in my Fibery workspace?" + }, + { + "title": "Describe Database", + "description": "Provides a detailed breakdown of a specific database's structure, showing all fields with their titles, names, and types.", + "prompt": "Tell me about the structure of my Tasks database" + }, + { + "title": "Query Database", + "description": "Offers powerful, flexible access to your Fibery data through the Fibery API.", + "prompt": "Find all high priority tasks that are due this week" + }, + { + "title": "Create Entity", + "description": "Creates new entities in your Fibery workspace with specified field values.", + "prompt": "Create a new task called 'Review project proposal' with high priority" + }, + { + "title": "Update Entity", + "description": "Updates existing entities in your Fibery workspace with new field values.", + "prompt": "Change the status of task 'Review project proposal' to 'In Progress'" + } + ], + "name": "fibery-mcp-server", + "description": "This MCP (Model Context Protocol) server provides integration between Fibery and any LLM provider supporting the MCP protocol (e.g., Claude for Desktop), allowing you to interact with your Fibery workspace using natural language.", + "categories": [ + "Productivity" + ], + "tools": [ + { + 
"name": "current_date", + "description": "Get today's date in ISO 8601 format (YYYY-mm-dd.HH:MM:SS.000Z)", + "inputSchema": { + "type": "object" + } + }, + { + "name": "list_databases", + "description": "Get list of all databases (their names) in user's Fibery workspace (schema)", + "inputSchema": { + "type": "object" + } + }, + { + "name": "describe_database", + "description": "Get list of all fields (in format of 'Title [name]: type') in the selected Fibery database and for all related databases.", + "inputSchema": { + "type": "object", + "properties": { + "database_name": { + "type": "string", + "description": "Database name as defined in Fibery schema" + } + }, + "required": [ + "database_name" + ] + } + }, + { + "name": "query_database", + "description": "Run any Fibery API command. This gives tremendous flexibility, but requires a bit of experience with the low-level Fibery API. In case query succeeded, return value contains a list of records with fields you specified in select. If request failed, will return detailed error message.\nExamples (note, that these databases are non-existent, use databases only from user's schema!):\nQuery: What newly created Features do we have for the past 2 months?\nTool use:\n{\n \"q_from\": \"Dev/Feature\",\n \"q_select\": {\n \"Name\": [\"Dev/Name\"],\n \"Public Id\": [\"fibery/public-id\"],\n \"Creation Date\": [\"fibery/creation-date\"]\n },\n \"q_where\": [\">\", [\"fibery/creation-date\"], \"$twoMonthsAgo\"],\n \"q_order_by\": {\"fibery/creation-date\": \"q/desc\"},\n \"q_limit\": 100,\n \"q_offset\": 0,\n \"q_params\": {\n $twoMonthsAgo: \"2025-01-16T00:00:00.000Z\"\n }\n}\n\nQuery: What Admin Tasks for the past week are Approval or Done?\nTool use:\n{\n \"q_from\": \"Administrative/Admin Task\",\n \"q_select\": {\n \"Name\": [\"Administrative/Name\"],\n \"Public Id\": [\"fibery/public-id\"],\n \"Creation Date\": [\"fibery/creation-date\"],\n \"State\": [\"workflow/state\", \"enum/name\"]\n },\n \"q_where\": [\n 
\"q/and\", # satisfy time AND states condition\n [\">\", [\"fibery/creation-date\"], \"$oneWeekAgo\"],\n [\n \"q/or\", # nested or, since entity can be in either of these states\n [\"=\", [\"workflow/state\", \"enum/name\"], \"$state1\"],\n [\"=\", [\"workflow/state\", \"enum/name\"], \"$state2\"]\n ]\n ],\n \"q_order_by\": {\"fibery/creation-date\": \"q/desc\"},\n \"q_limit\": 100,\n \"q_offset\": 0,\n \"q_params\": { # notice that parameters used in \"where\" are always passed in params!\n $oneWeekAgo: \"2025-03-07T00:00:00.000Z\",\n $state1: \"Approval\",\n $state2: \"Done\"\n }\n}\n\nQuery: What Admin Tasks for the past week are Approval or Done?\nTool use:\n{\n \"q_from\": \"Administrative/Admin Task\",\n \"q_select\": {\n \"State\": [\"workflow/state\", \"enum/name\"],\n \"Public Id\": [\"fibery/public-id\"],\n \"Creation Date\": [\"fibery/creation-date\"],\n \"Modification Date\": [\"fibery/modification-date\"],\n \"Deadline\": [\"Administrative/Deadline\"],\n \"Group\": [\"Administrative/Group\", \"Administrative/name\"],\n \"Name\": [\"Administrative/Name\"],\n \"Priority\": [\"Administrative/Priority_Administrative/Admin Task\", \"enum/name\"]\n },\n \"q_where\": [\"!=\", [\"workflow/state\", \"workflow/Final\"], \"$stateType\"], # Administrative/Admin Task is not \"Finished\" yet\n \"q_order_by\": {\"fibery/creation-date\": \"q/desc\"},\n \"q_limit\": 100,\n \"q_offset\": 0,\n \"q_params\": {\n \"$stateType\": true\n }\n}\n\nQuery: Summarize acc contacts with public id 1.\nTool use:\n{\n \"q_from\": \"Accounting/Acc Contacts\",\n \"q_select\": {\n \"Name\": [\"Accounting/Name\"],\n \"Public Id\": [\"fibery/public-id\"],\n \"Creation Date\": [\"fibery/creation-date\"],\n \"Description\": [\"Accounting/Description\"]\n },\n \"q_where\": [\"=\", [\"fibery/public-id\"], \"$publicId\"],\n \"q_limit\": 1,\n \"q_params\": {\n $publicId: \"1\",\n }\n}", + "inputSchema": { + "type": "object", + "properties": { + "q_from": { + "type": "string", + "description": 
"Specifies the entity type in \"Space/Type\" format (e.g., \"Product Management/feature\", \"Product Management/Insight\")" + }, + "q_select": { + "type": "object", + "description": "Defines what fields to retrieve. Can include:\n - Primitive fields using format {\"AliasName\": \"FieldName\"} (i.e. {\"Name\": \"Product Management/Name\"})\n - Related entity fields using format {\"AliasName\": [\"Related entity\", \"related entity field\"]} (i.e. {\"Secret\": [\"Product Management/Description\", \"Collaboration~Documents/secret\"]}). Careful, does not work with 1-* connection!\nTo work with 1-* relationships, you can use sub-querying: {\"AliasName\": {\"q/from\": \"Related type\", \"q/select\": {\"AliasName 2\": \"fibery/id\"}, \"q/limit\": 50}}\nAliasName can be of any arbitrary value." + }, + "q_where": { + "type": "object", + "description": "Filter conditions in format [operator, [field_path], value] or [\"q/and\"|\"q/or\", ...conditions]. Common usages:\n- Simple comparison: [\"=\", [\"field\", \"path\"], \"$param\"]. You cannot pass value of $param directly in where clause. Use params object instead. Pay really close attention to it as it is not common practice, but that's how it works in our case!\n- Logical combinations: [\"q/and\", [\"<\", [\"field1\"], \"$param1\"], [\"=\", [\"field2\"], \"$param2\"]]\n- Available operators: =, !=, <, <=, >, >=, q/contains, q/not-contains, q/in, q/not-in" + }, + "q_order_by": { + "type": "object", + "description": "List of sorting criteria in format {\"field1\": \"q/asc\", \"field2\": \"q/desc\"}" + }, + "q_limit": { + "type": "integer", + "description": "Number of results per page (defaults to 50). Maximum allowed value is 1000" + }, + "q_offset": { + "type": "integer", + "description": "Number of results to skip. Mainly used in combination with limit and orderBy for pagination." + }, + "q_params": { + "type": "object", + "description": "Dictionary of parameter values referenced in where using \"$param\" syntax. 
For example, {$fromDate: \"2025-01-01\"}" + } + }, + "required": [ + "q_from", + "q_select" + ] + } + }, + { + "name": "create_entity", + "description": "Create Fibery entity with specified fields.\nExamples (note, that these databases are non-existent, use databases only from user's schema!):\nQuery: Create a feature\nTool use:\n{\n \"database\": \"Product Management/Feature\",\n \"entity\": {\n \"Product Management/Name\": \"New Feature\",\n \"Product Management/Description\": \"Description of the new feature\",\n \"workflow/state\": \"To Do\"\n }\n}\nIn case of successful execution, you will get a link to created entity. Make sure to give that link to the user.", + "inputSchema": { + "type": "object", + "properties": { + "database": { + "type": "string", + "description": "Fibery Database where to create an entity." + }, + "entity": { + "type": "object", + "description": "Dictionary that defines what fields to set in format {\"FieldName\": value} (i.e. {\"Product Management/Name\": \"My new entity\"})." + } + }, + "required": [ + "database", + "entity" + ] + } + }, + { + "name": "update_entity", + "description": "Update Fibery entity with specified fields.\nExamples (note, that these databases are non-existent, use databases only from user's schema!):\nQuery: Update a feature we talked about\nTool use:\n{\n \"database\": \"Product Management/Feature\",\n \"entity\": {\n \"fibery/id\": \"12345678-1234-5678-1234-567812345678\",\n \"Product Management/Name\": \"New Feature 2\",\n \"Product Management/Description\": {\"append\": true, \"content\": \"Notes: some notes\"},\n \"workflow/state\": \"In Progress\"\n }\n}\nIn case of successful execution, you will get a link to updated entity. Make sure to give that link to the user.", + "inputSchema": { + "type": "object", + "properties": { + "database": { + "type": "string", + "description": "Fibery Database where to update an entity." 
+ }, + "entity": { + "type": "object", + "description": "Dictionary that defines what fields to set in format {\"FieldName\": value} (i.e. {\"Product Management/Name\": \"My new entity\"}).\nException are document fields. For them you must specify append (boolean, whether to append to current content) and content itself: {\"Product Management/Description\": {\"append\": true, \"content\": \"Additional info\"}}" + } + }, + "required": [ + "database", + "entity" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "unifai-mcp-server": { + "display_name": "UnifAI MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/unifai-network/unifai-mcp-server" + }, + "license": "MIT", + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "-p", + "unifai-sdk", + "unifai-tools-mcp" + ], + "env": { + "UNIFAI_AGENT_API_KEY": "${UNIFAI_AGENT_API_KEY}" + }, + "description": "Available in UnifAI Node SDK" + }, + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "unifai-sdk", + "unifai-tools-mcp" + ], + "env": { + "UNIFAI_AGENT_API_KEY": "${UNIFAI_AGENT_API_KEY}" + }, + "description": "Available in UnifAI Python SDK" + } + }, + "homepage": "https://github.com/unifai-network/unifai-mcp-server", + "author": { + "name": "unifai-network" + }, + "arguments": { + "UNIFAI_AGENT_API_KEY": { + "description": "UnifAI Agent API Key for authentication", + "required": true, + "example": "" + } + }, + "tags": [ + "unifai", + "mcp" + ], + "name": "unifai-mcp-server", + "description": "Dynamically search and call tools using UnifAI Network", + "categories": [ + "MCP Tools" + ], + "is_official": true, + "tools": [ + { + "name": "search_services", + "description": "Search for tools. The tools cover a wide range of domains include data source, API, SDK, etc. Try searching whenever you need to use a tool. 
Returned actions should ONLY be used in invoke_service.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The query to search for tools, you can describe what you want to do or what tools you want to use" + }, + "limit": { + "type": "number", + "description": "The maximum number of tools to return, must be between 1 and 100, default is 10, recommend at least 10" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "invoke_service", + "description": "Call a tool returned by search_services", + "inputSchema": { + "type": "object", + "properties": { + "action": { + "type": "string", + "description": "The exact action you want to call in the search_services result." + }, + "payload": { + "type": "string", + "description": "Action payload, based on the payload schema in the search_services result. You can pass either the json object directly or json encoded string of the object." + }, + "payment": { + "type": "number", + "description": "Amount to authorize in USD. Positive number means you will be charged no more than this amount, negative number means you are requesting to get paid for at least this amount. Only include this field if the action you are calling includes payment information." 
+ } + }, + "required": [ + "action", + "payload" + ] + } + } + ] + }, + "contentful-mcp": { + "name": "contentful-mcp", + "display_name": "Contentful Management", + "description": "Read, update, delete, publish content in your [Contentful](https://contentful.com/) space(s) from this MCP Server.", + "repository": { + "type": "git", + "url": "https://github.com/ivo-toby/contentful-mcp" + }, + "homepage": "https://github.com/ivo-toby/contentful-mcp", + "author": { + "name": "ivo-toby" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "Contentful", + "Management API", + "CRUD Operations" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@ivotoby/contentful-management-mcp-server" + ], + "env": { + "CONTENTFUL_MANAGEMENT_ACCESS_TOKEN": "${CONTENTFUL_MANAGEMENT_ACCESS_TOKEN}" + } + } + }, + "arguments": { + "CONTENTFUL_MANAGEMENT_ACCESS_TOKEN": { + "description": "Your Content Management API token for accessing Contentful services.", + "required": true, + "example": "" + } + }, + "tools": [ + { + "name": "search_entries", + "description": "Search for entries using query parameters. Returns a maximum of 3 items per request. Use skip parameter to paginate through results.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "object", + "description": "Query parameters for searching entries", + "properties": { + "content_type": { + "type": "string" + }, + "select": { + "type": "string" + }, + "limit": { + "type": "number", + "default": 3, + "maximum": 3, + "description": "Maximum number of items to return (max: 3)" + }, + "skip": { + "type": "number", + "default": 0, + "description": "Number of items to skip for pagination" + }, + "order": { + "type": "string" + }, + "query": { + "type": "string" + } + }, + "required": [ + "limit", + "skip" + ] + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. 
This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "query", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "create_entry", + "description": "Create a new entry in Contentful, before executing this function, you need to know the contentTypeId (not the content type NAME) and the fields of that contentType, you can get the fields definition by using the GET_CONTENT_TYPE tool. ", + "inputSchema": { + "type": "object", + "properties": { + "contentTypeId": { + "type": "string", + "description": "The ID of the content type for the new entry" + }, + "fields": { + "type": "object", + "description": "The fields of the entry" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "contentTypeId", + "fields", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "get_entry", + "description": "Retrieve an existing entry", + "inputSchema": { + "type": "object", + "properties": { + "entryId": { + "type": "string" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." 
+ }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "entryId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "update_entry", + "description": "Update an existing entry, very important: always send all field values and all values related to locales, also the fields values that have not been updated", + "inputSchema": { + "type": "object", + "properties": { + "entryId": { + "type": "string" + }, + "fields": { + "type": "object" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "entryId", + "fields", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "delete_entry", + "description": "Delete an entry", + "inputSchema": { + "type": "object", + "properties": { + "entryId": { + "type": "string" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "entryId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "publish_entry", + "description": "Publish an entry or multiple entries. Accepts either a single entryId (string) or an array of entryIds (up to 100 entries). For a single entry, it uses the standard publish operation. 
For multiple entries, it automatically uses bulk publishing.", + "inputSchema": { + "type": "object", + "properties": { + "entryId": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "maxItems": 100, + "description": "Array of entry IDs to publish (max: 100)" + } + ], + "description": "ID of the entry to publish, or an array of entry IDs (max: 100)" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "entryId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "unpublish_entry", + "description": "Unpublish an entry or multiple entries. Accepts either a single entryId (string) or an array of entryIds (up to 100 entries). For a single entry, it uses the standard unpublish operation. For multiple entries, it automatically uses bulk unpublishing.", + "inputSchema": { + "type": "object", + "properties": { + "entryId": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "maxItems": 100, + "description": "Array of entry IDs to unpublish (max: 100)" + } + ], + "description": "ID of the entry to unpublish, or an array of entry IDs (max: 100)" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "entryId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "list_assets", + "description": "List assets in a space. 
Returns a maximum of 3 items per request. Use skip parameter to paginate through results.", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "number", + "default": 3, + "maximum": 3, + "description": "Maximum number of items to return (max: 3)" + }, + "skip": { + "type": "number", + "default": 0, + "description": "Number of items to skip for pagination" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "limit", + "skip", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "upload_asset", + "description": "Upload a new asset", + "inputSchema": { + "type": "object", + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "file": { + "type": "object", + "properties": { + "upload": { + "type": "string" + }, + "fileName": { + "type": "string" + }, + "contentType": { + "type": "string" + } + }, + "required": [ + "upload", + "fileName", + "contentType" + ] + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "title", + "file", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "get_asset", + "description": "Retrieve an asset", + "inputSchema": { + "type": "object", + "properties": { + "assetId": { + "type": "string" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. 
This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "assetId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "update_asset", + "description": "Update an asset", + "inputSchema": { + "type": "object", + "properties": { + "assetId": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "file": { + "type": "object", + "properties": { + "url": { + "type": "string" + }, + "fileName": { + "type": "string" + }, + "contentType": { + "type": "string" + } + }, + "required": [ + "url", + "fileName", + "contentType" + ] + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "assetId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "delete_asset", + "description": "Delete an asset", + "inputSchema": { + "type": "object", + "properties": { + "assetId": { + "type": "string" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." 
+ }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "assetId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "publish_asset", + "description": "Publish an asset", + "inputSchema": { + "type": "object", + "properties": { + "assetId": { + "type": "string" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "assetId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "unpublish_asset", + "description": "Unpublish an asset", + "inputSchema": { + "type": "object", + "properties": { + "assetId": { + "type": "string" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "assetId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "list_content_types", + "description": "List content types in a space. Returns a maximum of 10 items per request. Use skip parameter to paginate through results.", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "number", + "default": 10, + "maximum": 20, + "description": "Maximum number of items to return (max: 20)" + }, + "skip": { + "type": "number", + "default": 0, + "description": "Number of items to skip for pagination" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. 
This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "limit", + "skip", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "get_content_type", + "description": "Get details of a specific content type", + "inputSchema": { + "type": "object", + "properties": { + "contentTypeId": { + "type": "string" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "contentTypeId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "create_content_type", + "description": "Create a new content type", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "fields": { + "type": "array", + "description": "Array of field definitions for the content type", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the field" + }, + "name": { + "type": "string", + "description": "Display name of the field" + }, + "type": { + "type": "string", + "description": "Type of the field (Text, Number, Date, Location, Media, Boolean, JSON, Link, Array, etc)", + "enum": [ + "Symbol", + "Text", + "Integer", + "Number", + "Date", + "Location", + "Object", + "Boolean", + "Link", + "Array" + ] + }, + "required": { + "type": "boolean", + "description": "Whether this field is required", + "default": false + }, + "localized": { + "type": "boolean", + "description": "Whether this field can be localized", + "default": false + }, + "linkType": { + "type": "string", + "description": 
"Required for Link fields. Specifies what type of resource this field links to", + "enum": [ + "Entry", + "Asset" + ] + }, + "items": { + "type": "object", + "description": "Required for Array fields. Specifies the type of items in the array", + "properties": { + "type": { + "type": "string", + "enum": [ + "Symbol", + "Link" + ] + }, + "linkType": { + "type": "string", + "enum": [ + "Entry", + "Asset" + ] + }, + "validations": { + "type": "array", + "items": { + "type": "object" + } + } + } + }, + "validations": { + "type": "array", + "description": "Array of validation rules for the field", + "items": { + "type": "object" + } + } + }, + "required": [ + "id", + "name", + "type" + ] + } + }, + "description": { + "type": "string" + }, + "displayField": { + "type": "string" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "name", + "fields", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "update_content_type", + "description": "Update an existing content type", + "inputSchema": { + "type": "object", + "properties": { + "contentTypeId": { + "type": "string" + }, + "name": { + "type": "string" + }, + "fields": { + "type": "array", + "items": { + "type": "object" + } + }, + "description": { + "type": "string" + }, + "displayField": { + "type": "string" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." 
+ }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "contentTypeId", + "name", + "fields", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "delete_content_type", + "description": "Delete a content type", + "inputSchema": { + "type": "object", + "properties": { + "contentTypeId": { + "type": "string" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "contentTypeId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "publish_content_type", + "description": "Publish a content type", + "inputSchema": { + "type": "object", + "properties": { + "contentTypeId": { + "type": "string" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." 
+ }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "contentTypeId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "list_spaces", + "description": "List all available spaces", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get_space", + "description": "Get details of a space", + "inputSchema": { + "type": "object", + "properties": { + "spaceId": { + "type": "string" + } + }, + "required": [ + "spaceId" + ] + } + }, + { + "name": "list_environments", + "description": "List all environments in a space", + "inputSchema": { + "type": "object", + "properties": { + "spaceId": { + "type": "string" + } + }, + "required": [ + "spaceId" + ] + } + }, + { + "name": "create_environment", + "description": "Create a new environment", + "inputSchema": { + "type": "object", + "properties": { + "spaceId": { + "type": "string" + }, + "environmentId": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "spaceId", + "environmentId", + "name" + ] + } + }, + { + "name": "delete_environment", + "description": "Delete an environment", + "inputSchema": { + "type": "object", + "properties": { + "spaceId": { + "type": "string" + }, + "environmentId": { + "type": "string" + } + }, + "required": [ + "spaceId", + "environmentId" + ] + } + }, + { + "name": "bulk_validate", + "description": "Validate multiple entries at once", + "inputSchema": { + "type": "object", + "properties": { + "entryIds": { + "type": "array", + "description": "Array of entry IDs to validate", + "items": { + "type": "string" + } + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." 
+ }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "entryIds", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "list_ai_actions", + "description": "List all AI Actions in a space", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "number", + "default": 100, + "description": "Maximum number of AI Actions to return" + }, + "skip": { + "type": "number", + "default": 0, + "description": "Number of AI Actions to skip for pagination" + }, + "status": { + "type": "string", + "enum": [ + "all", + "published" + ], + "description": "Filter AI Actions by status" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "spaceId", + "environmentId" + ] + } + }, + { + "name": "get_ai_action", + "description": "Get a specific AI Action by ID", + "inputSchema": { + "type": "object", + "properties": { + "aiActionId": { + "type": "string", + "description": "The ID of the AI Action to retrieve" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." 
+ }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "aiActionId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "create_ai_action", + "description": "Create a new AI Action", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name of the AI Action" + }, + "description": { + "type": "string", + "description": "The description of the AI Action" + }, + "instruction": { + "type": "object", + "description": "The instruction object containing the template and variables", + "properties": { + "template": { + "type": "string", + "description": "The prompt template with variable placeholders" + }, + "variables": { + "type": "array", + "description": "Array of variable definitions", + "items": { + "type": "object" + } + }, + "conditions": { + "type": "array", + "description": "Optional array of conditions for the template", + "items": { + "type": "object" + } + } + }, + "required": [ + "template", + "variables" + ] + }, + "configuration": { + "type": "object", + "description": "The model configuration", + "properties": { + "modelType": { + "type": "string", + "description": "The type of model to use (e.g., gpt-4)" + }, + "modelTemperature": { + "type": "number", + "description": "The temperature setting for the model (0.0 to 1.0)", + "minimum": 0, + "maximum": 1 + } + }, + "required": [ + "modelType", + "modelTemperature" + ] + }, + "testCases": { + "type": "array", + "description": "Optional array of test cases for the AI Action", + "items": { + "type": "object" + } + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." 
+ }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "name", + "description", + "instruction", + "configuration", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "update_ai_action", + "description": "Update an existing AI Action", + "inputSchema": { + "type": "object", + "properties": { + "aiActionId": { + "type": "string", + "description": "The ID of the AI Action to update" + }, + "name": { + "type": "string", + "description": "The name of the AI Action" + }, + "description": { + "type": "string", + "description": "The description of the AI Action" + }, + "instruction": { + "type": "object", + "description": "The instruction object containing the template and variables", + "properties": { + "template": { + "type": "string", + "description": "The prompt template with variable placeholders" + }, + "variables": { + "type": "array", + "description": "Array of variable definitions", + "items": { + "type": "object" + } + }, + "conditions": { + "type": "array", + "description": "Optional array of conditions for the template", + "items": { + "type": "object" + } + } + }, + "required": [ + "template", + "variables" + ] + }, + "configuration": { + "type": "object", + "description": "The model configuration", + "properties": { + "modelType": { + "type": "string", + "description": "The type of model to use (e.g., gpt-4)" + }, + "modelTemperature": { + "type": "number", + "description": "The temperature setting for the model (0.0 to 1.0)", + "minimum": 0, + "maximum": 1 + } + }, + "required": [ + "modelType", + "modelTemperature" + ] + }, + "testCases": { + "type": "array", + "description": "Optional array of test cases for the AI Action", + "items": { + "type": "object" + } + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. 
This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "aiActionId", + "name", + "description", + "instruction", + "configuration", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "delete_ai_action", + "description": "Delete an AI Action", + "inputSchema": { + "type": "object", + "properties": { + "aiActionId": { + "type": "string", + "description": "The ID of the AI Action to delete" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "aiActionId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "publish_ai_action", + "description": "Publish an AI Action", + "inputSchema": { + "type": "object", + "properties": { + "aiActionId": { + "type": "string", + "description": "The ID of the AI Action to publish" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." 
+ }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "aiActionId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "unpublish_ai_action", + "description": "Unpublish an AI Action", + "inputSchema": { + "type": "object", + "properties": { + "aiActionId": { + "type": "string", + "description": "The ID of the AI Action to unpublish" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "aiActionId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "invoke_ai_action", + "description": "Invoke an AI Action with variables", + "inputSchema": { + "type": "object", + "properties": { + "aiActionId": { + "type": "string", + "description": "The ID of the AI Action to invoke" + }, + "variables": { + "type": "object", + "description": "Key-value pairs of variable IDs and their values", + "additionalProperties": { + "type": "string" + } + }, + "rawVariables": { + "type": "array", + "description": "Array of raw variable objects (for complex variable types like references)", + "items": { + "type": "object" + } + }, + "outputFormat": { + "type": "string", + "enum": [ + "Markdown", + "RichText", + "PlainText" + ], + "default": "Markdown", + "description": "The format of the output content" + }, + "waitForCompletion": { + "type": "boolean", + "default": true, + "description": "Whether to wait for the AI Action to complete before returning" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." 
+ }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "aiActionId", + "spaceId", + "environmentId" + ] + } + }, + { + "name": "get_ai_action_invocation", + "description": "Get the result of a previous AI Action invocation", + "inputSchema": { + "type": "object", + "properties": { + "aiActionId": { + "type": "string", + "description": "The ID of the AI Action" + }, + "invocationId": { + "type": "string", + "description": "The ID of the specific invocation to retrieve" + }, + "spaceId": { + "type": "string", + "description": "The ID of the Contentful space. This must be the space's ID, not its name, ask for this ID if it's unclear." + }, + "environmentId": { + "type": "string", + "description": "The ID of the environment within the space, by default this will be called Master", + "default": "master" + } + }, + "required": [ + "aiActionId", + "invocationId", + "spaceId", + "environmentId" + ] + } + } + ] + }, + "deepseek-mcp-server": { + "name": "deepseek-mcp-server", + "display_name": "DeepSeek", + "description": "Model Context Protocol server integrating DeepSeek's advanced language models, in addition to [other useful API endpoints](https://github.com/DMontgomery40/deepseek-mcp-server?tab=readme-ov-file#features)", + "repository": { + "type": "git", + "url": "https://github.com/DMontgomery40/deepseek-mcp-server" + }, + "homepage": "https://github.com/DMontgomery40/deepseek-mcp-server", + "author": { + "name": "DMontgomery40" + }, + "license": "MIT", + "categories": [ + "AI Systems" + ], + "tags": [ + "DeepSeek", + "API", + "Language Model" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "deepseek-mcp-server" + ], + "env": { + "DEEPSEEK_API_KEY": "${DEEPSEEK_API_KEY}" + } + } + }, + "arguments": { + "DEEPSEEK_API_KEY": { + "description": "An API key required to authenticate 
requests to the DeepSeek API.", + "required": true, + "example": "your-api-key" + } + }, + "tools": [ + { + "name": "chat_completion", + "inputSchema": { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "messages": { + "type": "array", + "items": { + "type": "object", + "properties": { + "role": { + "type": "string", + "enum": [ + "system", + "user", + "assistant" + ] + }, + "content": { + "type": "string" + } + }, + "required": [ + "role", + "content" + ], + "additionalProperties": false + } + }, + "model": { + "type": "string", + "default": "deepseek-reasoner" + }, + "temperature": { + "type": "number", + "minimum": 0, + "maximum": 2, + "default": 0.7 + }, + "max_tokens": { + "type": "integer", + "exclusiveMinimum": 0, + "default": 8000 + }, + "top_p": { + "type": "number", + "minimum": 0, + "maximum": 1, + "default": 1 + }, + "frequency_penalty": { + "type": "number", + "minimum": -2, + "maximum": 2, + "default": 0.1 + }, + "presence_penalty": { + "type": "number", + "minimum": -2, + "maximum": 2, + "default": 0 + } + } + } + }, + { + "name": "multi_turn_chat", + "inputSchema": { + "type": "object", + "properties": { + "messages": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "object", + "properties": { + "role": { + "type": "string", + "enum": [ + "system", + "user", + "assistant" + ] + }, + "content": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "text" + }, + "text": { + "type": "string" + } + }, + "required": [ + "type", + "text" + ], + "additionalProperties": false + } + }, + "required": [ + "role", + "content" + ], + "additionalProperties": false + } + } + ] + }, + "model": { + "type": "string", + "default": "deepseek-chat" + }, + "temperature": { + "type": "number", + "minimum": 0, + "maximum": 2, + "default": 0.7 + }, + "max_tokens": { + "type": "integer", + "exclusiveMinimum": 0, + "default": 8000 + }, + "top_p": { + "type": "number", + 
"minimum": 0, + "maximum": 1, + "default": 1 + }, + "frequency_penalty": { + "type": "number", + "minimum": -2, + "maximum": 2, + "default": 0.1 + }, + "presence_penalty": { + "type": "number", + "minimum": -2, + "maximum": 2, + "default": 0 + } + }, + "required": [ + "messages" + ] + } + } + ] + }, + "gitlab": { + "name": "gitlab", + "display_name": "GitLab", + "description": "GitLab API, enabling project management", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/gitlab", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "GitLab", + "API" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-gitlab" + ], + "env": { + "GITLAB_PERSONAL_ACCESS_TOKEN": "${GITLAB_PERSONAL_ACCESS_TOKEN}", + "GITLAB_API_URL": "${GITLAB_API_URL}" + } + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "-e", + "GITLAB_PERSONAL_ACCESS_TOKEN", + "-e", + "GITLAB_API_URL", + "mcp/gitlab" + ], + "env": { + "GITLAB_PERSONAL_ACCESS_TOKEN": "${GITLAB_PERSONAL_ACCESS_TOKEN}", + "GITLAB_API_URL": "${GITLAB_API_URL}" + } + } + }, + "arguments": { + "GITLAB_PERSONAL_ACCESS_TOKEN": { + "description": "Your GitLab personal access token", + "required": true + }, + "GITLAB_API_URL": { + "description": "Base URL for GitLab API", + "required": false, + "example": "https://gitlab.com/api/v4" + } + }, + "tools": [ + { + "name": "create_or_update_file", + "description": "Create or update a single file in a GitLab project", + "inputSchema": { + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "Project ID or URL-encoded path" + }, + "file_path": { + "type": "string", + "description": "Path where to create/update the file" + }, + "content": { + "type": 
"string", + "description": "Content of the file" + }, + "commit_message": { + "type": "string", + "description": "Commit message" + }, + "branch": { + "type": "string", + "description": "Branch to create/update the file in" + }, + "previous_path": { + "type": "string", + "description": "Path of the file to move/rename" + } + }, + "required": [ + "project_id", + "file_path", + "content", + "commit_message", + "branch" + ] + } + }, + { + "name": "search_repositories", + "description": "Search for GitLab projects", + "inputSchema": { + "type": "object", + "properties": { + "search": { + "type": "string", + "description": "Search query" + }, + "page": { + "type": "number", + "description": "Page number for pagination (default: 1)" + }, + "per_page": { + "type": "number", + "description": "Number of results per page (default: 20)" + } + }, + "required": [ + "search" + ] + } + }, + { + "name": "create_repository", + "description": "Create a new GitLab project", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Repository name" + }, + "description": { + "type": "string", + "description": "Repository description" + }, + "visibility": { + "type": "string", + "enum": [ + "private", + "internal", + "public" + ], + "description": "Repository visibility level" + }, + "initialize_with_readme": { + "type": "boolean", + "description": "Initialize with README.md" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "get_file_contents", + "description": "Get the contents of a file or directory from a GitLab project", + "inputSchema": { + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "Project ID or URL-encoded path" + }, + "file_path": { + "type": "string", + "description": "Path to the file or directory" + }, + "ref": { + "type": "string", + "description": "Branch/tag/commit to get contents from" + } + }, + "required": [ + "project_id", + "file_path" + ] + } + }, + { + 
"name": "push_files", + "description": "Push multiple files to a GitLab project in a single commit", + "inputSchema": { + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "Project ID or URL-encoded path" + }, + "branch": { + "type": "string", + "description": "Branch to push to" + }, + "files": { + "type": "array", + "items": { + "type": "object", + "properties": { + "file_path": { + "type": "string", + "description": "Path where to create the file" + }, + "content": { + "type": "string", + "description": "Content of the file" + } + }, + "required": [ + "file_path", + "content" + ], + "additionalProperties": false + }, + "description": "Array of files to push" + }, + "commit_message": { + "type": "string", + "description": "Commit message" + } + }, + "required": [ + "project_id", + "branch", + "files", + "commit_message" + ] + } + }, + { + "name": "create_issue", + "description": "Create a new issue in a GitLab project", + "inputSchema": { + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "Project ID or URL-encoded path" + }, + "title": { + "type": "string", + "description": "Issue title" + }, + "description": { + "type": "string", + "description": "Issue description" + }, + "assignee_ids": { + "type": "array", + "items": { + "type": "number" + }, + "description": "Array of user IDs to assign" + }, + "labels": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Array of label names" + }, + "milestone_id": { + "type": "number", + "description": "Milestone ID to assign" + } + }, + "required": [ + "project_id", + "title" + ] + } + }, + { + "name": "create_merge_request", + "description": "Create a new merge request in a GitLab project", + "inputSchema": { + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "Project ID or URL-encoded path" + }, + "title": { + "type": "string", + "description": "Merge request title" + }, + 
"description": { + "type": "string", + "description": "Merge request description" + }, + "source_branch": { + "type": "string", + "description": "Branch containing changes" + }, + "target_branch": { + "type": "string", + "description": "Branch to merge into" + }, + "draft": { + "type": "boolean", + "description": "Create as draft merge request" + }, + "allow_collaboration": { + "type": "boolean", + "description": "Allow commits from upstream members" + } + }, + "required": [ + "project_id", + "title", + "source_branch", + "target_branch" + ] + } + }, + { + "name": "fork_repository", + "description": "Fork a GitLab project to your account or specified namespace", + "inputSchema": { + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "Project ID or URL-encoded path" + }, + "namespace": { + "type": "string", + "description": "Namespace to fork to (full path)" + } + }, + "required": [ + "project_id" + ] + } + }, + { + "name": "create_branch", + "description": "Create a new branch in a GitLab project", + "inputSchema": { + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "Project ID or URL-encoded path" + }, + "branch": { + "type": "string", + "description": "Name for the new branch" + }, + "ref": { + "type": "string", + "description": "Source branch/commit for new branch" + } + }, + "required": [ + "project_id", + "branch" + ] + } + } + ], + "is_official": true + }, + "dune-analytics-mcp": { + "name": "dune-analytics-mcp", + "display_name": "Dune Analytics", + "description": "A mcp server that bridges Dune Analytics data to AI agents.", + "repository": { + "type": "git", + "url": "https://github.com/kukapay/dune-analytics-mcp" + }, + "homepage": "https://github.com/kukapay/dune-analytics-mcp", + "author": { + "name": "Kukapay" + }, + "license": "MIT", + "categories": [ + "Finance" + ], + "tags": [ + "Dune", + "Analytics", + "AI agents" + ], + "examples": [ + { + "title": "Get Latest 
Result", + "description": "Retrieves the latest results of a specified Dune query.", + "prompt": "get_latest_result(query_id=4853921)" + }, + { + "title": "Run Query", + "description": "Executes a Dune query and returns the results.", + "prompt": "run_query(query_id=1215383)" + } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/kukapay/dune-analytics-mcp", + "main.py" + ], + "env": { + "DUNE_API_KEY": "${DUNE_API_KEY}" + } + } + }, + "arguments": { + "DUNE_API_KEY": { + "description": "A valid Dune Analytics API key obtained from Dune Analytics for authentication and data access.", + "required": true, + "example": "your_api_key_here" + } + } + }, + "whois-mcp": { + "name": "whois-mcp", + "display_name": "Whois Lookup", + "description": "MCP server that performs whois lookup against domain, IP, ASN and TLD.", + "repository": { + "type": "git", + "url": "https://github.com/bharathvaj-ganesan/whois-mcp" + }, + "homepage": "https://github.com/bharathvaj-ganesan/whois-mcp", + "author": { + "name": "bharathvaj-ganesan" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "whois", + "domain", + "tools" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@bharathvaj/whois-mcp@latest" + ] + } + }, + "examples": [ + { + "title": "Look up WHOIS information", + "description": "Using the Whois MCP to find out domain details.", + "prompt": "What can you tell me about example.com?" 
+ } + ], + "tools": [ + { + "name": "whois_domain", + "description": "Looks up whois information about the domain", + "inputSchema": { + "type": "object", + "properties": { + "domain": { + "type": "string", + "minLength": 1 + } + }, + "required": [ + "domain" + ] + } + }, + { + "name": "whois_tld", + "description": "Looks up whois information about the Top Level Domain (TLD)", + "inputSchema": { + "type": "object", + "properties": { + "tld": { + "type": "string", + "minLength": 1 + } + }, + "required": [ + "tld" + ] + } + }, + { + "name": "whois_ip", + "description": "Looks up whois information about the IP", + "inputSchema": { + "type": "object", + "properties": { + "ip": { + "type": "string", + "anyOf": [ + { + "format": "ipv4" + }, + { + "format": "ipv6" + } + ] + } + }, + "required": [ + "ip" + ] + } + }, + { + "name": "whois_as", + "description": "Looks up whois information about the Autonomous System Number (ASN)", + "inputSchema": { + "type": "object", + "properties": { + "asn": { + "type": "string", + "pattern": "^AS\\d+$" + } + }, + "required": [ + "asn" + ] + } + } + ] + }, + "deepseek-thinker-mcp": { + "name": "deepseek-thinker-mcp", + "display_name": "Deepseek Thinker", + "description": "An MCP (Model Context Protocol) server that provides Deepseek reasoning content to MCP-enabled AI Clients, like Claude Desktop. 
Supports access to Deepseek's thought processes from the Deepseek API service or from a local Ollama server.", + "repository": { + "type": "git", + "url": "https://github.com/ruixingshi/deepseek-thinker-mcp" + }, + "license": "MIT", + "categories": [ + "AI Systems" + ], + "tags": [ + "Deepseek", + "AI Clients", + "Reasoning" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "deepseek-thinker-mcp" + ], + "env": { + "API_KEY": "${API_KEY}", + "BASE_URL": "${BASE_URL}" + } + } + }, + "author": { + "name": "ruixingshi" + }, + "homepage": "https://github.com/ruixingshi/deepseek-thinker-mcp", + "arguments": { + "API_KEY": { + "description": "Your OpenAI API Key for authentication with the OpenAI services.", + "required": true, + "example": "sk-xxxxxxxxxx" + }, + "BASE_URL": { + "description": "The base URL for the OpenAI API that you are connecting to.", + "required": true, + "example": "https://api.openai.com/v1" + } + }, + "tools": [ + { + "name": "get-deepseek-thinker", + "description": "think with deepseek", + "inputSchema": { + "type": "object", + "properties": { + "originPrompt": { + "type": "string", + "description": "user's original prompt" + } + }, + "required": [ + "originPrompt" + ] + } + } + ] + }, + "inbox-zero": { + "display_name": "Inbox Zero MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/elie222/inbox-zero" + }, + "homepage": "https://github.com/elie222/inbox-zero/tree/main/apps/mcp-server", + "author": { + "name": "elie222" + }, + "license": "MIT", + "tags": [ + "email", + "inbox", + "assistant", + "mcp" + ], + "arguments": { + "API_KEY": { + "description": "Your Inbox Zero API key from the /settings page in the web app", + "required": true, + "example": "your-api-key-here" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "node", + "args": [ + "build/index.js" + ], + "env": { + "API_KEY": "" + }, + "description": "Run the MCP server using Node.js" + } 
+ }, + "examples": [ + { + "title": "Manage your inbox", + "description": "Use the MCP server to interact with your Inbox Zero personal assistant", + "prompt": "Help me organize my inbox" + } + ], + "name": "inbox-zero", + "description": "MCP server for Inbox Zero, an AI personal assistant that helps you manage and organize your email inbox.", + "categories": [ + "Messaging" + ], + "is_official": true + }, + "git": { + "name": "git", + "display_name": "git", + "description": "Tools to read, search, and manipulate Git repositories", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/git", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "Git", + "Server", + "Automation" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-git", + "--repository", + "${GIT_REPO_PATH}" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "--mount", + "type=bind,src=${GIT_REPO_PATH},dst=${GIT_REPO_PATH}", + "mcp/git" + ] + }, + "python": { + "type": "python", + "command": "python", + "args": [ + "-m", + "mcp_server_git", + "--repository", + "${GIT_REPO_PATH}" + ] + } + }, + "arguments": { + "GIT_REPO_PATH": { + "description": "The path to the Git repository that the mcp-server-git will interact with.", + "required": true, + "example": "/path/to/git/repo" + } + }, + "tools": [ + { + "name": "git_status", + "description": "Shows the working tree status", + "inputSchema": { + "properties": { + "repo_path": { + "title": "Repo Path", + "type": "string" + } + }, + "required": [ + "repo_path" + ], + "title": "GitStatus", + "type": "object" + } + }, + { + "name": "git_diff_unstaged", + "description": "Shows changes in the working directory that are not yet staged", + "inputSchema": { + "properties": { + "repo_path": { + 
"title": "Repo Path", + "type": "string" + } + }, + "required": [ + "repo_path" + ], + "title": "GitDiffUnstaged", + "type": "object" + } + }, + { + "name": "git_diff_staged", + "description": "Shows changes that are staged for commit", + "inputSchema": { + "properties": { + "repo_path": { + "title": "Repo Path", + "type": "string" + } + }, + "required": [ + "repo_path" + ], + "title": "GitDiffStaged", + "type": "object" + } + }, + { + "name": "git_diff", + "description": "Shows differences between branches or commits", + "inputSchema": { + "properties": { + "repo_path": { + "title": "Repo Path", + "type": "string" + }, + "target": { + "title": "Target", + "type": "string" + } + }, + "required": [ + "repo_path", + "target" + ], + "title": "GitDiff", + "type": "object" + } + }, + { + "name": "git_commit", + "description": "Records changes to the repository", + "inputSchema": { + "properties": { + "repo_path": { + "title": "Repo Path", + "type": "string" + }, + "message": { + "title": "Message", + "type": "string" + } + }, + "required": [ + "repo_path", + "message" + ], + "title": "GitCommit", + "type": "object" + } + }, + { + "name": "git_add", + "description": "Adds file contents to the staging area", + "inputSchema": { + "properties": { + "repo_path": { + "title": "Repo Path", + "type": "string" + }, + "files": { + "items": { + "type": "string" + }, + "title": "Files", + "type": "array" + } + }, + "required": [ + "repo_path", + "files" + ], + "title": "GitAdd", + "type": "object" + } + }, + { + "name": "git_reset", + "description": "Unstages all staged changes", + "inputSchema": { + "properties": { + "repo_path": { + "title": "Repo Path", + "type": "string" + } + }, + "required": [ + "repo_path" + ], + "title": "GitReset", + "type": "object" + } + }, + { + "name": "git_log", + "description": "Shows the commit logs", + "inputSchema": { + "properties": { + "repo_path": { + "title": "Repo Path", + "type": "string" + }, + "max_count": { + "default": 10, + "title": 
"Max Count", + "type": "integer" + } + }, + "required": [ + "repo_path" + ], + "title": "GitLog", + "type": "object" + } + }, + { + "name": "git_create_branch", + "description": "Creates a new branch from an optional base branch", + "inputSchema": { + "properties": { + "repo_path": { + "title": "Repo Path", + "type": "string" + }, + "branch_name": { + "title": "Branch Name", + "type": "string" + }, + "base_branch": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Base Branch" + } + }, + "required": [ + "repo_path", + "branch_name" + ], + "title": "GitCreateBranch", + "type": "object" + } + }, + { + "name": "git_checkout", + "description": "Switches branches", + "inputSchema": { + "properties": { + "repo_path": { + "title": "Repo Path", + "type": "string" + }, + "branch_name": { + "title": "Branch Name", + "type": "string" + } + }, + "required": [ + "repo_path", + "branch_name" + ], + "title": "GitCheckout", + "type": "object" + } + }, + { + "name": "git_show", + "description": "Shows the contents of a commit", + "inputSchema": { + "properties": { + "repo_path": { + "title": "Repo Path", + "type": "string" + }, + "revision": { + "title": "Revision", + "type": "string" + } + }, + "required": [ + "repo_path", + "revision" + ], + "title": "GitShow", + "type": "object" + } + } + ], + "is_official": true + }, + "code-executor": { + "name": "code-executor", + "display_name": "Code Executor", + "description": "An MCP server that allows LLMs to execute Python code within a specified Conda environment.", + "repository": { + "type": "git", + "url": "https://github.com/bazinga012/mcp_code_executor" + }, + "homepage": "https://github.com/bazinga012/mcp_code_executor", + "author": { + "name": "bazinga012" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "Python", + "Conda", + "Execution" + ], + "examples": [ + { + "title": "Execute Python Code", + "description": "An example of executing Python code 
using MCP Code Executor", + "prompt": "Please execute the following code: print('Hello, World!')" + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/bazinga012/mcp_code_executor" + ], + "env": { + "CODE_STORAGE_DIR": "${CODE_STORAGE_DIR}", + "CONDA_ENV_NAME": "${CONDA_ENV_NAME}" + } + } + }, + "arguments": { + "CODE_STORAGE_DIR": { + "description": "The directory where the generated code will be stored.", + "required": true, + "example": "/path/to/code/storage" + }, + "CONDA_ENV_NAME": { + "description": "The name of the Conda environment in which the code will be executed.", + "required": true, + "example": "your-conda-env" + } + }, + "tools": [ + { + "name": "execute_code", + "description": "Execute Python code in the specified conda environment", + "inputSchema": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "Python code to execute" + }, + "filename": { + "type": "string", + "description": "Optional: Name of the file to save the code (default: generated UUID)" + } + }, + "required": [ + "code" + ] + } + } + ] + }, + "world-bank-data-api": { + "name": "world-bank-data-api", + "display_name": "World Bank Data API", + "description": "A server that fetches data indicators available with the World Bank as part of their data API", + "repository": { + "type": "git", + "url": "https://github.com/anshumax/world_bank_mcp_server" + }, + "homepage": "https://github.com/anshumax/world_bank_mcp_server", + "author": { + "name": "anshumax" + }, + "license": "[NOT FOUND]", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "World Bank", + "Data", + "API", + "Indicators", + "Analysis" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/anshumax/world_bank_mcp_server", + "world_bank_mcp_server" + ] + } + }, + "examples": [ + { + "title": "List Countries", + "description": "Lists available 
countries in the World Bank open data API.", + "prompt": "List all countries available in the World Bank data." + }, + { + "title": "List Indicators", + "description": "Lists available indicators in the World Bank open data API.", + "prompt": "List all indicators available in the World Bank data." + }, + { + "title": "Analyze Indicators", + "description": "Analyzes specific indicators for a selected country.", + "prompt": "Analyze the poverty indicators for Kenya." + } + ], + "tools": [ + { + "name": "get_indicator_for_country", + "description": "Get values for an indicator for a specific country from the World Bank API", + "inputSchema": { + "type": "object", + "properties": { + "country_id": { + "type": "string", + "description": "The ID of the country for which the indicator is to be queried" + }, + "indicator_id": { + "type": "string", + "description": "The ID of the indicator to be queried" + } + }, + "required": [ + "country_id", + "indicator_id" + ] + } + } + ] + }, + "firebase": { + "name": "firebase", + "display_name": "Firebase", + "description": "Server to interact with Firebase services including Firebase Authentication, Firestore, and Firebase Storage.", + "repository": { + "type": "git", + "url": "https://github.com/gannonh/firebase-mcp" + }, + "homepage": "https://github.com/gannonh/firebase-mcp", + "author": { + "name": "gannonh" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "Firebase", + "LLM", + "Server" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@gannonh/firebase-mcp" + ], + "env": { + "SERVICE_ACCOUNT_KEY_PATH": "${SERVICE_ACCOUNT_KEY_PATH}", + "FIREBASE_STORAGE_BUCKET": "${FIREBASE_STORAGE_BUCKET}" + } + } + }, + "arguments": { + "SERVICE_ACCOUNT_KEY_PATH": { + "description": "Path to your Firebase service account key JSON file", + "required": true, + "example": "/absolute/path/to/serviceAccountKey.json" + }, + "FIREBASE_STORAGE_BUCKET": { + "description": 
"Bucket name for Firebase Storage", + "required": false, + "example": "your-project-id.firebasestorage.app" + } + }, + "tools": [ + { + "name": "firestore_add_document", + "description": "Add a document to a Firestore collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Collection name" + }, + "data": { + "type": "object", + "description": "Document data" + } + }, + "required": [ + "collection", + "data" + ] + } + }, + { + "name": "firestore_list_documents", + "description": "List documents from a Firestore collection with filtering and ordering", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Collection name" + }, + "filters": { + "type": "array", + "description": "Array of filter conditions", + "items": { + "type": "object", + "properties": { + "field": { + "type": "string", + "description": "Field name to filter" + }, + "operator": { + "type": "string", + "description": "Comparison operator (==, >, <, >=, <=, array-contains, in, array-contains-any)" + }, + "value": { + "description": "Value to compare against (use ISO format for dates)" + } + }, + "required": [ + "field", + "operator", + "value" + ] + } + }, + "limit": { + "type": "number", + "description": "Number of documents to return", + "default": 20 + }, + "pageToken": { + "type": "string", + "description": "Token for pagination to get the next page of results" + }, + "orderBy": { + "type": "array", + "description": "Array of fields to order by", + "items": { + "type": "object", + "properties": { + "field": { + "type": "string", + "description": "Field name to order by" + }, + "direction": { + "type": "string", + "description": "Sort direction (asc or desc)", + "enum": [ + "asc", + "desc" + ], + "default": "asc" + } + }, + "required": [ + "field" + ] + } + } + }, + "required": [ + "collection" + ] + } + }, + { + "name": "firestore_get_document", + "description": "Get a 
document from a Firestore collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Collection name" + }, + "id": { + "type": "string", + "description": "Document ID" + } + }, + "required": [ + "collection", + "id" + ] + } + }, + { + "name": "firestore_update_document", + "description": "Update a document in a Firestore collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Collection name" + }, + "id": { + "type": "string", + "description": "Document ID" + }, + "data": { + "type": "object", + "description": "Updated document data" + } + }, + "required": [ + "collection", + "id", + "data" + ] + } + }, + { + "name": "firestore_delete_document", + "description": "Delete a document from a Firestore collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Collection name" + }, + "id": { + "type": "string", + "description": "Document ID" + } + }, + "required": [ + "collection", + "id" + ] + } + }, + { + "name": "auth_get_user", + "description": "Get a user by ID or email from Firebase Authentication", + "inputSchema": { + "type": "object", + "properties": { + "identifier": { + "type": "string", + "description": "User ID or email address" + } + }, + "required": [ + "identifier" + ] + } + }, + { + "name": "storage_list_files", + "description": "List files in a given path in Firebase Storage", + "inputSchema": { + "type": "object", + "properties": { + "directoryPath": { + "type": "string", + "description": "The optional path to list files from. If not provided, the root is used." 
+ } + }, + "required": [] + } + }, + { + "name": "storage_get_file_info", + "description": "Get file information including metadata and download URL", + "inputSchema": { + "type": "object", + "properties": { + "filePath": { + "type": "string", + "description": "The path of the file to get information for" + } + }, + "required": [ + "filePath" + ] + } + }, + { + "name": "firestore_list_collections", + "description": "List root collections in Firestore", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + } + ] + }, + "dify": { + "name": "dify", + "display_name": "Dify", + "description": "A simple implementation of an MCP server for dify workflows.", + "repository": { + "type": "git", + "url": "https://github.com/YanxingLiu/dify-mcp-server" + }, + "homepage": "https://github.com/YanxingLiu/dify-mcp-server", + "author": { + "name": "YanxingLiu" + }, + "license": "MIT", + "categories": [ + "AI Systems" + ], + "tags": [ + "dify", + "server", + "workflows" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/YanxingLiu/dify-mcp-server", + "dify_mcp_server" + ], + "env": { + "CONFIG_PATH": "${CONFIG_PATH}" + } + } + }, + "arguments": { + "CONFIG_PATH": { + "description": "This environment variable indicates the path to the configuration file for the Dify MCP server, typically a YAML file containing necessary settings.", + "required": true, + "example": "/Users/lyx/Downloads/config.yaml" + } + } + }, + "code-sandbox-mcp": { + "name": "code-sandbox-mcp", + "display_name": "Code Sandbox", + "description": "An MCP server to create secure code sandbox environment for executing code within Docker containers.", + "repository": { + "type": "git", + "url": "https://github.com/Automata-Labs-team/code-sandbox-mcp" + }, + "homepage": "https://github.com/Automata-Labs-team/code-sandbox-mcp", + "author": { + "name": "Automata-Labs-team" + }, + "license": "MIT", + "installations": { + 
"custom": { + "type": "custom", + "command": "/path/to/code-sandbox-mcp", + "args": [], + "env": {} + } + }, + "categories": [ + "Dev Tools" + ], + "tags": [ + "Docker", + "Sandbox", + "Code Execution" + ] + }, + "rijksmuseum": { + "name": "rijksmuseum", + "display_name": "Rijksmuseum", + "description": "Interface with the Rijksmuseum API to search artworks, retrieve artwork details, access image tiles, and explore user collections.", + "repository": { + "type": "git", + "url": "https://github.com/r-huijts/rijksmuseum-mcp" + }, + "homepage": "https://github.com/r-huijts/rijksmuseum-mcp", + "author": { + "name": "r-huijts" + }, + "license": "MIT", + "categories": [ + "Analytics" + ], + "tags": [ + "collection", + "Rijksmuseum" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "mcp-server-rijksmuseum" + ], + "env": { + "RIJKSMUSEUM_API_KEY": "${RIJKSMUSEUM_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Artwork Discovery", + "description": "Queries related to discovering artworks in the museum's collection.", + "prompt": "\"Show me all paintings by Rembrandt from the 1640s\"" + }, + { + "title": "Artwork Analysis", + "description": "Queries related to analyzing specific artworks.", + "prompt": "\"Tell me everything about The Night Watch\"" + }, + { + "title": "Artist Research", + "description": "Queries focused on researching artists and their works.", + "prompt": "\"Create a timeline of Rembrandt's self-portraits\"" + }, + { + "title": "Thematic Exploration", + "description": "Queries that explore themes in the artworks.", + "prompt": "\"Find all artworks depicting biblical scenes\"" + }, + { + "title": "Collection Analysis", + "description": "Queries about user-curated collections.", + "prompt": "\"Show me the most popular user-curated collections\"" + }, + { + "title": "Visual Details", + "description": "Queries for examining visual details in artworks.", + "prompt": "\"Let me examine the details in the 
background of The Night Watch\"" + } + ], + "arguments": { + "RIJKSMUSEUM_API_KEY": { + "description": "Your Rijksmuseum API key used for authenticating requests to the Rijksmuseum API.", + "required": true, + "example": "your_api_key_here" + } + }, + "tools": [ + { + "name": "search_artwork", + "description": "Search and filter artworks in the Rijksmuseum collection. This tool provides extensive filtering options including artist name, type of artwork, materials, techniques, time periods, colors, and more. Results can be sorted in various ways and are paginated.", + "inputSchema": { + "type": "object", + "properties": { + "q": { + "type": "string", + "description": "General search query that will match against artwork titles, descriptions, materials, techniques, etc. Use this for broad searches like 'sunflowers', 'portrait', 'landscape', etc." + }, + "involvedMaker": { + "type": "string", + "description": "Search for artworks by a specific artist. Must be case-sensitive and exact, e.g., 'Rembrandt van Rijn', 'Vincent van Gogh'. Use + for spaces in names." + }, + "type": { + "type": "string", + "description": "Filter by the type of artwork. Common values include 'painting', 'print', 'drawing', 'sculpture', 'photograph', 'furniture'. Use singular form." + }, + "material": { + "type": "string", + "description": "Filter by the material used in the artwork. Examples: 'canvas', 'paper', 'wood', 'oil paint', 'marble'. Matches exact material names from the museum's classification." + }, + "technique": { + "type": "string", + "description": "Filter by the technique used to create the artwork. Examples: 'oil painting', 'etching', 'watercolor', 'photography'. Matches specific techniques from the museum's classification." + }, + "century": { + "type": "integer", + "description": "Filter artworks by the century they were created in. Use negative numbers for BCE, positive for CE. Range from -1 (100-1 BCE) to 21 (2000-2099 CE). 
Example: 17 for 17th century (1600-1699).", + "minimum": -1, + "maximum": 21 + }, + "color": { + "type": "string", + "description": "Filter artworks by predominant color. Use hexadecimal color codes without the # symbol. Examples: 'FF0000' for red, '00FF00' for green, '0000FF' for blue. The API will match artworks containing this color." + }, + "imgonly": { + "type": "boolean", + "description": "When true, only returns artworks that have associated images. Set to true if you need to show or analyze the visual aspects of artworks.", + "default": false + }, + "toppieces": { + "type": "boolean", + "description": "When true, only returns artworks designated as masterpieces by the Rijksmuseum. These are the most significant and famous works in the collection.", + "default": false + }, + "sortBy": { + "type": "string", + "enum": [ + "relevance", + "objecttype", + "chronologic", + "achronologic", + "artist", + "artistdesc" + ], + "description": "Determines the order of results. Options: 'relevance' (best matches first), 'objecttype' (grouped by type), 'chronologic' (oldest to newest), 'achronologic' (newest to oldest), 'artist' (artist name A-Z), 'artistdesc' (artist name Z-A).", + "default": "relevance" + }, + "p": { + "type": "integer", + "description": "Page number for paginated results, starting at 0. Use in combination with 'ps' to navigate through large result sets. Note: p * ps cannot exceed 10,000.", + "minimum": 0, + "default": 0 + }, + "ps": { + "type": "integer", + "description": "Number of artworks to return per page. Higher values return more results but take longer to process. Maximum of 100 items per page.", + "minimum": 1, + "maximum": 100, + "default": 10 + }, + "culture": { + "type": "string", + "enum": [ + "nl", + "en" + ], + "description": "Language for the search and returned data. Use 'en' for English or 'nl' for Dutch (Nederlands). 
Affects artwork titles, descriptions, and other text fields.", + "default": "en" + } + } + } + }, + { + "name": "get_artwork_details", + "description": "Retrieve comprehensive details about a specific artwork from the Rijksmuseum collection. Returns extensive information including:\n\n- Basic details (title, artist, dates)\n- Physical properties (dimensions, materials, techniques)\n- Historical context (dating, historical persons, documentation)\n- Visual information (colors, image data)\n- Curatorial information (descriptions, labels, location)\n- Acquisition details\n- Exhibition history\n\nThis is the primary tool for in-depth research on a specific artwork, providing all available museum documentation and metadata.", + "inputSchema": { + "type": "object", + "properties": { + "objectNumber": { + "type": "string", + "description": "The unique identifier of the artwork in the Rijksmuseum collection. Format is typically a combination of letters and numbers (e.g., 'SK-C-5' for The Night Watch, 'SK-A-3262' for Van Gogh's Self Portrait). Case-sensitive. This ID can be obtained from search results." + }, + "culture": { + "type": "string", + "enum": [ + "nl", + "en" + ], + "description": "Language for the artwork details. Use 'en' for English or 'nl' for Dutch (Nederlands). Affects all textual information including descriptions, titles, and historical documentation.", + "default": "en" + } + }, + "required": [ + "objectNumber" + ] + } + }, + { + "name": "get_artwork_image", + "description": "Retrieve detailed image tile information for high-resolution viewing of an artwork. 
This tool provides data for implementing deep zoom functionality, allowing detailed examination of the artwork at various zoom levels.\n\nThe response includes multiple zoom levels (z0 to z6):\n- z0: Highest resolution (largest image)\n- z6: Lowest resolution (smallest image)\n\nEach zoom level contains:\n- Total width and height of the image at that level\n- A set of image tiles that make up the complete image\n- Position information (x,y) for each tile\n\nThis is particularly useful for:\n- Implementing deep zoom viewers\n- Studying fine artwork details\n- Analyzing brushwork or conservation details\n- Creating interactive viewing experiences", + "inputSchema": { + "type": "object", + "properties": { + "objectNumber": { + "type": "string", + "description": "The unique identifier of the artwork in the Rijksmuseum collection. Same format as used in get_artwork_details. The artwork must have an associated image for this to work." + }, + "culture": { + "type": "string", + "enum": [ + "nl", + "en" + ], + "description": "Language for the API response. Use 'en' for English or 'nl' for Dutch (Nederlands). While this endpoint primarily returns image data, any textual metadata will be in the specified language.", + "default": "en" + } + }, + "required": [ + "objectNumber" + ] + } + }, + { + "name": "get_user_sets", + "description": "Retrieve collections created by Rijksstudio users. 
These are curated sets of artworks that users have grouped together based on themes, artists, periods, or personal interests.\n\nEach set includes:\n- Basic information (name, description, creation date)\n- Creator details (username, language preference)\n- Collection statistics (number of items)\n- Navigation links (API and web URLs)\n\nThis tool is useful for:\n- Discovering user-curated exhibitions\n- Finding thematically related artworks\n- Exploring popular artwork groupings\n- Studying collection patterns", + "inputSchema": { + "type": "object", + "properties": { + "page": { + "type": "number", + "description": "Page number for paginated results, starting at 0. Use with pageSize to navigate through sets. Note: page * pageSize cannot exceed 10,000.", + "minimum": 0, + "default": 0 + }, + "pageSize": { + "type": "number", + "description": "Number of user sets to return per page. Must be between 1 and 100. Larger values return more results but take longer to process.", + "minimum": 1, + "maximum": 100, + "default": 10 + }, + "culture": { + "type": "string", + "enum": [ + "nl", + "en" + ], + "description": "Language for the response data. Use 'en' for English or 'nl' for Dutch (Nederlands). Affects set descriptions and user information.", + "default": "en" + } + } + } + }, + { + "name": "get_user_set_details", + "description": "Retrieve detailed information about a specific user-created collection in Rijksstudio. 
Returns comprehensive information about the set and its contents, including:\n\n- Set metadata (name, description, creation date)\n- Creator information\n- List of artworks in the set\n- Image data for each artwork\n- Navigation links\n\nThis tool is particularly useful for:\n- Analyzing thematic groupings of artworks\n- Studying curatorial choices\n- Understanding collection patterns\n- Exploring relationships between artworks", + "inputSchema": { + "type": "object", + "properties": { + "setId": { + "type": "string", + "description": "The unique identifier of the user set to fetch. Format is typically 'userId-setname'. This ID can be obtained from the get_user_sets results." + }, + "culture": { + "type": "string", + "enum": [ + "nl", + "en" + ], + "description": "Language for the response data. Use 'en' for English or 'nl' for Dutch (Nederlands). Affects set descriptions and artwork information.", + "default": "en" + }, + "page": { + "type": "number", + "description": "Page number for paginated results, starting at 0. Use with pageSize to navigate through large sets. Note: page * pageSize cannot exceed 10,000.", + "minimum": 0, + "default": 0 + }, + "pageSize": { + "type": "number", + "description": "Number of artworks to return per page. Must be between 1 and 100. Default is 25. Larger values return more artworks but take longer to process.", + "minimum": 1, + "maximum": 100, + "default": 25 + } + }, + "required": [ + "setId" + ] + } + }, + { + "name": "open_image_in_browser", + "description": "Open a high-resolution image of an artwork in the default web browser for viewing. This tool is useful when you want to examine an artwork visually or show it to the user. Works with any valid Rijksmuseum image URL.", + "inputSchema": { + "type": "object", + "properties": { + "imageUrl": { + "type": "string", + "description": "The full URL of the artwork image to open. Must be a valid HTTP/HTTPS URL from the Rijksmuseum's servers. 
These URLs can be obtained from artwork search results or details." + } + }, + "required": [ + "imageUrl" + ] + } + }, + { + "name": "get_artist_timeline", + "description": "Generate a chronological timeline of an artist's works in the Rijksmuseum collection. This tool is perfect for studying an artist's development, analyzing their artistic periods, or understanding their contribution to art history over time.", + "inputSchema": { + "type": "object", + "properties": { + "artist": { + "type": "string", + "description": "The name of the artist to create a timeline for. Must match the museum's naming convention (e.g., 'Rembrandt van Rijn', 'Vincent van Gogh'). Case sensitive and exact match required." + }, + "maxWorks": { + "type": "number", + "description": "Maximum number of works to include in the timeline. Works are selected based on significance and quality of available images. Higher numbers give a more complete picture but may include less significant works.", + "minimum": 1, + "maximum": 50, + "default": 10 + } + }, + "required": [ + "artist" + ] + } + } + ] + }, + "mem0-mcp": { + "name": "mem0-mcp", + "display_name": "Mem0 Server", + "description": "A Model Context Protocol server for Mem0, which helps with managing coding preferences.", + "repository": { + "type": "git", + "url": "https://github.com/mem0ai/mem0-mcp" + }, + "homepage": "https://github.com/mem0ai/mem0-mcp", + "author": { + "name": "mem0ai" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "coding preferences", + "mem0" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/mem0ai/mem0-mcp", + "main.py" + ] + } + }, + "arguments": { + "host": { + "description": "The host address that the server will bind to. 
This can be configured to allow access from different IP addresses or set it to 'localhost' for local access only.", + "required": false, + "example": "0.0.0.0" + }, + "port": { + "description": "The port number on which the server will listen for incoming connections. Changing this can help to avoid port conflicts with other services on the same machine.", + "required": false, + "example": "8080" + } + } + }, + "slack": { + "name": "slack", + "display_name": "Slack", + "description": "Channel management and messaging capabilities", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "license": "MIT", + "categories": [ + "Messaging" + ], + "tags": [ + "slack", + "api", + "bot" + ], + "examples": [ + { + "title": "Post a message to a channel", + "description": "Send a message to a specified Slack channel.", + "prompt": "Include the channel ID and the message text." + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-slack" + ], + "env": { + "SLACK_BOT_TOKEN": "${SLACK_BOT_TOKEN}", + "SLACK_TEAM_ID": "${SLACK_TEAM_ID}" + } + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "SLACK_BOT_TOKEN", + "-e", + "SLACK_TEAM_ID", + "mcp/slack" + ], + "env": { + "SLACK_BOT_TOKEN": "${SLACK_BOT_TOKEN}", + "SLACK_TEAM_ID": "${SLACK_TEAM_ID}" + } + } + }, + "author": { + "name": "modelcontextprotocol" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/slack", + "arguments": { + "SLACK_BOT_TOKEN": { + "description": "The OAuth token for the bot user in the Slack workspace, used for authenticating API requests.", + "required": true, + "example": "xoxb-your-bot-token" + }, + "SLACK_TEAM_ID": { + "description": "The unique identifier of the Slack workspace, required for operations within the workspace.", + "required": true, + "example": "T01234567" + } + }, + "tools": [ + 
{ + "name": "slack_list_channels", + "description": "List public channels in the workspace with pagination", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "number", + "description": "Maximum number of channels to return (default 100, max 200)", + "default": 100 + }, + "cursor": { + "type": "string", + "description": "Pagination cursor for next page of results" + } + } + } + }, + { + "name": "slack_post_message", + "description": "Post a new message to a Slack channel", + "inputSchema": { + "type": "object", + "properties": { + "channel_id": { + "type": "string", + "description": "The ID of the channel to post to" + }, + "text": { + "type": "string", + "description": "The message text to post" + } + }, + "required": [ + "channel_id", + "text" + ] + } + }, + { + "name": "slack_reply_to_thread", + "description": "Reply to a specific message thread in Slack", + "inputSchema": { + "type": "object", + "properties": { + "channel_id": { + "type": "string", + "description": "The ID of the channel containing the thread" + }, + "thread_ts": { + "type": "string", + "description": "The timestamp of the parent message in the format '1234567890.123456'. Timestamps in the format without the period can be converted by adding the period such that 6 numbers come after it." 
+ }, + "text": { + "type": "string", + "description": "The reply text" + } + }, + "required": [ + "channel_id", + "thread_ts", + "text" + ] + } + }, + { + "name": "slack_add_reaction", + "description": "Add a reaction emoji to a message", + "inputSchema": { + "type": "object", + "properties": { + "channel_id": { + "type": "string", + "description": "The ID of the channel containing the message" + }, + "timestamp": { + "type": "string", + "description": "The timestamp of the message to react to" + }, + "reaction": { + "type": "string", + "description": "The name of the emoji reaction (without ::)" + } + }, + "required": [ + "channel_id", + "timestamp", + "reaction" + ] + } + }, + { + "name": "slack_get_channel_history", + "description": "Get recent messages from a channel", + "inputSchema": { + "type": "object", + "properties": { + "channel_id": { + "type": "string", + "description": "The ID of the channel" + }, + "limit": { + "type": "number", + "description": "Number of messages to retrieve (default 10)", + "default": 10 + } + }, + "required": [ + "channel_id" + ] + } + }, + { + "name": "slack_get_thread_replies", + "description": "Get all replies in a message thread", + "inputSchema": { + "type": "object", + "properties": { + "channel_id": { + "type": "string", + "description": "The ID of the channel containing the thread" + }, + "thread_ts": { + "type": "string", + "description": "The timestamp of the parent message in the format '1234567890.123456'. Timestamps in the format without the period can be converted by adding the period such that 6 numbers come after it." 
+ } + }, + "required": [ + "channel_id", + "thread_ts" + ] + } + }, + { + "name": "slack_get_users", + "description": "Get a list of all users in the workspace with their basic profile information", + "inputSchema": { + "type": "object", + "properties": { + "cursor": { + "type": "string", + "description": "Pagination cursor for next page of results" + }, + "limit": { + "type": "number", + "description": "Maximum number of users to return (default 100, max 200)", + "default": 100 + } + } + } + }, + { + "name": "slack_get_user_profile", + "description": "Get detailed profile information for a specific user", + "inputSchema": { + "type": "object", + "properties": { + "user_id": { + "type": "string", + "description": "The ID of the user" + } + }, + "required": [ + "user_id" + ] + } + } + ], + "is_official": true + }, + "openai-websearch-mcp": { + "name": "openai-websearch-mcp", + "display_name": "OpenAI WebSearch", + "description": "This is a Python-based MCP server that provides OpenAI `web_search` build-in tool.", + "repository": { + "type": "git", + "url": "https://github.com/ConechoAI/openai-websearch-mcp" + }, + "homepage": "https://github.com/ConechoAI/openai-websearch-mcp", + "author": { + "name": "ConechoAI" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "openai", + "websearch", + "AI assistant" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "openai-websearch-mcp" + ], + "env": { + "OPENAI_API_KEY": "${OPENAI_API_KEY}" + } + }, + "python": { + "type": "python", + "command": "python", + "args": [ + "-m", + "openai_websearch_mcp" + ], + "env": { + "OPENAI_API_KEY": "${OPENAI_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Using web search", + "description": "Perform a web search using the OpenAI WebSearch MCP server.", + "prompt": "search('latest news on AI')" + } + ], + "arguments": { + "OPENAI_API_KEY": { + "description": "Your OpenAI API key to authenticate requests to the OpenAI 
API.", + "required": true, + "example": "sk-xxxx" + } + }, + "tools": [ + { + "name": "web_search", + "description": " It allows AI assistants to search the web during conversations with users", + "inputSchema": { + "$defs": { + "UserLocation": { + "properties": { + "type": { + "const": "approximate", + "default": "approximate", + "title": "Type", + "type": "string" + }, + "city": { + "title": "City", + "type": "string" + }, + "country": { + "default": null, + "title": "Country", + "type": "string" + }, + "region": { + "default": null, + "title": "Region", + "type": "string" + }, + "timezone": { + "enum": [ + "Africa/Abidjan", + "Africa/Accra", + "Africa/Addis_Ababa", + "Africa/Algiers", + "Africa/Asmara", + "Africa/Asmera", + "Africa/Bamako", + "Africa/Bangui", + "Africa/Banjul", + "Africa/Bissau", + "Africa/Blantyre", + "Africa/Brazzaville", + "Africa/Bujumbura", + "Africa/Cairo", + "Africa/Casablanca", + "Africa/Ceuta", + "Africa/Conakry", + "Africa/Dakar", + "Africa/Dar_es_Salaam", + "Africa/Djibouti", + "Africa/Douala", + "Africa/El_Aaiun", + "Africa/Freetown", + "Africa/Gaborone", + "Africa/Harare", + "Africa/Johannesburg", + "Africa/Juba", + "Africa/Kampala", + "Africa/Khartoum", + "Africa/Kigali", + "Africa/Kinshasa", + "Africa/Lagos", + "Africa/Libreville", + "Africa/Lome", + "Africa/Luanda", + "Africa/Lubumbashi", + "Africa/Lusaka", + "Africa/Malabo", + "Africa/Maputo", + "Africa/Maseru", + "Africa/Mbabane", + "Africa/Mogadishu", + "Africa/Monrovia", + "Africa/Nairobi", + "Africa/Ndjamena", + "Africa/Niamey", + "Africa/Nouakchott", + "Africa/Ouagadougou", + "Africa/Porto-Novo", + "Africa/Sao_Tome", + "Africa/Timbuktu", + "Africa/Tripoli", + "Africa/Tunis", + "Africa/Windhoek", + "America/Adak", + "America/Anchorage", + "America/Anguilla", + "America/Antigua", + "America/Araguaina", + "America/Argentina/Buenos_Aires", + "America/Argentina/Catamarca", + "America/Argentina/ComodRivadavia", + "America/Argentina/Cordoba", + "America/Argentina/Jujuy", + 
"America/Argentina/La_Rioja", + "America/Argentina/Mendoza", + "America/Argentina/Rio_Gallegos", + "America/Argentina/Salta", + "America/Argentina/San_Juan", + "America/Argentina/San_Luis", + "America/Argentina/Tucuman", + "America/Argentina/Ushuaia", + "America/Aruba", + "America/Asuncion", + "America/Atikokan", + "America/Atka", + "America/Bahia", + "America/Bahia_Banderas", + "America/Barbados", + "America/Belem", + "America/Belize", + "America/Blanc-Sablon", + "America/Boa_Vista", + "America/Bogota", + "America/Boise", + "America/Buenos_Aires", + "America/Cambridge_Bay", + "America/Campo_Grande", + "America/Cancun", + "America/Caracas", + "America/Catamarca", + "America/Cayenne", + "America/Cayman", + "America/Chicago", + "America/Chihuahua", + "America/Ciudad_Juarez", + "America/Coral_Harbour", + "America/Cordoba", + "America/Costa_Rica", + "America/Creston", + "America/Cuiaba", + "America/Curacao", + "America/Danmarkshavn", + "America/Dawson", + "America/Dawson_Creek", + "America/Denver", + "America/Detroit", + "America/Dominica", + "America/Edmonton", + "America/Eirunepe", + "America/El_Salvador", + "America/Ensenada", + "America/Fort_Nelson", + "America/Fort_Wayne", + "America/Fortaleza", + "America/Glace_Bay", + "America/Godthab", + "America/Goose_Bay", + "America/Grand_Turk", + "America/Grenada", + "America/Guadeloupe", + "America/Guatemala", + "America/Guayaquil", + "America/Guyana", + "America/Halifax", + "America/Havana", + "America/Hermosillo", + "America/Indiana/Indianapolis", + "America/Indiana/Knox", + "America/Indiana/Marengo", + "America/Indiana/Petersburg", + "America/Indiana/Tell_City", + "America/Indiana/Vevay", + "America/Indiana/Vincennes", + "America/Indiana/Winamac", + "America/Indianapolis", + "America/Inuvik", + "America/Iqaluit", + "America/Jamaica", + "America/Jujuy", + "America/Juneau", + "America/Kentucky/Louisville", + "America/Kentucky/Monticello", + "America/Knox_IN", + "America/Kralendijk", + "America/La_Paz", + "America/Lima", + 
"America/Los_Angeles", + "America/Louisville", + "America/Lower_Princes", + "America/Maceio", + "America/Managua", + "America/Manaus", + "America/Marigot", + "America/Martinique", + "America/Matamoros", + "America/Mazatlan", + "America/Mendoza", + "America/Menominee", + "America/Merida", + "America/Metlakatla", + "America/Mexico_City", + "America/Miquelon", + "America/Moncton", + "America/Monterrey", + "America/Montevideo", + "America/Montreal", + "America/Montserrat", + "America/Nassau", + "America/New_York", + "America/Nipigon", + "America/Nome", + "America/Noronha", + "America/North_Dakota/Beulah", + "America/North_Dakota/Center", + "America/North_Dakota/New_Salem", + "America/Nuuk", + "America/Ojinaga", + "America/Panama", + "America/Pangnirtung", + "America/Paramaribo", + "America/Phoenix", + "America/Port-au-Prince", + "America/Port_of_Spain", + "America/Porto_Acre", + "America/Porto_Velho", + "America/Puerto_Rico", + "America/Punta_Arenas", + "America/Rainy_River", + "America/Rankin_Inlet", + "America/Recife", + "America/Regina", + "America/Resolute", + "America/Rio_Branco", + "America/Rosario", + "America/Santa_Isabel", + "America/Santarem", + "America/Santiago", + "America/Santo_Domingo", + "America/Sao_Paulo", + "America/Scoresbysund", + "America/Shiprock", + "America/Sitka", + "America/St_Barthelemy", + "America/St_Johns", + "America/St_Kitts", + "America/St_Lucia", + "America/St_Thomas", + "America/St_Vincent", + "America/Swift_Current", + "America/Tegucigalpa", + "America/Thule", + "America/Thunder_Bay", + "America/Tijuana", + "America/Toronto", + "America/Tortola", + "America/Vancouver", + "America/Virgin", + "America/Whitehorse", + "America/Winnipeg", + "America/Yakutat", + "America/Yellowknife", + "Antarctica/Casey", + "Antarctica/Davis", + "Antarctica/DumontDUrville", + "Antarctica/Macquarie", + "Antarctica/Mawson", + "Antarctica/McMurdo", + "Antarctica/Palmer", + "Antarctica/Rothera", + "Antarctica/South_Pole", + "Antarctica/Syowa", + 
"Antarctica/Troll", + "Antarctica/Vostok", + "Arctic/Longyearbyen", + "Asia/Aden", + "Asia/Almaty", + "Asia/Amman", + "Asia/Anadyr", + "Asia/Aqtau", + "Asia/Aqtobe", + "Asia/Ashgabat", + "Asia/Ashkhabad", + "Asia/Atyrau", + "Asia/Baghdad", + "Asia/Bahrain", + "Asia/Baku", + "Asia/Bangkok", + "Asia/Barnaul", + "Asia/Beirut", + "Asia/Bishkek", + "Asia/Brunei", + "Asia/Calcutta", + "Asia/Chita", + "Asia/Choibalsan", + "Asia/Chongqing", + "Asia/Chungking", + "Asia/Colombo", + "Asia/Dacca", + "Asia/Damascus", + "Asia/Dhaka", + "Asia/Dili", + "Asia/Dubai", + "Asia/Dushanbe", + "Asia/Famagusta", + "Asia/Gaza", + "Asia/Harbin", + "Asia/Hebron", + "Asia/Ho_Chi_Minh", + "Asia/Hong_Kong", + "Asia/Hovd", + "Asia/Irkutsk", + "Asia/Istanbul", + "Asia/Jakarta", + "Asia/Jayapura", + "Asia/Jerusalem", + "Asia/Kabul", + "Asia/Kamchatka", + "Asia/Karachi", + "Asia/Kashgar", + "Asia/Kathmandu", + "Asia/Katmandu", + "Asia/Khandyga", + "Asia/Kolkata", + "Asia/Krasnoyarsk", + "Asia/Kuala_Lumpur", + "Asia/Kuching", + "Asia/Kuwait", + "Asia/Macao", + "Asia/Macau", + "Asia/Magadan", + "Asia/Makassar", + "Asia/Manila", + "Asia/Muscat", + "Asia/Nicosia", + "Asia/Novokuznetsk", + "Asia/Novosibirsk", + "Asia/Omsk", + "Asia/Oral", + "Asia/Phnom_Penh", + "Asia/Pontianak", + "Asia/Pyongyang", + "Asia/Qatar", + "Asia/Qostanay", + "Asia/Qyzylorda", + "Asia/Rangoon", + "Asia/Riyadh", + "Asia/Saigon", + "Asia/Sakhalin", + "Asia/Samarkand", + "Asia/Seoul", + "Asia/Shanghai", + "Asia/Singapore", + "Asia/Srednekolymsk", + "Asia/Taipei", + "Asia/Tashkent", + "Asia/Tbilisi", + "Asia/Tehran", + "Asia/Tel_Aviv", + "Asia/Thimbu", + "Asia/Thimphu", + "Asia/Tokyo", + "Asia/Tomsk", + "Asia/Ujung_Pandang", + "Asia/Ulaanbaatar", + "Asia/Ulan_Bator", + "Asia/Urumqi", + "Asia/Ust-Nera", + "Asia/Vientiane", + "Asia/Vladivostok", + "Asia/Yakutsk", + "Asia/Yangon", + "Asia/Yekaterinburg", + "Asia/Yerevan", + "Atlantic/Azores", + "Atlantic/Bermuda", + "Atlantic/Canary", + "Atlantic/Cape_Verde", + "Atlantic/Faeroe", + 
"Atlantic/Faroe", + "Atlantic/Jan_Mayen", + "Atlantic/Madeira", + "Atlantic/Reykjavik", + "Atlantic/South_Georgia", + "Atlantic/St_Helena", + "Atlantic/Stanley", + "Australia/ACT", + "Australia/Adelaide", + "Australia/Brisbane", + "Australia/Broken_Hill", + "Australia/Canberra", + "Australia/Currie", + "Australia/Darwin", + "Australia/Eucla", + "Australia/Hobart", + "Australia/LHI", + "Australia/Lindeman", + "Australia/Lord_Howe", + "Australia/Melbourne", + "Australia/NSW", + "Australia/North", + "Australia/Perth", + "Australia/Queensland", + "Australia/South", + "Australia/Sydney", + "Australia/Tasmania", + "Australia/Victoria", + "Australia/West", + "Australia/Yancowinna", + "Brazil/Acre", + "Brazil/DeNoronha", + "Brazil/East", + "Brazil/West", + "CET", + "CST6CDT", + "Canada/Atlantic", + "Canada/Central", + "Canada/Eastern", + "Canada/Mountain", + "Canada/Newfoundland", + "Canada/Pacific", + "Canada/Saskatchewan", + "Canada/Yukon", + "Chile/Continental", + "Chile/EasterIsland", + "Cuba", + "EET", + "EST", + "EST5EDT", + "Egypt", + "Eire", + "Etc/GMT", + "Etc/GMT+0", + "Etc/GMT+1", + "Etc/GMT+10", + "Etc/GMT+11", + "Etc/GMT+12", + "Etc/GMT+2", + "Etc/GMT+3", + "Etc/GMT+4", + "Etc/GMT+5", + "Etc/GMT+6", + "Etc/GMT+7", + "Etc/GMT+8", + "Etc/GMT+9", + "Etc/GMT-0", + "Etc/GMT-1", + "Etc/GMT-10", + "Etc/GMT-11", + "Etc/GMT-12", + "Etc/GMT-13", + "Etc/GMT-14", + "Etc/GMT-2", + "Etc/GMT-3", + "Etc/GMT-4", + "Etc/GMT-5", + "Etc/GMT-6", + "Etc/GMT-7", + "Etc/GMT-8", + "Etc/GMT-9", + "Etc/GMT0", + "Etc/Greenwich", + "Etc/UCT", + "Etc/UTC", + "Etc/Universal", + "Etc/Zulu", + "Europe/Amsterdam", + "Europe/Andorra", + "Europe/Astrakhan", + "Europe/Athens", + "Europe/Belfast", + "Europe/Belgrade", + "Europe/Berlin", + "Europe/Bratislava", + "Europe/Brussels", + "Europe/Bucharest", + "Europe/Budapest", + "Europe/Busingen", + "Europe/Chisinau", + "Europe/Copenhagen", + "Europe/Dublin", + "Europe/Gibraltar", + "Europe/Guernsey", + "Europe/Helsinki", + "Europe/Isle_of_Man", + 
"Europe/Istanbul", + "Europe/Jersey", + "Europe/Kaliningrad", + "Europe/Kiev", + "Europe/Kirov", + "Europe/Kyiv", + "Europe/Lisbon", + "Europe/Ljubljana", + "Europe/London", + "Europe/Luxembourg", + "Europe/Madrid", + "Europe/Malta", + "Europe/Mariehamn", + "Europe/Minsk", + "Europe/Monaco", + "Europe/Moscow", + "Europe/Nicosia", + "Europe/Oslo", + "Europe/Paris", + "Europe/Podgorica", + "Europe/Prague", + "Europe/Riga", + "Europe/Rome", + "Europe/Samara", + "Europe/San_Marino", + "Europe/Sarajevo", + "Europe/Saratov", + "Europe/Simferopol", + "Europe/Skopje", + "Europe/Sofia", + "Europe/Stockholm", + "Europe/Tallinn", + "Europe/Tirane", + "Europe/Tiraspol", + "Europe/Ulyanovsk", + "Europe/Uzhgorod", + "Europe/Vaduz", + "Europe/Vatican", + "Europe/Vienna", + "Europe/Vilnius", + "Europe/Volgograd", + "Europe/Warsaw", + "Europe/Zagreb", + "Europe/Zaporozhye", + "Europe/Zurich", + "Factory", + "GB", + "GB-Eire", + "GMT", + "GMT+0", + "GMT-0", + "GMT0", + "Greenwich", + "HST", + "Hongkong", + "Iceland", + "Indian/Antananarivo", + "Indian/Chagos", + "Indian/Christmas", + "Indian/Cocos", + "Indian/Comoro", + "Indian/Kerguelen", + "Indian/Mahe", + "Indian/Maldives", + "Indian/Mauritius", + "Indian/Mayotte", + "Indian/Reunion", + "Iran", + "Israel", + "Jamaica", + "Japan", + "Kwajalein", + "Libya", + "MET", + "MST", + "MST7MDT", + "Mexico/BajaNorte", + "Mexico/BajaSur", + "Mexico/General", + "NZ", + "NZ-CHAT", + "Navajo", + "PRC", + "PST8PDT", + "Pacific/Apia", + "Pacific/Auckland", + "Pacific/Bougainville", + "Pacific/Chatham", + "Pacific/Chuuk", + "Pacific/Easter", + "Pacific/Efate", + "Pacific/Enderbury", + "Pacific/Fakaofo", + "Pacific/Fiji", + "Pacific/Funafuti", + "Pacific/Galapagos", + "Pacific/Gambier", + "Pacific/Guadalcanal", + "Pacific/Guam", + "Pacific/Honolulu", + "Pacific/Johnston", + "Pacific/Kanton", + "Pacific/Kiritimati", + "Pacific/Kosrae", + "Pacific/Kwajalein", + "Pacific/Majuro", + "Pacific/Marquesas", + "Pacific/Midway", + "Pacific/Nauru", + 
"Pacific/Niue", + "Pacific/Norfolk", + "Pacific/Noumea", + "Pacific/Pago_Pago", + "Pacific/Palau", + "Pacific/Pitcairn", + "Pacific/Pohnpei", + "Pacific/Ponape", + "Pacific/Port_Moresby", + "Pacific/Rarotonga", + "Pacific/Saipan", + "Pacific/Samoa", + "Pacific/Tahiti", + "Pacific/Tarawa", + "Pacific/Tongatapu", + "Pacific/Truk", + "Pacific/Wake", + "Pacific/Wallis", + "Pacific/Yap", + "Poland", + "Portugal", + "ROC", + "ROK", + "Singapore", + "Turkey", + "UCT", + "US/Alaska", + "US/Aleutian", + "US/Arizona", + "US/Central", + "US/East-Indiana", + "US/Eastern", + "US/Hawaii", + "US/Indiana-Starke", + "US/Michigan", + "US/Mountain", + "US/Pacific", + "US/Samoa", + "UTC", + "Universal", + "W-SU", + "WET", + "Zulu" + ], + "minLength": 1, + "title": "Timezone", + "type": "string" + } + }, + "required": [ + "city", + "timezone" + ], + "title": "UserLocation", + "type": "object" + } + }, + "properties": { + "input": { + "title": "Input", + "type": "string" + }, + "model": { + "default": "gpt-4o-mini", + "enum": [ + "gpt-4o", + "gpt-4o-mini" + ], + "title": "Model", + "type": "string" + }, + "type": { + "default": "web_search_preview", + "enum": [ + "web_search_preview", + "web_search_preview_2025_03_11" + ], + "title": "Type", + "type": "string" + }, + "search_context_size": { + "default": "medium", + "enum": [ + "low", + "medium", + "high" + ], + "title": "Search Context Size", + "type": "string" + }, + "user_location": { + "$ref": "#/$defs/UserLocation", + "default": null + } + }, + "required": [ + "input" + ], + "title": "web_searchArguments", + "type": "object" + } + } + ] + }, + "linear": { + "name": "linear", + "display_name": "Linear", + "description": "Allows LLM to interact with Linear's API for project management, including searching, creating, and updating issues.", + "repository": { + "type": "git", + "url": "https://github.com/jerhadf/linear-mcp-server" + }, + "homepage": "https://github.com/jerhadf/linear-mcp-server", + "author": { + "name": "jerhadf" + }, + 
"license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "linear", + "issue tracking", + "LLM" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "linear-mcp-server" + ], + "env": { + "LINEAR_API_KEY": "${LINEAR_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Show me all my high-priority issues", + "description": "Execute the search_issues tool and/or linear-user:///{userId}/assigned to find issues assigned to the user with priority 1", + "prompt": "Show me all my high-priority issues" + }, + { + "title": "Create a bug report", + "description": "Use create_issue to create a new high-priority issue with appropriate details and status tracking.", + "prompt": "Based on what I've told you about this bug already, make a bug report for the authentication system" + }, + { + "title": "Find all in-progress frontend tasks", + "description": "Use search_issues to locate frontend-related issues with in progress status.", + "prompt": "Find all in progress frontend tasks" + }, + { + "title": "Get summary of recent updates", + "description": "Use search_issues to identify relevant issue(s) and fetch the issue details.", + "prompt": "Give me a summary of recent updates on the issues for mobile app development" + }, + { + "title": "Analyze current workload for the mobile team", + "description": "Combine linear-team:///{teamId}/issues and search_issues to analyze issue distribution and priorities across the mobile team.", + "prompt": "What's the current workload for the mobile team?" + } + ], + "arguments": { + "LINEAR_API_KEY": { + "description": "Your Linear API key to authenticate requests to the Linear API.", + "required": true, + "example": "your_api_key_here" + } + }, + "tools": [ + { + "name": "linear_create_issue", + "description": "Creates a new Linear issue with specified details. Use this to create tickets for tasks, bugs, or feature requests. Returns the created issue's identifier and URL. 
Required fields are title and teamId, with optional description, priority (0-4, where 0 is no priority and 1 is urgent), and status.", + "inputSchema": { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "Issue title" + }, + "teamId": { + "type": "string", + "description": "Team ID" + }, + "description": { + "type": "string", + "description": "Issue description" + }, + "priority": { + "type": "number", + "description": "Priority (0-4)" + }, + "status": { + "type": "string", + "description": "Issue status" + } + }, + "required": [ + "title", + "teamId" + ] + } + }, + { + "name": "linear_update_issue", + "description": "Updates an existing Linear issue's properties. Use this to modify issue details like title, description, priority, or status. Requires the issue ID and accepts any combination of updatable fields. Returns the updated issue's identifier and URL.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Issue ID" + }, + "title": { + "type": "string", + "description": "New title" + }, + "description": { + "type": "string", + "description": "New description" + }, + "priority": { + "type": "number", + "description": "New priority (0-4)" + }, + "status": { + "type": "string", + "description": "New status" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "linear_search_issues", + "description": "Searches Linear issues using flexible criteria. Supports filtering by any combination of: title/description text, team, status, assignee, labels, priority (1=urgent, 2=high, 3=normal, 4=low), and estimate. 
Returns up to 10 issues by default (configurable via limit).", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Optional text to search in title and description" + }, + "teamId": { + "type": "string", + "description": "Filter by team ID" + }, + "status": { + "type": "string", + "description": "Filter by status name (e.g., 'In Progress', 'Done')" + }, + "assigneeId": { + "type": "string", + "description": "Filter by assignee's user ID" + }, + "labels": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Filter by label names" + }, + "priority": { + "type": "number", + "description": "Filter by priority (1=urgent, 2=high, 3=normal, 4=low)" + }, + "estimate": { + "type": "number", + "description": "Filter by estimate points" + }, + "includeArchived": { + "type": "boolean", + "description": "Include archived issues in results (default: false)" + }, + "limit": { + "type": "number", + "description": "Max results to return (default: 10)" + } + } + } + }, + { + "name": "linear_get_user_issues", + "description": "Retrieves issues assigned to a specific user or the authenticated user if no userId is provided. Returns issues sorted by last updated, including priority, status, and other metadata. Useful for finding a user's workload or tracking assigned tasks.", + "inputSchema": { + "type": "object", + "properties": { + "userId": { + "type": "string", + "description": "Optional user ID. If not provided, returns authenticated user's issues" + }, + "includeArchived": { + "type": "boolean", + "description": "Include archived issues in results" + }, + "limit": { + "type": "number", + "description": "Maximum number of issues to return (default: 50)" + } + } + } + }, + { + "name": "linear_add_comment", + "description": "Adds a comment to an existing Linear issue. Supports markdown formatting in the comment body. Can optionally specify a custom user name and avatar for the comment. 
Returns the created comment's details including its URL.", + "inputSchema": { + "type": "object", + "properties": { + "issueId": { + "type": "string", + "description": "ID of the issue to comment on" + }, + "body": { + "type": "string", + "description": "Comment text in markdown format" + }, + "createAsUser": { + "type": "string", + "description": "Optional custom username to show for the comment" + }, + "displayIconUrl": { + "type": "string", + "description": "Optional avatar URL for the comment" + } + }, + "required": [ + "issueId", + "body" + ] + } + } + ] + }, + "mcp-create": { + "name": "mcp-create", + "display_name": "Create Server", + "description": "A dynamic MCP server management service that creates, runs, and manages Model Context Protocol servers on-the-fly.", + "repository": { + "type": "git", + "url": "https://github.com/tesla0225/mcp-create" + }, + "homepage": "https://github.com/tesla0225/mcp-create", + "author": { + "name": "tesla0225" + }, + "license": "MIT", + "categories": [ + "MCP Tools" + ], + "tags": [ + "dynamic", + "TypeScript" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/tesla0225/mcp-create" + ] + } + }, + "examples": [ + { + "title": "Creating a New Server", + "description": "Example of creating a new server using TypeScript.", + "prompt": "{\"name\":\"create-server-from-template\",\"arguments\":{\"language\":\"typescript\"}}" + }, + { + "title": "Executing a Tool", + "description": "Example of executing a tool on a server.", + "prompt": "{\"name\":\"execute-tool\",\"arguments\":{\"serverId\":\"ba7c9a4f-6ba8-4cad-8ec8-a41a08c19fac\",\"toolName\":\"echo\",\"args\":{\"message\":\"Hello, dynamic MCP server!\"}}}" + } + ] + }, + "thirdweb": { + "display_name": "thirdweb MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/thirdweb-dev/ai" + }, + "homepage": "https://thirdweb.com", + "author": { + "name": "thirdweb-dev" + }, + "license": 
"Apache-2.0", + "tags": [ + "blockchain", + "mcp", + "thirdweb", + "web3", + "ipfs" + ], + "arguments": { + "THIRDWEB_SECRET_KEY": { + "description": "Your thirdweb API secret key from dashboard", + "required": true, + "example": "your-secret-key" + }, + "THIRDWEB_ENGINE_URL": { + "description": "URL endpoint for thirdweb Engine service", + "required": false, + "example": "https://your-engine-url" + }, + "THIRDWEB_ENGINE_AUTH_JWT": { + "description": "Authentication JWT token for Engine", + "required": false, + "example": "your-jwt-token" + }, + "THIRDWEB_ENGINE_BACKEND_WALLET_ADDRESS": { + "description": "Wallet address for Engine backend", + "required": false, + "example": "0x..." + }, + "chain-id": { + "description": "Blockchain network IDs to connect to (e.g., 1 for Ethereum mainnet, 137 for Polygon)", + "required": false, + "example": "1" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "thirdweb-mcp" + ], + "env": { + "THIRDWEB_SECRET_KEY": "your-secret-key" + }, + "description": "Run with uvx package manager", + "recommended": true + } + }, + "examples": [ + { + "title": "Basic Usage", + "description": "Basic usage with default settings (stdio transport with Nebula and Insight)", + "prompt": "THIRDWEB_SECRET_KEY=... thirdweb-mcp" + }, + { + "title": "SSE Transport", + "description": "Using SSE transport on a custom port", + "prompt": "THIRDWEB_SECRET_KEY=... thirdweb-mcp --transport sse --port 8080" + }, + { + "title": "Full Configuration", + "description": "Enabling all services with specific chain IDs", + "prompt": "THIRDWEB_SECRET_KEY=... 
thirdweb-mcp --chain-id 1 --chain-id 137 \\\n --engine-url YOUR_ENGINE_URL \\\n --engine-auth-jwt YOUR_ENGINE_JWT \\\n --engine-backend-wallet-address YOUR_ENGINE_BACKEND_WALLET_ADDRESS" + } + ], + "name": "thirdweb", + "description": "Read/write to over 2k blockchains, enabling data querying, contract analysis/deployment, and transaction execution, powered by Thirdweb", + "categories": [ + "AI Systems" + ], + "tools": [ + { + "name": "chat", + "description": "Send a message to Nebula AI and get a response. This can be used for blockchain queries, contract interactions, and access to thirdweb tools.", + "inputSchema": { + "type": "object", + "properties": { + "message": { + "description": "The natural language message to process. Can be a question about blockchain data, a request to execute a transaction, or any web3-related query.", + "title": "Message", + "type": "string" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional session ID to maintain conversation context. If provided, this message will be part of an ongoing conversation; if omitted, a new session is created.", + "title": "Session Id" + }, + "context": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Contextual information for processing the request, including: chainIds (array of chain identifiers) and walletAddress (user's wallet for transaction signing). Example: {'chainIds': ['1', '137'], 'walletAddress': '0x123...'}", + "title": "Context" + } + }, + "required": [ + "message" + ] + } + }, + { + "name": "get_session", + "description": "Fetch complete information about a specific Nebula AI session, including conversation history, context settings, and metadata. 
Use this to examine past interactions or resume an existing conversation thread.", + "inputSchema": { + "type": "object", + "properties": { + "session_id": { + "description": "Unique identifier for the target session. This UUID references a specific conversation history in the Nebula system.", + "title": "Session Id", + "type": "string" + } + }, + "required": [ + "session_id" + ] + } + }, + { + "name": "list_sessions", + "description": "Retrieve all available Nebula AI sessions for the authenticated account. Returns an array of session metadata including IDs, titles, and creation timestamps, allowing you to find and reference existing conversations.", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "decode_signature", + "description": "Decode a function or event signature. Use this when you need to understand what a specific function selector or event signature does and what parameters it accepts.", + "inputSchema": { + "type": "object", + "properties": { + "signature": { + "description": "Function or event signature to decode (e.g., '0x095ea7b3' for the approve function). Usually begins with 0x.", + "title": "Signature", + "type": "string" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum). Specify to improve signature lookup accuracy.", + "title": "Chain" + } + }, + "required": [ + "signature" + ] + } + }, + { + "name": "get_address_transactions", + "description": "Look up transactions for a wallet or contract address. Use this when asked about a specific Ethereum address (e.g., '0x1234...') to get account details including balance, transaction count, and contract verification status. 
This tool is specifically for addresses (accounts and contracts), NOT transaction hashes or ENS names.", + "inputSchema": { + "type": "object", + "properties": { + "address": { + "description": "Wallet or contract address to look up (e.g., '0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045' for Vitalik's address). Must be a valid blockchain address starting with 0x and 42 characters long.", + "title": "Address", + "type": "string" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum). Specify the blockchain network for the address.", + "title": "Chain" + } + }, + "required": [ + "address" + ] + } + }, + { + "name": "get_all_events", + "description": "Retrieve blockchain events with flexible filtering options. Use this to search for specific events or to analyze event patterns across multiple blocks. Do not use this tool to simply look up a single transaction.", + "inputSchema": { + "type": "object", + "properties": { + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum Mainnet, 137 for Polygon). Specify multiple IDs as a list [1, 137] for cross-chain queries (max 5).", + "title": "Chain" + }, + "contract_address": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Contract address to filter events by (e.g., '0x1234...'). 
Only return events emitted by this contract.", + "title": "Contract Address" + }, + "block_number_gte": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Minimum block number to start querying from (inclusive).", + "title": "Block Number Gte" + }, + "block_number_lt": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum block number to query up to (exclusive).", + "title": "Block Number Lt" + }, + "transaction_hash": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Specific transaction hash to filter events by (e.g., '0xabc123...'). Useful for examining events in a particular transaction.", + "title": "Transaction Hash" + }, + "topic_0": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by event signature hash (first topic). For example, '0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc' for Transfer events.", + "title": "Topic 0" + }, + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of events to return per request. Default is 20, adjust for pagination.", + "title": "Limit" + }, + "page": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Page number for paginated results, starting from 0. Use with limit parameter.", + "title": "Page" + }, + "sort_order": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "desc", + "description": "Sort order for the events. Default is 'desc' for descending order. Use 'asc' for ascending order.", + "title": "Sort Order" + } + }, + "required": [] + } + }, + { + "name": "get_all_transactions", + "description": "Retrieve blockchain transactions with flexible filtering options. 
Use this to analyze transaction patterns, track specific transactions, or monitor wallet activity.", + "inputSchema": { + "type": "object", + "properties": { + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum, 137 for Polygon). Specify multiple IDs as a list for cross-chain queries.", + "title": "Chain" + }, + "from_address": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter transactions sent from this address (e.g., '0x1234...'). Useful for tracking outgoing transactions from a wallet.", + "title": "From Address" + }, + "to_address": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter transactions sent to this address (e.g., '0x1234...'). Useful for tracking incoming transactions to a contract or wallet.", + "title": "To Address" + }, + "function_selector": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by function selector (e.g., '0x095ea7b3' for the approve function). Useful for finding specific contract interactions.", + "title": "Function Selector" + }, + "sort_order": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "desc", + "description": "Sort order for the transactions. Default is 'desc' for descending order. Use 'asc' for ascending order.", + "title": "Sort Order" + }, + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of transactions to return per request. 
Default is 20, adjust based on your needs.", + "title": "Limit" + }, + "page": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Page number for paginated results, starting from 0. Use with limit parameter for browsing large result sets.", + "title": "Page" + } + }, + "required": [] + } + }, + { + "name": "get_block_details", + "description": "Get detailed information about a specific block by its number or hash. Use this when asked about blockchain blocks (e.g., 'What's in block 12345678?' or 'Tell me about this block: 0xabc123...'). This tool is specifically for block data, NOT transactions, addresses, or contracts.", + "inputSchema": { + "type": "object", + "properties": { + "block_identifier": { + "description": "Block number or block hash to look up. Can be either a simple number (e.g., '12345678') or a block hash (e.g., '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3' for Ethereum block 0). Use for queries like 'what happened in block 14000000' or 'show me block 0xd4e56...'.", + "title": "Block Identifier", + "type": "string" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum). Specify the blockchain network where the block exists.", + "title": "Chain" + } + }, + "required": [ + "block_identifier" + ] + } + }, + { + "name": "get_contract_events", + "description": "Retrieve events from a specific contract address. Use this to analyze activity or monitor events for a particular smart contract.", + "inputSchema": { + "type": "object", + "properties": { + "contract_address": { + "description": "The contract address to query events for (e.g., '0x1234...'). 
Must be a valid Ethereum address.", + "title": "Contract Address", + "type": "string" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum Mainnet, 137 for Polygon). Specify multiple IDs as a list for cross-chain queries (max 5).", + "title": "Chain" + }, + "block_number_gte": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Only return events from blocks with number greater than or equal to this value. Useful for querying recent history.", + "title": "Block Number Gte" + }, + "topic_0": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by event signature hash (first topic). For example, Transfer event has a specific signature hash.", + "title": "Topic 0" + }, + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of events to return per request. Default is 20, increase for more results.", + "title": "Limit" + }, + "page": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Page number for paginated results, starting from 0. Use with limit parameter for browsing large result sets.", + "title": "Page" + }, + "sort_order": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "desc", + "description": "Sort order for the events. Default is 'desc' for descending order. 
Use 'asc' for ascending order.", + "title": "Sort Order" + } + }, + "required": [ + "contract_address" + ] + } + }, + { + "name": "get_contract_metadata", + "description": "Get contract ABI and metadata about a smart contract, including name, symbol, decimals, and other contract-specific information. Use this when asked about a contract's functions, interface, or capabilities. This tool specifically retrieves details about deployed smart contracts (NOT regular wallet addresses or transaction hashes).", + "inputSchema": { + "type": "object", + "properties": { + "contract_address": { + "description": "The contract address to get metadata for (e.g., '0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2' for WETH). Must be a deployed smart contract address (not a regular wallet). Use this for queries like 'what functions does this contract have' or 'get the ABI for contract 0x1234...'.", + "title": "Contract Address", + "type": "string" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) where the contract is deployed (e.g., 1 for Ethereum). Specify the correct network.", + "title": "Chain" + } + }, + "required": [ + "contract_address" + ] + } + }, + { + "name": "get_ens_transactions", + "description": "Look up transactions associated with an ENS domain name (anything ending in .eth like 'vitalik.eth'). This tool is specifically for ENS domains, NOT addresses, transaction hashes, or contract queries.", + "inputSchema": { + "type": "object", + "properties": { + "ens_name": { + "description": "ENS name to resolve (e.g., 'vitalik.eth', 'thirdweb.eth'). 
Must be a valid ENS domain ending with .eth.", + "title": "Ens Name", + "type": "string" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum). ENS is primarily on Ethereum mainnet.", + "title": "Chain" + } + }, + "required": [ + "ens_name" + ] + } + }, + { + "name": "get_erc1155_tokens", + "description": "Retrieve ERC1155 tokens (semi-fungible tokens) owned by a specified address. Shows balances of multi-token contracts with metadata.", + "inputSchema": { + "type": "object", + "properties": { + "owner_address": { + "description": "The wallet address to get ERC1155 tokens for (e.g., '0x1234...'). Returns all token IDs and their quantities.", + "title": "Owner Address", + "type": "string" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum, 137 for Polygon). Specify multiple IDs as a list for cross-chain queries.", + "title": "Chain" + }, + "include_price": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Set to True to include estimated prices for tokens where available. Useful for valuation.", + "title": "Include Price" + }, + "include_spam": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Set to True to include suspected spam tokens. 
Default is False to filter out potentially unwanted items.", + "title": "Include Spam" + } + }, + "required": [ + "owner_address" + ] + } + }, + { + "name": "get_erc20_tokens", + "description": "Retrieve ERC20 token balances for a specified address. Lists all fungible tokens owned with their balances, metadata, and optionally prices.", + "inputSchema": { + "type": "object", + "properties": { + "owner_address": { + "description": "The wallet address to get ERC20 token balances for (e.g., '0x1234...'). Must be a valid Ethereum address.", + "title": "Owner Address", + "type": "string" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum, 137 for Polygon). Specify multiple IDs as a list for cross-chain queries.", + "title": "Chain" + }, + "include_price": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Set to True to include current market prices for tokens. Useful for calculating portfolio value.", + "title": "Include Price" + }, + "include_spam": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Set to True to include suspected spam tokens. Default is False to filter out unwanted tokens.", + "title": "Include Spam" + } + }, + "required": [ + "owner_address" + ] + } + }, + { + "name": "get_erc721_tokens", + "description": "Retrieve ERC721 NFTs (non-fungible tokens) owned by a specified address. Lists all unique NFTs with their metadata and optionally prices.", + "inputSchema": { + "type": "object", + "properties": { + "owner_address": { + "description": "The wallet address to get ERC721 NFTs for (e.g., '0x1234...'). 
Returns all NFTs owned by this address.", + "title": "Owner Address", + "type": "string" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum, 137 for Polygon). Specify multiple IDs as a list for cross-chain queries.", + "title": "Chain" + }, + "include_price": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Set to True to include estimated prices for NFTs where available. Useful for valuation.", + "title": "Include Price" + }, + "include_spam": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Set to True to include suspected spam NFTs. Default is False to filter out potentially unwanted items.", + "title": "Include Spam" + } + }, + "required": [ + "owner_address" + ] + } + }, + { + "name": "get_nft_owners", + "description": "Get ownership information for NFTs in a specific collection. Shows which addresses own which token IDs and in what quantities.", + "inputSchema": { + "type": "object", + "properties": { + "contract_address": { + "description": "The NFT contract address to query ownership for (e.g., '0x1234...'). Must be an ERC721 or ERC1155 contract.", + "title": "Contract Address", + "type": "string" + }, + "token_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Specific token ID to query owners for (e.g., '42'). 
If provided, shows all owners of this specific NFT.", + "title": "Token Id" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) where the NFT contract is deployed (e.g., 1 for Ethereum). Specify the correct network.", + "title": "Chain" + }, + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of ownership records to return per request. Default is 20, adjust for pagination.", + "title": "Limit" + }, + "page": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Page number for paginated results, starting from 0. Use with limit parameter for browsing large collections.", + "title": "Page" + } + }, + "required": [ + "contract_address" + ] + } + }, + { + "name": "get_nft_transfers", + "description": "Track NFT transfers for a collection, specific token, or transaction. Useful for monitoring NFT trading activity or verifying transfers.", + "inputSchema": { + "type": "object", + "properties": { + "contract_address": { + "description": "The NFT contract address to query transfers for (e.g., '0x1234...'). Must be an ERC721 or ERC1155 contract.", + "title": "Contract Address", + "type": "string" + }, + "token_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Specific token ID to query transfers for (e.g., '42'). 
If provided, only shows transfers of this NFT.", + "title": "Token Id" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum). Specify the chain where the NFT contract is deployed.", + "title": "Chain" + }, + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of transfer records to return per request. Default is 20, adjust for pagination.", + "title": "Limit" + }, + "page": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Page number for paginated results, starting from 0. Use with limit parameter for browsing transfer history.", + "title": "Page" + } + }, + "required": [ + "contract_address" + ] + } + }, + { + "name": "get_nfts", + "description": "Retrieve detailed information about NFTs from a specific collection, including metadata, attributes, and images. Optionally get data for a specific token ID.", + "inputSchema": { + "type": "object", + "properties": { + "contract_address": { + "description": "The NFT contract address to query (e.g., '0x1234...'). Must be an ERC721 or ERC1155 contract.", + "title": "Contract Address", + "type": "string" + }, + "token_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Specific token ID to query (e.g., '42'). If provided, returns data only for this NFT. 
Otherwise returns collection data.", + "title": "Token Id" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) where the NFT contract is deployed (e.g., 1 for Ethereum). Specify the correct network.", + "title": "Chain" + }, + "include_metadata": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Set to True to include full NFT metadata like attributes, image URL, etc. Useful for displaying NFT details.", + "title": "Include Metadata" + } + }, + "required": [ + "contract_address" + ] + } + }, + { + "name": "get_token_prices", + "description": "Get current market prices for native and ERC20 tokens. Useful for valuation, tracking portfolio value, or monitoring price changes.", + "inputSchema": { + "type": "object", + "properties": { + "token_addresses": { + "description": "List of token contract addresses to get prices for (e.g., ['0x1234...', '0x5678...']). Can include ERC20 tokens. Use '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' for native tokens (ETH, POL, MATIC, etc.).", + "items": { + "type": "string" + }, + "title": "Token Addresses", + "type": "array" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) where the tokens exist (e.g., 1 for Ethereum, 137 for Polygon). Must match the token network.", + "title": "Chain" + } + }, + "required": [ + "token_addresses" + ] + } + }, + { + "name": "get_transaction_details", + "description": "Get detailed information about a specific transaction by its hash. 
Use this when asked to analyze, look up, check, or get details about a transaction hash (e.g., 'What can you tell me about this transaction: 0x5407ea41...'). This tool specifically deals with transaction hashes (txid/txhash), NOT addresses, contracts, or ENS names.", + "inputSchema": { + "type": "object", + "properties": { + "transaction_hash": { + "description": "Transaction hash to look up (e.g., '0x5407ea41de24b7353d70eab42d72c92b42a44e140f930e349973cfc7b8c9c1d7'). Must be a valid transaction hash beginning with 0x and typically 66 characters long. Use this for queries like 'tell me about this transaction' or 'what happened in transaction 0x1234...'.", + "title": "Transaction Hash", + "type": "string" + }, + "chain": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "type": "integer" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Chain ID(s) to query (e.g., 1 for Ethereum). Specify the blockchain network where the transaction exists.", + "title": "Chain" + } + }, + "required": [ + "transaction_hash" + ] + } + }, + { + "name": "fetch_ipfs_content", + "description": "Fetch content from IPFS by hash. Retrieves data stored on IPFS using the thirdweb gateway.", + "inputSchema": { + "type": "object", + "properties": { + "ipfs_hash": { + "description": "The IPFS hash/URI to fetch content from (e.g., 'ipfs://QmXyZ...'). Must start with 'ipfs://'.", + "title": "Ipfs Hash", + "type": "string" + } + }, + "required": [ + "ipfs_hash" + ] + } + }, + { + "name": "upload_to_ipfs", + "description": "Upload a file, directory, or JSON data to IPFS. 
Stores any type on decentralized storage and returns an IPFS URI.", + "inputSchema": { + "type": "object", + "properties": { + "data": { + "description": "Data to upload: can be a file path, directory path, dict, dataclass, or BaseModel instance.", + "title": "Data" + } + }, + "required": [ + "data" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "okta": { + "name": "okta", + "display_name": "Okta", + "description": "Interact with Okta API.", + "repository": { + "type": "git", + "url": "https://github.com/kapilduraphe/okta-mcp-server" + }, + "homepage": "https://github.com/kapilduraphe/okta-mcp-server", + "author": { + "name": "kapilduraphe" + }, + "license": "MIT", + "categories": [ + "System Tools" + ], + "tags": [ + "Okta", + "user management", + "group management" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/kapilduraphe/okta-mcp-server" + ], + "env": { + "OKTA_ORG_URL": "${OKTA_ORG_URL}", + "OKTA_API_TOKEN": "${OKTA_API_TOKEN}" + } + } + }, + "examples": [ + { + "title": "Show user details", + "description": "Retrieve details for a specific user.", + "prompt": "Show me details for user with userId XXXX" + }, + { + "title": "Check user status", + "description": "Get the status of a specific user.", + "prompt": "What's the status of user john.doe@company.com" + }, + { + "title": "Last login info", + "description": "Find out when a user last logged in.", + "prompt": "When was the last login for user jane.smith@organization.com" + }, + { + "title": "List users by department", + "description": "Get a list of all users in the marketing department.", + "prompt": "List all users in the marketing department" + }, + { + "title": "Find recent users", + "description": "Retrieve users created in the last month.", + "prompt": "Find users created in the last month" + }, + { + "title": "Show user groups", + "description": "List all groups in the Okta organization.", + 
"prompt": "Show me all the groups in my Okta organization" + }, + { + "title": "Admin groups", + "description": "List groups that contain the word 'admin'.", + "prompt": "List groups containing the word 'admin'" + } + ], + "arguments": { + "OKTA_ORG_URL": { + "description": "The base URL for your Okta organization, should include 'https://'.", + "required": true, + "example": "https://dev-123456.okta.com" + }, + "OKTA_API_TOKEN": { + "description": "A valid API token used to authenticate API requests to Okta.", + "required": true + } + } + }, + "base-free-usdc-transfer": { + "name": "base-free-usdc-transfer", + "display_name": "Free USDC Transfer", + "description": "Send USDC on [Base](https://base.org/) for free using Claude AI! Built with [Coinbase CDP](https://docs.cdp.coinbase.com/mpc-wallet/docs/welcome).", + "repository": { + "type": "git", + "url": "https://github.com/magnetai/mcp-free-usdc-transfer" + }, + "homepage": "https://github.com/magnetai/mcp-free-usdc-transfer", + "author": { + "name": "magnetai" + }, + "license": "MIT", + "categories": [ + "Finance" + ], + "tags": [ + "USDC", + "Base", + "Coinbase", + "MPC Wallet" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@magnetai/free-usdc-transfer" + ], + "env": { + "COINBASE_CDP_API_KEY_NAME": "${COINBASE_CDP_API_KEY_NAME}", + "COINBASE_CDP_PRIVATE_KEY": "${COINBASE_CDP_PRIVATE_KEY}" + } + } + }, + "arguments": { + "COINBASE_CDP_API_KEY_NAME": { + "description": "The name of your Coinbase CDP API key, which is required for authenticating API requests.", + "required": true, + "example": "my_api_key_name" + } + }, + "tools": [ + { + "name": "tranfer-usdc", + "description": "Analyze the value of the purchased items and transfer USDC to the recipient via the Base chain. 
Due to the uncertainty of blockchain transaction times, the transaction is only scheduled here and will not wait for the transaction to be completed.", + "inputSchema": { + "type": "object", + "properties": { + "usdc_amount": { + "type": "number", + "description": "USDC amount, greater than 0" + }, + "recipient": { + "type": "string", + "description": "Recipient's on-chain address or ENS addresses ending in .eth" + } + }, + "required": [ + "usdc_amount", + "recipient" + ] + } + }, + { + "name": "create_coinbase_mpc_wallet", + "description": "Used to create your Coinbase MPC wallet address. The newly created wallet cannot be used directly; the user must first deposit USDC. The transfer after creation requires user confirmation", + "inputSchema": { + "type": "object" + } + } + ] + }, + "mariadb": { + "name": "mariadb", + "display_name": "MariaDB Database Integration", + "description": "MariaDB database integration with configurable access controls in Python.", + "repository": { + "type": "git", + "url": "https://github.com/abel9851/mcp-server-mariadb" + }, + "homepage": "https://github.com/abel9851/mcp-server-mariadb", + "author": { + "name": "abel9851" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "MariaDB", + "Data Retrieval" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-mariadb", + "--host", + "${DB_HOST}", + "--port", + "${DB_PORT}", + "--user", + "${DB_USER}", + "--password", + "${DB_PASSWORD}", + "--database", + "${DB_NAME}" + ] + } + }, + "examples": [ + { + "title": "Query Database", + "description": "Example of executing a read-only operation against MariaDB.", + "prompt": "Execute read-only operations against your MariaDB database." 
+ } + ], + "arguments": { + "DB_HOST": { + "description": "The hostname of the MariaDB server to connect to.", + "required": true, + "example": "localhost" + }, + "DB_PORT": { + "description": "The port number on which the MariaDB server is listening.", + "required": true, + "example": "3306" + }, + "DB_USER": { + "description": "The username to connect to the MariaDB database.", + "required": true, + "example": "root" + }, + "DB_PASSWORD": { + "description": "The password for the MariaDB user.", + "required": true + }, + "DB_NAME": { + "description": "The name of the database to connect to.", + "required": true + } + }, + "tools": [ + { + "name": "query_database", + "description": "Execute a read-only operation against the MariaDB database.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "SQL query to execute" + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "servicenow": { + "name": "servicenow", + "display_name": "ServiceNow", + "description": "A MCP server to interact with a ServiceNow instance", + "repository": { + "type": "git", + "url": "https://github.com/osomai/servicenow-mcp" + }, + "homepage": "https://github.com/osomai/servicenow-mcp", + "author": { + "name": "osomai" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "ServiceNow", + "Automation" + ], + "examples": [ + { + "title": "Incident Management - Creating an Incident", + "description": "Create a new incident for a network outage in the east region.", + "prompt": "Create a new incident for a network outage in the east region." + }, + { + "title": "Service Catalog - List Items", + "description": "Show me all items in the service catalog.", + "prompt": "Show me all items in the service catalog." 
+ } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/osomai/servicenow-mcp", + "servicenow-mcp" + ], + "env": { + "SERVICENOW_INSTANCE_URL": "${SERVICENOW_INSTANCE_URL}", + "SERVICENOW_USERNAME": "${SERVICENOW_USERNAME}", + "SERVICENOW_PASSWORD": "${SERVICENOW_PASSWORD}", + "SERVICENOW_AUTH_TYPE": "${SERVICENOW_AUTH_TYPE}" + } + } + }, + "arguments": { + "SERVICENOW_INSTANCE_URL": { + "description": "URL of the ServiceNow instance to connect to.", + "required": true, + "example": "https://your-instance.service-now.com" + }, + "SERVICENOW_USERNAME": { + "description": "Username for accessing the ServiceNow instance.", + "required": true, + "example": "your-username" + }, + "SERVICENOW_PASSWORD": { + "description": "Password for the ServiceNow username.", + "required": true, + "example": "your-password" + }, + "SERVICENOW_AUTH_TYPE": { + "description": "Authentication type for connecting to ServiceNow. Options are 'basic', 'oauth', or 'api_key'.", + "required": true, + "example": "basic" + } + }, + "tools": [ + { + "name": "create_incident", + "description": "Create a new incident in ServiceNow", + "inputSchema": { + "$defs": { + "CreateIncidentParams": { + "description": "Parameters for creating an incident.", + "properties": { + "short_description": { + "description": "Short description of the incident", + "title": "Short Description", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Detailed description of the incident", + "title": "Description" + }, + "caller_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "User who reported the incident", + "title": "Caller Id" + }, + "category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Category of the incident", + 
"title": "Category" + }, + "subcategory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Subcategory of the incident", + "title": "Subcategory" + }, + "priority": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Priority of the incident", + "title": "Priority" + }, + "impact": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Impact of the incident", + "title": "Impact" + }, + "urgency": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Urgency of the incident", + "title": "Urgency" + }, + "assigned_to": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "User assigned to the incident", + "title": "Assigned To" + }, + "assignment_group": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Group assigned to the incident", + "title": "Assignment Group" + } + }, + "required": [ + "short_description" + ], + "title": "CreateIncidentParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/CreateIncidentParams" + } + }, + "required": [ + "params" + ], + "title": "create_incidentArguments", + "type": "object" + } + }, + { + "name": "update_incident", + "description": "Update an existing incident in ServiceNow", + "inputSchema": { + "$defs": { + "UpdateIncidentParams": { + "description": "Parameters for updating an incident.", + "properties": { + "incident_id": { + "description": "Incident ID or sys_id", + "title": "Incident Id", + "type": "string" + }, + "short_description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Short description of the incident", + "title": "Short Description" + }, + "description": { + "anyOf": [ + { 
+ "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Detailed description of the incident", + "title": "Description" + }, + "state": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "State of the incident", + "title": "State" + }, + "category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Category of the incident", + "title": "Category" + }, + "subcategory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Subcategory of the incident", + "title": "Subcategory" + }, + "priority": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Priority of the incident", + "title": "Priority" + }, + "impact": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Impact of the incident", + "title": "Impact" + }, + "urgency": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Urgency of the incident", + "title": "Urgency" + }, + "assigned_to": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "User assigned to the incident", + "title": "Assigned To" + }, + "assignment_group": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Group assigned to the incident", + "title": "Assignment Group" + }, + "work_notes": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Work notes to add to the incident", + "title": "Work Notes" + }, + "close_notes": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Close notes to add to the incident", + "title": "Close Notes" + }, + 
"close_code": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Close code for the incident", + "title": "Close Code" + } + }, + "required": [ + "incident_id" + ], + "title": "UpdateIncidentParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateIncidentParams" + } + }, + "required": [ + "params" + ], + "title": "update_incidentArguments", + "type": "object" + } + }, + { + "name": "add_comment", + "description": "Add a comment to an incident in ServiceNow", + "inputSchema": { + "$defs": { + "AddCommentParams": { + "description": "Parameters for adding a comment to an incident.", + "properties": { + "incident_id": { + "description": "Incident ID or sys_id", + "title": "Incident Id", + "type": "string" + }, + "comment": { + "description": "Comment to add to the incident", + "title": "Comment", + "type": "string" + }, + "is_work_note": { + "default": false, + "description": "Whether the comment is a work note", + "title": "Is Work Note", + "type": "boolean" + } + }, + "required": [ + "incident_id", + "comment" + ], + "title": "AddCommentParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/AddCommentParams" + } + }, + "required": [ + "params" + ], + "title": "add_commentArguments", + "type": "object" + } + }, + { + "name": "resolve_incident", + "description": "Resolve an incident in ServiceNow", + "inputSchema": { + "$defs": { + "ResolveIncidentParams": { + "description": "Parameters for resolving an incident.", + "properties": { + "incident_id": { + "description": "Incident ID or sys_id", + "title": "Incident Id", + "type": "string" + }, + "resolution_code": { + "description": "Resolution code for the incident", + "title": "Resolution Code", + "type": "string" + }, + "resolution_notes": { + "description": "Resolution notes for the incident", + "title": "Resolution Notes", + "type": "string" + } + }, + "required": [ + "incident_id", + 
"resolution_code", + "resolution_notes" + ], + "title": "ResolveIncidentParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ResolveIncidentParams" + } + }, + "required": [ + "params" + ], + "title": "resolve_incidentArguments", + "type": "object" + } + }, + { + "name": "list_incidents", + "description": "List incidents from ServiceNow", + "inputSchema": { + "$defs": { + "ListIncidentsParams": { + "description": "Parameters for listing incidents.", + "properties": { + "limit": { + "default": 10, + "description": "Maximum number of incidents to return", + "title": "Limit", + "type": "integer" + }, + "offset": { + "default": 0, + "description": "Offset for pagination", + "title": "Offset", + "type": "integer" + }, + "state": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by incident state", + "title": "State" + }, + "assigned_to": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by assigned user", + "title": "Assigned To" + }, + "category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by category", + "title": "Category" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Search query for incidents", + "title": "Query" + } + }, + "title": "ListIncidentsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListIncidentsParams" + } + }, + "required": [ + "params" + ], + "title": "list_incidentsArguments", + "type": "object" + } + }, + { + "name": "list_catalog_items", + "description": "List service catalog items.", + "inputSchema": { + "$defs": { + "ListCatalogItemsParams": { + "description": "Parameters for listing service catalog items.", + "properties": { + "limit": { + "default": 10, + "description": "Maximum number of 
catalog items to return", + "title": "Limit", + "type": "integer" + }, + "offset": { + "default": 0, + "description": "Offset for pagination", + "title": "Offset", + "type": "integer" + }, + "category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by category", + "title": "Category" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Search query for catalog items", + "title": "Query" + }, + "active": { + "default": true, + "description": "Whether to only return active catalog items", + "title": "Active", + "type": "boolean" + } + }, + "title": "ListCatalogItemsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListCatalogItemsParams" + } + }, + "required": [ + "params" + ], + "title": "list_catalog_itemsArguments", + "type": "object" + } + }, + { + "name": "get_catalog_item", + "description": "Get a specific service catalog item.", + "inputSchema": { + "$defs": { + "GetCatalogItemParams": { + "description": "Parameters for getting a specific service catalog item.", + "properties": { + "item_id": { + "description": "Catalog item ID or sys_id", + "title": "Item Id", + "type": "string" + } + }, + "required": [ + "item_id" + ], + "title": "GetCatalogItemParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/GetCatalogItemParams" + } + }, + "required": [ + "params" + ], + "title": "get_catalog_itemArguments", + "type": "object" + } + }, + { + "name": "list_catalog_categories", + "description": "List service catalog categories.", + "inputSchema": { + "$defs": { + "ListCatalogCategoriesParams": { + "description": "Parameters for listing service catalog categories.", + "properties": { + "limit": { + "default": 10, + "description": "Maximum number of categories to return", + "title": "Limit", + "type": "integer" + }, + "offset": { + "default": 0, + 
"description": "Offset for pagination", + "title": "Offset", + "type": "integer" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Search query for categories", + "title": "Query" + }, + "active": { + "default": true, + "description": "Whether to only return active categories", + "title": "Active", + "type": "boolean" + } + }, + "title": "ListCatalogCategoriesParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListCatalogCategoriesParams" + } + }, + "required": [ + "params" + ], + "title": "list_catalog_categoriesArguments", + "type": "object" + } + }, + { + "name": "create_catalog_category", + "description": "Create a new service catalog category.", + "inputSchema": { + "$defs": { + "CreateCatalogCategoryParams": { + "description": "Parameters for creating a new service catalog category.", + "properties": { + "title": { + "description": "Title of the category", + "title": "Title", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the category", + "title": "Description" + }, + "parent": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Parent category sys_id", + "title": "Parent" + }, + "icon": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Icon for the category", + "title": "Icon" + }, + "active": { + "default": true, + "description": "Whether the category is active", + "title": "Active", + "type": "boolean" + }, + "order": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Order of the category", + "title": "Order" + } + }, + "required": [ + "title" + ], + "title": "CreateCatalogCategoryParams", + "type": "object" + } + }, + "properties": { + "params": { + 
"$ref": "#/$defs/CreateCatalogCategoryParams" + } + }, + "required": [ + "params" + ], + "title": "create_catalog_categoryArguments", + "type": "object" + } + }, + { + "name": "update_catalog_category", + "description": "Update an existing service catalog category.", + "inputSchema": { + "$defs": { + "UpdateCatalogCategoryParams": { + "description": "Parameters for updating a service catalog category.", + "properties": { + "category_id": { + "description": "Category ID or sys_id", + "title": "Category Id", + "type": "string" + }, + "title": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Title of the category", + "title": "Title" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the category", + "title": "Description" + }, + "parent": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Parent category sys_id", + "title": "Parent" + }, + "icon": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Icon for the category", + "title": "Icon" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether the category is active", + "title": "Active" + }, + "order": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Order of the category", + "title": "Order" + } + }, + "required": [ + "category_id" + ], + "title": "UpdateCatalogCategoryParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateCatalogCategoryParams" + } + }, + "required": [ + "params" + ], + "title": "update_catalog_categoryArguments", + "type": "object" + } + }, + { + "name": "move_catalog_items", + "description": "Move catalog items to a different category.", + 
"inputSchema": { + "$defs": { + "MoveCatalogItemsParams": { + "description": "Parameters for moving catalog items between categories.", + "properties": { + "item_ids": { + "description": "List of catalog item IDs to move", + "items": { + "type": "string" + }, + "title": "Item Ids", + "type": "array" + }, + "target_category_id": { + "description": "Target category ID to move items to", + "title": "Target Category Id", + "type": "string" + } + }, + "required": [ + "item_ids", + "target_category_id" + ], + "title": "MoveCatalogItemsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/MoveCatalogItemsParams" + } + }, + "required": [ + "params" + ], + "title": "move_catalog_itemsArguments", + "type": "object" + } + }, + { + "name": "get_optimization_recommendations", + "description": "Get optimization recommendations for the service catalog.", + "inputSchema": { + "$defs": { + "OptimizationRecommendationsParams": { + "properties": { + "recommendation_types": { + "items": { + "type": "string" + }, + "title": "Recommendation Types", + "type": "array" + }, + "category_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Category Id" + } + }, + "required": [ + "recommendation_types" + ], + "title": "OptimizationRecommendationsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/OptimizationRecommendationsParams" + } + }, + "required": [ + "params" + ], + "title": "get_optimization_recommendationsArguments", + "type": "object" + } + }, + { + "name": "update_catalog_item", + "description": "Update a service catalog item.", + "inputSchema": { + "$defs": { + "UpdateCatalogItemParams": { + "properties": { + "item_id": { + "title": "Item Id", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "short_description": { + "anyOf": [ + { + "type": "string" + }, + 
{ + "type": "null" + } + ], + "default": null, + "title": "Short Description" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Description" + }, + "category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Category" + }, + "price": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Price" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Active" + }, + "order": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order" + } + }, + "required": [ + "item_id" + ], + "title": "UpdateCatalogItemParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateCatalogItemParams" + } + }, + "required": [ + "params" + ], + "title": "update_catalog_itemArguments", + "type": "object" + } + }, + { + "name": "create_catalog_item_variable", + "description": "Create a new catalog item variable", + "inputSchema": { + "$defs": { + "CreateCatalogItemVariableParams": { + "description": "Parameters for creating a catalog item variable.", + "properties": { + "catalog_item_id": { + "description": "The sys_id of the catalog item", + "title": "Catalog Item Id", + "type": "string" + }, + "name": { + "description": "The name of the variable (internal name)", + "title": "Name", + "type": "string" + }, + "type": { + "description": "The type of variable (e.g., string, integer, boolean, reference)", + "title": "Type", + "type": "string" + }, + "label": { + "description": "The display label for the variable", + "title": "Label", + "type": "string" + }, + "mandatory": { + "default": false, + "description": "Whether the variable is required", + "title": "Mandatory", + "type": "boolean" + }, + "help_text": { + "anyOf": [ + { + "type": "string" + }, + { + "type": 
"null" + } + ], + "default": null, + "description": "Help text to display with the variable", + "title": "Help Text" + }, + "default_value": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default value for the variable", + "title": "Default Value" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the variable", + "title": "Description" + }, + "order": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Display order of the variable", + "title": "Order" + }, + "reference_table": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "For reference fields, the table to reference", + "title": "Reference Table" + }, + "reference_qualifier": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "For reference fields, the query to filter reference options", + "title": "Reference Qualifier" + }, + "max_length": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum length for string fields", + "title": "Max Length" + }, + "min": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Minimum value for numeric fields", + "title": "Min" + }, + "max": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum value for numeric fields", + "title": "Max" + } + }, + "required": [ + "catalog_item_id", + "name", + "type", + "label" + ], + "title": "CreateCatalogItemVariableParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/CreateCatalogItemVariableParams" + } + }, + "required": [ + "params" + ], + "title": "create_catalog_item_variableArguments", 
+ "type": "object" + } + }, + { + "name": "list_catalog_item_variables", + "description": "List catalog item variables", + "inputSchema": { + "$defs": { + "ListCatalogItemVariablesParams": { + "description": "Parameters for listing catalog item variables.", + "properties": { + "catalog_item_id": { + "description": "The sys_id of the catalog item", + "title": "Catalog Item Id", + "type": "string" + }, + "include_details": { + "default": true, + "description": "Whether to include detailed information about each variable", + "title": "Include Details", + "type": "boolean" + }, + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of variables to return", + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Offset for pagination", + "title": "Offset" + } + }, + "required": [ + "catalog_item_id" + ], + "title": "ListCatalogItemVariablesParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListCatalogItemVariablesParams" + } + }, + "required": [ + "params" + ], + "title": "list_catalog_item_variablesArguments", + "type": "object" + } + }, + { + "name": "update_catalog_item_variable", + "description": "Update a catalog item variable", + "inputSchema": { + "$defs": { + "UpdateCatalogItemVariableParams": { + "description": "Parameters for updating a catalog item variable.", + "properties": { + "variable_id": { + "description": "The sys_id of the variable to update", + "title": "Variable Id", + "type": "string" + }, + "label": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The display label for the variable", + "title": "Label" + }, + "mandatory": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether the variable is required", + "title": 
"Mandatory" + }, + "help_text": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Help text to display with the variable", + "title": "Help Text" + }, + "default_value": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default value for the variable", + "title": "Default Value" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the variable", + "title": "Description" + }, + "order": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Display order of the variable", + "title": "Order" + }, + "reference_qualifier": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "For reference fields, the query to filter reference options", + "title": "Reference Qualifier" + }, + "max_length": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum length for string fields", + "title": "Max Length" + }, + "min": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Minimum value for numeric fields", + "title": "Min" + }, + "max": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum value for numeric fields", + "title": "Max" + } + }, + "required": [ + "variable_id" + ], + "title": "UpdateCatalogItemVariableParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateCatalogItemVariableParams" + } + }, + "required": [ + "params" + ], + "title": "update_catalog_item_variableArguments", + "type": "object" + } + }, + { + "name": "create_change_request", + "description": "Create a new change request in ServiceNow", + "inputSchema": { + 
"$defs": { + "CreateChangeRequestParams": { + "description": "Parameters for creating a change request.", + "properties": { + "short_description": { + "description": "Short description of the change request", + "title": "Short Description", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Detailed description of the change request", + "title": "Description" + }, + "type": { + "description": "Type of change (normal, standard, emergency)", + "title": "Type", + "type": "string" + }, + "risk": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Risk level of the change", + "title": "Risk" + }, + "impact": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Impact of the change", + "title": "Impact" + }, + "category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Category of the change", + "title": "Category" + }, + "requested_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "User who requested the change", + "title": "Requested By" + }, + "assignment_group": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Group assigned to the change", + "title": "Assignment Group" + }, + "start_date": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Planned start date (YYYY-MM-DD HH:MM:SS)", + "title": "Start Date" + }, + "end_date": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Planned end date (YYYY-MM-DD HH:MM:SS)", + "title": "End Date" + } + }, + "required": [ + "short_description", + "type" + ], + "title": "CreateChangeRequestParams", + "type": "object" + 
} + }, + "properties": { + "params": { + "$ref": "#/$defs/CreateChangeRequestParams" + } + }, + "required": [ + "params" + ], + "title": "create_change_requestArguments", + "type": "object" + } + }, + { + "name": "update_change_request", + "description": "Update an existing change request in ServiceNow", + "inputSchema": { + "$defs": { + "UpdateChangeRequestParams": { + "description": "Parameters for updating a change request.", + "properties": { + "change_id": { + "description": "Change request ID or sys_id", + "title": "Change Id", + "type": "string" + }, + "short_description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Short description of the change request", + "title": "Short Description" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Detailed description of the change request", + "title": "Description" + }, + "state": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "State of the change request", + "title": "State" + }, + "risk": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Risk level of the change", + "title": "Risk" + }, + "impact": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Impact of the change", + "title": "Impact" + }, + "category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Category of the change", + "title": "Category" + }, + "assignment_group": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Group assigned to the change", + "title": "Assignment Group" + }, + "start_date": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Planned 
start date (YYYY-MM-DD HH:MM:SS)", + "title": "Start Date" + }, + "end_date": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Planned end date (YYYY-MM-DD HH:MM:SS)", + "title": "End Date" + }, + "work_notes": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Work notes to add to the change request", + "title": "Work Notes" + } + }, + "required": [ + "change_id" + ], + "title": "UpdateChangeRequestParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateChangeRequestParams" + } + }, + "required": [ + "params" + ], + "title": "update_change_requestArguments", + "type": "object" + } + }, + { + "name": "list_change_requests", + "description": "List change requests from ServiceNow", + "inputSchema": { + "$defs": { + "ListChangeRequestsParams": { + "description": "Parameters for listing change requests.", + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 10, + "description": "Maximum number of records to return", + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Offset to start from", + "title": "Offset" + }, + "state": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by state", + "title": "State" + }, + "type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by type (normal, standard, emergency)", + "title": "Type" + }, + "category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by category", + "title": "Category" + }, + "assignment_group": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": 
"Filter by assignment group", + "title": "Assignment Group" + }, + "timeframe": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by timeframe (upcoming, in-progress, completed)", + "title": "Timeframe" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional query string", + "title": "Query" + } + }, + "title": "ListChangeRequestsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListChangeRequestsParams" + } + }, + "required": [ + "params" + ], + "title": "list_change_requestsArguments", + "type": "object" + } + }, + { + "name": "get_change_request_details", + "description": "Get detailed information about a specific change request", + "inputSchema": { + "$defs": { + "GetChangeRequestDetailsParams": { + "description": "Parameters for getting change request details.", + "properties": { + "change_id": { + "description": "Change request ID or sys_id", + "title": "Change Id", + "type": "string" + } + }, + "required": [ + "change_id" + ], + "title": "GetChangeRequestDetailsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/GetChangeRequestDetailsParams" + } + }, + "required": [ + "params" + ], + "title": "get_change_request_detailsArguments", + "type": "object" + } + }, + { + "name": "add_change_task", + "description": "Add a task to a change request", + "inputSchema": { + "$defs": { + "AddChangeTaskParams": { + "description": "Parameters for adding a task to a change request.", + "properties": { + "change_id": { + "description": "Change request ID or sys_id", + "title": "Change Id", + "type": "string" + }, + "short_description": { + "description": "Short description of the task", + "title": "Short Description", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + 
"description": "Detailed description of the task", + "title": "Description" + }, + "assigned_to": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "User assigned to the task", + "title": "Assigned To" + }, + "planned_start_date": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Planned start date (YYYY-MM-DD HH:MM:SS)", + "title": "Planned Start Date" + }, + "planned_end_date": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Planned end date (YYYY-MM-DD HH:MM:SS)", + "title": "Planned End Date" + } + }, + "required": [ + "change_id", + "short_description" + ], + "title": "AddChangeTaskParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/AddChangeTaskParams" + } + }, + "required": [ + "params" + ], + "title": "add_change_taskArguments", + "type": "object" + } + }, + { + "name": "submit_change_for_approval", + "description": "Submit a change request for approval", + "inputSchema": { + "$defs": { + "SubmitChangeForApprovalParams": { + "description": "Parameters for submitting a change request for approval.", + "properties": { + "change_id": { + "description": "Change request ID or sys_id", + "title": "Change Id", + "type": "string" + }, + "approval_comments": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Comments for the approval request", + "title": "Approval Comments" + } + }, + "required": [ + "change_id" + ], + "title": "SubmitChangeForApprovalParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/SubmitChangeForApprovalParams" + } + }, + "required": [ + "params" + ], + "title": "submit_change_for_approvalArguments", + "type": "object" + } + }, + { + "name": "approve_change", + "description": "Approve a change request", + "inputSchema": { + "$defs": { + 
"ApproveChangeParams": { + "description": "Parameters for approving a change request.", + "properties": { + "change_id": { + "description": "Change request ID or sys_id", + "title": "Change Id", + "type": "string" + }, + "approver_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the approver", + "title": "Approver Id" + }, + "approval_comments": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Comments for the approval", + "title": "Approval Comments" + } + }, + "required": [ + "change_id" + ], + "title": "ApproveChangeParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ApproveChangeParams" + } + }, + "required": [ + "params" + ], + "title": "approve_changeArguments", + "type": "object" + } + }, + { + "name": "reject_change", + "description": "Reject a change request", + "inputSchema": { + "$defs": { + "RejectChangeParams": { + "description": "Parameters for rejecting a change request.", + "properties": { + "change_id": { + "description": "Change request ID or sys_id", + "title": "Change Id", + "type": "string" + }, + "approver_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the approver", + "title": "Approver Id" + }, + "rejection_reason": { + "description": "Reason for rejection", + "title": "Rejection Reason", + "type": "string" + } + }, + "required": [ + "change_id", + "rejection_reason" + ], + "title": "RejectChangeParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/RejectChangeParams" + } + }, + "required": [ + "params" + ], + "title": "reject_changeArguments", + "type": "object" + } + }, + { + "name": "list_workflows", + "description": "List workflows from ServiceNow", + "inputSchema": { + "$defs": { + "ListWorkflowsParams": { + "description": "Parameters for listing workflows.", + 
"properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 10, + "description": "Maximum number of records to return", + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Offset to start from", + "title": "Offset" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by active status", + "title": "Active" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by name (contains)", + "title": "Name" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional query string", + "title": "Query" + } + }, + "title": "ListWorkflowsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListWorkflowsParams" + } + }, + "required": [ + "params" + ], + "title": "list_workflowsArguments", + "type": "object" + } + }, + { + "name": "get_workflow_details", + "description": "Get detailed information about a specific workflow", + "inputSchema": { + "$defs": { + "GetWorkflowDetailsParams": { + "description": "Parameters for getting workflow details.", + "properties": { + "workflow_id": { + "description": "Workflow ID or sys_id", + "title": "Workflow Id", + "type": "string" + }, + "include_versions": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Include workflow versions", + "title": "Include Versions" + } + }, + "required": [ + "workflow_id" + ], + "title": "GetWorkflowDetailsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/GetWorkflowDetailsParams" + } + }, + "required": [ + "params" + ], + "title": "get_workflow_detailsArguments", + "type": "object" + } + }, + { + "name": 
"list_workflow_versions", + "description": "List workflow versions from ServiceNow", + "inputSchema": { + "$defs": { + "ListWorkflowVersionsParams": { + "description": "Parameters for listing workflow versions.", + "properties": { + "workflow_id": { + "description": "Workflow ID or sys_id", + "title": "Workflow Id", + "type": "string" + }, + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 10, + "description": "Maximum number of records to return", + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Offset to start from", + "title": "Offset" + } + }, + "required": [ + "workflow_id" + ], + "title": "ListWorkflowVersionsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListWorkflowVersionsParams" + } + }, + "required": [ + "params" + ], + "title": "list_workflow_versionsArguments", + "type": "object" + } + }, + { + "name": "get_workflow_activities", + "description": "Get activities for a specific workflow", + "inputSchema": { + "$defs": { + "GetWorkflowActivitiesParams": { + "description": "Parameters for getting workflow activities.", + "properties": { + "workflow_id": { + "description": "Workflow ID or sys_id", + "title": "Workflow Id", + "type": "string" + }, + "version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Specific version to get activities for", + "title": "Version" + } + }, + "required": [ + "workflow_id" + ], + "title": "GetWorkflowActivitiesParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/GetWorkflowActivitiesParams" + } + }, + "required": [ + "params" + ], + "title": "get_workflow_activitiesArguments", + "type": "object" + } + }, + { + "name": "create_workflow", + "description": "Create a new workflow in ServiceNow", + "inputSchema": { + "$defs": { + "CreateWorkflowParams": 
{ + "description": "Parameters for creating a new workflow.", + "properties": { + "name": { + "description": "Name of the workflow", + "title": "Name", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the workflow", + "title": "Description" + }, + "table": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Table the workflow applies to", + "title": "Table" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": true, + "description": "Whether the workflow is active", + "title": "Active" + }, + "attributes": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional attributes for the workflow", + "title": "Attributes" + } + }, + "required": [ + "name" + ], + "title": "CreateWorkflowParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/CreateWorkflowParams" + } + }, + "required": [ + "params" + ], + "title": "create_workflowArguments", + "type": "object" + } + }, + { + "name": "update_workflow", + "description": "Update an existing workflow in ServiceNow", + "inputSchema": { + "$defs": { + "UpdateWorkflowParams": { + "description": "Parameters for updating a workflow.", + "properties": { + "workflow_id": { + "description": "Workflow ID or sys_id", + "title": "Workflow Id", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the workflow", + "title": "Name" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the workflow", + "title": "Description" + }, + "table": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } 
+ ], + "default": null, + "description": "Table the workflow applies to", + "title": "Table" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether the workflow is active", + "title": "Active" + }, + "attributes": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional attributes for the workflow", + "title": "Attributes" + } + }, + "required": [ + "workflow_id" + ], + "title": "UpdateWorkflowParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateWorkflowParams" + } + }, + "required": [ + "params" + ], + "title": "update_workflowArguments", + "type": "object" + } + }, + { + "name": "activate_workflow", + "description": "Activate a workflow in ServiceNow", + "inputSchema": { + "$defs": { + "ActivateWorkflowParams": { + "description": "Parameters for activating a workflow.", + "properties": { + "workflow_id": { + "description": "Workflow ID or sys_id", + "title": "Workflow Id", + "type": "string" + } + }, + "required": [ + "workflow_id" + ], + "title": "ActivateWorkflowParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ActivateWorkflowParams" + } + }, + "required": [ + "params" + ], + "title": "activate_workflowArguments", + "type": "object" + } + }, + { + "name": "deactivate_workflow", + "description": "Deactivate a workflow in ServiceNow", + "inputSchema": { + "$defs": { + "DeactivateWorkflowParams": { + "description": "Parameters for deactivating a workflow.", + "properties": { + "workflow_id": { + "description": "Workflow ID or sys_id", + "title": "Workflow Id", + "type": "string" + } + }, + "required": [ + "workflow_id" + ], + "title": "DeactivateWorkflowParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/DeactivateWorkflowParams" + } + }, + "required": [ + "params" + ], + 
"title": "deactivate_workflowArguments", + "type": "object" + } + }, + { + "name": "add_workflow_activity", + "description": "Add a new activity to a workflow in ServiceNow", + "inputSchema": { + "$defs": { + "AddWorkflowActivityParams": { + "description": "Parameters for adding an activity to a workflow.", + "properties": { + "workflow_version_id": { + "description": "Workflow version ID", + "title": "Workflow Version Id", + "type": "string" + }, + "name": { + "description": "Name of the activity", + "title": "Name", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the activity", + "title": "Description" + }, + "activity_type": { + "description": "Type of activity (e.g., 'approval', 'task', 'notification')", + "title": "Activity Type", + "type": "string" + }, + "attributes": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional attributes for the activity", + "title": "Attributes" + } + }, + "required": [ + "workflow_version_id", + "name", + "activity_type" + ], + "title": "AddWorkflowActivityParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/AddWorkflowActivityParams" + } + }, + "required": [ + "params" + ], + "title": "add_workflow_activityArguments", + "type": "object" + } + }, + { + "name": "update_workflow_activity", + "description": "Update an existing activity in a workflow", + "inputSchema": { + "$defs": { + "UpdateWorkflowActivityParams": { + "description": "Parameters for updating a workflow activity.", + "properties": { + "activity_id": { + "description": "Activity ID or sys_id", + "title": "Activity Id", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the activity", + "title": "Name" + }, + 
"description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the activity", + "title": "Description" + }, + "attributes": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional attributes for the activity", + "title": "Attributes" + } + }, + "required": [ + "activity_id" + ], + "title": "UpdateWorkflowActivityParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateWorkflowActivityParams" + } + }, + "required": [ + "params" + ], + "title": "update_workflow_activityArguments", + "type": "object" + } + }, + { + "name": "delete_workflow_activity", + "description": "Delete an activity from a workflow", + "inputSchema": { + "$defs": { + "DeleteWorkflowActivityParams": { + "description": "Parameters for deleting a workflow activity.", + "properties": { + "activity_id": { + "description": "Activity ID or sys_id", + "title": "Activity Id", + "type": "string" + } + }, + "required": [ + "activity_id" + ], + "title": "DeleteWorkflowActivityParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/DeleteWorkflowActivityParams" + } + }, + "required": [ + "params" + ], + "title": "delete_workflow_activityArguments", + "type": "object" + } + }, + { + "name": "reorder_workflow_activities", + "description": "Reorder activities in a workflow", + "inputSchema": { + "$defs": { + "ReorderWorkflowActivitiesParams": { + "description": "Parameters for reordering workflow activities.", + "properties": { + "workflow_id": { + "description": "Workflow ID or sys_id", + "title": "Workflow Id", + "type": "string" + }, + "activity_ids": { + "description": "List of activity IDs in the desired order", + "items": { + "type": "string" + }, + "title": "Activity Ids", + "type": "array" + } + }, + "required": [ + "workflow_id", + "activity_ids" + ], + "title": 
"ReorderWorkflowActivitiesParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ReorderWorkflowActivitiesParams" + } + }, + "required": [ + "params" + ], + "title": "reorder_workflow_activitiesArguments", + "type": "object" + } + }, + { + "name": "list_changesets", + "description": "List changesets from ServiceNow", + "inputSchema": { + "$defs": { + "ListChangesetsParams": { + "description": "Parameters for listing changesets.", + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 10, + "description": "Maximum number of records to return", + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Offset to start from", + "title": "Offset" + }, + "state": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by state", + "title": "State" + }, + "application": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by application", + "title": "Application" + }, + "developer": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by developer", + "title": "Developer" + }, + "timeframe": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by timeframe (recent, last_week, last_month)", + "title": "Timeframe" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional query string", + "title": "Query" + } + }, + "title": "ListChangesetsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListChangesetsParams" + } + }, + "required": [ + "params" + ], + "title": "list_changesetsArguments", + "type": "object" + } + }, + { + "name": 
"get_changeset_details", + "description": "Get detailed information about a specific changeset", + "inputSchema": { + "$defs": { + "GetChangesetDetailsParams": { + "description": "Parameters for getting changeset details.", + "properties": { + "changeset_id": { + "description": "Changeset ID or sys_id", + "title": "Changeset Id", + "type": "string" + } + }, + "required": [ + "changeset_id" + ], + "title": "GetChangesetDetailsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/GetChangesetDetailsParams" + } + }, + "required": [ + "params" + ], + "title": "get_changeset_detailsArguments", + "type": "object" + } + }, + { + "name": "create_changeset", + "description": "Create a new changeset in ServiceNow", + "inputSchema": { + "$defs": { + "CreateChangesetParams": { + "description": "Parameters for creating a changeset.", + "properties": { + "name": { + "description": "Name of the changeset", + "title": "Name", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the changeset", + "title": "Description" + }, + "application": { + "description": "Application the changeset belongs to", + "title": "Application", + "type": "string" + }, + "developer": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Developer responsible for the changeset", + "title": "Developer" + } + }, + "required": [ + "name", + "application" + ], + "title": "CreateChangesetParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/CreateChangesetParams" + } + }, + "required": [ + "params" + ], + "title": "create_changesetArguments", + "type": "object" + } + }, + { + "name": "update_changeset", + "description": "Update an existing changeset in ServiceNow", + "inputSchema": { + "$defs": { + "UpdateChangesetParams": { + "description": "Parameters for updating a 
changeset.", + "properties": { + "changeset_id": { + "description": "Changeset ID or sys_id", + "title": "Changeset Id", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the changeset", + "title": "Name" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the changeset", + "title": "Description" + }, + "state": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "State of the changeset", + "title": "State" + }, + "developer": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Developer responsible for the changeset", + "title": "Developer" + } + }, + "required": [ + "changeset_id" + ], + "title": "UpdateChangesetParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateChangesetParams" + } + }, + "required": [ + "params" + ], + "title": "update_changesetArguments", + "type": "object" + } + }, + { + "name": "commit_changeset", + "description": "Commit a changeset in ServiceNow", + "inputSchema": { + "$defs": { + "CommitChangesetParams": { + "description": "Parameters for committing a changeset.", + "properties": { + "changeset_id": { + "description": "Changeset ID or sys_id", + "title": "Changeset Id", + "type": "string" + }, + "commit_message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Commit message", + "title": "Commit Message" + } + }, + "required": [ + "changeset_id" + ], + "title": "CommitChangesetParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/CommitChangesetParams" + } + }, + "required": [ + "params" + ], + "title": "commit_changesetArguments", + "type": "object" + } + }, + { + "name": "publish_changeset", + 
"description": "Publish a changeset in ServiceNow", + "inputSchema": { + "$defs": { + "PublishChangesetParams": { + "description": "Parameters for publishing a changeset.", + "properties": { + "changeset_id": { + "description": "Changeset ID or sys_id", + "title": "Changeset Id", + "type": "string" + }, + "publish_notes": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Notes for publishing", + "title": "Publish Notes" + } + }, + "required": [ + "changeset_id" + ], + "title": "PublishChangesetParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/PublishChangesetParams" + } + }, + "required": [ + "params" + ], + "title": "publish_changesetArguments", + "type": "object" + } + }, + { + "name": "add_file_to_changeset", + "description": "Add a file to a changeset in ServiceNow", + "inputSchema": { + "$defs": { + "AddFileToChangesetParams": { + "description": "Parameters for adding a file to a changeset.", + "properties": { + "changeset_id": { + "description": "Changeset ID or sys_id", + "title": "Changeset Id", + "type": "string" + }, + "file_path": { + "description": "Path of the file to add", + "title": "File Path", + "type": "string" + }, + "file_content": { + "description": "Content of the file", + "title": "File Content", + "type": "string" + } + }, + "required": [ + "changeset_id", + "file_path", + "file_content" + ], + "title": "AddFileToChangesetParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/AddFileToChangesetParams" + } + }, + "required": [ + "params" + ], + "title": "add_file_to_changesetArguments", + "type": "object" + } + }, + { + "name": "list_script_includes", + "description": "List script includes from ServiceNow", + "inputSchema": { + "$defs": { + "ListScriptIncludesParams": { + "description": "Parameters for listing script includes.", + "properties": { + "limit": { + "default": 10, + "description": "Maximum number of 
script includes to return", + "title": "Limit", + "type": "integer" + }, + "offset": { + "default": 0, + "description": "Offset for pagination", + "title": "Offset", + "type": "integer" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by active status", + "title": "Active" + }, + "client_callable": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by client callable status", + "title": "Client Callable" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Search query for script includes", + "title": "Query" + } + }, + "title": "ListScriptIncludesParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListScriptIncludesParams" + } + }, + "required": [ + "params" + ], + "title": "list_script_includesArguments", + "type": "object" + } + }, + { + "name": "get_script_include", + "description": "Get a specific script include from ServiceNow", + "inputSchema": { + "$defs": { + "GetScriptIncludeParams": { + "description": "Parameters for getting a script include.", + "properties": { + "script_include_id": { + "description": "Script include ID or name", + "title": "Script Include Id", + "type": "string" + } + }, + "required": [ + "script_include_id" + ], + "title": "GetScriptIncludeParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/GetScriptIncludeParams" + } + }, + "required": [ + "params" + ], + "title": "get_script_includeArguments", + "type": "object" + } + }, + { + "name": "create_script_include", + "description": "Create a new script include in ServiceNow", + "inputSchema": { + "$defs": { + "CreateScriptIncludeParams": { + "description": "Parameters for creating a script include.", + "properties": { + "name": { + "description": "Name of the script include", + "title": "Name", 
+ "type": "string" + }, + "script": { + "description": "Script content", + "title": "Script", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the script include", + "title": "Description" + }, + "api_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API name of the script include", + "title": "Api Name" + }, + "client_callable": { + "default": false, + "description": "Whether the script include is client callable", + "title": "Client Callable", + "type": "boolean" + }, + "active": { + "default": true, + "description": "Whether the script include is active", + "title": "Active", + "type": "boolean" + }, + "access": { + "default": "package_private", + "description": "Access level of the script include", + "title": "Access", + "type": "string" + } + }, + "required": [ + "name", + "script" + ], + "title": "CreateScriptIncludeParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/CreateScriptIncludeParams" + } + }, + "required": [ + "params" + ], + "title": "create_script_includeArguments", + "type": "object" + } + }, + { + "name": "update_script_include", + "description": "Update an existing script include in ServiceNow", + "inputSchema": { + "$defs": { + "UpdateScriptIncludeParams": { + "description": "Parameters for updating a script include.", + "properties": { + "script_include_id": { + "description": "Script include ID or name", + "title": "Script Include Id", + "type": "string" + }, + "script": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Script content", + "title": "Script" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the script include", + "title": "Description" + }, + "api_name": { 
+ "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API name of the script include", + "title": "Api Name" + }, + "client_callable": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether the script include is client callable", + "title": "Client Callable" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether the script include is active", + "title": "Active" + }, + "access": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Access level of the script include", + "title": "Access" + } + }, + "required": [ + "script_include_id" + ], + "title": "UpdateScriptIncludeParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateScriptIncludeParams" + } + }, + "required": [ + "params" + ], + "title": "update_script_includeArguments", + "type": "object" + } + }, + { + "name": "delete_script_include", + "description": "Delete a script include in ServiceNow", + "inputSchema": { + "$defs": { + "DeleteScriptIncludeParams": { + "description": "Parameters for deleting a script include.", + "properties": { + "script_include_id": { + "description": "Script include ID or name", + "title": "Script Include Id", + "type": "string" + } + }, + "required": [ + "script_include_id" + ], + "title": "DeleteScriptIncludeParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/DeleteScriptIncludeParams" + } + }, + "required": [ + "params" + ], + "title": "delete_script_includeArguments", + "type": "object" + } + }, + { + "name": "create_knowledge_base", + "description": "Create a new knowledge base in ServiceNow", + "inputSchema": { + "$defs": { + "CreateKnowledgeBaseParams": { + "description": "Parameters for creating a knowledge base.", + "properties": { + "title": { + 
"description": "Title of the knowledge base", + "title": "Title", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the knowledge base", + "title": "Description" + }, + "owner": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The specified admin user or group", + "title": "Owner" + }, + "managers": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Users who can manage this knowledge base", + "title": "Managers" + }, + "publish_workflow": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "Knowledge - Instant Publish", + "description": "Publication workflow", + "title": "Publish Workflow" + }, + "retire_workflow": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "Knowledge - Instant Retire", + "description": "Retirement workflow", + "title": "Retire Workflow" + } + }, + "required": [ + "title" + ], + "title": "CreateKnowledgeBaseParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/CreateKnowledgeBaseParams" + } + }, + "required": [ + "params" + ], + "title": "create_knowledge_baseArguments", + "type": "object" + } + }, + { + "name": "list_knowledge_bases", + "description": "List knowledge bases from ServiceNow", + "inputSchema": { + "$defs": { + "ListKnowledgeBasesParams": { + "description": "Parameters for listing knowledge bases.", + "properties": { + "limit": { + "default": 10, + "description": "Maximum number of knowledge bases to return", + "title": "Limit", + "type": "integer" + }, + "offset": { + "default": 0, + "description": "Offset for pagination", + "title": "Offset", + "type": "integer" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": 
"Filter by active status", + "title": "Active" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Search query for knowledge bases", + "title": "Query" + } + }, + "title": "ListKnowledgeBasesParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListKnowledgeBasesParams" + } + }, + "required": [ + "params" + ], + "title": "list_knowledge_basesArguments", + "type": "object" + } + }, + { + "name": "create_category", + "description": "Create a new category in a knowledge base", + "inputSchema": { + "$defs": { + "CreateCategoryParams": { + "description": "Parameters for creating a category in a knowledge base.", + "properties": { + "title": { + "description": "Title of the category", + "title": "Title", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the category", + "title": "Description" + }, + "knowledge_base": { + "description": "The knowledge base to create the category in", + "title": "Knowledge Base", + "type": "string" + }, + "parent_category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Parent category (if creating a subcategory)", + "title": "Parent Category" + }, + "active": { + "default": true, + "description": "Whether the category is active", + "title": "Active", + "type": "boolean" + } + }, + "required": [ + "title", + "knowledge_base" + ], + "title": "CreateCategoryParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/CreateCategoryParams" + } + }, + "required": [ + "params" + ], + "title": "create_categoryArguments", + "type": "object" + } + }, + { + "name": "create_article", + "description": "Create a new knowledge article", + "inputSchema": { + "$defs": { + "CreateArticleParams": { + "description": "Parameters for creating a 
knowledge article.", + "properties": { + "title": { + "description": "Title of the article", + "title": "Title", + "type": "string" + }, + "text": { + "description": "The main body text for the article", + "title": "Text", + "type": "string" + }, + "short_description": { + "description": "Short description of the article", + "title": "Short Description", + "type": "string" + }, + "knowledge_base": { + "description": "The knowledge base to create the article in", + "title": "Knowledge Base", + "type": "string" + }, + "category": { + "description": "Category for the article", + "title": "Category", + "type": "string" + }, + "keywords": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Keywords for search", + "title": "Keywords" + }, + "article_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "text", + "description": "The type of article", + "title": "Article Type" + } + }, + "required": [ + "title", + "text", + "short_description", + "knowledge_base", + "category" + ], + "title": "CreateArticleParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/CreateArticleParams" + } + }, + "required": [ + "params" + ], + "title": "create_articleArguments", + "type": "object" + } + }, + { + "name": "update_article", + "description": "Update an existing knowledge article", + "inputSchema": { + "$defs": { + "UpdateArticleParams": { + "description": "Parameters for updating a knowledge article.", + "properties": { + "article_id": { + "description": "ID of the article to update", + "title": "Article Id", + "type": "string" + }, + "title": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Updated title of the article", + "title": "Title" + }, + "text": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Updated main body text for the 
article", + "title": "Text" + }, + "short_description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Updated short description", + "title": "Short Description" + }, + "category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Updated category for the article", + "title": "Category" + }, + "keywords": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Updated keywords for search", + "title": "Keywords" + } + }, + "required": [ + "article_id" + ], + "title": "UpdateArticleParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateArticleParams" + } + }, + "required": [ + "params" + ], + "title": "update_articleArguments", + "type": "object" + } + }, + { + "name": "publish_article", + "description": "Publish a knowledge article", + "inputSchema": { + "$defs": { + "PublishArticleParams": { + "description": "Parameters for publishing a knowledge article.", + "properties": { + "article_id": { + "description": "ID of the article to publish", + "title": "Article Id", + "type": "string" + }, + "workflow_state": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "published", + "description": "The workflow state to set", + "title": "Workflow State" + }, + "workflow_version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The workflow version to use", + "title": "Workflow Version" + } + }, + "required": [ + "article_id" + ], + "title": "PublishArticleParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/PublishArticleParams" + } + }, + "required": [ + "params" + ], + "title": "publish_articleArguments", + "type": "object" + } + }, + { + "name": "list_articles", + "description": "List knowledge articles", + "inputSchema": { + 
"$defs": { + "ListArticlesParams": { + "description": "Parameters for listing knowledge articles.", + "properties": { + "limit": { + "default": 10, + "description": "Maximum number of articles to return", + "title": "Limit", + "type": "integer" + }, + "offset": { + "default": 0, + "description": "Offset for pagination", + "title": "Offset", + "type": "integer" + }, + "knowledge_base": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by knowledge base", + "title": "Knowledge Base" + }, + "category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by category", + "title": "Category" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Search query for articles", + "title": "Query" + }, + "workflow_state": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by workflow state", + "title": "Workflow State" + } + }, + "title": "ListArticlesParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListArticlesParams" + } + }, + "required": [ + "params" + ], + "title": "list_articlesArguments", + "type": "object" + } + }, + { + "name": "get_article", + "description": "Get a specific knowledge article by ID", + "inputSchema": { + "$defs": { + "GetArticleParams": { + "description": "Parameters for getting a knowledge article.", + "properties": { + "article_id": { + "description": "ID of the article to get", + "title": "Article Id", + "type": "string" + } + }, + "required": [ + "article_id" + ], + "title": "GetArticleParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/GetArticleParams" + } + }, + "required": [ + "params" + ], + "title": "get_articleArguments", + "type": "object" + } + }, + { + "name": "list_categories", + "description": 
"List categories in a knowledge base", + "inputSchema": { + "$defs": { + "ListCategoriesParams": { + "description": "Parameters for listing categories in a knowledge base.", + "properties": { + "knowledge_base": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by knowledge base ID", + "title": "Knowledge Base" + }, + "parent_category": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by parent category ID", + "title": "Parent Category" + }, + "limit": { + "default": 10, + "description": "Maximum number of categories to return", + "title": "Limit", + "type": "integer" + }, + "offset": { + "default": 0, + "description": "Offset for pagination", + "title": "Offset", + "type": "integer" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by active status", + "title": "Active" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Search query for categories", + "title": "Query" + } + }, + "title": "ListCategoriesParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListCategoriesParams" + } + }, + "required": [ + "params" + ], + "title": "list_categoriesArguments", + "type": "object" + } + }, + { + "name": "create_user", + "description": "Create a new user in ServiceNow", + "inputSchema": { + "$defs": { + "CreateUserParams": { + "description": "Parameters for creating a user.", + "properties": { + "user_name": { + "description": "Username for the user", + "title": "User Name", + "type": "string" + }, + "first_name": { + "description": "First name of the user", + "title": "First Name", + "type": "string" + }, + "last_name": { + "description": "Last name of the user", + "title": "Last Name", + "type": "string" + }, + "email": { + "description": "Email 
address of the user", + "title": "Email", + "type": "string" + }, + "title": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Job title of the user", + "title": "Title" + }, + "department": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Department the user belongs to", + "title": "Department" + }, + "manager": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Manager of the user (sys_id or username)", + "title": "Manager" + }, + "roles": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Roles to assign to the user", + "title": "Roles" + }, + "phone": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Phone number of the user", + "title": "Phone" + }, + "mobile_phone": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Mobile phone number of the user", + "title": "Mobile Phone" + }, + "location": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Location of the user", + "title": "Location" + }, + "password": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Password for the user account", + "title": "Password" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": true, + "description": "Whether the user account is active", + "title": "Active" + } + }, + "required": [ + "user_name", + "first_name", + "last_name", + "email" + ], + "title": "CreateUserParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/CreateUserParams" + } + }, + "required": [ + "params" + ], + "title": 
"create_userArguments", + "type": "object" + } + }, + { + "name": "update_user", + "description": "Update an existing user in ServiceNow", + "inputSchema": { + "$defs": { + "UpdateUserParams": { + "description": "Parameters for updating a user.", + "properties": { + "user_id": { + "description": "User ID or sys_id to update", + "title": "User Id", + "type": "string" + }, + "user_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Username for the user", + "title": "User Name" + }, + "first_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "First name of the user", + "title": "First Name" + }, + "last_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Last name of the user", + "title": "Last Name" + }, + "email": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Email address of the user", + "title": "Email" + }, + "title": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Job title of the user", + "title": "Title" + }, + "department": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Department the user belongs to", + "title": "Department" + }, + "manager": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Manager of the user (sys_id or username)", + "title": "Manager" + }, + "roles": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Roles to assign to the user", + "title": "Roles" + }, + "phone": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Phone number of the user", + "title": 
"Phone" + }, + "mobile_phone": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Mobile phone number of the user", + "title": "Mobile Phone" + }, + "location": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Location of the user", + "title": "Location" + }, + "password": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Password for the user account", + "title": "Password" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether the user account is active", + "title": "Active" + } + }, + "required": [ + "user_id" + ], + "title": "UpdateUserParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateUserParams" + } + }, + "required": [ + "params" + ], + "title": "update_userArguments", + "type": "object" + } + }, + { + "name": "get_user", + "description": "Get a specific user in ServiceNow", + "inputSchema": { + "$defs": { + "GetUserParams": { + "description": "Parameters for getting a user.", + "properties": { + "user_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "User ID or sys_id", + "title": "User Id" + }, + "user_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Username of the user", + "title": "User Name" + }, + "email": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Email address of the user", + "title": "Email" + } + }, + "title": "GetUserParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/GetUserParams" + } + }, + "required": [ + "params" + ], + "title": "get_userArguments", + "type": "object" + } + }, + { + "name": "list_users", + 
"description": "List users in ServiceNow", + "inputSchema": { + "$defs": { + "ListUsersParams": { + "description": "Parameters for listing users.", + "properties": { + "limit": { + "default": 10, + "description": "Maximum number of users to return", + "title": "Limit", + "type": "integer" + }, + "offset": { + "default": 0, + "description": "Offset for pagination", + "title": "Offset", + "type": "integer" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by active status", + "title": "Active" + }, + "department": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by department", + "title": "Department" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Case-insensitive search term that matches against name, username, or email fields. Uses ServiceNow's LIKE operator for partial matching.", + "title": "Query" + } + }, + "title": "ListUsersParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListUsersParams" + } + }, + "required": [ + "params" + ], + "title": "list_usersArguments", + "type": "object" + } + }, + { + "name": "create_group", + "description": "Create a new group in ServiceNow", + "inputSchema": { + "$defs": { + "CreateGroupParams": { + "description": "Parameters for creating a group.", + "properties": { + "name": { + "description": "Name of the group", + "title": "Name", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the group", + "title": "Description" + }, + "manager": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Manager of the group (sys_id or username)", + "title": "Manager" + }, + "parent": { + "anyOf": [ + { + 
"type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Parent group (sys_id or name)", + "title": "Parent" + }, + "type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Type of the group", + "title": "Type" + }, + "email": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Email address for the group", + "title": "Email" + }, + "members": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of user sys_ids or usernames to add as members", + "title": "Members" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": true, + "description": "Whether the group is active", + "title": "Active" + } + }, + "required": [ + "name" + ], + "title": "CreateGroupParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/CreateGroupParams" + } + }, + "required": [ + "params" + ], + "title": "create_groupArguments", + "type": "object" + } + }, + { + "name": "update_group", + "description": "Update an existing group in ServiceNow", + "inputSchema": { + "$defs": { + "UpdateGroupParams": { + "description": "Parameters for updating a group.", + "properties": { + "group_id": { + "description": "Group ID or sys_id to update", + "title": "Group Id", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the group", + "title": "Name" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Description of the group", + "title": "Description" + }, + "manager": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Manager of the group (sys_id 
or username)", + "title": "Manager" + }, + "parent": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Parent group (sys_id or name)", + "title": "Parent" + }, + "type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Type of the group", + "title": "Type" + }, + "email": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Email address for the group", + "title": "Email" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether the group is active", + "title": "Active" + } + }, + "required": [ + "group_id" + ], + "title": "UpdateGroupParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/UpdateGroupParams" + } + }, + "required": [ + "params" + ], + "title": "update_groupArguments", + "type": "object" + } + }, + { + "name": "add_group_members", + "description": "Add members to an existing group in ServiceNow", + "inputSchema": { + "$defs": { + "AddGroupMembersParams": { + "description": "Parameters for adding members to a group.", + "properties": { + "group_id": { + "description": "Group ID or sys_id", + "title": "Group Id", + "type": "string" + }, + "members": { + "description": "List of user sys_ids or usernames to add as members", + "items": { + "type": "string" + }, + "title": "Members", + "type": "array" + } + }, + "required": [ + "group_id", + "members" + ], + "title": "AddGroupMembersParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/AddGroupMembersParams" + } + }, + "required": [ + "params" + ], + "title": "add_group_membersArguments", + "type": "object" + } + }, + { + "name": "remove_group_members", + "description": "Remove members from an existing group in ServiceNow", + "inputSchema": { + "$defs": { + "RemoveGroupMembersParams": { + 
"description": "Parameters for removing members from a group.", + "properties": { + "group_id": { + "description": "Group ID or sys_id", + "title": "Group Id", + "type": "string" + }, + "members": { + "description": "List of user sys_ids or usernames to remove as members", + "items": { + "type": "string" + }, + "title": "Members", + "type": "array" + } + }, + "required": [ + "group_id", + "members" + ], + "title": "RemoveGroupMembersParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/RemoveGroupMembersParams" + } + }, + "required": [ + "params" + ], + "title": "remove_group_membersArguments", + "type": "object" + } + }, + { + "name": "list_groups", + "description": "List groups from ServiceNow with optional filtering", + "inputSchema": { + "$defs": { + "ListGroupsParams": { + "description": "Parameters for listing groups.", + "properties": { + "limit": { + "default": 10, + "description": "Maximum number of groups to return", + "title": "Limit", + "type": "integer" + }, + "offset": { + "default": 0, + "description": "Offset for pagination", + "title": "Offset", + "type": "integer" + }, + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by active status", + "title": "Active" + }, + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Case-insensitive search term that matches against group name or description fields. 
Uses ServiceNow's LIKE operator for partial matching.", + "title": "Query" + }, + "type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by group type", + "title": "Type" + } + }, + "title": "ListGroupsParams", + "type": "object" + } + }, + "properties": { + "params": { + "$ref": "#/$defs/ListGroupsParams" + } + }, + "required": [ + "params" + ], + "title": "list_groupsArguments", + "type": "object" + } + } + ] + }, + "mcp-compass": { + "name": "mcp-compass", + "display_name": "Compass", + "description": "Suggest the right MCP server for your needs", + "repository": { + "type": "git", + "url": "https://github.com/liuyoshio/mcp-compass" + }, + "homepage": "https://github.com/liuyoshio/mcp-compass", + "author": { + "name": "liuyoshio" + }, + "license": "MIT", + "categories": [ + "MCP Tools" + ], + "tags": [ + "compass", + "service discovery" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@liuyoshio/mcp-compass" + ] + } + }, + "tools": [ + { + "name": "recommend-mcp-servers", + "description": "\n Use this tool when there is a need to find external MCP tools.\n It explores and recommends existing MCP servers from the \n internet, based on the description of the MCP Server \n needed. It returns a list of MCP servers with their IDs, \n descriptions, GitHub URLs, and similarity scores.\n ", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "\n Description for the MCP Server needed. \n It should be specific and actionable, e.g.:\n GOOD:\n - 'MCP Server for AWS Lambda Python3.9 deployment'\n - 'MCP Server for United Airlines booking API'\n - 'MCP Server for Stripe refund webhook handling'\n\n BAD:\n - 'MCP Server for cloud' (too vague)\n - 'MCP Server for booking' (which booking system?)\n - 'MCP Server for payment' (which payment provider?)\n\n Query should explicitly specify:\n 1. 
Target platform/vendor (e.g. AWS, Stripe, MongoDB)\n 2. Exact operation/service (e.g. Lambda deployment, webhook handling)\n 3. Additional context if applicable (e.g. Python, refund events)\n " + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "alicloud-hologres": { + "display_name": "Hologres MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/aliyun/alibabacloud-hologres-mcp-server" + }, + "homepage": "https://github.com/aliyun/alibabacloud-hologres-mcp-server", + "author": { + "name": "aliyun" + }, + "license": "Apache-2.0", + "tags": [ + "hologres", + "database", + "SQL" + ], + "arguments": { + "HOLOGRES_HOST": { + "description": "Hologres database host", + "required": true, + "example": "host" + }, + "HOLOGRES_PORT": { + "description": "Hologres database port", + "required": true, + "example": "port" + }, + "HOLOGRES_USER": { + "description": "Hologres database user (access_id)", + "required": true, + "example": "access_id" + }, + "HOLOGRES_PASSWORD": { + "description": "Hologres database password (access_key)", + "required": true, + "example": "access_key" + }, + "HOLOGRES_DATABASE": { + "description": "Hologres database name", + "required": true, + "example": "database" + } + }, + "installations": { + "local_file": { + "type": "uvx", + "command": "uvx", + "args": [ + "hologres-mcp-server" + ], + "env": { + "HOLOGRES_HOST": "host", + "HOLOGRES_PORT": "port", + "HOLOGRES_USER": "access_id", + "HOLOGRES_PASSWORD": "access_key", + "HOLOGRES_DATABASE": "database" + }, + "description": "Run using local file" + } + }, + "examples": [], + "name": "alicloud-hologres", + "description": "Hologres MCP Server serves as a universal interface between AI Agents and Hologres databases. 
It enables seamless communication between AI Agents and Hologres, helping AI Agents retrieve Hologres database metadata and execute SQL operations.", + "categories": [ + "Databases" + ], + "is_official": true, + "tools": [ + { + "name": "execute_select_sql", + "description": "Execute SELECT SQL to query data from Hologres database.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The (SELECT) SQL query to execute" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "execute_dml_sql", + "description": "Execute (INSERT, UPDATE, DELETE) SQL to insert, update, and delete data in Hologres database.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The DML SQL query to execute" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "execute_ddl_sql", + "description": "Execute (CREATE, ALTER, DROP) SQL statements to CREATE, ALTER, or DROP tables, views, procedures, GUCs etc. 
in Hologres database.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The DDL SQL query to execute" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "gather_table_statistics", + "description": "Execute the ANALYZE TABLE command to have Hologres collect table statistics, enabling QO to generate better query plans", + "inputSchema": { + "type": "object", + "properties": { + "schema": { + "type": "string", + "description": "Schema name" + }, + "table": { + "type": "string", + "description": "Table name" + } + }, + "required": [ + "schema", + "table" + ] + } + }, + { + "name": "get_query_plan", + "description": "Get query plan for a SQL query", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The SQL query to analyze" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "get_execution_plan", + "description": "Get actual execution plan with runtime statistics for a SQL query", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The SQL query to analyze" + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "alphavantage": { + "name": "alphavantage", + "display_name": "Alphavantage", + "description": "MCP server for stock market data API [AlphaVantage](https://www.alphavantage.co/)", + "repository": { + "type": "git", + "url": "https://github.com/calvernaz/alphavantage" + }, + "homepage": "https://github.com/calvernaz/alphavantage", + "author": { + "name": "calvernaz" + }, + "license": "Apache-2.0", + "categories": [ + "Finance" + ], + "tags": [ + "alphavantage", + "stock market" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/calvernaz/alphavantage.git", + "alphavantage" + ], + "env": { + "ALPHAVANTAGE_API_KEY": "${ALPHAVANTAGE_API_KEY}" + } + } + }, + "arguments": { + "ALPHAVANTAGE_API_KEY": { + 
"description": "The API key to access the Alphavantage service.", + "required": true, + "example": "YOUR_API_KEY_HERE" + } + }, + "tools": [ + { + "name": "stock_quote", + "description": "Fetch a stock quote", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "time_series_intraday", + "description": "Fetch a time series intraday", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "adjusted": { + "type": "boolean" + }, + "outputsize": { + "type": "string" + }, + "datatype": { + "type": "string" + }, + "monthly": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "time_series_daily", + "description": "Fetch a time series daily", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "outputsize": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "time_series_daily_adjusted", + "description": "Fetch a time series daily adjusted", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "outputsize": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "time_series_weekly", + "description": "Fetch a time series weekly", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "time_series_weekly_adjusted", + "description": "Fetch a time series weekly adjusted", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { 
+ "name": "time_series_monthly", + "description": "Fetch a time series monthly", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "time_series_monthly_adjusted", + "description": "Fetch a time series monthly adjusted", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "realtime_bulk_quotes", + "description": "Fetch real time bulk quotes", + "inputSchema": { + "type": "object", + "properties": { + "symbols": { + "type": "array" + } + }, + "required": [ + "symbols" + ] + } + }, + { + "name": "symbol_search", + "description": "Search endpoint", + "inputSchema": { + "type": "object", + "properties": { + "keywords": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "keywords" + ] + } + }, + { + "name": "market_status", + "description": "Fetch market status", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "realtime_options", + "description": "Fetch realtime options", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "datatype": { + "type": "string" + }, + "contract": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "historical_options", + "description": "Fetch historical options", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "datatype": { + "type": "string" + }, + "contract": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "news_sentiment", + "description": "Fetch news sentiment", + "inputSchema": { + "type": "object", + "properties": { + "tickers": { + "type": "array" + }, + "topics": { + "type": "string" + }, + "time_from": { + 
"type": "string" + }, + "time_to": { + "type": "string" + }, + "sort": { + "type": "string" + }, + "limit": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "tickers" + ] + } + }, + { + "name": "top_gainers_losers", + "description": "Fetch top gainers and losers", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "insider_transactions", + "description": "Fetch insider transactions", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "analytics_fixed_window", + "description": "Fetch analytics fixed window", + "inputSchema": { + "type": "object", + "properties": { + "symbols": { + "type": "array" + }, + "interval": { + "type": "string" + }, + "series_range": { + "type": "string" + }, + "ohlc": { + "type": "string" + }, + "calculations": { + "type": "array" + } + }, + "required": [ + "symbols", + "series_range", + "interval", + "calculations" + ] + } + }, + { + "name": "analytics_sliding_window", + "description": "Fetch analytics sliding window", + "inputSchema": { + "type": "object", + "properties": { + "symbols": { + "type": "array" + }, + "interval": { + "type": "string" + }, + "series_range": { + "type": "string" + }, + "ohlc": { + "type": "string" + }, + "window_size": { + "type": "number" + }, + "calculations": { + "type": "array" + } + }, + "required": [ + "symbols", + "series_range", + "interval", + "calculations", + "window_size" + ] + } + }, + { + "name": "company_overview", + "description": "Fetch company overview", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "etf_profile", + "description": "Fetch ETF profile", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": 
"company_dividends", + "description": "Fetch company dividends", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "company_splits", + "description": "Fetch company splits", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "income_statement", + "description": "Fetch company income statement", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "balance_sheet", + "description": "Fetch company balance sheet", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "cash_flow", + "description": "Fetch company cash flow", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + } + }, + "required": [ + "symbol" + ] + } + }, + { + "name": "listing_status", + "description": "Fetch listing status", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "date": { + "type": "string" + }, + "state": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "earnings_calendar", + "description": "Fetch company earnings calendar", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "horizon": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "ipo_calendar", + "description": "Fetch IPO calendar", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "exchange_rate", + "description": "Fetch exchange rate", + "inputSchema": { + "type": "object", + "properties": { + "from_currency": { + "type": "string" + }, + "to_currency": { + "type": "string" + } + }, + "required": [ + "from_currency", 
+ "to_currency" + ] + } + }, + { + "name": "fx_intraday", + "description": "Fetch FX intraday", + "inputSchema": { + "type": "object", + "properties": { + "from_symbol": { + "type": "string" + }, + "to_symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "outputsize": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "from_symbol", + "to_symbol", + "interval" + ] + } + }, + { + "name": "fx_daily", + "description": "Fetch FX daily", + "inputSchema": { + "type": "object", + "properties": { + "from_symbol": { + "type": "string" + }, + "to_symbol": { + "type": "string" + }, + "datatype": { + "type": "string" + }, + "outputsize": { + "type": "string" + } + }, + "required": [ + "from_symbol", + "to_symbol" + ] + } + }, + { + "name": "fx_weekly", + "description": "Fetch FX weekly", + "inputSchema": { + "type": "object", + "properties": { + "from_symbol": { + "type": "string" + }, + "to_symbol": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "from_symbol", + "to_symbol" + ] + } + }, + { + "name": "fx_monthly", + "description": "Fetch FX monthly", + "inputSchema": { + "type": "object", + "properties": { + "from_symbol": { + "type": "string" + }, + "to_symbol": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "from_symbol", + "to_symbol" + ] + } + }, + { + "name": "crypto_intraday", + "description": "Fetch crypto intraday", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "market": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "outputsize": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "market", + "interval" + ] + } + }, + { + "name": "digital_currency_daily", + "description": "Fetch digital currency daily", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "market": 
{ + "type": "string" + } + }, + "required": [ + "symbol", + "market" + ] + } + }, + { + "name": "digital_currency_weekly", + "description": "Fetch digital currency weekly", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "market": { + "type": "string" + } + }, + "required": [ + "symbol", + "market" + ] + } + }, + { + "name": "digital_currency_monthly", + "description": "Fetch digital currency monthly", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "market": { + "type": "string" + } + }, + "required": [ + "symbol", + "market" + ] + } + }, + { + "name": "wti_crude_oil", + "description": "Fetch WTI crude oil", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "brent_crude_oil", + "description": "Fetch Brent crude oil", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "natural_gas", + "description": "Fetch natural gas", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "copper", + "description": "Fetch copper", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "aluminum", + "description": "Fetch aluminum", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "wheat", + "description": "Fetch wheat", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + 
} + }, + "required": [] + } + }, + { + "name": "corn", + "description": "Fetch corn", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "cotton", + "description": "Fetch cotton", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "sugar", + "description": "Fetch sugar", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "coffee", + "description": "Fetch coffee", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "all_commodities", + "description": "Fetch all commodities", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "real_gdp", + "description": "Fetch real GDP", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "real_gdp_per_capita", + "description": "Fetch real GDP per capita", + "inputSchema": { + "type": "object", + "properties": { + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "treasury_yield", + "description": "Fetch treasury yield", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "maturity": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "federal_funds_rate", + "description": "Fetch federal funds rate", + "inputSchema": { + "type": 
"object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "cpi", + "description": "Fetch consumer price index", + "inputSchema": { + "type": "object", + "properties": { + "interval": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "inflation", + "description": "Fetch inflation", + "inputSchema": { + "type": "object", + "properties": { + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "retail_sales", + "description": "Fetch retail sales", + "inputSchema": { + "type": "object", + "properties": { + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "durables", + "description": "Fetch durables", + "inputSchema": { + "type": "object", + "properties": { + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "unemployment", + "description": "Fetch unemployment", + "inputSchema": { + "type": "object", + "properties": { + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "nonfarm_payroll", + "description": "Fetch nonfarm payroll", + "inputSchema": { + "type": "object", + "properties": { + "datatype": { + "type": "string" + } + }, + "required": [] + } + }, + { + "name": "sma", + "description": "Fetch simple moving average", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "ema", + "description": "Fetch exponential moving average", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": 
"string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "wma", + "description": "Fetch weighted moving average", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "dema", + "description": "Fetch double exponential moving average", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "trima", + "description": "Fetch triangular moving average", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "kama", + "description": "Fetch Kaufman adaptive moving average", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" 
+ }, + "datatype": { + "type": "string" + } + } + } + }, + { + "name": "mama", + "description": "Fetch MESA adaptive moving average", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "series_type": { + "type": "string" + }, + "fastlimit": { + "type": "number" + }, + "slowlimit": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "series_type", + "fastlimit", + "slowlimit" + ] + } + }, + { + "name": "vwap", + "description": "Fetch volume weighted average price", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "t3", + "description": "Fetch triple exponential moving average", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "macd", + "description": "Fetch moving average convergence divergence", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "series_type": { + "type": "string" + }, + "fastperiod": { + "type": "number" + }, + "slowperiod": { + "type": "number" + }, + "signalperiod": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "series_type" + ] + } + }, + { + "name": "macdext", + "description": "Fetch moving 
average convergence divergence with controllable moving average type", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "series_type": { + "type": "string" + }, + "fastperiod": { + "type": "number" + }, + "slowperiod": { + "type": "number" + }, + "signalperiod": { + "type": "number" + }, + "fastmatype": { + "type": "number" + }, + "slowmatype": { + "type": "number" + }, + "signalmatype": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "series_type" + ] + } + }, + { + "name": "stoch", + "description": "Fetch stochastic oscillator", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "fastkperiod": { + "type": "number" + }, + "slowkperiod": { + "type": "number" + }, + "slowdperiod": { + "type": "number" + }, + "slowkmatype": { + "type": "string" + }, + "slowdmatype": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "stochf", + "description": "Fetch stochastic oscillator fast", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "fastkperiod": { + "type": "number" + }, + "fastdperiod": { + "type": "number" + }, + "fastdmatype": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "rsi", + "description": "Fetch relative strength index", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": 
{ + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "stochrsi", + "description": "Fetch stochastic relative strength index", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "fastkperiod": { + "type": "number" + }, + "fastdperiod": { + "type": "number" + }, + "fastdmatype": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "willr", + "description": "Fetch williams percent range", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "adx", + "description": "Fetch average directional movement index", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "adxr", + "description": "Fetch average directional movement index rating", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "apo", + "description": 
"Fetch absolute price oscillator", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "series_type": { + "type": "string" + }, + "fastperiod": { + "type": "number" + }, + "slowperiod": { + "type": "number" + }, + "matype": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "series_type", + "fastperiod", + "slowperiod" + ] + } + }, + { + "name": "ppo", + "description": "Fetch percentage price oscillator", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "series_type": { + "type": "string" + }, + "fastperiod": { + "type": "number" + }, + "slowperiod": { + "type": "number" + }, + "matype": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "series_type", + "fastperiod", + "slowperiod" + ] + } + }, + { + "name": "mom", + "description": "Fetch momentum", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "bop", + "description": "Fetch balance of power", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "cci", + "description": "Fetch commodity channel index", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { 
+ "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "cmo", + "description": "Fetch chande momentum oscillator", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "roc", + "description": "Fetch rate of change", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "rocr", + "description": "Fetch rate of change ratio", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "aroon", + "description": "Fetch aroon", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "aroonosc", + 
"description": "Fetch aroon oscillator", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "mfi", + "description": "Fetch money flow index", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "trix", + "description": "Fetch triple exponential average", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "ultosc", + "description": "Fetch ultimate oscillator", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "timeperiod1": { + "type": "number" + }, + "timeperiod2": { + "type": "number" + }, + "timeperiod3": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "timeperiod1", + "timeperiod2", + "timeperiod3" + ] + } + }, + { + "name": "dx", + "description": "Fetch directional movement index", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + 
"time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "minus_di", + "description": "Fetch minus directional indicator", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "plus_di", + "description": "Fetch plus directional indicator", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "minus_dm", + "description": "Fetch minus directional movement", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "plus_dm", + "description": "Fetch plus directional movement", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "bbands", + "description": "Fetch bollinger bands", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + 
"month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "nbdevup": { + "type": "number" + }, + "nbdevdn": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type", + "nbdevup", + "nbdevdn" + ] + } + }, + { + "name": "midpoint", + "description": "Fetch midpoint", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period", + "series_type" + ] + } + }, + { + "name": "midprice", + "description": "Fetch midprice", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "sar", + "description": "Fetch parabolic sar", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "acceleration": { + "type": "number" + }, + "maximum": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "trange", + "description": "Fetch true range", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "atr", + "description": "Fetch average 
true range", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "natr", + "description": "Fetch normalized average true range", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "time_period": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "time_period" + ] + } + }, + { + "name": "ad", + "description": "Fetch accumulation/distribution line", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "adosc", + "description": "Fetch accumulation/distribution oscillator", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "fastperiod": { + "type": "number" + }, + "slowperiod": { + "type": "number" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "fastperiod", + "slowperiod" + ] + } + }, + { + "name": "obv", + "description": "Fetch on balance volume", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "ht_trendline", + "description": "Fetch hilbert transform - trendline", + 
"inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "series_type" + ] + } + }, + { + "name": "ht_sine", + "description": "Fetch hilbert transform - sine wave", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval", + "series_type" + ] + } + }, + { + "name": "ht_trendmode", + "description": "Fetch hilbert transform - trend mode", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "ht_dcperiod", + "description": "Fetch hilbert transform - dominant cycle period", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "series_type": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "ht_dcphase", + "description": "Fetch hilbert transform - dominant cycle phase", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + }, + { + "name": "ht_phasor", + "description": "Fetch hilbert transform - phasor components", + "inputSchema": { + "type": 
"object", + "properties": { + "symbol": { + "type": "string" + }, + "interval": { + "type": "string" + }, + "month": { + "type": "string" + }, + "datatype": { + "type": "string" + } + }, + "required": [ + "symbol", + "interval" + ] + } + } + ] + }, + "drupal": { + "name": "drupal", + "display_name": "Drupal Server", + "description": "Server for interacting with [Drupal](https://www.drupal.org/project/mcp) using STDIO transport layer.", + "repository": { + "type": "git", + "url": "https://github.com/Omedia/mcp-server-drupal" + }, + "homepage": "https://github.com/Omedia/mcp-server-drupal", + "author": { + "name": "Omedia" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "Drupal", + "TypeScript" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "deno", + "run", + "-A", + "jsr:@omedia/mcp-server-drupal@${VERSION}", + "--drupal-url", + "${DRUPAL_BASE_URL}" + ], + "env": {} + } + }, + "arguments": { + "VERSION": { + "description": "The version of the MCP server to be used. 
This must be provided to ensure compatibility with the installed Drupal version.", + "required": true, + "example": "1.0.0" + }, + "DRUPAL_BASE_URL": { + "description": "The base URL of the Drupal site that the MCP server will interact with.", + "required": true, + "example": "https://example.com" + } + } + }, + "placid-app": { + "name": "placid-app", + "display_name": "Placid.app", + "description": "Generate image and video creatives using Placid.app templates", + "repository": { + "type": "git", + "url": "https://github.com/felores/placid-mcp-server" + }, + "homepage": "https://github.com/felores/placid-mcp-server", + "author": { + "name": "felores" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "Placid", + "Templates", + "Image Generation", + "Video Generation" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@felores/placid-mcp-server" + ], + "env": { + "PLACID_API_TOKEN": "${PLACID_API_TOKEN}" + } + } + }, + "examples": [ + { + "title": "Generate Video Example", + "description": "Example usage for generating a video using Placid templates.", + "prompt": "{\"template_id\":\"template-uuid\",\"layers\":{\"MEDIA\":{\"video\":\"https://example.com/video.mp4\"},\"PHOTO\":{\"image\":\"https://example.com/photo.jpg\"},\"LOGO\":{\"image\":\"https://example.com/logo.png\"},\"HEADLINE\":{\"text\":\"My Video Title\"}},\"audio\":\"https://example.com/background.mp3\",\"audio_duration\":\"auto\"}" + }, + { + "title": "Generate Image Example", + "description": "Example usage for generating an image using Placid templates.", + "prompt": "{\"template_id\":\"template-uuid\",\"layers\":{\"headline\":{\"text\":\"Welcome to My App\"},\"background\":{\"image\":\"https://example.com/bg.jpg\"}}}" + } + ], + "arguments": { + "PLACID_API_TOKEN": { + "description": "Your Placid API token used for authenticating requests to the Placid API.", + "required": true, + "example": "my-secret-api-token" + } + }, + "tools": 
[ + { + "name": "placid_list_templates", + "description": "Get a list of available Placid templates with optional filtering. Each template includes its title, ID, preview image URL, available layers, and tags.", + "inputSchema": { + "type": "object", + "properties": { + "collection_id": { + "type": "string", + "description": "Optional: Filter templates by collection ID" + }, + "custom_data": { + "type": "string", + "description": "Optional: Filter by custom reference data" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Optional: Filter templates by tags" + } + } + } + }, + { + "name": "placid_generate_image", + "description": "Generate an image using a template and provided assets", + "inputSchema": { + "type": "object", + "required": [ + "template_id", + "layers" + ], + "properties": { + "template_id": { + "type": "string", + "description": "UUID of the template to use" + }, + "layers": { + "type": "object", + "description": "Key-value pairs for dynamic content. Keys must match template layer names.", + "additionalProperties": { + "oneOf": [ + { + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "Content for text layers" + } + }, + "required": [ + "text" + ] + }, + { + "type": "object", + "properties": { + "image": { + "type": "string", + "format": "uri", + "description": "URL for image/video layers" + } + }, + "required": [ + "image" + ] + } + ] + } + } + } + } + }, + { + "name": "placid_generate_video", + "description": "Generate a video using one or more templates and provided assets. Every 10 seconds of video uses 10 credits.", + "inputSchema": { + "type": "object", + "required": [ + "template_id", + "layers" + ], + "properties": { + "template_id": { + "type": "string", + "description": "UUID of the template to use" + }, + "layers": { + "type": "object", + "description": "Key-value pairs for dynamic content. 
Keys must match template layer names.", + "additionalProperties": { + "oneOf": [ + { + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "Content for text layers" + } + }, + "required": [ + "text" + ] + }, + { + "type": "object", + "properties": { + "image": { + "type": "string", + "format": "uri", + "description": "URL for image layers" + } + }, + "required": [ + "image" + ] + }, + { + "type": "object", + "properties": { + "video": { + "type": "string", + "format": "uri", + "description": "URL for video layers (.mp4)" + } + }, + "required": [ + "video" + ] + } + ] + } + }, + "audio": { + "type": "string", + "description": "URL of mp3 audio file for this video" + }, + "audio_duration": { + "type": "string", + "description": "Set to 'auto' to trim audio to video length" + }, + "audio_trim_start": { + "type": "string", + "description": "Timestamp of the trim start point (e.g. '00:00:45' or '00:00:45.25')" + }, + "audio_trim_end": { + "type": "string", + "description": "Timestamp of the trim end point (e.g. 
'00:00:55' or '00:00:55.25')" + } + } + } + } + ] + }, + "web-fetch": { + "name": "web-fetch", + "description": "A Model Context Protocol (MCP) server for fetching webpages including html/pdf/plain text type content.", + "display_name": "Web Fetch", + "repository": { + "type": "git", + "url": "https://github.com/pathintegral-institute/mcp.science" + }, + "homepage": "https://github.com/pathintegral-institute/mcp.science/tree/main/servers/web-fetch", + "author": { + "name": "pathintegral-institute" + }, + "license": "MIT", + "tags": [ + "web", + "fetch", + "html", + "pdf", + "text" + ], + "arguments": { + "user_agent": { + "description": "Custom user-agent for fetching web content", + "required": false, + "example": "ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/pathintegral-institute/mcp.science#subdirectory=servers/web-fetch", + "mcp-web-fetch" + ], + "description": "Run using uv (recommended)" + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "ghcr.io/mcp-servers/fetch:latest" + ], + "description": "Run using Docker" + } + }, + "examples": [ + { + "title": "Fetch PDF content", + "description": "Fetch PDF content from a URL", + "prompt": "fetch web from url: https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf" + }, + { + "title": "Fetch HTML content", + "description": "Fetch HTML content from a website", + "prompt": "fetch web from url: https://example.com" + }, + { + "title": "Fetch raw content", + "description": "Fetch raw content from a URL", + "prompt": "fetch web from url: https://example.com/data.json with raw=true" + }, + { + "title": "Fetch with custom user-agent", + "description": "Fetch content with a custom user-agent (requires server configuration)", + "prompt": "fetch web from 
url: https://example.com using a mobile browser user-agent" + } + ], + "categories": [ + "Web Services" + ], + "tools": [ + { + "name": "fetch-web", + "description": "Fetch URL and return content according to its content type. Returns parsed content by default or raw content if specified.", + "prompt": "Fetch web from url: https://example.com", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL to fetch", + "format": "uri", + "minLength": 1 + }, + "raw": { + "type": "boolean", + "description": "Whether to return raw content", + "default": false + } + }, + "required": [ + "url" + ] + } + } + ], + "is_official": true + }, + "siri-shortcuts": { + "name": "siri-shortcuts", + "display_name": "Siri Shortcuts", + "description": "MCP to interact with Siri Shortcuts on macOS. Exposes all Shortcuts as MCP tools.", + "repository": { + "type": "git", + "url": "https://github.com/dvcrn/mcp-server-siri-shortcuts" + }, + "homepage": "https://github.com/dvcrn/mcp-server-siri-shortcuts", + "author": { + "name": "dvcrn" + }, + "license": "[NOT FOUND]", + "categories": [ + "System Tools" + ], + "tags": [ + "siri", + "shortcuts", + "automation" + ], + "examples": [ + { + "title": "List all shortcuts", + "description": "Fetches all available Siri shortcuts", + "prompt": "list_shortcuts" + }, + { + "title": "Run a specific shortcut", + "description": "Execute a shortcut with optional input", + "prompt": "run_shortcut_My_Shortcut_1" + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "mcp-server-siri-shortcuts" + ] + } + }, + "tools": [ + { + "name": "list_shortcuts", + "description": "List all available Siri shortcuts", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "open_shortcut", + "description": "Open a shortcut in the Shortcuts app", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name of
the shortcut to open" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "run_shortcut", + "description": "Run a shortcut with optional input and output parameters", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name or identifier of the shortcut to run" + }, + "input": { + "type": "string", + "description": "The input to pass to the shortcut. Can be text, or a filepath" + } + }, + "required": [ + "name" + ] + } + } + ] + }, + "windows-cli": { + "name": "windows-cli", + "display_name": "Windows CLI", + "description": "MCP server for secure command-line interactions on Windows systems, enabling controlled access to PowerShell, CMD, and Git Bash shells.", + "repository": { + "type": "git", + "url": "https://github.com/SimonB97/win-cli-mcp-server" + }, + "homepage": "https://github.com/SimonB97/win-cli-mcp-server", + "author": { + "name": "SimonB97" + }, + "license": "MIT", + "categories": [ + "System Tools" + ], + "tags": [ + "CLI", + "Windows", + "Security", + "SSH" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@simonb97/server-win-cli", + "--config", + "${config}" + ] + } + }, + "examples": [ + { + "title": "Usage with Claude Desktop", + "description": "Add MCP server configuration to Claude Desktop.", + "prompt": "\n{\n \"mcpServers\": {\n \"windows-cli\": {\n \"command\": \"npx\",\n \"args\": [\"-y\", \"@simonb97/server-win-cli\"]\n }\n }\n}\n" + } + ], + "arguments": { + "config": { + "description": "The path to your configuration file, which customizes the server behavior.", + "required": true, + "example": "path/to/your/config.json" + } + }, + "tools": [ + { + "name": "execute_command", + "description": "Execute a command in the specified shell (powershell, cmd, or gitbash)\n\nExample usage (PowerShell):\n```json\n{\n \"shell\": \"powershell\",\n \"command\": \"Get-Process | Select-Object -First 5\",\n \"workingDir\": 
\"C:\\Users\\username\"\n}\n```\n\nExample usage (CMD):\n```json\n{\n \"shell\": \"cmd\",\n \"command\": \"dir /b\",\n \"workingDir\": \"C:\\Projects\"\n}\n```\n\nExample usage (Git Bash):\n```json\n{\n \"shell\": \"gitbash\",\n \"command\": \"ls -la\",\n \"workingDir\": \"/c/Users/username\"\n}\n```", + "inputSchema": { + "type": "object", + "properties": { + "shell": { + "type": "string", + "enum": [ + "powershell", + "cmd", + "gitbash" + ], + "description": "Shell to use for command execution" + }, + "command": { + "type": "string", + "description": "Command to execute" + }, + "workingDir": { + "type": "string", + "description": "Working directory for command execution (optional)" + } + }, + "required": [ + "shell", + "command" + ] + } + }, + { + "name": "get_command_history", + "description": "Get the history of executed commands\n\nExample usage:\n```json\n{\n \"limit\": 5\n}\n```\n\nExample response:\n```json\n[\n {\n \"command\": \"Get-Process\",\n \"output\": \"...\",\n \"timestamp\": \"2024-03-20T10:30:00Z\",\n \"exitCode\": 0\n }\n]\n```", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "number", + "description": "Maximum number of history entries to return (default: 10, max: 1000)" + } + } + } + }, + { + "name": "ssh_execute", + "description": "Execute a command on a remote host via SSH\n\nExample usage:\n```json\n{\n \"connectionId\": \"raspberry-pi\",\n \"command\": \"uname -a\"\n}\n```\n\nConfiguration required in config.json:\n```json\n{\n \"ssh\": {\n \"enabled\": true,\n \"connections\": {\n \"raspberry-pi\": {\n \"host\": \"raspberrypi.local\",\n \"port\": 22,\n \"username\": \"pi\",\n \"password\": \"raspberry\"\n }\n }\n }\n}\n```", + "inputSchema": { + "type": "object", + "properties": { + "connectionId": { + "type": "string", + "description": "ID of the SSH connection to use", + "enum": [] + }, + "command": { + "type": "string", + "description": "Command to execute" + } + }, + "required": [ + "connectionId", + 
"command" + ] + } + }, + { + "name": "ssh_disconnect", + "description": "Disconnect from an SSH server\n\nExample usage:\n```json\n{\n \"connectionId\": \"raspberry-pi\"\n}\n```\n\nUse this to cleanly close SSH connections when they're no longer needed.", + "inputSchema": { + "type": "object", + "properties": { + "connectionId": { + "type": "string", + "description": "ID of the SSH connection to disconnect", + "enum": [] + } + }, + "required": [ + "connectionId" + ] + } + }, + { + "name": "create_ssh_connection", + "description": "Create a new SSH connection", + "inputSchema": { + "type": "object", + "properties": { + "connectionId": { + "type": "string", + "description": "ID of the SSH connection" + }, + "connectionConfig": { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "Host of the SSH connection" + }, + "port": { + "type": "number", + "description": "Port of the SSH connection" + }, + "username": { + "type": "string", + "description": "Username for the SSH connection" + }, + "password": { + "type": "string", + "description": "Password for the SSH connection" + }, + "privateKeyPath": { + "type": "string", + "description": "Path to the private key for the SSH connection" + } + }, + "required": [ + "connectionId", + "connectionConfig" + ] + } + } + } + }, + { + "name": "read_ssh_connections", + "description": "Read all SSH connections", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "update_ssh_connection", + "description": "Update an existing SSH connection", + "inputSchema": { + "type": "object", + "properties": { + "connectionId": { + "type": "string", + "description": "ID of the SSH connection to update" + }, + "connectionConfig": { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "Host of the SSH connection" + }, + "port": { + "type": "number", + "description": "Port of the SSH connection" + }, + "username": { + "type": "string", + "description": 
"Username for the SSH connection" + }, + "password": { + "type": "string", + "description": "Password for the SSH connection" + }, + "privateKeyPath": { + "type": "string", + "description": "Path to the private key for the SSH connection" + } + }, + "required": [ + "connectionId", + "connectionConfig" + ] + } + } + } + }, + { + "name": "delete_ssh_connection", + "description": "Delete an existing SSH connection", + "inputSchema": { + "type": "object", + "properties": { + "connectionId": { + "type": "string", + "description": "ID of the SSH connection to delete" + } + }, + "required": [ + "connectionId" + ] + } + }, + { + "name": "get_current_directory", + "description": "Get the current working directory", + "inputSchema": { + "type": "object", + "properties": {} + } + } + ] + }, + "make-mcp-server": { + "display_name": "Make MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/integromat/make-mcp-server" + }, + "homepage": "https://github.com/integromat/make-mcp-server", + "author": { + "name": "integromat" + }, + "license": "MIT", + "tags": [ + "make", + "automation", + "ai", + "mcp", + "scenarios" + ], + "arguments": { + "MAKE_API_KEY": { + "description": "API key generated in your Make profile", + "required": true, + "example": "" + }, + "MAKE_ZONE": { + "description": "The zone your organization is hosted in", + "required": true, + "example": "eu2.make.com" + }, + "MAKE_TEAM": { + "description": "Team ID found in the URL of the Team page", + "required": true, + "example": "" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@makehq/mcp-server" + ], + "env": { + "MAKE_API_KEY": "", + "MAKE_ZONE": "", + "MAKE_TEAM": "" + }, + "description": "Install and run using npm", + "recommended": true + } + }, + "examples": [ + { + "title": "Using Make scenarios with Claude Desktop", + "description": "Configure the Make MCP server in Claude Desktop to access your Make scenarios", + "prompt": "I'd 
like to use my Make scenarios as tools. Can you help me set that up?" + } + ], + "name": "make-mcp-server", + "description": "A Model Context Protocol server that enables Make scenarios to be utilized as tools by AI assistants. This integration allows AI systems to trigger and interact with your Make automation workflows.", + "categories": [ + "Productivity" + ], + "tools": [], + "prompts": [], + "resources": [], + "is_official": true + }, + "x-twitter": { + "name": "x-twitter", + "display_name": "X (Twitter)", + "description": "Create, manage and publish X/Twitter posts directly through Claude chat.", + "repository": { + "type": "git", + "url": "https://github.com/vidhupv/x-mcp" + }, + "homepage": "https://github.com/vidhupv/x-mcp", + "author": { + "name": "vidhupv" + }, + "license": "MIT", + "categories": [ + "Messaging" + ], + "tags": [ + "Twitter", + "X" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/vidhupv/x-mcp", + "x-mcp" + ], + "env": { + "TWITTER_API_KEY": "${TWITTER_API_KEY}", + "TWITTER_API_SECRET": "${TWITTER_API_SECRET}", + "TWITTER_ACCESS_TOKEN": "${TWITTER_ACCESS_TOKEN}", + "TWITTER_ACCESS_TOKEN_SECRET": "${TWITTER_ACCESS_TOKEN_SECRET}" + } + } + }, + "examples": [ + { + "title": "Tweet", + "description": "Example of sending a tweet through Claude chat.", + "prompt": "Tweet 'Just learned how to tweet through AI - mind blown! \ud83e\udd16\u2728'" + }, + { + "title": "Create Thread", + "description": "Create a thread about a specific topic.", + "prompt": "Create a thread about the history of pizza" + }, + { + "title": "Show Drafts", + "description": "Request to see draft tweets.", + "prompt": "Show me my draft tweets" + }, + { + "title": "Publish Draft", + "description": "Publish an existing draft.", + "prompt": "Publish this draft!" 
+ }, + { + "title": "Delete Draft", + "description": "Delete a specific draft.", + "prompt": "Delete that draft" + } + ], + "arguments": { + "TWITTER_API_KEY": { + "description": "The API key for accessing Twitter's API.", + "required": true, + "example": "your_api_key" + }, + "TWITTER_API_SECRET": { + "description": "The API secret key for accessing Twitter's API.", + "required": true, + "example": "your_api_secret" + }, + "TWITTER_ACCESS_TOKEN": { + "description": "The access token for authorizing the application to access Twitter on behalf of the user.", + "required": true, + "example": "your_access_token" + }, + "TWITTER_ACCESS_TOKEN_SECRET": { + "description": "The access token secret for authorizing the application to access Twitter on behalf of the user.", + "required": true, + "example": "your_access_token_secret" + } + }, + "tools": [ + { + "name": "create_draft_tweet", + "description": "Create a draft tweet", + "inputSchema": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The content of the tweet" + } + }, + "required": [ + "content" + ] + } + }, + { + "name": "create_draft_thread", + "description": "Create a draft tweet thread", + "inputSchema": { + "type": "object", + "properties": { + "contents": { + "type": "array", + "items": { + "type": "string" + }, + "description": "An array of tweet contents for the thread" + } + }, + "required": [ + "contents" + ] + } + }, + { + "name": "list_drafts", + "description": "List all draft tweets and threads", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "publish_draft", + "description": "Publish a draft tweet or thread", + "inputSchema": { + "type": "object", + "properties": { + "draft_id": { + "type": "string", + "description": "ID of the draft to publish" + } + }, + "required": [ + "draft_id" + ] + } + }, + { + "name": "delete_draft", + "description": "Delete a draft tweet or thread", + "inputSchema": { + "type": 
"object", + "properties": { + "draft_id": { + "type": "string", + "description": "ID of the draft to delete" + } + }, + "required": [ + "draft_id" + ] + } + } + ] + }, + "chatmcp": { + "name": "chatmcp", + "display_name": "Chat Desktop App", + "description": "\u2013 An Open Source Cross-platform GUI Desktop application compatible with Linux, macOS, and Windows, enabling seamless interaction with MCP servers across dynamically selectable LLMs, by **[AIQL](https://github.com/AI-QL/chat-mcp)**", + "repository": { + "type": "git", + "url": "https://github.com/AI-QL/chat-mcp" + }, + "homepage": "https://github.com/AI-QL/chat-mcp", + "author": { + "name": "AIQL" + }, + "license": "Apache-2.0", + "categories": [ + "MCP Tools" + ], + "tags": [ + "LLM", + "Electron", + "cross-platform" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/AI-QL/chat-mcp" + ], + "env": { + "SEARCH_PATH": "${SEARCH_PATH}" + } + } + }, + "arguments": { + "SEARCH_PATH": { + "description": "This environment variable specifies the system's executable search path, which determines where the operating system looks for executable files when running commands.", + "required": false, + "example": "C:\\Program Files\\nodejs;C:\\Windows\\System32" + } + } + }, + "monday-com": { + "name": "monday-com", + "display_name": "Monday.com", + "description": "MCP Server to interact with Monday.com boards and items.", + "repository": { + "type": "git", + "url": "https://github.com/sakce/mcp-server-monday" + }, + "homepage": "https://github.com/sakce/mcp-server-monday", + "author": { + "name": "sakce" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "monday.com", + "API" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-monday" + ], + "env": { + "MONDAY_API_KEY": "${MONDAY_API_KEY}", + "MONDAY_WORKSPACE_NAME": "${MONDAY_WORKSPACE_NAME}" + } + }, + "docker": { + "type": 
"docker", + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "-e", + "MONDAY_API_KEY=${MONDAY_API_KEY}", + "-e", + "MONDAY_WORKSPACE_NAME=${MONDAY_WORKSPACE_NAME}", + "sakce/mcp-server-monday" + ] + } + }, + "arguments": { + "MONDAY_API_KEY": { + "description": "API key for authenticating with the Monday.com API.", + "required": true, + "example": "your-monday-api-key" + }, + "MONDAY_WORKSPACE_NAME": { + "description": "The name of the Monday.com workspace you are working with.", + "required": true, + "example": "myworkspace" + } + }, + "tools": [ + { + "name": "monday-create-item", + "description": "Create a new item in a Monday.com Board. Optionally, specify the parent Item ID to create a Sub-item.", + "inputSchema": { + "type": "object", + "properties": { + "boardId": { + "type": "string", + "description": "Monday.com Board ID that the Item or Sub-item is on." + }, + "itemTitle": { + "type": "string", + "description": "Name of the Monday.com Item or Sub-item that will be created." + }, + "groupId": { + "type": "string", + "description": "Monday.com Board's Group ID to create the Item in. If set, parentItemId should not be set." + }, + "parentItemId": { + "type": "string", + "description": "Monday.com Item ID to create the Sub-item under. If set, groupId should not be set." + }, + "columnValues": { + "type": "object", + "description": "Dictionary of column values to set {column_id: value}" + } + }, + "required": [ + "boardId", + "itemTitle" + ] + } + }, + { + "name": "monday-get-items-by-id", + "description": "Fetch specific Monday.com item by its ID", + "inputSchema": { + "type": "object", + "properties": { + "itemId": { + "type": "string", + "description": "ID of the Monday.com item to fetch." 
+ } + }, + "required": [ + "itemId" + ] + } + }, + { + "name": "monday-update-item", + "description": "Update a Monday.com item's or sub-item's column values.", + "inputSchema": { + "type": "object", + "properties": { + "boardId": { + "type": "string", + "description": "Monday.com Board ID that the Item or Sub-item is on." + }, + "itemId": { + "type": "string", + "description": "Monday.com Item or Sub-item ID to update the columns of." + }, + "columnValues": { + "type": "object", + "description": "Dictionary of column values to update the Monday.com Item or Sub-item with. ({column_id: value})" + } + }, + "required": [ + "boardId", + "itemId", + "columnValues" + ] + } + }, + { + "name": "monday-get-board-columns", + "description": "Get the Columns of a Monday.com Board.", + "inputSchema": { + "type": "object", + "properties": { + "boardId": { + "type": "string", + "description": "Monday.com Board ID that the Item or Sub-item is on." + } + }, + "required": [ + "boardId" + ] + } + }, + { + "name": "monday-get-board-groups", + "description": "Get the Groups of a Monday.com Board.", + "inputSchema": { + "type": "object", + "properties": { + "boardId": { + "type": "string", + "description": "Monday.com Board ID that the Item or Sub-item is on." + } + }, + "required": [ + "boardId" + ] + } + }, + { + "name": "monday-create-update", + "description": "Create an update (comment) on a Monday.com Item or Sub-item.", + "inputSchema": { + "type": "object", + "properties": { + "itemId": { + "type": "string" + }, + "updateText": { + "type": "string", + "description": "Content to update the Item or Sub-item with." + } + }, + "required": [ + "itemId", + "updateText" + ] + } + }, + { + "name": "monday-list-boards", + "description": "Get all Boards from Monday.com", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "description": "Maximum number of Monday.com Boards to return." 
+ } + } + } + }, + { + "name": "monday-list-items-in-groups", + "description": "List all items in the specified groups of a Monday.com board", + "inputSchema": { + "type": "object", + "properties": { + "boardId": { + "type": "string", + "description": "Monday.com Board ID that the Item or Sub-item is on." + }, + "groupIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "limit": { + "type": "integer" + }, + "cursor": { + "type": "string" + } + }, + "required": [ + "boardId", + "groupIds", + "limit" + ] + } + }, + { + "name": "monday-list-subitems-in-items", + "description": "List all Sub-items of a list of Monday.com Items", + "inputSchema": { + "type": "object", + "properties": { + "itemIds": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "itemIds" + ] + } + }, + { + "name": "monday-create-board", + "description": "Create a new Monday.com board", + "inputSchema": { + "type": "object", + "properties": { + "board_name": { + "type": "string", + "description": "Name of the Monday.com board to create" + }, + "board_kind": { + "type": "string", + "description": "Kind of the Monday.com board to create (public, private, shareable). Default is public." + } + }, + "required": [ + "board_name" + ] + } + }, + { + "name": "monday-create-board-group", + "description": "Create a new group in a Monday.com board", + "inputSchema": { + "type": "object", + "properties": { + "boardId": { + "type": "string", + "description": "Monday.com Board ID that the group will be created in." + }, + "groupName": { + "type": "string", + "description": "Name of the group to create." + } + }, + "required": [ + "boardId", + "groupName" + ] + } + }, + { + "name": "monday-move-item-to-group", + "description": "Move an item to a group in a Monday.com board", + "inputSchema": { + "type": "object", + "properties": { + "itemId": { + "type": "string", + "description": "Monday.com Item ID to move." 
+ }, + "groupId": { + "type": "string", + "description": "Monday.com Group ID to move the Item to." + } + }, + "required": [ + "itemId", + "groupId" + ] + } + }, + { + "name": "monday-delete-item", + "description": "Delete an item from a Monday.com board", + "inputSchema": { + "type": "object", + "properties": { + "itemId": { + "type": "string", + "description": "Monday.com Item ID to delete." + } + }, + "required": [ + "itemId" + ] + } + }, + { + "name": "monday-archive-item", + "description": "Archive an item from a Monday.com board", + "inputSchema": { + "type": "object", + "properties": { + "itemId": { + "type": "string", + "description": "Monday.com Item ID to archive." + } + }, + "required": [ + "itemId" + ] + } + }, + { + "name": "monday-get-item-updates", + "description": "Get updates for a specific item in Monday.com", + "inputSchema": { + "type": "object", + "properties": { + "itemId": { + "type": "string", + "description": "ID of the Monday.com item to get updates for." + }, + "limit": { + "type": "integer", + "description": "Maximum number of updates to retrieve. Default is 25." + } + }, + "required": [ + "itemId" + ] + } + }, + { + "name": "monday-get-docs", + "description": "Get a list of documents from Monday.com, optionally filtered by folder", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "description": "Maximum number of documents to retrieve. Default is 25." + }, + "folder_id": { + "type": "string", + "description": "Optional folder ID to filter documents by." + } + } + } + }, + { + "name": "monday-get-doc-content", + "description": "Get the content of a specific document by ID", + "inputSchema": { + "type": "object", + "properties": { + "doc_id": { + "type": "string", + "description": "ID of the Monday.com document to retrieve." 
+ } + }, + "required": [ + "doc_id" + ] + } + }, + { + "name": "monday-create-doc", + "description": "Create a new document in Monday.com", + "inputSchema": { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "Title of the document to create." + }, + "content": { + "type": "string", + "description": "Content of the document to create." + }, + "folder_id": { + "type": "string", + "description": "Optional folder ID to create the document in." + } + }, + "required": [ + "title", + "content" + ] + } + }, + { + "name": "monday-add-doc-block", + "description": "Add a block to a document", + "inputSchema": { + "type": "object", + "properties": { + "doc_id": { + "type": "string", + "description": "ID of the Monday.com document to add a block to." + }, + "block_type": { + "type": "string", + "description": "Type of block to add (normal_text, bullet_list, numbered_list, heading, divider, etc.)." + }, + "content": { + "type": "string", + "description": "Content of the block to add." + }, + "after_block_id": { + "type": "string", + "description": "Optional ID of the block to add this block after." + } + }, + "required": [ + "doc_id", + "block_type", + "content" + ] + } + }, + { + "name": "monday-get-item-files", + "description": "Get files (PDFs, documents, images, etc.) attached to a Monday.com item", + "inputSchema": { + "type": "object", + "properties": { + "itemId": { + "type": "string", + "description": "ID of the Monday.com item to get files from." + } + }, + "required": [ + "itemId" + ] + } + }, + { + "name": "monday-get-update-files", + "description": "Get files (PDFs, documents, images, etc.) attached to a specific update in Monday.com", + "inputSchema": { + "type": "object", + "properties": { + "updateId": { + "type": "string", + "description": "ID of the Monday.com update to get files from." 
+ } + }, + "required": [ + "updateId" + ] + } + } + ] + }, + "crypto-feargreed-mcp": { + "name": "crypto-feargreed-mcp", + "display_name": "Crypto Fear & Greed Index", + "description": "Providing real-time and historical Crypto Fear & Greed Index data.", + "repository": { + "type": "git", + "url": "https://github.com/kukapay/crypto-feargreed-mcp" + }, + "homepage": "https://github.com/kukapay/crypto-feargreed-mcp", + "author": { + "name": "KukaPay", + "url": "https://github.com/kukapay" + }, + "license": "MIT", + "categories": [ + "Finance" + ], + "tags": [ + "Fear & Greed", + "Crypto Index", + "Analytics" + ], + "examples": [ + { + "title": "Get Current Index", + "description": "What is the current Crypto Fear & Greed Index?", + "prompt": "What's the current Crypto Fear & Greed Index?" + }, + { + "title": "Analyze Trend", + "description": "Show the Fear & Greed Index trend for a specific number of days.", + "prompt": "Show me the Crypto Fear & Greed Index trend for the last 30 days." + } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/kukapay/crypto-feargreed-mcp", + "main.py" + ] + } + }, + "tools": [ + { + "name": "get_current_fng_tool", + "description": "Get the current Fear and Greed Index value.", + "inputSchema": {}, + "required": [] + }, + { + "name": "get_historical_fng_tool", + "description": "Get historical Fear and Greed Index data for the specified number of days.", + "inputSchema": { + "days": { + "type": "integer", + "description": "Number of days for historical data" + } + }, + "required": [ + "days" + ] + }, + { + "name": "analyze_fng_trend", + "description": "Analyze the Fear and Greed Index trend over the specified number of days.", + "inputSchema": { + "days": { + "type": "integer", + "description": "Number of days for trend analysis" + } + }, + "required": [ + "days" + ] + } + ] + }, + "mcp-local-rag": { + "name": "mcp-local-rag", + "display_name": "Local RAG", + 
"description": "\"primitive\" RAG-like web search model context protocol (MCP) server that runs locally using Google's MediaPipe Text Embedder and DuckDuckGo Search. \u2728 no APIs required \u2728.", + "repository": { + "type": "git", + "url": "https://github.com/nkapila6/mcp-local-rag" + }, + "license": "MIT", + "author": { + "name": "nkapila6" + }, + "homepage": "https://github.com/nkapila6/mcp-local-rag", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "RAG", + "Search" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--python=3.10", + "--from", + "git+https://github.com/nkapila6/mcp-local-rag", + "mcp-local-rag" + ] + } + }, + "tools": [ + { + "name": "rag_search", + "description": "\n Search the web for a given query. Give back context to the LLM\n with a RAG-like similarity sort.\n\n Args:\n query (str): The query to search for.\n num_results (int): Number of results to return.\n top_k (int): Use top \"k\" results for content.\n\n Returns:\n Dict of strings containing best search based on input query. 
Formatted in markdown.\n ", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + }, + "num_results": { + "default": 10, + "title": "Num Results", + "type": "integer" + }, + "top_k": { + "default": 5, + "title": "Top K", + "type": "integer" + } + }, + "required": [ + "query" + ], + "title": "rag_searchArguments", + "type": "object" + } + } + ] + }, + "rememberizer-ai": { + "name": "rememberizer-ai", + "display_name": "Rememberizer", + "description": "An MCP server designed for interacting with the Rememberizer data source, facilitating enhanced knowledge retrieval.", + "repository": { + "type": "git", + "url": "https://github.com/skydeckai/mcp-server-rememberizer" + }, + "homepage": "https://github.com/skydeckai/mcp-server-rememberizer", + "author": { + "name": "skydeckai" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "Rememberizer", + "Document Management", + "Knowledge Management", + "API" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-rememberizer" + ], + "env": { + "REMEMBERIZER_API_TOKEN": "${REMEMBERIZER_API_TOKEN}" + } + } + }, + "arguments": { + "REMEMBERIZER_API_TOKEN": { + "description": "Your Rememberizer API token, required for accessing the Rememberizer API.", + "required": true, + "example": "your_rememberizer_api_token" + } + }, + "tools": [ + { + "name": "rememberizer_account_information", + "description": "Get information about your Rememberizer.ai personal/team knowledge repository account. 
This includes account holder name and email address.", + "inputSchema": { + "type": "object" + } + }, + { + "name": "retrieve_semantically_similar_internal_knowledge", + "description": "Send a block of text and retrieve cosine similar matches from your connected Rememberizer personal/team internal knowledge and memory repository.", + "inputSchema": { + "type": "object", + "properties": { + "match_this": { + "type": "string", + "description": "Up to a 400-word sentence for which you wish to find semantically similar chunks of knowledge." + }, + "n_results": { + "type": "integer", + "description": "Number of semantically similar chunks of text to return. Use 'n_results=3' for up to 5, and 'n_results=10' for more information. If you do not receive enough information, consider trying again with a larger 'n_results' value." + }, + "from_datetime_ISO8601": { + "type": "string", + "description": "Start date in ISO 8601 format with timezone (e.g., 2023-01-01T00:00:00Z). Use this to filter results from a specific date." + }, + "to_datetime_ISO8601": { + "type": "string", + "description": "End date in ISO 8601 format with timezone (e.g., 2024-01-01T00:00:00Z). Use this to filter results until a specific date." + } + }, + "required": [ + "match_this" + ] + } + }, + { + "name": "smart_search_internal_knowledge", + "description": "Search for documents in Rememberizer in its personal/team internal knowledge and memory repository using a simple query that returns the results of an agentic search. The search may include sources such as Slack discussions, Gmail, Dropbox documents, Google Drive documents, and uploaded files. Consider using the tool list_internal_knowledge_systems to find out which are available. Use the tool list_internal_knowledge_systems to find out which sources are available. 
\n\nYou can specify a from_datetime_ISO8601 and a to_datetime_ISO8601, and you should look at the context of your request to make sure you put reasonable parameters around this by, for example, converting a reference to recently to a start date two weeks before today, or converting yesterday to a timeframe during the last day. But do be aware of the effect of time zone differences in the source data and for the requestor.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Up to a 400-word sentence for which you wish to find semantically similar chunks of knowledge." + }, + "user_context": { + "type": "string", + "description": "The additional context for the query. You might need to summarize the conversation up to this point for better context-aware results." + }, + "n_results": { + "type": "integer", + "description": "Number of semantically similar chunks of text to return. Use 'n_results=3' for up to 5, and 'n_results=10' for more information. If you do not receive enough information, consider trying again with a larger 'n_results' value." + }, + "from_datetime_ISO8601": { + "type": "string", + "description": "Start date in ISO 8601 format with timezone (e.g., 2023-01-01T00:00:00Z). Use this to filter results from a specific date." + }, + "to_datetime_ISO8601": { + "type": "string", + "description": "End date in ISO 8601 format with timezone (e.g., 2024-01-01T00:00:00Z). Use this to filter results until a specific date." + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "list_internal_knowledge_systems", + "description": "List the sources of personal/team internal knowledge. These may include Slack discussions, Gmail, Dropbox documents, Google Drive documents, and uploaded files.", + "inputSchema": { + "type": "object" + } + }, + { + "name": "list_personal_team_knowledge_documents", + "description": "Retrieves a paginated list of all documents in your personal/team knowledge system. 
Sources could include Slack discussions, Gmail, Dropbox documents, Google Drive documents, and uploaded files. Consider using the tool list_internal_knowledge_systems to find out which are available. \n\nUse this tool to browse through available documents and their metadata.\n\nExamples:\n- List first 100 documents: {\"page\": 1, \"page_size\": 100}\n- Get next page: {\"page\": 2, \"page_size\": 100}\n- Get maximum allowed documents: {\"page\": 1, \"page_size\": 1000}\n", + "inputSchema": { + "type": "object", + "properties": { + "page": { + "type": "integer", + "description": "Page number for pagination (starts at 1)", + "minimum": 1, + "default": 1 + }, + "page_size": { + "type": "integer", + "description": "Number of documents per page (1-1000)", + "minimum": 1, + "maximum": 1000, + "default": 100 + } + } + } + }, + { + "name": "remember_this", + "description": "Save a piece of text information in your Rememberizer.ai knowledge system so that it may be recalled in future through tools retrieve_semantically_similar_internal_knowledge or smart_search_internal_knowledge.", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the information. This is used to identify the information in the future." + }, + "content": { + "type": "string", + "description": "The information you wish to memorize." 
+ } + } + } + } + ] + }, + "octagon-mcp-server": { + "display_name": "Octagon MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/OctagonAI/octagon-mcp-server" + }, + "homepage": "https://docs.octagonagents.com", + "author": { + "name": "OctagonAI" + }, + "license": "MIT", + "tags": [ + "market intelligence", + "financial analysis", + "SEC filings", + "earnings calls", + "stock market data", + "private company research", + "funding rounds", + "M&A", + "IPO", + "web scraping" + ], + "arguments": { + "OCTAGON_API_KEY": { + "description": "Your Octagon API key", + "required": true, + "example": "your_octagon_api_key" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "octagon-mcp" + ], + "env": { + "OCTAGON_API_KEY": "your_octagon_api_key" + }, + "description": "Run directly with npx", + "recommended": true + } + }, + "examples": [ + { + "title": "SEC Filing Analysis", + "description": "Extract information from SEC filings", + "prompt": "What was Apple's gross margin percentage from their latest 10-Q filing?" + }, + { + "title": "Earnings Call Analysis", + "description": "Analyze earnings call transcripts", + "prompt": "What did NVIDIA's CEO say about AI chip demand in their latest earnings call?" + }, + { + "title": "Financial Metrics", + "description": "Retrieve financial metrics and ratios", + "prompt": "Calculate the price-to-earnings ratio for Tesla over the last 4 quarters" + }, + { + "title": "Stock Market Data", + "description": "Access stock market data", + "prompt": "How has Apple's stock performed compared to the S&P 500 over the last 6 months?" + }, + { + "title": "Private Company Research", + "description": "Research private company information", + "prompt": "What is the employee count and funding history for Anthropic?" 
+ } + ], + "name": "octagon-mcp-server", + "description": "A Model Context Protocol (MCP) server implementation that integrates with Octagon Market Intelligence API.", + "categories": [ + "Analytics" + ], + "is_official": true, + "tools": [ + { + "name": "octagon-sec-agent", + "description": "[PUBLIC MARKET INTELLIGENCE] A specialized agent for SEC filings analysis and financial data extraction. Covers over 8,000 public companies from SEC EDGAR with comprehensive coverage of financial statements from annual and quarterly reports (10-K, 10-Q, 20-F), offering filings (S-1), amendments, and event filings (8-K). Updated daily with historical data dating back to 2018 for time-series analysis. Best for extracting financial and segment metrics, management discussion, footnotes, risk factors, and quantitative data from SEC filings. Example queries: 'What was Apple's R&D expense as a percentage of revenue in their latest fiscal year?', 'Find the risk factors related to supply chain in Tesla's latest 10-K', 'Extract quarterly revenue growth rates for Microsoft over the past 2 years'.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Your natural language query or request for the agent" + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "octagon-transcripts-agent", + "description": "[PUBLIC MARKET INTELLIGENCE] A specialized agent for analyzing earnings call transcripts and management commentary. Covers over 8,000 public companies with continuous daily updates for real-time insights. Historical data dating back to 2018 enables robust time-series analysis. Extract information from earnings call transcripts, including executive statements, financial guidance, analyst questions, and forward-looking statements. Best for analyzing management sentiment, extracting guidance figures, and identifying key business trends. 
Example queries: 'What did Amazon's CEO say about AWS growth expectations in the latest earnings call?', 'Summarize key financial metrics mentioned in Tesla's Q2 2023 earnings call', 'What questions did analysts ask about margins during Netflix's latest earnings call?'.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Your natural language query or request for the agent" + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "octagon-financials-agent", + "description": "[PUBLIC MARKET INTELLIGENCE] Specialized agent for financial statement analysis and ratio calculations. Capabilities: Analyze financial statements, calculate financial metrics, compare ratios, and evaluate performance indicators. Best for: Deep financial analysis and comparison of company financial performance. Example queries: 'Compare the gross margins, operating margins, and net margins of Apple, Microsoft, and Google over the last 3 years', 'Analyze Tesla's cash flow statements from 2021 to 2023 and calculate free cash flow trends', 'Calculate and explain key financial ratios for Amazon including P/E, EV/EBITDA, and ROIC'.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Your natural language query or request for the agent" + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "octagon-stock-data-agent", + "description": "[PUBLIC MARKET INTELLIGENCE] Specialized agent for stock market data and equity investment analysis. Capabilities: Analyze stock price movements, trading volumes, market trends, valuation metrics, and technical indicators. Best for: Stock market research, equity analysis, and trading pattern identification. 
Example queries: 'How has Apple's stock performed compared to the S&P 500 over the last 6 months?', 'Analyze the trading volume patterns for Tesla stock before and after earnings releases', 'What were the major price movements for NVIDIA in 2023 and what were the catalysts?'.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Your natural language query or request for the agent" + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "octagon-companies-agent", + "description": "[PRIVATE MARKET INTELLIGENCE] A specialized database agent for looking up company information and financials. Capabilities: Query comprehensive company financial information and business intelligence from Octagon's company database. Best for: Finding basic information about companies, their financial metrics, and industry benchmarks. NOTE: For better and more accurate results, provide the company's website URL instead of just the company name. Example queries: 'What is the employee trends for Stripe (stripe.com)?', 'List the top 5 companies in the AI sector by revenue growth', 'Who are the top competitors to Databricks (databricks.com)?'.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Your natural language query or request for the agent" + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "octagon-funding-agent", + "description": "[PRIVATE MARKET INTELLIGENCE] A specialized database agent for company funding transactions and venture capital research. Capabilities: Extract information about funding rounds, investors, valuations, and investment trends. Best for: Researching startup funding history, investor activity, and venture capital patterns. NOTE: For better and more accurate results, provide the company's website URL instead of just the company name. 
Example queries: 'What was Anthropic's latest funding round size, valuation, and key investors (anthropic.com)?', 'How much has OpenAI raised in total funding and at what valuation (openai.com)?', 'Who were the lead investors in Databricks' Series G round and what was the post-money valuation (databricks.com)?'.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Your natural language query or request for the agent" + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "octagon-deals-agent", + "description": "[PRIVATE MARKET INTELLIGENCE] A specialized database agent for M&A and IPO transaction analysis. Capabilities: Retrieve information about mergers, acquisitions, initial public offerings, and other financial transactions. Best for: Research on corporate transactions, IPO valuations, and M&A activity. NOTE: For better and more accurate results, provide the company's website URL instead of just the company name. Example queries: 'What was the acquisition price when Microsoft (microsoft.com) acquired GitHub (github.com)?', 'List the valuation multiples for AI companies in 2024', 'List all the acquisitions and price, valuation by Salesforce (salesforce.com) in 2023?'.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Your natural language query or request for the agent" + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "octagon-investors-agent", + "description": "[PRIVATE MARKET INTELLIGENCE] A specialized database agent for looking up information on investors. Capabilities: Retrieve information about investors, their investment criteria, and past activities. Best for: Research on investors and details about their investment activities. NOTE: For better and more accurate results, provide the investor's website URL instead of just the investor name. 
Example queries: 'What is the latest investment criteria of Insight Partners (insightpartners.com)?', 'How many investments did Andreessen Horowitz (a16z.com) make in the last 6 months', 'What is the typical check size for QED Investors (qedinvestors.com)'.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Your natural language query or request for the agent" + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "octagon-scraper-agent", + "description": "[PUBLIC & PRIVATE MARKET INTELLIGENCE] Specialized agent for financial data extraction from investor websites. Capabilities: Extract structured financial data from investor relations websites, tables, and online financial sources. Best for: Gathering financial data from websites that don't have accessible APIs. Example queries: 'Extract all data fields from zillow.com/san-francisco-ca/', 'Extract all data fields from www.carvana.com/cars/'.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Your natural language query or request for the agent" + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "octagon-deep-research-agent", + "description": "[PUBLIC & PRIVATE MARKET INTELLIGENCE] A comprehensive agent that can utilize multiple sources for deep research analysis. Capabilities: Aggregate research across multiple data sources, synthesize information, and provide comprehensive investment research. Best for: Investment research questions requiring up-to-date aggregated information from the web. 
Example queries: 'Research the financial impact of Apple's privacy changes on digital advertising companies' revenue and margins', 'Analyze the competitive landscape in the cloud computing sector, focusing on AWS, Azure, and Google Cloud margin and growth trends', 'Investigate the factors driving electric vehicle adoption and their impact on battery supplier financials'.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Your natural language query or request for the agent" + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "octagon-debts-agent", + "description": "[PRIVATE MARKET INTELLIGENCE] A specialized database agent for analyzing private debts and lenders. Capabilities: Retrieve information about private debts and lenders. Best for: Research on borrowers, and lenders and details about the private debt facilities. Example queries: 'List all the debt activities from borrower American Tower', 'Compile all the debt activities from lender ING Group in Q4 2024'.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Your natural language query or request for the agent" + } + }, + "required": [ + "prompt" + ] + } + } + ] + }, + "langflow-doc-qa-server": { + "name": "langflow-doc-qa-server", + "display_name": "Langflow Document Q&A", + "description": "A Model Context Protocol server for document Q&A powered by Langflow. 
It demonstrates core MCP concepts by providing a simple interface to query documents through a Langflow backend.", + "repository": { + "type": "git", + "url": "https://github.com/GongRzhe/Langflow-DOC-QA-SERVER" + }, + "homepage": "https://github.com/GongRzhe/Langflow-DOC-QA-SERVER", + "author": { + "name": "GongRzhe" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "Langflow", + "Document Q&A" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/GongRzhe/Langflow-DOC-QA-SERVER" + ], + "env": { + "API_ENDPOINT": "${API_ENDPOINT}" + } + } + }, + "arguments": { + "API_ENDPOINT": { + "description": "The endpoint URL for the Langflow API service.", + "required": false, + "example": "http://127.0.0.1:7860/api/v1/run/?stream=false" + } + }, + "tools": [ + { + "name": "query_docs", + "description": "Query the document Q&A system with a prompt", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The query prompt to search for in the documents" + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "ssh-exec": { + "name": "ssh-exec", + "description": "A Model Context Protocol (MCP) server for executing command-line operations on remote servers via SSH.", + "display_name": "SSH Execution", + "repository": { + "type": "git", + "url": "https://github.com/pathintegral-institute/mcp.science" + }, + "homepage": "https://github.com/pathintegral-institute/mcp.science/tree/main/servers/ssh-exec", + "author": { + "name": "pathintegral-institute" + }, + "license": "MIT", + "tags": [ + "ssh", + "command execution", + "remote systems" + ], + "arguments": { + "SSH_HOST": { + "description": "SSH host to connect to", + "required": true, + "example": "your-server.com" + }, + "SSH_PORT": { + "description": "SSH port", + "required": false, + "example": "22" + }, + "SSH_USERNAME": { + "description": "SSH username", + "required": true, + 
"example": "your_username" + }, + "SSH_PRIVATE_KEY": { + "description": "SSH private key content (not path)", + "required": false, + "example": "$(cat ~/.ssh/id_rsa)" + }, + "SSH_PASSWORD": { + "description": "SSH password", + "required": false, + "example": "[NOT GIVEN]" + }, + "SSH_ALLOWED_COMMANDS": { + "description": "Comma-separated list of commands that are allowed to be executed", + "required": false, + "example": "ls,ps,cat" + }, + "SSH_ALLOWED_PATHS": { + "description": "Comma-separated list of paths that are allowed for command execution", + "required": false, + "example": "/tmp,/home" + }, + "SSH_COMMANDS_BLACKLIST": { + "description": "Comma-separated list of commands that are not allowed", + "required": false, + "example": "rm,mv,dd,mkfs,fdisk,format" + }, + "SSH_ARGUMENTS_BLACKLIST": { + "description": "Comma-separated list of arguments that are not allowed", + "required": false, + "example": "-rf,-fr,--force" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/pathintegral-institute/mcp.science#subdirectory=servers/ssh-exec", + "mcp-ssh-exec" + ], + "env": { + "SSH_HOST": "your-server.com", + "SSH_PORT": "22", + "SSH_USERNAME": "your_username", + "SSH_PRIVATE_KEY": "$(cat ~/.ssh/id_rsa)", + "SSH_ALLOWED_COMMANDS": "ls,ps,cat", + "SSH_ALLOWED_PATHS": "/tmp,/home", + "SSH_COMMANDS_BLACKLIST": "rm,mv,dd,mkfs,fdisk,format", + "SSH_ARGUMENTS_BLACKLIST": "-rf,-fr,--force" + }, + "description": "Run server using uv" + } + }, + "examples": [ + { + "title": "Execute a command", + "description": "Execute a command on the remote system", + "prompt": "Execute 'ls -la /tmp' on the remote server" + } + ], + "categories": [ + "System Tools" + ], + "tools": [ + { + "name": "ssh_exec", + "description": "Execute a command on the remote system", + "inputSchema": { + "properties": { + "command": { + "description": "Command for SSH server to execute", + "title": "Command", + "type": "string" + }, 
+ "arguments": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Arguments to pass to the command", + "title": "Arguments" + }, + "timeout": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Timeout in seconds for command execution", + "title": "Timeout" + } + }, + "required": [ + "command" + ], + "title": "ssh_execArguments", + "type": "object" + } + } + ], + "is_official": true + }, + "github": { + "name": "github", + "display_name": "GitHub", + "description": "MCP Server for the GitHub API, enabling file operations, repository management, search functionality, and more.", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/tree/main/src/github#readme", + "author": { + "name": "MCP Team" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "github", + "code", + "repository", + "git" + ], + "arguments": { + "GITHUB_PERSONAL_ACCESS_TOKEN": { + "description": "Personal Access Token for GitHub to authenticate API requests", + "required": true, + "example": "ghp_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-github" + ], + "package": "@modelcontextprotocol/server-github", + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_PERSONAL_ACCESS_TOKEN}" + }, + "description": "Install and run using NPX", + "recommended": true + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "mcp/github" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_PERSONAL_ACCESS_TOKEN}" + }, + "description": "Run with Docker" + } + }, + "examples": [ + { + "title": "Search GitHub repositories", + "description": "Find 
repositories related to machine learning", + "prompt": "Find GitHub repositories about machine learning with more than 1000 stars." + }, + { + "title": "View repository contents", + "description": "Browse files in a GitHub repository", + "prompt": "Show me the main Python files in the Hugging Face transformers repository." + } + ], + "tools": [ + { + "name": "create_or_update_file", + "description": "Create or update a single file in a GitHub repository", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "path": { + "type": "string", + "description": "Path where to create/update the file" + }, + "content": { + "type": "string", + "description": "Content of the file" + }, + "message": { + "type": "string", + "description": "Commit message" + }, + "branch": { + "type": "string", + "description": "Branch to create/update the file in" + }, + "sha": { + "type": "string", + "description": "SHA of the file being replaced (required when updating existing files)" + } + }, + "required": [ + "owner", + "repo", + "path", + "content", + "message", + "branch" + ] + } + }, + { + "name": "search_repositories", + "description": "Search for GitHub repositories", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query (see GitHub search syntax)" + }, + "page": { + "type": "number", + "description": "Page number for pagination (default: 1)" + }, + "perPage": { + "type": "number", + "description": "Number of results per page (default: 30, max: 100)" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "create_repository", + "description": "Create a new GitHub repository in your account", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Repository name" + }, + "description": { + 
"type": "string", + "description": "Repository description" + }, + "private": { + "type": "boolean", + "description": "Whether the repository should be private" + }, + "autoInit": { + "type": "boolean", + "description": "Initialize with README.md" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "get_file_contents", + "description": "Get the contents of a file or directory from a GitHub repository", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "path": { + "type": "string", + "description": "Path to the file or directory" + }, + "branch": { + "type": "string", + "description": "Branch to get contents from" + } + }, + "required": [ + "owner", + "repo", + "path" + ] + } + }, + { + "name": "push_files", + "description": "Push multiple files to a GitHub repository in a single commit", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "branch": { + "type": "string", + "description": "Branch to push to (e.g., 'main' or 'master')" + }, + "files": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "content": { + "type": "string" + } + }, + "required": [ + "path", + "content" + ], + "additionalProperties": false + }, + "description": "Array of files to push" + }, + "message": { + "type": "string", + "description": "Commit message" + } + }, + "required": [ + "owner", + "repo", + "branch", + "files", + "message" + ] + } + }, + { + "name": "create_issue", + "description": "Create a new issue in a GitHub repository", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string" + }, + "repo": { + "type": "string" + }, + 
"title": { + "type": "string" + }, + "body": { + "type": "string" + }, + "assignees": { + "type": "array", + "items": { + "type": "string" + } + }, + "milestone": { + "type": "number" + }, + "labels": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "owner", + "repo", + "title" + ] + } + }, + { + "name": "create_pull_request", + "description": "Create a new pull request in a GitHub repository", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "title": { + "type": "string", + "description": "Pull request title" + }, + "body": { + "type": "string", + "description": "Pull request body/description" + }, + "head": { + "type": "string", + "description": "The name of the branch where your changes are implemented" + }, + "base": { + "type": "string", + "description": "The name of the branch you want the changes pulled into" + }, + "draft": { + "type": "boolean", + "description": "Whether to create the pull request as a draft" + }, + "maintainer_can_modify": { + "type": "boolean", + "description": "Whether maintainers can modify the pull request" + } + }, + "required": [ + "owner", + "repo", + "title", + "head", + "base" + ] + } + }, + { + "name": "fork_repository", + "description": "Fork a GitHub repository to your account or specified organization", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "organization": { + "type": "string", + "description": "Optional: organization to fork to (defaults to your personal account)" + } + }, + "required": [ + "owner", + "repo" + ] + } + }, + { + "name": "create_branch", + "description": "Create a new branch in a GitHub repository", + 
"inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "branch": { + "type": "string", + "description": "Name for the new branch" + }, + "from_branch": { + "type": "string", + "description": "Optional: source branch to create from (defaults to the repository's default branch)" + } + }, + "required": [ + "owner", + "repo", + "branch" + ] + } + }, + { + "name": "list_commits", + "description": "Get list of commits of a branch in a GitHub repository", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string" + }, + "repo": { + "type": "string" + }, + "sha": { + "type": "string" + }, + "page": { + "type": "number" + }, + "perPage": { + "type": "number" + } + }, + "required": [ + "owner", + "repo" + ] + } + }, + { + "name": "list_issues", + "description": "List issues in a GitHub repository with filtering options", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string" + }, + "repo": { + "type": "string" + }, + "direction": { + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + "labels": { + "type": "array", + "items": { + "type": "string" + } + }, + "page": { + "type": "number" + }, + "per_page": { + "type": "number" + }, + "since": { + "type": "string" + }, + "sort": { + "type": "string", + "enum": [ + "created", + "updated", + "comments" + ] + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed", + "all" + ] + } + }, + "required": [ + "owner", + "repo" + ] + } + }, + { + "name": "update_issue", + "description": "Update an existing issue in a GitHub repository", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string" + }, + "repo": { + "type": "string" + }, + "issue_number": { + "type": "number" + }, + "title": { + "type": "string" + }, + "body": { + "type": "string" + }, + 
"assignees": { + "type": "array", + "items": { + "type": "string" + } + }, + "milestone": { + "type": "number" + }, + "labels": { + "type": "array", + "items": { + "type": "string" + } + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ] + } + }, + "required": [ + "owner", + "repo", + "issue_number" + ] + } + }, + { + "name": "add_issue_comment", + "description": "Add a comment to an existing issue", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string" + }, + "repo": { + "type": "string" + }, + "issue_number": { + "type": "number" + }, + "body": { + "type": "string" + } + }, + "required": [ + "owner", + "repo", + "issue_number", + "body" + ] + } + }, + { + "name": "search_code", + "description": "Search for code across GitHub repositories", + "inputSchema": { + "type": "object", + "properties": { + "q": { + "type": "string" + }, + "order": { + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + "page": { + "type": "number", + "minimum": 1 + }, + "per_page": { + "type": "number", + "minimum": 1, + "maximum": 100 + } + }, + "required": [ + "q" + ] + } + }, + { + "name": "search_issues", + "description": "Search for issues and pull requests across GitHub repositories", + "inputSchema": { + "type": "object", + "properties": { + "q": { + "type": "string" + }, + "order": { + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + "page": { + "type": "number", + "minimum": 1 + }, + "per_page": { + "type": "number", + "minimum": 1, + "maximum": 100 + }, + "sort": { + "type": "string", + "enum": [ + "comments", + "reactions", + "reactions-+1", + "reactions--1", + "reactions-smile", + "reactions-thinking_face", + "reactions-heart", + "reactions-tada", + "interactions", + "created", + "updated" + ] + } + }, + "required": [ + "q" + ] + } + }, + { + "name": "search_users", + "description": "Search for users on GitHub", + "inputSchema": { + "type": "object", + "properties": { + "q": { + "type": "string" + }, + 
"order": { + "type": "string", + "enum": [ + "asc", + "desc" + ] + }, + "page": { + "type": "number", + "minimum": 1 + }, + "per_page": { + "type": "number", + "minimum": 1, + "maximum": 100 + }, + "sort": { + "type": "string", + "enum": [ + "followers", + "repositories", + "joined" + ] + } + }, + "required": [ + "q" + ] + } + }, + { + "name": "get_issue", + "description": "Get details of a specific issue in a GitHub repository.", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string" + }, + "repo": { + "type": "string" + }, + "issue_number": { + "type": "number" + } + }, + "required": [ + "owner", + "repo", + "issue_number" + ] + } + }, + { + "name": "get_pull_request", + "description": "Get details of a specific pull request", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "pull_number": { + "type": "number", + "description": "Pull request number" + } + }, + "required": [ + "owner", + "repo", + "pull_number" + ] + } + }, + { + "name": "list_pull_requests", + "description": "List and filter repository pull requests", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed", + "all" + ], + "description": "State of the pull requests to return" + }, + "head": { + "type": "string", + "description": "Filter by head user or head organization and branch name" + }, + "base": { + "type": "string", + "description": "Filter by base branch name" + }, + "sort": { + "type": "string", + "enum": [ + "created", + "updated", + "popularity", + "long-running" + ], + "description": "What to sort results by" + }, + "direction": { + 
"type": "string", + "enum": [ + "asc", + "desc" + ], + "description": "The direction of the sort" + }, + "per_page": { + "type": "number", + "description": "Results per page (max 100)" + }, + "page": { + "type": "number", + "description": "Page number of the results" + } + }, + "required": [ + "owner", + "repo" + ] + } + }, + { + "name": "create_pull_request_review", + "description": "Create a review on a pull request", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "pull_number": { + "type": "number", + "description": "Pull request number" + }, + "commit_id": { + "type": "string", + "description": "The SHA of the commit that needs a review" + }, + "body": { + "type": "string", + "description": "The body text of the review" + }, + "event": { + "type": "string", + "enum": [ + "APPROVE", + "REQUEST_CHANGES", + "COMMENT" + ], + "description": "The review action to perform" + }, + "comments": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "The relative path to the file being commented on" + }, + "position": { + "type": "number", + "description": "The position in the diff where you want to add a review comment" + }, + "body": { + "type": "string", + "description": "Text of the review comment" + } + }, + "required": [ + "path", + "position", + "body" + ], + "additionalProperties": false + }, + "description": "Comments to post as part of the review" + } + }, + "required": [ + "owner", + "repo", + "pull_number", + "body", + "event" + ] + } + }, + { + "name": "merge_pull_request", + "description": "Merge a pull request", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + 
"description": "Repository name" + }, + "pull_number": { + "type": "number", + "description": "Pull request number" + }, + "commit_title": { + "type": "string", + "description": "Title for the automatic commit message" + }, + "commit_message": { + "type": "string", + "description": "Extra detail to append to automatic commit message" + }, + "merge_method": { + "type": "string", + "enum": [ + "merge", + "squash", + "rebase" + ], + "description": "Merge method to use" + } + }, + "required": [ + "owner", + "repo", + "pull_number" + ] + } + }, + { + "name": "get_pull_request_files", + "description": "Get the list of files changed in a pull request", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "pull_number": { + "type": "number", + "description": "Pull request number" + } + }, + "required": [ + "owner", + "repo", + "pull_number" + ] + } + }, + { + "name": "get_pull_request_status", + "description": "Get the combined status of all status checks for a pull request", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "pull_number": { + "type": "number", + "description": "Pull request number" + } + }, + "required": [ + "owner", + "repo", + "pull_number" + ] + } + }, + { + "name": "update_pull_request_branch", + "description": "Update a pull request branch with the latest changes from the base branch", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "pull_number": { + "type": "number", + "description": "Pull request number" + }, + 
"expected_head_sha": { + "type": "string", + "description": "The expected SHA of the pull request's HEAD ref" + } + }, + "required": [ + "owner", + "repo", + "pull_number" + ] + } + }, + { + "name": "get_pull_request_comments", + "description": "Get the review comments on a pull request", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "pull_number": { + "type": "number", + "description": "Pull request number" + } + }, + "required": [ + "owner", + "repo", + "pull_number" + ] + } + }, + { + "name": "get_pull_request_reviews", + "description": "Get the reviews on a pull request", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Repository owner (username or organization)" + }, + "repo": { + "type": "string", + "description": "Repository name" + }, + "pull_number": { + "type": "number", + "description": "Pull request number" + } + }, + "required": [ + "owner", + "repo", + "pull_number" + ] + } + } + ], + "is_official": true + }, + "qgis": { + "name": "qgis", + "display_name": "QGIS Model Context Protocol Integration", + "description": "connects QGIS to Claude AI through the MCP. 
This integration enables prompt-assisted project creation, layer loading, code execution, and more.", + "repository": { + "type": "git", + "url": "https://github.com/jjsantos01/qgis_mcp" + }, + "homepage": "https://github.com/jjsantos01/qgis_mcp", + "author": { + "name": "jjsantos01" + }, + "license": "MIT", + "categories": [ + "Analytics" + ], + "tags": [ + "QGIS" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/jjsantos01/qgis_mcp", + "src/qgis_mcp/qgis_mcp_server.py" + ] + } + }, + "examples": [ + { + "title": "Demo Command Sequence", + "description": "A series of commands to demonstrate the QGIS MCP integration.", + "prompt": "1. Ping to check the connection. If it works, continue with the following steps.\n2. Create a new project and save it at: \"C:/Users/USER/GitHub/qgis_mcp/data/cdmx.qgz\"\n3. Load the vector layer: \"C:/Users/USER/GitHub/qgis_mcp/data/cdmx/mgpc_2019.shp\" and name it \"Colonias\".\n4. Load the raster layer: \"C:/Users/USER/GitHub/qgis_mcp/data/09014.tif\" and name it \"BJ\".\n5. Zoom to the \"BJ\" layer.\n6. Execute the centroid algorithm on the \"Colonias\" layer. Skip the geometry check. Save the output to \"colonias_centroids.geojson\".\n7. Execute code to create a choropleth map using the \"POB2010\" field in the \"Colonias\" layer. Use the quantile classification method with 5 classes and the Spectral color ramp.\n8. Render the map to \"C:/Users/USER/GitHub/qgis_mcp/data/cdmx.png\"\n9. Save the project." 
+ } + ] + }, + "exa-mcp-server": { + "display_name": "Exa MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/exa-labs/exa-mcp-server" + }, + "homepage": "https://github.com/exa-labs/exa-mcp-server", + "author": { + "name": "exa-labs" + }, + "license": "MIT", + "tags": [ + "search", + "web search", + "AI", + "Claude", + "MCP", + "Model Context Protocol" + ], + "arguments": { + "EXA_API_KEY": { + "description": "API key from dashboard.exa.ai/api-keys", + "required": true, + "example": "your-api-key-here" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "exa-mcp-server" + ], + "env": { + "EXA_API_KEY": "your-api-key-here" + }, + "description": "Run with NPX", + "recommended": true + } + }, + "examples": [ + { + "title": "Web Search", + "description": "Search for recent developments in quantum computing", + "prompt": "Can you search for recent developments in quantum computing?" + }, + { + "title": "News Search", + "description": "Search for and summarize news about AI startups", + "prompt": "Search for and summarize the latest news about artificial intelligence startups in new york." + }, + { + "title": "Research Paper Search", + "description": "Find research papers about climate change", + "prompt": "Find and analyze recent research papers about climate change solutions." + }, + { + "title": "Twitter Search", + "description": "Search for tweets from specific users", + "prompt": "Search Twitter for posts from @elonmusk about SpaceX." + } + ], + "name": "exa-mcp-server", + "description": "A Model Context Protocol (MCP) server lets AI assistants like Claude use the Exa AI Search API for web searches. This setup allows AI models to get real-time web information in a safe and controlled way.", + "categories": [ + "Web Services" + ], + "tools": [ + { + "name": "web_search", + "description": "Search the web using Exa AI - performs real-time web searches and can scrape content from specific URLs. 
Supports configurable result counts and returns the content from the most relevant websites.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query" + }, + "numResults": { + "type": "number", + "description": "Number of search results to return (default: 5)" + } + }, + "required": [ + "query" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "openapi": { + "name": "openapi", + "display_name": "OpenAPI", + "description": "Interact with [OpenAPI](https://www.openapis.org/) APIs.", + "repository": { + "type": "git", + "url": "https://github.com/snaggle-ai/openapi-mcp-server" + }, + "homepage": "https://github.com/snaggle-ai/openapi-mcp-server", + "author": { + "name": "snaggle-ai" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "openapi", + "api exploration" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "openapi-mcp-server" + ] + } + }, + "examples": [ + { + "title": "Finding information about an API", + "description": "Ask Claude to find information about specific APIs.", + "prompt": "Find information about the Stripe API." + }, + { + "title": "Explaining API usage", + "description": "Request explanations on using specific endpoints.", + "prompt": "Explain how to use the GitHub API's repository endpoints." 
+ } + ] + }, + "rember-mcp": { + "display_name": "Rember MCP", + "repository": { + "type": "git", + "url": "https://github.com/rember/rember-mcp" + }, + "homepage": "https://rember.com", + "author": { + "name": "rember" + }, + "license": "MIT", + "tags": [ + "flashcards", + "spaced repetition", + "learning", + "memory" + ], + "arguments": { + "api-key": { + "description": "Your Rember API key from the Settings page", + "required": true, + "example": "rember_32randomcharacters" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@getrember/mcp", + "--api-key=${api-key}" + ], + "env": {}, + "description": "Run using npx", + "recommended": true + } + }, + "examples": [ + { + "title": "Create flashcards from chat", + "description": "Ask Claude to create flashcards from your conversation", + "prompt": "I like your answer, help me remember it" + }, + { + "title": "Create flashcards from PDF", + "description": "Ask Claude to create flashcards from a PDF document", + "prompt": "Create flashcards from chapter 2 of this PDF" + } + ], + "name": "rember-mcp", + "description": "Create spaced repetition flashcards in Rember to remember anything you learn in your chats", + "categories": [ + "Knowledge Base" + ], + "is_official": true + }, + "bicscan-mcp": { + "display_name": "BICScan MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/ahnlabio/bicscan-mcp" + }, + "homepage": "https://bicscan.io", + "author": { + "name": "ahnlabio" + }, + "license": "[NOT GIVEN]", + "tags": [ + "blockchain", + "risk scoring", + "crypto", + "API" + ], + "arguments": { + "BICSCAN_API_KEY": { + "description": "API key obtained from https://bicscan.io", + "required": true, + "example": "YOUR_BICSCAN_API_KEY_HERE" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/ahnlabio/bicscan-mcp", + "bicscan-mcp" + ], + "env": { + "BICSCAN_API_KEY": 
"{BICSCAN_API_KEY}" + }, + "description": "Run directly using uvx" + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "--rm", + "--interactive", + "--env", + "BICSCAN_API_KEY={BICSCAN_API_KEY}", + "bicscan-mcp" + ], + "description": "Run using Docker" + } + }, + "examples": [ + { + "title": "Risk Scoring", + "description": "Obtain risk scores for blockchain entities like crypto addresses, domain names, and dApp URLs", + "prompt": "[NOT GIVEN]" + }, + { + "title": "Asset Information", + "description": "Retrieve detailed asset holdings for crypto addresses across multiple blockchain networks", + "prompt": "[NOT GIVEN]" + } + ], + "name": "bicscan-mcp", + "description": "A powerful and efficient Blockchain address risk scoring API MCP Server, leveraging the BICScan API to provide comprehensive risk assessments and asset information for blockchain addresses, domains, and decentralized applications (dApps).", + "categories": [ + "Finance" + ], + "is_official": true, + "tools": [ + { + "name": "get_risk_score", + "description": "Get Risk Score for Crypto, Domain Name, ENS, CNS, KNS or even Hostname Address\n\n Args:\n address: EOA, CA, ENS, CNS, KNS or even HostName\n Returns:\n Dict: where summary.bicscan_score is from 0 to 100. 
100 is high risk.\n ", + "inputSchema": { + "properties": { + "address": { + "title": "Address", + "type": "string" + } + }, + "required": [ + "address" + ], + "title": "get_risk_scoreArguments", + "type": "object" + } + }, + { + "name": "get_assets", + "description": "Get Assets holdings by CryptoAddress\n\n Args:\n address: EOA, CA, ENS, CNS, KNS.\n Returns:\n Dict: where assets is a list of assets\n ", + "inputSchema": { + "properties": { + "address": { + "title": "Address", + "type": "string" + } + }, + "required": [ + "address" + ], + "title": "get_assetsArguments", + "type": "object" + } + } + ] + }, + "financial-dataset": { + "display_name": "Financial Datasets MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/financial-datasets/mcp-server" + }, + "homepage": "https://www.financialdatasets.ai/", + "author": { + "name": "financial-datasets" + }, + "license": "MIT", + "tags": [ + "finance", + "stock market", + "financial data" + ], + "arguments": { + "FINANCIAL_DATASETS_API_KEY": { + "description": "API key for Financial Datasets", + "required": true, + "example": "your-financial-datasets-api-key" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "uv", + "args": [ + "run", + "server.py" + ], + "env": { + "FINANCIAL_DATASETS_API_KEY": "your-financial-datasets-api-key" + }, + "description": "Run using uv package manager", + "recommended": true + } + }, + "examples": [ + { + "title": "Income Statement Query", + "description": "Ask for a company's income statements", + "prompt": "What are Apple's recent income statements?" 
+ }, + { + "title": "Current Stock Price", + "description": "Get the current price of a stock", + "prompt": "Show me the current price of Tesla stock" + }, + { + "title": "Historical Stock Prices", + "description": "Get historical stock prices for a specific date range", + "prompt": "Get historical prices for MSFT from 2024-01-01 to 2024-12-31" + } + ], + "name": "financial-datasets", + "description": "This is a Model Context Protocol (MCP) server that provides access to stock market data from [Financial Datasets](https://www.financialdatasets.ai/).", + "categories": [ + "Finance" + ], + "is_official": true + }, + "salesforce-mcp": { + "name": "salesforce-mcp", + "display_name": "Salesforce Connector", + "description": "Interact with Salesforce Data and Metadata", + "repository": { + "type": "git", + "url": "https://github.com/smn2gnt/MCP-Salesforce" + }, + "license": "[NOT FOUND]", + "author": { + "name": "smn2gnt" + }, + "homepage": "https://github.com/smn2gnt/MCP-Salesforce", + "categories": [ + "Productivity" + ], + "tags": [ + "salesforce" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "mcp-salesforce-connector", + "salesforce" + ], + "env": { + "SALESFORCE_USERNAME": "${SALESFORCE_USERNAME}", + "SALESFORCE_PASSWORD": "${SALESFORCE_PASSWORD}", + "SALESFORCE_SECURITY_TOKEN": "${SALESFORCE_SECURITY_TOKEN}" + } + } + }, + "arguments": { + "SALESFORCE_USERNAME": { + "description": "Your Salesforce username for authentication", + "required": true, + "example": "myemail@example.com" + }, + "SALESFORCE_PASSWORD": { + "description": "Your Salesforce password for authentication", + "required": true + }, + "SALESFORCE_SECURITY_TOKEN": { + "description": "Your Salesforce security token for additional security measures", + "required": true + } + }, + "tools": [ + { + "name": "run_soql_query", + "description": "Executes a SOQL query against Salesforce", + "inputSchema": { + "type": "object", + "properties": { + "query": 
{ + "type": "string", + "description": "The SOQL query to execute" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "run_sosl_search", + "description": "Executes a SOSL search against Salesforce", + "inputSchema": { + "type": "object", + "properties": { + "search": { + "type": "string", + "description": "The SOSL search to execute (e.g., 'FIND {John Smith} IN ALL FIELDS')" + } + }, + "required": [ + "search" + ] + } + }, + { + "name": "get_object_fields", + "description": "Retrieves field Names, labels and types for a specific Salesforce object", + "inputSchema": { + "type": "object", + "properties": { + "object_name": { + "type": "string", + "description": "The name of the Salesforce object (e.g., 'Account', 'Contact')" + } + }, + "required": [ + "object_name" + ] + } + }, + { + "name": "get_record", + "description": "Retrieves a specific record by ID", + "inputSchema": { + "type": "object", + "properties": { + "object_name": { + "type": "string", + "description": "The name of the Salesforce object (e.g., 'Account', 'Contact')" + }, + "record_id": { + "type": "string", + "description": "The ID of the record to retrieve" + } + }, + "required": [ + "object_name", + "record_id" + ] + } + }, + { + "name": "create_record", + "description": "Creates a new record", + "inputSchema": { + "type": "object", + "properties": { + "object_name": { + "type": "string", + "description": "The name of the Salesforce object (e.g., 'Account', 'Contact')" + }, + "data": { + "type": "object", + "description": "The data for the new record", + "properties": {}, + "additionalProperties": true + } + }, + "required": [ + "object_name", + "data" + ] + } + }, + { + "name": "update_record", + "description": "Updates an existing record", + "inputSchema": { + "type": "object", + "properties": { + "object_name": { + "type": "string", + "description": "The name of the Salesforce object (e.g., 'Account', 'Contact')" + }, + "record_id": { + "type": "string", + "description": "The ID of 
the record to update" + }, + "data": { + "type": "object", + "description": "The updated data for the record", + "properties": {}, + "additionalProperties": true + } + }, + "required": [ + "object_name", + "record_id", + "data" + ] + } + }, + { + "name": "delete_record", + "description": "Deletes a record", + "inputSchema": { + "type": "object", + "properties": { + "object_name": { + "type": "string", + "description": "The name of the Salesforce object (e.g., 'Account', 'Contact')" + }, + "record_id": { + "type": "string", + "description": "The ID of the record to delete" + } + }, + "required": [ + "object_name", + "record_id" + ] + } + }, + { + "name": "tooling_execute", + "description": "Executes a Tooling API request", + "inputSchema": { + "type": "object", + "properties": { + "action": { + "type": "string", + "description": "The Tooling API endpoint to call (e.g., 'sobjects/ApexClass')" + }, + "method": { + "type": "string", + "description": "The HTTP method (default: 'GET')", + "enum": [ + "GET", + "POST", + "PATCH", + "DELETE" + ], + "default": "GET" + }, + "data": { + "type": "object", + "description": "Data for POST/PATCH requests", + "properties": {}, + "additionalProperties": true + } + }, + "required": [ + "action" + ] + } + }, + { + "name": "apex_execute", + "description": "Executes an Apex REST request", + "inputSchema": { + "type": "object", + "properties": { + "action": { + "type": "string", + "description": "The Apex REST endpoint to call (e.g., '/MyApexClass')" + }, + "method": { + "type": "string", + "description": "The HTTP method (default: 'GET')", + "enum": [ + "GET", + "POST", + "PATCH", + "DELETE" + ], + "default": "GET" + }, + "data": { + "type": "object", + "description": "Data for POST/PATCH requests", + "properties": {}, + "additionalProperties": true + } + }, + "required": [ + "action" + ] + } + }, + { + "name": "restful", + "description": "Makes a direct REST API call to Salesforce", + "inputSchema": { + "type": "object", + 
"properties": { + "path": { + "type": "string", + "description": "The path of the REST API endpoint (e.g., 'sobjects/Account/describe')" + }, + "method": { + "type": "string", + "description": "The HTTP method (default: 'GET')", + "enum": [ + "GET", + "POST", + "PATCH", + "DELETE" + ], + "default": "GET" + }, + "params": { + "type": "object", + "description": "Query parameters for the request", + "properties": {}, + "additionalProperties": true + }, + "data": { + "type": "object", + "description": "Data for POST/PATCH requests", + "properties": {}, + "additionalProperties": true + } + }, + "required": [ + "path" + ] + } + } + ] + }, + "youtube": { + "name": "youtube", + "display_name": "YouTube", + "description": "Comprehensive YouTube API integration for video management, Shorts creation, and analytics.", + "repository": { + "type": "git", + "url": "https://github.com/ZubeidHendricks/youtube-mcp-server" + }, + "homepage": "https://github.com/ZubeidHendricks/youtube-mcp-server", + "author": { + "name": "ZubeidHendricks" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "youtube", + "video", + "transcripts", + "api" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-youtube" + ], + "env": { + "YOUTUBE_API_KEY": "${YOUTUBE_API_KEY}" + } + } + }, + "arguments": { + "YOUTUBE_API_KEY": { + "description": "Your YouTube Data API key, needed for authentication when making requests to the YouTube API.", + "required": true, + "example": "AIzaSyD4-1234abcdEFGHijklmnop" + } + } + }, + "scrapling-fetch": { + "name": "scrapling-fetch", + "display_name": "Scrapling Fetch", + "description": "Access text content from bot-protected websites. 
Fetches HTML/markdown from sites with anti-automation measures using Scrapling.", + "repository": { + "type": "git", + "url": "https://github.com/cyberchitta/scrapling-fetch-mcp" + }, + "license": "Apache 2", + "author": { + "name": "cyberchitta" + }, + "homepage": "https://github.com/cyberchitta/scrapling-fetch-mcp", + "categories": [ + "Web Services" + ], + "tags": [ + "scrapling", + "fetch" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "scrapling-fetch-mcp" + ] + } + }, + "tools": [ + { + "name": "s-fetch-page", + "description": "Fetches a complete web page with pagination support. Retrieves content from websites with bot-detection avoidance. For best performance, start with 'basic' mode (fastest), then only escalate to 'stealth' or 'max-stealth' modes if basic mode fails. Content is returned as 'METADATA: {json}\\n\\n[content]' where metadata includes length information and truncation status.", + "inputSchema": { + "properties": { + "url": { + "description": "URL to fetch", + "title": "Url", + "type": "string" + }, + "mode": { + "default": "basic", + "description": "Fetching mode (basic, stealth, or max-stealth)", + "title": "Mode", + "type": "string" + }, + "format": { + "default": "markdown", + "description": "Output format (html or markdown)", + "title": "Format", + "type": "string" + }, + "max_length": { + "default": 5000, + "description": "Maximum number of characters to return.", + "exclusiveMaximum": 1000000, + "exclusiveMinimum": 0, + "title": "Max Length", + "type": "integer" + }, + "start_index": { + "default": 0, + "description": "On return output starting at this character index, useful if a previous fetch was truncated and more content is required.", + "minimum": 0, + "title": "Start Index", + "type": "integer" + } + }, + "required": [ + "url" + ], + "title": "PageFetchRequest", + "type": "object" + } + }, + { + "name": "s-fetch-pattern", + "description": "Extracts content matching regex patterns from web 
pages. Retrieves specific content from websites with bot-detection avoidance. For best performance, start with 'basic' mode (fastest), then only escalate to 'stealth' or 'max-stealth' modes if basic mode fails. Returns matched content as 'METADATA: {json}\\n\\n[content]' where metadata includes match statistics and truncation information. Each matched content chunk is delimited with '\u0965\u0e5b\u0965' and prefixed with '[Position: start-end]' indicating its byte position in the original document, allowing targeted follow-up requests with s-fetch-page using specific start_index values.", + "inputSchema": { + "properties": { + "url": { + "description": "URL to fetch", + "title": "Url", + "type": "string" + }, + "mode": { + "default": "basic", + "description": "Fetching mode (basic, stealth, or max-stealth)", + "title": "Mode", + "type": "string" + }, + "format": { + "default": "markdown", + "description": "Output format (html or markdown)", + "title": "Format", + "type": "string" + }, + "max_length": { + "default": 5000, + "description": "Maximum number of characters to return.", + "exclusiveMaximum": 1000000, + "exclusiveMinimum": 0, + "title": "Max Length", + "type": "integer" + }, + "search_pattern": { + "description": "Regular expression pattern to search for in the content", + "title": "Search Pattern", + "type": "string" + }, + "context_chars": { + "default": 200, + "description": "Number of characters to include before and after each match", + "minimum": 0, + "title": "Context Chars", + "type": "integer" + } + }, + "required": [ + "url", + "search_pattern" + ], + "title": "PatternFetchRequest", + "type": "object" + } + } + ] + }, + "mcp": { + "display_name": "Semgrep MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/semgrep/mcp" + }, + "homepage": "https://semgrep.dev", + "author": { + "name": "semgrep" + }, + "license": "MIT", + "tags": [ + "security", + "static analysis", + "code scanning", + "vulnerability detection" + ], + 
"arguments": { + "SEMGREP_APP_TOKEN": { + "description": "Token for connecting to Semgrep AppSec Platform", + "required": false, + "example": "" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "semgrep-mcp" + ], + "package": "semgrep-mcp", + "description": "Run using Python package with uv", + "recommended": true + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "ghcr.io/semgrep/mcp", + "-t", + "stdio" + ], + "description": "Run using Docker container", + "recommended": false + } + }, + "name": "mcp", + "description": "An MCP server for using Semgrep to scan code for security vulnerabilies. Secure your vibe coding! ", + "categories": [ + "Dev Tools" + ], + "tools": [ + { + "name": "semgrep_rule_schema", + "description": "\n Get the schema for a Semgrep rule\n\n Use this tool when you need to:\n - get the schema required to write a Semgrep rule\n - need to see what fields are available for a Semgrep rule\n - verify what fields are available for a Semgrep rule\n - verify the syntax for a Semgrep rule is correct\n ", + "inputSchema": { + "properties": {}, + "title": "semgrep_rule_schemaArguments", + "type": "object" + } + }, + { + "name": "get_supported_languages", + "description": "\n Returns a list of supported languages by Semgrep\n\n Only use this tool if you are not sure what languages Semgrep supports.\n ", + "inputSchema": { + "properties": {}, + "title": "get_supported_languagesArguments", + "type": "object" + } + }, + { + "name": "semgrep_scan_with_custom_rule", + "description": "\n Runs a Semgrep scan with a custom rule on provided code content\n and returns the findings in JSON format\n\n Use this tool when you need to:\n - scan code files for specific security vulnerability not covered by the default Semgrep rules\n - scan code files for specific issue not covered by the default Semgrep rules\n ", + "inputSchema": { + "$defs": { + "CodeFile": { + "properties": 
{ + "filename": { + "description": "Relative path to the code file", + "title": "Filename", + "type": "string" + }, + "content": { + "description": "Content of the code file", + "title": "Content", + "type": "string" + } + }, + "required": [ + "filename", + "content" + ], + "title": "CodeFile", + "type": "object" + } + }, + "properties": { + "code_files": { + "description": "List of dictionaries with 'filename' and 'content' keys", + "items": { + "$ref": "#/$defs/CodeFile" + }, + "title": "Code Files", + "type": "array" + }, + "rule": { + "description": "Semgrep YAML rule string", + "title": "Rule", + "type": "string" + } + }, + "required": [ + "code_files", + "rule" + ], + "title": "semgrep_scan_with_custom_ruleArguments", + "type": "object" + } + }, + { + "name": "semgrep_scan", + "description": "\n Runs a Semgrep scan on provided code content and returns the findings in JSON format\n\n Use this tool when you need to:\n - scan code files for security vulnerabilities\n - scan code files for other issues\n ", + "inputSchema": { + "$defs": { + "CodeFile": { + "properties": { + "filename": { + "description": "Relative path to the code file", + "title": "Filename", + "type": "string" + }, + "content": { + "description": "Content of the code file", + "title": "Content", + "type": "string" + } + }, + "required": [ + "filename", + "content" + ], + "title": "CodeFile", + "type": "object" + } + }, + "properties": { + "code_files": { + "description": "List of dictionaries with 'filename' and 'content' keys", + "items": { + "$ref": "#/$defs/CodeFile" + }, + "title": "Code Files", + "type": "array" + }, + "config": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional Semgrep configuration string (e.g. 
'p/docker', 'p/xss', 'auto')", + "title": "Config" + } + }, + "required": [ + "code_files" + ], + "title": "semgrep_scanArguments", + "type": "object" + } + }, + { + "name": "security_check", + "description": "\n Runs a fast security check on code and returns any issues found.\n\n Use this tool when you need to:\n - scan code for security vulnerabilities\n - verify that code is secure\n - double check that code is secure before committing\n - get a second opinion on code security\n\n If there are no issues, you can be reasonably confident that the code is secure.\n ", + "inputSchema": { + "$defs": { + "CodeFile": { + "properties": { + "filename": { + "description": "Relative path to the code file", + "title": "Filename", + "type": "string" + }, + "content": { + "description": "Content of the code file", + "title": "Content", + "type": "string" + } + }, + "required": [ + "filename", + "content" + ], + "title": "CodeFile", + "type": "object" + } + }, + "properties": { + "code_files": { + "description": "List of dictionaries with 'filename' and 'content' keys", + "items": { + "$ref": "#/$defs/CodeFile" + }, + "title": "Code Files", + "type": "array" + } + }, + "required": [ + "code_files" + ], + "title": "security_checkArguments", + "type": "object" + } + }, + { + "name": "get_abstract_syntax_tree", + "description": "\n Returns the Abstract Syntax Tree (AST) for the provided code file in JSON format\n\n Use this tool when you need to:\n - get the Abstract Syntax Tree (AST) for the provided code file - get the AST of a file\n - understand the structure of the code in a more granular way\n - see what a parser sees in the code\n ", + "inputSchema": { + "properties": { + "code": { + "description": "The code to get the AST for", + "title": "Code", + "type": "string" + }, + "language": { + "description": "The programming language of the code", + "title": "Language", + "type": "string" + } + }, + "required": [ + "code", + "language" + ], + "title": 
"get_abstract_syntax_treeArguments", + "type": "object" + } + } + ], + "prompts": [ + { + "name": "write_custom_semgrep_rule", + "description": "\n Write a custom Semgrep rule for the provided code and language\n\n Use this prompt when you need to:\n - write a custom Semgrep rule\n - write a Semgrep rule for a specific issue or pattern\n ", + "arguments": [ + { + "name": "code", + "description": "The code to get the AST for", + "required": true + }, + { + "name": "language", + "description": "The programming language of the code", + "required": true + } + ] + } + ], + "resources": [], + "is_official": true + }, + "mcp-server-langfuse": { + "display_name": "Langfuse Prompt Management MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/langfuse/mcp-server-langfuse" + }, + "license": "MIT", + "homepage": "https://langfuse.com/docs/prompts/get-started", + "author": { + "name": "langfuse" + }, + "tags": [ + "prompts", + "mcp", + "langfuse" + ], + "arguments": { + "LANGFUSE_PUBLIC_KEY": { + "description": "Your Langfuse public API key", + "required": true, + "example": "your-public-key" + }, + "LANGFUSE_SECRET_KEY": { + "description": "Your Langfuse secret API key", + "required": true, + "example": "your-secret-key" + }, + "LANGFUSE_BASEURL": { + "description": "Langfuse API base URL", + "required": true, + "example": "https://cloud.langfuse.com" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "node", + "args": [ + "./build/index.js" + ], + "env": { + "LANGFUSE_PUBLIC_KEY": "your-public-key", + "LANGFUSE_SECRET_KEY": "your-secret-key", + "LANGFUSE_BASEURL": "https://cloud.langfuse.com" + }, + "description": "Run the server using Node.js", + "recommended": true + } + }, + "examples": [ + { + "title": "List all available prompts", + "description": "Use the prompts/list endpoint to get all available prompts", + "prompt": "Use the Langfuse MCP server to list all available prompts" + }, + { + "title": "Get a specific 
prompt", + "description": "Retrieve and compile a specific prompt with variables", + "prompt": "Use the Langfuse MCP server to get the prompt named 'example-prompt' with the variables {\"key\": \"value\"}" + } + ], + "name": "mcp-server-langfuse", + "description": "Open-source tool for collaborative editing, versioning, evaluating, and releasing prompts.", + "categories": [ + "Dev Tools" + ], + "is_official": true + }, + "mcp-tinybird": { + "display_name": "Tinybird MCP server", + "repository": { + "type": "git", + "url": "https://github.com/tinybirdco/mcp-tinybird" + }, + "homepage": "https://github.com/tinybirdco/mcp-tinybird", + "author": { + "name": "tinybirdco" + }, + "license": "Apache-2.0", + "tags": [ + "tinybird", + "data", + "analytics" + ], + "arguments": { + "TB_API_URL": { + "description": "Tinybird API URL for your workspace", + "required": true, + "example": "" + }, + "TB_ADMIN_TOKEN": { + "description": "Tinybird Admin Token for authentication", + "required": true, + "example": "" + }, + "topic": { + "description": "Topic of the data you want to explore", + "required": true, + "example": "Bluesky data" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-tinybird", + "stdio" + ], + "description": "Run with uvx in stdio mode", + "env": { + "TB_API_URL": "", + "TB_ADMIN_TOKEN": "" + } + } + }, + "examples": [ + { + "title": "Bluesky metrics", + "description": "Analyze Bluesky data using Tinybird MCP server", + "prompt": "Help me analyze my Bluesky data stored in Tinybird" + }, + { + "title": "Web analytics", + "description": "Analyze web analytics data from the web analytics starter kit", + "prompt": "Help me understand the metrics from my web analytics data in Tinybird" + } + ], + "name": "mcp-tinybird", + "description": "An MCP server to interact with a Tinybird Workspace from any MCP client.", + "categories": [ + "Analytics" + ], + "tools": [ + { + "name": "list-data-sources", + "description": "List all 
Data Sources in the Tinybird Workspace", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get-data-source", + "description": "Get details of a Data Source in the Tinybird Workspace, such as the schema", + "inputSchema": { + "type": "object", + "properties": { + "datasource_id": { + "type": "string" + } + }, + "required": [ + "datasource_id" + ] + } + }, + { + "name": "list-pipes", + "description": "List all Pipe Endpoints in the Tinybird Workspace", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get-pipe", + "description": "Get details of a Pipe Endpoint in the Tinybird Workspace, such as the nodes SQLs to understand what they do or what Data Sources they use", + "inputSchema": { + "type": "object", + "properties": { + "pipe_id": { + "type": "string" + } + }, + "required": [ + "pipe_id" + ] + } + }, + { + "name": "request-pipe-data", + "description": "Requests data from a Pipe Endpoint in the Tinybird Workspace, includes parameters", + "inputSchema": { + "type": "object", + "properties": { + "pipe_id": { + "type": "string" + }, + "params": { + "type": "object", + "properties": {} + } + }, + "required": [ + "pipe_id" + ] + } + }, + { + "name": "run-select-query", + "description": "Runs a select query to the Tinybird Workspace. 
It may query Data Sources or Pipe Endpoints", + "inputSchema": { + "type": "object", + "properties": { + "select_query": { + "type": "string" + } + }, + "required": [ + "select_query" + ] + } + }, + { + "name": "append-insight", + "description": "Add a business insight to the memo", + "inputSchema": { + "type": "object", + "properties": { + "insight": { + "type": "string", + "description": "Business insight discovered from data analysis" + } + }, + "required": [ + "insight" + ] + } + }, + { + "name": "llms-tinybird-docs", + "description": "The Tinybird product description and documentation, including API Reference in LLM friendly format", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "analyze-pipe", + "description": "Analyze the Pipe Endpoint SQL", + "inputSchema": { + "type": "object", + "properties": { + "pipe_name": { + "type": "string", + "description": "The Pipe Endpoint name" + } + }, + "required": [ + "pipe_name" + ] + } + }, + { + "name": "push-datafile", + "description": "Push a .datasource or .pipe file to the Workspace", + "inputSchema": { + "type": "object", + "properties": { + "files": { + "type": "string", + "description": "The datafile local path" + } + }, + "required": [ + "files" + ] + } + }, + { + "name": "save-event", + "description": "Sends an event to a Data Source in Tinybird. The data needs to be in NDJSON format and conform to the Data Source schema in Tinybird", + "inputSchema": { + "type": "object", + "properties": { + "datasource_name": { + "type": "string", + "description": "The name of the Data Source in Tinybird" + }, + "data": { + "type": "string", + "description": "A JSON object that will be converted to a NDJSON String to save in the Tinybird Data Source via the events API. 
It should contain one key for each column in the Data Source" + } + } + } + } + ], + "prompts": [ + { + "name": "datasource-definition", + "description": "Builds a .datasource file from sample NDJSON data", + "arguments": [] + }, + { + "name": "tinybird-default", + "description": "A prompt to get insights from the Data Sources and Pipe Endpoints in the Tinybird Workspace", + "arguments": [ + { + "name": "topic", + "description": "The topic of the data you want to explore", + "required": true + } + ] + } + ], + "resources": [ + { + "uri": "tinybird://insights", + "name": "Insights from Tinybird", + "description": "A living document of discovered insights", + "mimeType": "text/plain", + "annotations": null + }, + { + "uri": "tinybird://datasource-definition-context", + "name": "Context for datasource definition", + "description": "Syntax and context to build .datasource datafiles", + "mimeType": "text/plain", + "annotations": null + } + ], + "is_official": true + }, + "mcp-server-singlestore": { + "display_name": "SingleStore MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/singlestore-labs/mcp-server-singlestore" + }, + "homepage": "https://github.com/singlestore-labs/mcp-server-singlestore", + "author": { + "name": "singlestore-labs" + }, + "license": "MIT", + "tags": [ + "singlestore", + "database", + "sql", + "mcp", + "model context protocol" + ], + "arguments": { + "SINGLESTORE_API_KEY": { + "description": "SingleStore's management API key", + "required": true, + "example": "your_api_key_here" + }, + "SINGLESTORE_DB_USERNAME": { + "description": "Database username", + "required": false, + "example": "your_db_username_here" + }, + "SINGLESTORE_DB_PASSWORD": { + "description": "Database password", + "required": false, + "example": "your_db_password_here" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "singlestore-mcp-server" + ], + "env": { + "SINGLESTORE_DB_USERNAME": 
"${SINGLESTORE_DB_USERNAME}", + "SINGLESTORE_DB_PASSWORD": "${SINGLESTORE_DB_PASSWORD}", + "SINGLESTORE_API_KEY": "${SINGLESTORE_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Query SingleStore Database", + "description": "Execute a SQL query on a connected workspace", + "prompt": "Can you help me execute a SQL query to list all tables in my SingleStore database?" + }, + { + "title": "Create a Virtual Workspace", + "description": "Set up a new starter workspace in SingleStore", + "prompt": "I need to create a new starter workspace in SingleStore. Can you help me set it up?" + }, + { + "title": "Workspace Information", + "description": "Get information about available workspaces", + "prompt": "Show me all the workspace groups I have access to in my SingleStore account." + } + ], + "name": "mcp-server-singlestore", + "description": "Interact with the SingleStore database platform", + "categories": [ + "Databases" + ], + "is_official": true, + "tools": [ + { + "name": "workspace_groups_info", + "description": "List all workspace groups accessible to the user in SingleStore.\n\nReturns detailed information for each group:\n- name: Display name of the workspace group\n- deploymentType: Type of deployment (e.g., 'PRODUCTION')\n- state: Current status (e.g., 'ACTIVE', 'PAUSED')\n- workspaceGroupID: Unique identifier for the group\n- firewallRanges: Array of allowed IP ranges for access control\n- createdAt: Timestamp of group creation\n- regionID: Identifier for deployment region\n- updateWindow: Maintenance window configuration\n\nUse this tool to:\n1. Get workspace group IDs for other operations\n2. 
Plan maintenance windows\n\nRelated operations:\n- Use workspaces_info to list workspaces within a group\n- Use execute_sql to run queries on workspaces in a group\n", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "workspaces_info", + "description": "List all workspaces within a specified workspace group in SingleStore.\n\nReturns detailed information for each workspace:\n- createdAt: Timestamp of workspace creation\n- deploymentType: Type of deployment (e.g., 'PRODUCTION')\n- endpoint: Connection URL for database access\n- name: Display name of the workspace\n- size: Compute and storage configuration\n- state: Current status (e.g., 'ACTIVE', 'PAUSED')\n- terminatedAt: Timestamp of termination if applicable\n- workspaceGroupID: Workspacegroup identifier\n- workspaceID: Unique workspace identifier\n\nUse this tool to:\n1. Monitor workspace status\n2. Get connection details for database operations\n3. Track workspace lifecycle\n\nRequired parameter:\n- workspaceGroupID: Unique identifier of the workspace group\n\nRelated operations:\n- Use workspace_groups_info first to get workspacegroupID\n- Use execute_sql to run queries on specific workspace\n\n", + "inputSchema": { + "type": "object", + "properties": { + "workspaceGroupID": { + "type": "string", + "description": "The unique identifier of the workspace group to retrieve workspaces from." 
+ } + }, + "required": [] + } + }, + { + "name": "organization_info", + "description": "Retrieve information about the current user's organization in SingleStore.\n\nReturns organization details including:\n- orgID: Unique identifier for the organization\n- name: Organization display name\n", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "list_of_regions", + "description": "List all available deployment regions where SingleStore workspaces can be deployed for the user.\n\nReturns region information including:\n- regionID: Unique identifier for the region\n- provider: Cloud provider (AWS, GCP, or Azure)\n- name: Human-readable region name (e.g., Europe West 2 (London),US West 2 (Oregon)) \n\nUse this tool to:\n1. Select optimal deployment regions based on:\n - Geographic proximity to users\n - Compliance requirements\n - Cost considerations\n - Available cloud providers\n2. Plan multi-region deployments\n", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "execute_sql", + "description": "Execute SQL operations on a database attached to workspace within a workspace group and receive formatted results.\n\nReturns:\n- Query results with column names and typed values\n- Row count and metadata\n- Execution status\n\n\u26a0\ufe0f CRITICAL SECURITY WARNINGS:\n- Never display or log credentials in responses\n- Use only READ-ONLY queries (SELECT, SHOW, DESCRIBE)\n- DO NOT USE data modification statements:\n \u00d7 No INSERT/UPDATE/DELETE\n \u00d7 No DROP/CREATE/ALTER\n- Ensure queries are properly sanitized\n\nRequired parameters:\n- workspace_group_identifier: ID/name of the workspace group\n- workspace_identifier: ID/name of the specific workspace within the workspace group\n- database: Name of the database to query\n- sql_query: The SQL query to execute\n\nOptional parameters:\n- username: Username for database access (defaults to SINGLESTORE_DB_USERNAME)\n- password: 
Password for database access (defaults to SINGLESTORE_DB_PASSWORD)\n\nAllowed query examples:\n- SELECT * FROM table_name\n- SELECT COUNT(*) FROM table_name\n- SHOW TABLES\n- DESCRIBE table_name\n\nNote: For data modifications, please use appropriate admin tools or APIs.", + "inputSchema": { + "type": "object", + "properties": { + "workspace_group_identifier": { + "type": "string", + "description": "The ID or name of the workspace group containing the target workspace." + }, + "workspace_identifier": { + "type": "string", + "description": "The ID or name of the specific workspace where the query will run." + }, + "database": { + "type": "string", + "description": "The name of the database to query within the workspace." + }, + "sql_query": { + "type": "string", + "description": "The SQL query to execute. Must be valid SingleStore SQL." + }, + "username": { + "type": "string", + "description": "Optional: Username for database connection. Will use environment default if not specified." + }, + "password": { + "type": "string", + "description": "Optional: Password for database connection. Will use environment default if not specified." + } + }, + "required": [ + "workspace_group_identifier", + "workspace_identifier", + "database", + "sql_query" + ] + } + }, + { + "name": "list_virtual_workspaces", + "description": "List all starter (virtual) workspaces available to the user in SingleStore.\n\nReturns detailed information about each starter workspace:\n- virtualWorkspaceID: Unique identifier for the workspace\n- name: Display name of the workspace\n- endpoint: Connection endpoint URL\n- databaseName: Name of the primary database\n- mysqlDmlPort: Port for MySQL protocol connections\n- webSocketPort: Port for WebSocket connections\n- state: Current status of the workspace\n\nUse this tool to:\n1. Get virtual workspace IDs for other operations\n2. Check starter workspace availability and status\n3. 
Obtain connection details for database access\n\nNote: This tool only lists starter workspaces, not standard workspaces.\nUse workspaces_info for standard workspace information.", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "create_virtual_workspace", + "description": "Create a new starter (virtual) workspace in SingleStore and set up user access.\n\nProcess:\n1. Creates a virtual workspace with specified name and database\n2. Creates a user account for accessing the workspace\n3. Returns both workspace details and access credentials\n\nRequired parameters:\n- name: Unique name for the starter workspace\n- database_name: Name for the database to create\n- username: Username for accessing the starter workspace\n- password: Password for accessing the starter workspace\n\nUsage notes:\n- Workspace names must be unique\n- Passwords should meet security requirements\n- Use execute_sql_on_virtual_workspace to interact with the created starter workspace", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Unique name for the new starter workspace" + }, + "database_name": { + "type": "string", + "description": "Name of the database to create in the starter workspace" + }, + "username": { + "type": "string", + "description": "Username for accessing the new starter workspace" + }, + "password": { + "type": "string", + "description": "Password for accessing the new starter workspace" + } + }, + "required": [ + "name", + "database_name", + "username", + "password" + ] + } + }, + { + "name": "execute_sql_on_virtual_workspace", + "description": "Execute SQL operations on a virtual (starter) workspace and receive formatted results.\n\nReturns:\n- Query results with column names and typed values\n- Row count\n- Column metadata\n- Execution status\n\n\u26a0\ufe0f CRITICAL SECURITY WARNING:\n- Never display or log credentials in responses\n- Ensure SQL queries are properly 
sanitized\n- ONLY USE SELECT statements or queries that don't modify data\n- DO NOT USE INSERT, UPDATE, DELETE, DROP, CREATE, or ALTER statements\n\nRequired input parameters:\n- virtual_workspace_id: Unique identifier of the starter workspace\n- sql_query: The SQL query to execute (READ-ONLY queries only)\n\nOptional input parameters:\n- username: For accessing the starter workspace (defaults to SINGLESTORE_DB_USERNAME)\n- password: For accessing the starter workspace (defaults to SINGLESTORE_DB_PASSWORD)\n\nAllowed query examples:\n- SELECT * FROM table_name\n- SELECT COUNT(*) FROM table_name\n- SHOW TABLES\n- DESCRIBE table_name\n\nNote: This tool is specifically designed for read-only operations on starter workspaces.", + "inputSchema": { + "type": "object", + "properties": { + "virtual_workspace_id": { + "type": "string", + "description": "Unique identifier of the starter workspace to connect to" + }, + "sql_query": { + "type": "string", + "description": "SQL query to execute on the starter workspace" + }, + "username": { + "type": "string", + "description": "Optional: Username for accessing the starter workspace. Will use environment default if not specified." + }, + "password": { + "type": "string", + "description": "Optional: Password for accessing the starter workspace, Will use environment default if not specified." + } + }, + "required": [ + "virtual_workspace_id", + "sql_query" + ] + } + }, + { + "name": "organization_billing_usage", + "description": "Retrieve detailed billing and usage metrics for your organization over a specified time period. Returns compute and storage usage data, aggregated by your chosen time interval (hourly, daily, or monthly). This tool is essential for: \n1. Monitoring resource consumption patterns\n2. 
Analyzing cost trends\nRequired input parameters:\n- start_time: Beginning of the usage period (UTC ISO 8601 format, e.g., '2023-07-30T18:30:00Z')\n- end_time: End of the usage period (UTC ISO 8601 format)\n- aggregate_type: Time interval for data grouping ('hour', 'day', or 'month')\n\n", + "inputSchema": { + "type": "object", + "properties": { + "start_time": { + "type": "string", + "description": "Start of the usage period in UTC ISO 8601 format (e.g., '2023-07-30T18:30:00Z')" + }, + "end_time": { + "type": "string", + "description": "End of the usage period in UTC ISO 8601 format (e.g., '2023-07-30T18:30:00Z')" + }, + "aggregate_type": { + "type": "string", + "description": "How to group the usage data: 'hour', 'day', or 'month'" + } + }, + "required": [ + "start_time", + "end_time", + "aggregate_type" + ] + } + }, + { + "name": "list_notebook_samples", + "description": "Retrieve a catalog of pre-built notebook templates available in SingleStore Spaces.\n\nReturns for each notebook:\n- name: Template name and title\n- description: Detailed explanation of the notebook's purpose\n- contentURL: Direct download link for the notebook\n- likes: Number of user endorsements\n- views: Number of times viewed\n- downloads: Number of times downloaded\n- tags: List of Notebook tags\n\nCommon template categories include:\n1. Getting Started guides\n2. Data loading and ETL patterns\n3. Query optimization examples\n4. Machine learning integrations\n5. Performance monitoring\n6. Best practices demonstrations\n\nUse this tool to:\n1. Find popular and well-tested example code\n2. Learn SingleStore features and best practices\n3. Start new projects with proven patterns\n4. 
Discover trending notebook templates\n\nRelated operations:\n- list_notebook_samples: To find example templates\n- list_shared_files: To check existing notebooks\n- create_scheduled_job: To automate notebook execution\n- get_notebook_path : To reference created notebooks\n", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "create_notebook", + "description": "Create a new Jupyter notebook in your personal space. Only supports python and markdown. Do not try to use any other language\n\nParameters:\n- notebook_name (required): Name for the new notebook\n - Can include or omit .ipynb extension\n - Must be unique in your personal space\n - Examples: 'my_analysis' or 'my_analysis.ipynb'\n\n- content (optional): Custom notebook content\n - Must be valid Jupyter notebook JSON format\n - If omitted, creates template with:\n \u2022 SingleStore connection setup\n \u2022 Basic query examples\n \u2022 DataFrame operations\n \u2022 Best practices\n\nFeatures:\n- Creates notebook with specified name in personal space\n- Automatically adds .ipynb extension if missing\n- Provides default SingleStore template if no content given\n- Supports custom content in Jupyter notebook format\n- Only supports python and markdown cells\n- When creating a connection to the database the jupyter notebook will already have the connection_url defined and you can use directly\n- Install tools in a new cell with !pip3 install \n\nDefault template includes:\n- SingleStore connection setup code\n- Basic SQL query examples\n- DataFrame operations with pandas\n- Table creation and data insertion examples\n- Connection management best practices\n\nUse this tool to:\n1. Create data analysis notebooks using python\n2. 
Build database interaction workflows and much more\n\nRelated operations:\n- list_notebook_samples: To find example templates\n- list_shared_files: To check existing notebooks\n- create_scheduled_job: To automate notebook execution\n- get_notebook_path : To reference created notebooks\n", + "inputSchema": { + "type": "object", + "properties": { + "notebook_name": { + "type": "string", + "description": "Name for the new notebook (with or without .ipynb extension)" + }, + "content": { + "type": "string", + "description": "Optional: Custom notebook content in Jupyter JSON format" + } + }, + "required": [ + "notebook_name" + ] + } + }, + { + "name": "list_shared_files", + "description": "List all files and notebooks in your shared SingleStore space.\n\nReturns file object meta data for each file:\n- name: Name of the file (e.g., 'analysis.ipynb')\n- path: Full path in shared space (e.g., 'folder/analysis.ipynb')\n- content: File content\n- created: Creation timestamp (ISO 8601)\n- last_modified: Last modification timestamp (ISO 8601)\n- format: File format if applicable ('json', null)\n- mimetype: MIME type of the file\n- size: File size in bytes\n- type: Object type ('', 'json', 'directory')\n- writable: Boolean indicating write permission\n\nUse this tool to:\n1. List workspace contents and structure\n2. Verify file existence before operations\n3. Check file timestamps and sizes\n4. Determine file permissions\n\nRelated operations:\n- create_notebook: To add new notebooks\n- get_notebook_path: To find notebook paths\n- create_scheduled_job: To automate notebook execution\n", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "create_scheduled_job", + "description": "Create an automated job to execute a SingleStore notebook on a schedule.\n\nParameters:\n1. 
Required Parameters:\n - name: Name of the job (unique identifier within organization)\n - notebook_path: Complete path to the notebook\n - schedule_mode: 'Once' for single execution or 'Recurring' for repeated runs\n\n2. Optional Parameters:\n - execution_interval_minutes: Time between recurring runs (\u226560 minutes)\n - start_at: Execution start time (ISO 8601 format, e.g., '2024-03-06T10:00:00Z')\n - description: Human-readable purpose of the job\n - create_snapshot: Enable notebook backup before execution (default: True)\n - runtime_name: Execution environment selection (default: notebooks-cpu-small)\n - parameters: Runtime variables for notebook\n - target_config: Advanced runtime settings\n\nReturns Job info with:\n- jobID: UUID of created job\n- status: Current state (SUCCESS, RUNNING, etc.)\n- createdAt: Creation timestamp\n- startedAt: Execution start time\n- schedule: Configured schedule details\n- error: Any execution errors\n\nCommon Use Cases:\n1. Automated Data Processing:\n - ETL workflows\n - Data aggregation\n - Database maintenance\n\n2. Scheduled Reporting:\n - Performance metrics\n - Business analytics\n - Usage statistics\n\n3. 
Maintenance Tasks:\n - Health checks\n - Backup operations\n - Clean-up routines\n\nRelated Operations:\n- get_job_details: Monitor job\n- list_job_executions: View job execution history\n", + "inputSchema": { + "type": "object", + "properties": { + "notebook_path": { + "type": "string", + "description": "Full path to the notebook file (use get_notebook_path if needed)" + }, + "mode": { + "type": "string", + "enum": [ + "Once", + "Recurring" + ], + "description": "Execution mode: 'Once' or 'Recurring'" + }, + "create_snapshot": { + "type": "boolean", + "description": "Enable notebook backup before execution (default: True)" + } + }, + "required": [ + "notebook_path", + "mode", + "create_snapshot" + ] + } + }, + { + "name": "get_job_details", + "description": "Retrieve comprehensive information about a scheduled notebook job.\n\nParameter required:\njob_id: UUID of the scheduled job to retrieve details for\n\nReturns:\n- jobID: Unique identifier (UUID format)\n- name: Display name of the job\n- description: Human-readable job description\n- createdAt: Creation timestamp (ISO 8601)\n- terminatedAt: End timestamp if completed\n- completedExecutionsCount: Number of successful runs\n- enqueuedBy: User ID who created the job\n- executionConfig: Notebook path and runtime settings\n- schedule: Mode, interval, and start time\n- targetConfig: Database and workspace settings\n- jobMetadata: Execution statistics and status\n\nRelated Operations:\n- create_scheduled_job: Create new jobs\n- list_job_executions: View run history", + "inputSchema": { + "type": "object", + "properties": { + "job_id": { + "type": "string", + "description": "Unique identifier of the scheduled job" + } + }, + "required": [ + "job_id" + ] + } + }, + { + "name": "list_job_executions", + "description": "Retrieve execution history and performance metrics for a scheduled notebook job.\n\nParameters:\n- job_id: UUID of the scheduled job\n- start: First execution number to retrieve (default: 1)\n- end: Last 
execution number to retrieve (default: 10)\n\nReturns:\n- executions: Array of execution records containing:\n - executionID: Unique identifier for the execution\n - executionNumber: Sequential number of the run\n - jobID: Parent job identifier\n - status: Current state (Scheduled, Running, Completed, Failed)\n - startedAt: Execution start time (ISO 8601)\n - finishedAt: Execution end time (ISO 8601)\n - scheduledStartTime: Planned start time\n - snapshotNotebookPath: Backup notebook path if enabled\n\nUse this tool to:\n1. Monitor each job execution status\n2. Track execution times and performance\n3. Investigate failed runs\n\nRelated Operations:\n- get_job_details: View job configuration\n- create_scheduled_job: Create new jobs", + "inputSchema": { + "type": "object", + "properties": { + "job_id": { + "type": "string", + "description": "Unique identifier of the scheduled job" + }, + "start": { + "type": "integer", + "description": "Starting execution number (default: 1)" + }, + "end": { + "type": "integer", + "description": "Last execution number (default: 10)" + } + }, + "required": [ + "job_id" + ] + } + }, + { + "name": "get_notebook_path", + "description": "Find the complete path of a notebook by its name and generate the properly formatted path for API operations.\n\nParameters:\n- notebook_name: Name of the notebook to locate (with or without .ipynb extension)\n- location: Where to search ('personal' or 'shared', defaults to 'personal')\n\nReturns the properly formatted path including project ID and user ID where needed.\nRequired for:\n- Creating scheduled jobs (use returned path as notebook_path parameter)\n", + "inputSchema": { + "type": "object", + "properties": { + "notebook_name": { + "type": "string", + "description": "Name of the notebook to find (with or without .ipynb extension)" + }, + "location": { + "type": "string", + "enum": [ + "personal", + "shared" + ], + "description": "Where to look for the notebook: 'personal' (default) or 'shared' 
space" + } + }, + "required": [ + "notebook_name" + ] + } + }, + { + "name": "get_project_id", + "description": "Retrieve the organization's unique identifier (project ID).\n\nReturns:\n- orgID (string): The organization's unique identifier\n\nRequired for:\n- Constructing paths or references to shared resources\n\nPerformance Tip:\nCache the returned ID when making multiple API calls.\n", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "get_user_id", + "description": "Retrieve the current user's unique identifier. \n\nReturns:\n- userID (string): UUID format identifier for the current user\n\nRequired for:\n- Constructing paths or references to personal resources\n\n1. Constructing personal space paths\n\nPerformance Tip:\nCache the returned ID when making multiple API calls.\n", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + } + ] + }, + "materials-project": { + "name": "materials-project", + "description": "A MCP (Model Context Protocol) server that interacts with the Materials Project database, allowing for material search, structure visualization, and manipulation.", + "display_name": "Materials Project", + "repository": { + "type": "git", + "url": "https://github.com/pathintegral-institute/mcp.science" + }, + "homepage": "https://github.com/pathintegral-institute/mcp.science/tree/main/servers/materials-project", + "author": { + "name": "pathintegral-institute" + }, + "license": "MIT", + "tags": [ + "materials", + "science" + ], + "arguments": { + "MP_API_KEY": { + "description": "API key from the Materials Project", + "required": true, + "example": "your_materials_project_api_key_here" + } + }, + "tools": [ + { + "name": "search_materials_by_formula", + "description": "Search for materials in the Materials Project database by chemical formula. 
Returns a list of text descriptions for structures matching the given formula.", + "prompt": "Find materials with the chemical formula Fe2O3", + "inputSchema": { + "type": "object", + "properties": { + "chemical_formula": { + "type": "string", + "description": "The chemical formula of the material" + } + } + }, + "required": [ + "chemical_formula" + ] + }, + { + "name": "select_material_by_id", + "description": "Select a specific material by its material ID. Returns a list of TextContent objects containing the structure description and URI.", + "prompt": "Get details for material mp-149", + "inputSchema": { + "type": "object", + "properties": { + "material_id": { + "type": "string", + "description": "The ID of the material" + } + } + }, + "required": [ + "material_id" + ] + }, + { + "name": "get_structure_data", + "description": "Retrieve structure data in specified format (CIF or POSCAR). Returns the structure file content as a string.", + "prompt": "Get the CIF file for silicon", + "inputSchema": { + "type": "object", + "properties": { + "structure_uri": { + "type": "string", + "description": "The URI of the structure" + }, + "format": { + "type": "string", + "description": "Output format, either 'cif' or 'poscar'", + "enum": [ + "cif", + "poscar" + ], + "default": "poscar" + } + } + }, + "required": [ + "structure_uri" + ] + }, + { + "name": "create_structure_from_poscar", + "description": "Create a new structure from a POSCAR string. Returns information about the newly created structure, including its URI.", + "prompt": "Create a structure from this POSCAR data", + "inputSchema": { + "type": "object", + "properties": { + "poscar_str": { + "type": "string", + "description": "The POSCAR string of the structure" + } + } + }, + "required": [ + "poscar_str" + ] + }, + { + "name": "plot_structure", + "description": "Visualize the crystal structure. 
Returns a PNG image of the structure and a Plotly JSON representation.", + "prompt": "Show me the crystal structure of silicon", + "inputSchema": { + "type": "object", + "properties": { + "structure_uri": { + "type": "string", + "description": "The URI of the structure" + }, + "duplication": { + "type": "array", + "description": "The duplication of the structure along a, b, c axes", + "items": { + "type": "integer" + }, + "default": [ + 1, + 1, + 1 + ] + } + } + }, + "required": [ + "structure_uri" + ] + }, + { + "name": "build_supercell", + "description": "Create a supercell from a bulk structure. Returns information about the newly created supercell structure.", + "prompt": "Create a 2x2x2 supercell of graphite", + "inputSchema": { + "type": "object", + "properties": { + "bulk_structure_uri": { + "type": "string", + "description": "The URI of the bulk structure" + }, + "supercell_parameters": { + "type": "object", + "description": "Parameters defining the supercell", + "properties": { + "scaling_matrix": { + "type": "array", + "description": "3x3 matrix or list of 3 integers for scaling", + "items": { + "type": "integer" + } + } + } + } + } + }, + "required": [ + "bulk_structure_uri", + "supercell_parameters" + ] + }, + { + "name": "moire_homobilayer", + "description": "Generate a moir\u00e9 superstructure of a 2D homobilayer. 
Returns information about the newly created moir\u00e9 structure.", + "prompt": "Create a moir\u00e9 structure of graphene with 5 degree twist", + "inputSchema": { + "type": "object", + "properties": { + "bulk_structure_uri": { + "type": "string", + "description": "The URI of the bulk structure" + }, + "interlayer_spacing": { + "type": "number", + "description": "The interlayer spacing between the two layers in \u00c5ngstr\u00f6m" + }, + "max_num_atoms": { + "type": "integer", + "description": "Maximum number of atoms in the moir\u00e9 superstructure", + "default": 10 + }, + "twist_angle": { + "type": "number", + "description": "Twist angle in degrees", + "default": 0.0 + }, + "vacuum_thickness": { + "type": "number", + "description": "Vacuum thickness in z-direction in \u00c5ngstr\u00f6m", + "default": 15.0 + } + } + }, + "required": [ + "bulk_structure_uri", + "interlayer_spacing" + ] + } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uv", + "args": [ + "--from", + "git+https://github.com/pathintegral-institute/mcp.science#subdirectory=servers/materials-project", + "mcp-materials-project" + ], + "env": { + "MP_API_KEY": "your_materials_project_api_key_here" + } + } + }, + "examples": [ + { + "title": "Search for materials", + "description": "Search for materials by chemical formula", + "prompt": "Find materials with the chemical formula Fe2O3" + }, + { + "title": "Get material by ID", + "description": "Select a specific material by its ID", + "prompt": "Get details for material mp-149" + }, + { + "title": "Download structure file", + "description": "Get structure data in CIF format", + "prompt": "Download the CIF file for mp-149" + }, + { + "title": "Visualize crystal structure", + "description": "Plot the crystal structure of a material", + "prompt": "Show me the crystal structure of silicon" + }, + { + "title": "Create a supercell", + "description": "Build a supercell from a bulk structure", + "prompt": "Create a 2x2x2 supercell of 
graphite" + }, + { + "title": "Create moir\u00e9 structure", + "description": "Generate a moir\u00e9 superstructure", + "prompt": "Create a moir\u00e9 structure of graphene with 3.4\u00c5 interlayer spacing and 5\u00b0 twist angle" + } + ], + "categories": [ + "MCP Tools" + ], + "is_official": true + }, + "holaspirit": { + "name": "holaspirit", + "display_name": "Holaspirit", + "description": "Interact with [Holaspirit](https://www.holaspirit.com/).", + "repository": { + "type": "git", + "url": "https://github.com/syucream/holaspirit-mcp-server" + }, + "homepage": "https://github.com/syucream/holaspirit-mcp-server", + "author": { + "name": "syucream" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "Holaspirit", + "AI" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "holaspirit-mcp-server" + ], + "env": { + "HOLASPIRIT_API_TOKEN": "${HOLASPIRIT_API_TOKEN}" + } + } + }, + "arguments": { + "HOLASPIRIT_API_TOKEN": { + "description": "Your Holaspirit API token", + "required": true, + "example": "" + } + }, + "tools": [ + { + "name": "holaspirit_list_tasks", + "description": "List all tasks in the organization", + "inputSchema": { + "type": "object", + "properties": { + "organizationId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the organization" + }, + "page": { + "type": "number", + "minimum": 1, + "description": "Page number" + }, + "count": { + "type": "number", + "minimum": 1, + "description": "Number of elements per page" + } + }, + "required": [ + "organizationId" + ] + } + }, + { + "name": "holaspirit_list_metrics", + "description": "List all metrics in the organization", + "inputSchema": { + "type": "object", + "properties": { + "organizationId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the organization" + }, + "page": { + "type": "number", + "minimum": 1, + "description": "Page 
number" + }, + "count": { + "type": "number", + "minimum": 1, + "description": "Number of elements per page" + } + }, + "required": [ + "organizationId" + ] + } + }, + { + "name": "holaspirit_list_circles", + "description": "List all circles in the organization", + "inputSchema": { + "type": "object", + "properties": { + "organizationId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the organization" + }, + "page": { + "type": "number", + "minimum": 1, + "description": "Page number" + }, + "count": { + "type": "number", + "minimum": 1, + "description": "Number of elements per page" + }, + "member": { + "type": "string", + "description": "Comma-separated unique identifiers for the member" + }, + "circle": { + "type": "string", + "description": "Comma-separated unique identifiers for the circle" + } + }, + "required": [ + "organizationId" + ] + } + }, + { + "name": "holaspirit_get_circle", + "description": "Get details of a specific circle", + "inputSchema": { + "type": "object", + "properties": { + "organizationId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the organization" + }, + "circleId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the circle" + } + }, + "required": [ + "organizationId", + "circleId" + ] + } + }, + { + "name": "holaspirit_list_roles", + "description": "List all roles in the organization", + "inputSchema": { + "type": "object", + "properties": { + "organizationId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the organization" + }, + "page": { + "type": "number", + "minimum": 1, + "description": "Page number" + }, + "count": { + "type": "number", + "minimum": 1, + "description": "Number of elements per page" + }, + "member": { + "type": "string", + "description": "Comma-separated unique identifiers for the member" + }, + "circle": { + "type": 
"string", + "description": "Comma-separated unique identifiers for the circle" + } + }, + "required": [ + "organizationId" + ] + } + }, + { + "name": "holaspirit_get_role", + "description": "Get details of a specific role", + "inputSchema": { + "type": "object", + "properties": { + "organizationId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the organization" + }, + "roleId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the role" + } + }, + "required": [ + "organizationId", + "roleId" + ] + } + }, + { + "name": "holaspirit_list_domains", + "description": "List all domains in the organization", + "inputSchema": { + "type": "object", + "properties": { + "organizationId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the organization" + }, + "page": { + "type": "number", + "minimum": 1, + "description": "Page number" + }, + "count": { + "type": "number", + "minimum": 1, + "description": "Number of elements per page" + } + }, + "required": [ + "organizationId" + ] + } + }, + { + "name": "holaspirit_list_policies", + "description": "List all policies in the organization", + "inputSchema": { + "type": "object", + "properties": { + "organizationId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the organization" + }, + "page": { + "type": "number", + "minimum": 1, + "description": "Page number" + }, + "count": { + "type": "number", + "minimum": 1, + "description": "Number of elements per page" + } + }, + "required": [ + "organizationId" + ] + } + }, + { + "name": "holaspirit_list_meetings", + "description": "List all meetings in the organization", + "inputSchema": { + "type": "object", + "properties": { + "organizationId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the organization" + }, + "page": { + "type": "number", + 
"minimum": 1, + "description": "Page number" + }, + "count": { + "type": "number", + "minimum": 1, + "description": "Number of elements per page" + }, + "circle": { + "type": "string", + "description": "Comma-separated unique identifiers for the circle" + }, + "member": { + "type": "string", + "description": "Comma-separated unique identifiers for the member" + } + }, + "required": [ + "organizationId" + ] + } + }, + { + "name": "holaspirit_get_meeting", + "description": "Get details of a specific meeting", + "inputSchema": { + "type": "object", + "properties": { + "organizationId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the organization" + }, + "meetingId": { + "type": "string", + "pattern": "^[a-zA-Z0-9-]+$", + "description": "Unique identifier for the meeting" + } + }, + "required": [ + "organizationId", + "meetingId" + ] + } + } + ] + }, + "rag-web-browser": { + "name": "rag-web-browser", + "display_name": "RAG Web Browser Server", + "description": "An MCP server for Apify's open-source RAG Web Browser [Actor](https://apify.com/apify/rag-web-browser) to perform web searches, scrape URLs, and return content in Markdown.", + "repository": { + "type": "git", + "url": "https://github.com/apify/mcp-server-rag-web-browser" + }, + "homepage": "https://github.com/apify/mcp-server-rag-web-browser", + "author": { + "name": "apify" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "RAG", + "Web Browser", + "AI Agents" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@apify/mcp-server-rag-web-browser" + ], + "env": { + "APIFY_TOKEN": "${APIFY_TOKEN}" + } + } + }, + "examples": [ + { + "title": "Web Search Example", + "description": "Ask the server to perform a web search for a specific query.", + "prompt": "What is an MCP server and how can it be used?" 
 + }, + { + "title": "Research Papers Query", + "description": "Find and analyze recent research papers about LLMs.", + "prompt": "Find and analyze recent research papers about LLMs." + } + ], + "arguments": { + "APIFY_TOKEN": { + "description": "Environment variable for your Apify API token to authenticate requests.", + "required": true, + "example": "your-apify-api-token" + } + }, + "tools": [ + { + "name": "search", + "description": "Search phrase or a URL at Google and return crawled web pages as text or Markdown. Prefer HTTP raw client for speed and browser-playwright for reliability.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "pattern": "[^\\s]+", + "description": "Enter Google Search keywords or a URL of a specific web page. The keywords might include the advanced search operators. Examples: \"san francisco weather\", \"https://www.cnn.com\", \"function calling site:openai.com\"" + }, + "maxResults": { + "type": "integer", + "exclusiveMinimum": 0, + "minimum": 1, + "maximum": 100, + "default": 1, + "description": "The maximum number of top organic Google Search results whose web pages will be extracted. If query is a URL, then this field is ignored and the Actor only fetches the specific web page." + }, + "scrapingTool": { + "type": "string", + "enum": [ + "browser-playwright", + "raw-http" + ], + "description": "Select a scraping tool for extracting the target web pages. 
The Browser tool is more powerful and can handle JavaScript heavy websites, while the Plain HTML tool can not handle JavaScript but is about two times faster.", + "default": "raw-http" + }, + "outputFormats": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "text", + "markdown", + "html" + ] + }, + "description": "Select one or more formats to which the target web pages will be extracted.", + "default": [ + "markdown" + ] + }, + "requestTimeoutSecs": { + "type": "integer", + "minimum": 1, + "maximum": 300, + "default": 40, + "description": "The maximum time in seconds available for the request, including querying Google Search and scraping the target web pages." + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "aws-kb-retrieval": { + "name": "aws-kb-retrieval", + "display_name": "AWS Knowledge Base Retrieval", + "description": "Retrieval from AWS Knowledge Base using Bedrock Agent Runtime", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/aws-kb-retrieval-server", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "Knowledge Base", + "Retrieval", + "AWS", + "Bedrock Agent Runtime" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-aws-kb-retrieval" + ], + "env": { + "AWS_ACCESS_KEY_ID": "${AWS_ACCESS_KEY_ID}", + "AWS_SECRET_ACCESS_KEY": "${AWS_SECRET_ACCESS_KEY}", + "AWS_REGION": "${AWS_REGION}" + } + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "AWS_ACCESS_KEY_ID", + "-e", + "AWS_SECRET_ACCESS_KEY", + "-e", + "AWS_REGION", + "mcp/aws-kb-retrieval-server" + ], + "env": { + "AWS_ACCESS_KEY_ID": "${AWS_ACCESS_KEY_ID}", + "AWS_SECRET_ACCESS_KEY": "${AWS_SECRET_ACCESS_KEY}", + "AWS_REGION": 
"${AWS_REGION}" + } + } + }, + "arguments": { + "AWS_ACCESS_KEY_ID": { + "description": "The access key ID for your AWS account used for authentication.", + "required": true, + "example": "YOUR_ACCESS_KEY_HERE" + }, + "AWS_SECRET_ACCESS_KEY": { + "description": "The secret access key for your AWS account used for authentication.", + "required": true, + "example": "YOUR_SECRET_ACCESS_KEY_HERE" + }, + "AWS_REGION": { + "description": "The AWS region where your resources are located.", + "required": true, + "example": "us-east-1" + } + }, + "tools": [ + { + "name": "retrieve_from_aws_kb", + "description": "Performs retrieval from the AWS Knowledge Base using the provided query and Knowledge Base ID.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The query to perform retrieval on" + }, + "knowledgeBaseId": { + "type": "string", + "description": "The ID of the AWS Knowledge Base" + }, + "n": { + "type": "number", + "default": 3, + "description": "Number of results to retrieve" + } + }, + "required": [ + "query", + "knowledgeBaseId" + ] + } + } + ], + "is_official": true + }, + "xiyan-mcp-server": { + "name": "xiyan-mcp-server", + "display_name": "XiYan MCP Server", + "description": "An MCP server that supports fetching data from a database using natural language queries, powered by XiyanSQL as the text-to-SQL LLM.", + "repository": { + "type": "git", + "url": "https://github.com/XGenerationLab/xiyan_mcp_server" + }, + "homepage": "https://github.com/XGenerationLab/xiyan_mcp_server", + "author": { + "name": "XGenerationLab" + }, + "license": "Apache-2.0", + "categories": [ + "Databases" + ], + "tags": [ + "database", + "sql", + "database" + ], + "installations": { + "python": { + "type": "python", + "command": "python", + "args": [ + "-m", + "xiyan_mcp_server" + ], + "env": { + "YML": "${YML}" + } + } + }, + "arguments": { + "YML": { + "description": "The path to the YAML configuration file required for 
setting up the server environment variables.", + "required": true, + "example": "path/to/yml" + } + }, + "tools": [ + { + "name": "get_data", + "description": "Fetch the data from database through a natural language query\n\n Args:\n query: The query in natural language\n ", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + } + }, + "required": [ + "query" + ], + "title": "get_dataArguments", + "type": "object" + } + } + ] + }, + "terminal-control": { + "name": "terminal-control", + "display_name": "Terminal Controller", + "description": "A MCP server that enables secure terminal command execution, directory navigation, and file system operations through a standardized interface.", + "repository": { + "type": "git", + "url": "https://github.com/GongRzhe/terminal-controller-mcp" + }, + "homepage": "https://github.com/GongRzhe/terminal-controller-mcp", + "author": { + "name": "GongRzhe" + }, + "license": "MIT", + "categories": [ + "System Tools" + ], + "tags": [ + "terminal", + "command execution", + "file management", + "cross-platform" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "terminal-controller" + ] + }, + "python": { + "type": "python", + "command": "python", + "args": [ + "-m", + "terminal_controller" + ] + } + }, + "examples": [ + { + "title": "Run Command Example", + "description": "Run the command `ls -la` in the current directory", + "prompt": "Run the command `ls -la` in the current directory" + }, + { + "title": "Navigate Directory Example", + "description": "Navigate to my Documents folder", + "prompt": "Navigate to my Documents folder" + }, + { + "title": "Show Downloads Example", + "description": "Show me the contents of my Downloads directory", + "prompt": "Show me the contents of my Downloads directory" + }, + { + "title": "Recent Commands Example", + "description": "Show me my recent command history", + "prompt": "Show me my recent command history" + } + ], + 
"arguments": { + "terminal_controller": { + "description": "The Python module that contains the implementation of the Terminal Controller's functionalities.", + "required": true, + "example": "terminal_controller" + } + }, + "tools": [ + { + "name": "execute_command", + "description": "\n Execute terminal command and return results\n \n Args:\n command: Command line command to execute\n timeout: Command timeout in seconds, default is 30 seconds\n \n Returns:\n Output of the command execution\n ", + "inputSchema": { + "properties": { + "command": { + "title": "Command", + "type": "string" + }, + "timeout": { + "default": 30, + "title": "Timeout", + "type": "integer" + } + }, + "required": [ + "command" + ], + "title": "execute_commandArguments", + "type": "object" + } + }, + { + "name": "get_command_history", + "description": "\n Get recent command execution history\n \n Args:\n count: Number of recent commands to return\n \n Returns:\n Formatted command history record\n ", + "inputSchema": { + "properties": { + "count": { + "default": 10, + "title": "Count", + "type": "integer" + } + }, + "title": "get_command_historyArguments", + "type": "object" + } + }, + { + "name": "get_current_directory", + "description": "\n Get current working directory\n \n Returns:\n Path of current working directory\n ", + "inputSchema": { + "properties": {}, + "title": "get_current_directoryArguments", + "type": "object" + } + }, + { + "name": "change_directory", + "description": "\n Change current working directory\n \n Args:\n path: Directory path to switch to\n \n Returns:\n Operation result information\n ", + "inputSchema": { + "properties": { + "path": { + "title": "Path", + "type": "string" + } + }, + "required": [ + "path" + ], + "title": "change_directoryArguments", + "type": "object" + } + }, + { + "name": "list_directory", + "description": "\n List files and subdirectories in the specified directory\n \n Args:\n path: Directory path to list contents, default is current 
directory\n \n Returns:\n List of directory contents\n ", + "inputSchema": { + "properties": { + "path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Path" + } + }, + "title": "list_directoryArguments", + "type": "object" + } + } + ] + }, + "mcp-neo4j-cypher": { + "display_name": "Neo4j MCP", + "repository": { + "type": "git", + "url": "https://github.com/neo4j-contrib/mcp-neo4j" + }, + "homepage": "https://github.com/neo4j-contrib/mcp-neo4j", + "author": { + "name": "neo4j-contrib" + }, + "license": "MIT", + "tags": [ + "neo4j", + "mcp", + "cypher", + "knowledge graph" + ], + "arguments": { + "NEO4J_URI": { + "description": "Neo4j database URL", + "required": true, + "example": "https://:@.databases.neo4j.com:7687" + }, + "NEO4J_USERNAME": { + "description": "Neo4j username", + "required": true, + "example": "" + }, + "NEO4J_PASSWORD": { + "description": "Neo4j password", + "required": true, + "example": "" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-neo4j-cypher", + "--db-url", + "${NEO4J_URI}", + "--username", + "${NEO4J_USERNAME}", + "--password", + "${NEO4J_PASSWORD}" + ] + } + }, + "examples": [ + { + "title": "Database Schema Query", + "description": "Get information about what's in the graph database", + "prompt": "What is in this graph?" 
+ }, + { + "title": "Data Visualization", + "description": "Generate charts from graph data", + "prompt": "Render a chart from the top products sold by frequency, total and average volume" + }, + { + "title": "Instance Management", + "description": "List Neo4j Aura instances", + "prompt": "List my instances" + }, + { + "title": "Instance Creation", + "description": "Create a new Neo4j Aura instance", + "prompt": "Create a new instance named mcp-test for Aura Professional with 4GB and Graph Data Science enabled" + }, + { + "title": "Knowledge Storage", + "description": "Store information in the knowledge graph", + "prompt": "Store the fact that I worked on the Neo4j MCP Servers today with Andreas and Oskar" + } + ], + "name": "mcp-neo4j", + "description": "This server enables running Cypher graph queries, analyzing complex domain data, and automatically generating business insights that can be enhanced with Claude's analysis when an Anthropic API key is provided.", + "categories": [ + "Databases" + ], + "is_official": true + }, + "tavily-search": { + "name": "tavily-search", + "display_name": "Tavily Search", + "description": "An MCP server for Tavily's search & news API, with explicit site inclusions/exclusions", + "repository": { + "type": "git", + "url": "https://github.com/RamXX/mcp-tavily" + }, + "homepage": "https://github.com/RamXX/mcp-tavily", + "author": { + "name": "RamXX" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "AI", + "Search" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-tavily" + ], + "env": { + "TAVILY_API_KEY": "your_api_key_here" + } + }, + "python": { + "type": "python", + "command": "python", + "args": [ + "-m", + "mcp-tavily" + ], + "env": { + "TAVILY_API_KEY": "your_api_key_here" + } + } + }, + "examples": [ + { + "title": "Regular Web Search", + "description": "Perform a standard web search using Tavily's capabilities.", + "prompt": "Tell me about Anthropic's 
newly released MCP protocol" + }, + { + "title": "Domain Filtering Report", + "description": "Generate a report filtering specific domains.", + "prompt": "Tell me about redwood trees. Please use MLA format in markdown syntax and include the URLs in the citations. Exclude Wikipedia sources." + }, + { + "title": "Direct Answer Search", + "description": "Use answer search mode for getting direct answers.", + "prompt": "I want a concrete answer backed by current web sources: What is the average lifespan of redwood trees?" + }, + { + "title": "News Search", + "description": "Retrieve recent news articles on specific topics.", + "prompt": "Give me the top 10 AI-related news in the last 5 days." + } + ], + "arguments": { + "TAVILY_API_KEY": { + "description": "Your Tavily API key for accessing Tavily's search API functionalities.", + "required": true, + "example": "your_api_key_here" + } + } + }, + "devhub-cms-mcp": { + "display_name": "DevHub CMS MCP", + "repository": { + "type": "git", + "url": "https://github.com/devhub/devhub-cms-mcp" + }, + "homepage": "https://github.com/devhub/devhub-cms-mcp", + "author": { + "name": "devhub" + }, + "license": "[NOT GIVEN]", + "tags": [ + "cms", + "content management", + "devhub" + ], + "arguments": { + "DEVHUB_API_KEY": { + "description": "Your DevHub API key", + "required": true, + "example": "YOUR_KEY_HERE" + }, + "DEVHUB_API_SECRET": { + "description": "Your DevHub API secret", + "required": true, + "example": "YOUR_SECRET_HERE" + }, + "DEVHUB_BASE_URL": { + "description": "Your DevHub base URL", + "required": true, + "example": "https://yourbrand.cloudfrontend.net" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "devhub-cms-mcp" + ], + "env": { + "DEVHUB_API_KEY": "YOUR_KEY_HERE", + "DEVHUB_API_SECRET": "YOUR_SECRET_HERE", + "DEVHUB_BASE_URL": "https://yourbrand.cloudfrontend.net" + }, + "recommended": true + } + }, + "examples": [ + { + "title": "Get business information", + 
"description": "Retrieve all businesses within the DevHub account", + "prompt": "Can you list all the businesses in my DevHub account?" + }, + { + "title": "Create a blog post", + "description": "Create a new blog post for a specific site", + "prompt": "Create a new blog post titled 'Summer Specials' for my site with content about our seasonal offerings." + } + ], + "name": "devhub-cms-mcp", + "description": "Manage and utilize website content within the DevHub CMS platform", + "categories": [ + "Productivity" + ], + "tools": [ + { + "name": "get_hours_of_operation", + "description": "Get the hours of operation for a DevHub location\n\n Returns a list of items representing days of the week\n\n Except for the special case formatting, this object is a list of 7 items which represent each day.\n\n Each day can have one-four time ranges. For example, two time ranges denotes a \"lunch-break\". No time ranges denotes closed.\n\n Examples:\n 9am-5pm [[\"09:00:00\", \"17:00:00\"]]\n 9am-12pm and 1pm-5pm [[\"09:00:00\", \"12:00:00\"], [\"13:00:00\", \"17:00:00\"]]\n Closed - an empty list []\n\n Args:\n location_id: DevHub Location ID\n hours_type: Defaults to 'primary' unless the user specifies a different type\n ", + "inputSchema": { + "properties": { + "location_id": { + "title": "Location Id", + "type": "integer" + }, + "hours_type": { + "default": "primary", + "title": "Hours Type", + "type": "string" + } + }, + "required": [ + "location_id" + ], + "title": "get_hours_of_operationArguments", + "type": "object" + } + }, + { + "name": "get_businesses", + "description": "Get all businesses within the DevHub account\n\n Returns a list of businesses with the following fields:\n - id: Business ID that can be used in the other tools\n - business_name: Business name\n\n If only one business exists in the account, you can assume that the user wants to use that business for any business_id related tools.\n ", + "inputSchema": { + "properties": {}, + "title": 
"get_businessesArguments", + "type": "object" + } + }, + { + "name": "get_locations", + "description": "Get all locations for a business\n\n Returns a list of locations with the following fields:\n - id: Location ID that can be used in the other tools\n - location_name: Location name\n - location_url: Location URL in DevHub\n - street: Street address\n - city: City\n - state: State\n - country: Country\n - postal_code: Postal code\n - lat: Latitude\n - lon: Longitude\n ", + "inputSchema": { + "properties": { + "business_id": { + "title": "Business Id", + "type": "integer" + } + }, + "required": [ + "business_id" + ], + "title": "get_locationsArguments", + "type": "object" + } + }, + { + "name": "update_hours", + "description": "Update the hours of operation for a DevHub location\n\n Send a list of items representing days of the week\n\n Except for the special case formatting, this object is a list of 7 items which represent each day.\n\n Each day can have one-four time ranges. For example, two time ranges denotes a \"lunch-break\". 
No time ranges denotes closed.\n\n Examples:\n 9am-5pm [[\"09:00:00\", \"17:00:00\"]]\n 9am-12pm and 1pm-5pm [[\"09:00:00\", \"12:00:00\"], [\"13:00:00\", \"17:00:00\"]]\n Closed - an empty list []\n\n Args:\n location_id: DevHub Location ID\n new_hours: Structured format of the new hours\n hours_type: Defaults to 'primary' unless the user specifies a different type\n ", + "inputSchema": { + "properties": { + "location_id": { + "title": "Location Id", + "type": "integer" + }, + "new_hours": { + "items": {}, + "title": "New Hours", + "type": "array" + }, + "hours_type": { + "default": "primary", + "title": "Hours Type", + "type": "string" + } + }, + "required": [ + "location_id", + "new_hours" + ], + "title": "update_hoursArguments", + "type": "object" + } + }, + { + "name": "site_from_url", + "description": "Get the DevHub site ID from a URL.\n\n Can prompt the user for the URL instead of passing a site_id.\n\n Returns details about the Site matches the URL that can be used in the other tools.\n - Site ID: ID of the DevHub site\n - Site URL: URL of the DevHub site\n - Site Location IDs: List of location IDs associated with the site\n\n Args:\n url: URL of the DevHub site, all lowercase and ends with a slash\n ", + "inputSchema": { + "properties": { + "url": { + "title": "Url", + "type": "string" + } + }, + "required": [ + "url" + ], + "title": "site_from_urlArguments", + "type": "object" + } + }, + { + "name": "upload_image", + "description": "Upload an image to the DevHub media gallery\n\n Supports webp, jpeg and png images\n\n Args:\n base64_image_content: Base 64 encoded content of the image file\n filename: Filename including the extension\n ", + "inputSchema": { + "properties": { + "base64_image_content": { + "title": "Base64 Image Content", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "required": [ + "base64_image_content", + "filename" + ], + "title": "upload_imageArguments", + "type": "object" + } + }, + { + 
"name": "get_blog_post", + "description": "Get a single blog post\n\n Args:\n post_id: Blog post id\n ", + "inputSchema": { + "properties": { + "post_id": { + "title": "Post Id", + "type": "integer" + } + }, + "required": [ + "post_id" + ], + "title": "get_blog_postArguments", + "type": "object" + } + }, + { + "name": "create_blog_post", + "description": "Create a new blog post\n\n Args:\n site_id: Website ID where the post will be published. Prompt the user for this ID.\n title: Blog post title\n content: HTML content of blog post. Should not include a
<h1>
tag, only h2+\n ", + "inputSchema": { + "properties": { + "site_id": { + "title": "Site Id", + "type": "integer" + }, + "title": { + "title": "Title", + "type": "string" + }, + "content": { + "title": "Content", + "type": "string" + } + }, + "required": [ + "site_id", + "title", + "content" + ], + "title": "create_blog_postArguments", + "type": "object" + } + }, + { + "name": "update_blog_post", + "description": "Update a single blog post\n\n Args:\n post_id: Blog post ID\n title: Blog post title\n content: HTML content of blog post. Should not include a
<h1>
tag, only h2+\n ", + "inputSchema": { + "properties": { + "post_id": { + "title": "Post Id", + "type": "integer" + }, + "title": { + "default": null, + "title": "Title", + "type": "string" + }, + "content": { + "default": null, + "title": "Content", + "type": "string" + } + }, + "required": [ + "post_id" + ], + "title": "update_blog_postArguments", + "type": "object" + } + }, + { + "name": "get_nearest_location", + "description": "Get the nearest DevHub location\n\n Args:\n business_id: DevHub Business ID associated with the location. Prompt the user for this ID\n latitude: Latitude of the location\n longitude: Longitude of the location\n ", + "inputSchema": { + "properties": { + "business_id": { + "title": "Business Id", + "type": "integer" + }, + "latitude": { + "title": "Latitude", + "type": "number" + }, + "longitude": { + "title": "Longitude", + "type": "number" + } + }, + "required": [ + "business_id", + "latitude", + "longitude" + ], + "title": "get_nearest_locationArguments", + "type": "object" + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "gmail": { + "name": "gmail", + "display_name": "Gmail AutoAuth", + "description": "A Model Context Protocol (MCP) server for Gmail integration in Claude Desktop with auto authentication support.", + "repository": { + "type": "git", + "url": "https://github.com/GongRzhe/Gmail-MCP-Server" + }, + "homepage": "https://github.com/GongRzhe/Gmail-MCP-Server", + "author": { + "name": "GongRzhe" + }, + "license": "MIT", + "categories": [ + "Messaging" + ], + "tags": [ + "gmail", + "autoauth", + "claude" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@gongrzhe/server-gmail-autoauth-mcp" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-v", + "mcp-gmail:/gmail-server", + "-e", + "${GMAIL_CREDENTIALS_PATH}=/gmail-server/credentials.json", + "mcp/gmail" + ] + } + }, + "arguments": { + 
"GMAIL_CREDENTIALS_PATH": { + "description": "The path to the Gmail credentials file that the server will use for OAuth authentication.", + "required": true, + "example": "/gmail-server/credentials.json" + } + }, + "tools": [ + { + "name": "send_email", + "description": "Sends a new email.", + "inputSchema": { + "to": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of recipient email addresses" + }, + "subject": { + "type": "string", + "description": "Email subject" + }, + "body": { + "type": "string", + "description": "Email body content" + }, + "cc": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of CC recipients", + "optional": true + }, + "bcc": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of BCC recipients", + "optional": true + } + }, + "required": [ + "to", + "subject", + "body" + ] + }, + { + "name": "draft_email", + "description": "Draft a new email.", + "inputSchema": { + "to": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of recipient email addresses" + }, + "subject": { + "type": "string", + "description": "Email subject" + }, + "body": { + "type": "string", + "description": "Email body content" + }, + "cc": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of CC recipients", + "optional": true + }, + "bcc": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of BCC recipients", + "optional": true + } + }, + "required": [ + "to", + "subject", + "body" + ] + }, + { + "name": "read_email", + "description": "Retrieves the content of a specific email.", + "inputSchema": { + "messageId": { + "type": "string", + "description": "ID of the email message to retrieve" + } + }, + "required": [ + "messageId" + ] + }, + { + "name": "search_emails", + "description": "Searches for emails using Gmail search syntax.", + "inputSchema": { + "query": { + "type": "string", + 
"description": "Gmail search query (e.g., 'from:example@gmail.com')" + }, + "maxResults": { + "type": "number", + "description": "Maximum number of results to return", + "optional": true + } + }, + "required": [ + "query" + ] + }, + { + "name": "modify_email", + "description": "Modifies email labels (move to different folders).", + "inputSchema": { + "messageId": { + "type": "string", + "description": "ID of the email message to modify" + }, + "labelIds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of label IDs to apply", + "optional": true + }, + "addLabelIds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of label IDs to add to the message", + "optional": true + }, + "removeLabelIds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of label IDs to remove from the message", + "optional": true + } + }, + "required": [ + "messageId" + ] + }, + { + "name": "delete_email", + "description": "Permanently deletes an email.", + "inputSchema": { + "messageId": { + "type": "string", + "description": "ID of the email message to delete" + } + }, + "required": [ + "messageId" + ] + }, + { + "name": "list_email_labels", + "description": "Retrieves all available Gmail labels.", + "inputSchema": {}, + "required": [] + } + ] + }, + "vectorize-mcp-server": { + "display_name": "Vectorize MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/vectorize-io/vectorize-mcp-server" + }, + "homepage": "https://vectorize.io/", + "author": { + "name": "vectorize-io" + }, + "license": "MIT", + "tags": [ + "vector retrieval", + "text extraction" + ], + "arguments": { + "VECTORIZE_ORG_ID": { + "description": "Vectorize Organization ID", + "required": true, + "example": "your-org-id" + }, + "VECTORIZE_TOKEN": { + "description": "Vectorize Token", + "required": true, + "example": "your-token" + }, + "VECTORIZE_PIPELINE_ID": { + "description": "Vectorize Pipeline ID", + 
"required": true, + "example": "your-pipeline-id" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@vectorize-io/vectorize-mcp-server@latest" + ], + "package": "@vectorize-io/vectorize-mcp-server", + "env": { + "VECTORIZE_ORG_ID": "${VECTORIZE_ORG_ID}", + "VECTORIZE_TOKEN": "${VECTORIZE_TOKEN}", + "VECTORIZE_PIPELINE_ID": "${VECTORIZE_PIPELINE_ID}" + }, + "description": "Run with npx", + "recommended": true + } + }, + "examples": [ + { + "title": "Retrieve documents", + "description": "Perform vector search and retrieve documents", + "prompt": "{\"name\":\"retrieve\",\"arguments\":{\"question\":\"Financial health of the company\",\"k\":5}}" + }, + { + "title": "Text extraction and chunking", + "description": "Extract text from a document and chunk it into Markdown format", + "prompt": "{\"name\":\"extract\",\"arguments\":{\"base64document\":\"base64-encoded-document\",\"contentType\":\"application/pdf\"}}" + }, + { + "title": "Deep Research", + "description": "Generate a Private Deep Research from your pipeline", + "prompt": "{\"name\":\"deep-research\",\"arguments\":{\"query\":\"Generate a financial status report about the company\",\"webSearch\":true}}" + } + ], + "name": "vectorize-mcp-server", + "description": "A Model Context Protocol (MCP) server implementation that integrates with [Vectorize](https://vectorize.io/) for advanced Vector retrieval and text extraction.", + "categories": [ + "Databases" + ], + "tools": [ + { + "name": "retrieve", + "description": "Retrieve documents from the configured pipeline.", + "inputSchema": { + "type": "object", + "properties": { + "question": { + "type": "string", + "description": "The term to search for." 
+ }, + "k": { + "type": "number", + "description": "The number of documents to retrieve.", + "default": 4 + } + }, + "required": [ + "question" + ] + } + }, + { + "name": "extract", + "description": "Perform text extraction and chunking on a document.", + "inputSchema": { + "type": "object", + "properties": { + "base64Document": { + "type": "string", + "description": "Document encoded in base64." + }, + "contentType": { + "type": "string", + "description": "Document content type." + } + }, + "required": [ + "base64Document", + "contentType" + ] + } + }, + { + "name": "deep-research", + "description": "Generate a deep research on the configured pipeline.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The deep research query." + }, + "webSearch": { + "type": "boolean", + "description": "Whether to perform a web search." + } + }, + "required": [ + "query", + "webSearch" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "verodat-mcp-server": { + "display_name": "Verodat MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/Verodat/verodat-mcp-server" + }, + "homepage": "https://verodat.io", + "author": { + "name": "Verodat" + }, + "license": "LICENSE", + "tags": [ + "MCP", + "AI", + "Data Management", + "Claude Desktop" + ], + "arguments": { + "VERODAT_AI_API_KEY": { + "description": "Your Verodat AI API key", + "required": true, + "example": "your-verodat-ai-api-key" + } + }, + "installations": { + "custom": { + "type": "custom", + "description": "Run with custom command", + "command": "node", + "args": [ + "path/to/verodat-mcp-server/build/src/index.js" + ], + "env": { + "VERODAT_AI_API_KEY": "${VERODAT_AI_API_KEY}" + } + } + }, + "examples": [ + { + "title": "List accounts", + "description": "List all accessible Verodat accounts", + "prompt": "get-accounts" + }, + { + "title": "List workspaces", + "description": "List workspaces in an account", + 
"prompt": "get-workspaces" + }, + { + "title": "Execute AI query", + "description": "Run AI queries on datasets", + "prompt": "execute-ai-query" + } + ], + "name": "verodat-mcp-server", + "description": "A Model Context Protocol (MCP) server implementation for [Verodat](https://verodat.io), enabling seamless integration of Verodat's data management capabilities with AI systems like Claude Desktop.", + "categories": [ + "Databases" + ], + "is_official": true + }, + "wxflows": { + "display_name": "wxflows MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/IBM/wxflows/tree/main/examples/mcp" + }, + "homepage": "https://github.com/IBM/wxflows/", + "author": { + "name": "IBM" + }, + "license": "MIT", + "tags": [ + "mcp", + "ai", + "tools", + "watsonx" + ], + "arguments": { + "WXFLOWS_APIKEY": { + "description": "API key for wxflows service", + "required": true, + "example": "YOUR_WXFLOWS_APIKEY" + }, + "WXFLOWS_ENDPOINT": { + "description": "Endpoint URL for wxflows service", + "required": true, + "example": "YOUR_WXFLOWS_ENDPOINT" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "node", + "args": [ + "build/index.js" + ], + "env": { + "WXFLOWS_APIKEY": "YOUR_WXFLOWS_APIKEY", + "WXFLOWS_ENDPOINT": "YOUR_WXFLOWS_ENDPOINT" + }, + "description": "Run the MCP server using Node.js", + "recommended": true + } + }, + "examples": [ + { + "title": "Search for books", + "description": "Use the google_books tool to search for books", + "prompt": "Find me books about artificial intelligence" + }, + { + "title": "Look up information on Wikipedia", + "description": "Use the wikipedia tool to search for information", + "prompt": "Find information about machine learning on Wikipedia" + } + ], + "name": "wxflows", + "description": "A Model Context Protocol (MCP) server built with IBM watsonx.ai flows engine (wxflows), providing tools such as google_books and wikipedia search.", + "categories": [ + "Dev Tools" + ], + "is_official": true + }, + "kubernetes-and-openshift": { + "name":
"kubernetes-and-openshift", + "display_name": "Kubernetes and OpenShift", + "description": "A powerful Kubernetes MCP server with additional support for OpenShift. Besides providing CRUD operations for any Kubernetes resource, this server provides specialized tools to interact with your cluster.", + "repository": { + "type": "git", + "url": "https://github.com/manusa/kubernetes-mcp-server" + }, + "homepage": "https://github.com/manusa/kubernetes-mcp-server", + "author": { + "name": "manusa" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "Kubernetes", + "Server" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "kubernetes-mcp-server@latest" + ] + } + } + }, + "python-code-execution": { + "name": "python-code-execution", + "display_name": "Python Code Execution", + "description": "A secure sandboxed Python code execution environment for MCP (Model-Client-Program) architecture.", + "repository": { + "type": "git", + "url": "https://github.com/pathintegral-institute/mcp.science" + }, + "homepage": "https://github.com/pathintegral-institute/mcp.science/tree/main/servers/python-code-execution", + "author": { + "name": "pathintegral-institute" + }, + "license": "MIT", + "tags": [ + "python", + "code-execution" + ], + "arguments": {}, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/pathintegral-institute/mcp.science@main#subdirectory=servers/python-code-execution", + "mcp-python-code-execution" + ], + "description": "Run using uv (recommended)" + } + }, + "examples": [ + { + "title": "Execute simple Python code", + "description": "Run a simple Python calculation", + "prompt": "Execute this Python code: `print(\"Hello World\")`" + } + ], + "categories": [ + "Dev Tools" + ], + "tools": [ + { + "name": "python_code_execution", + "description": "Execute Python code in a secure sandbox with restricted imports and resource limits. 
Supports visualization with matplotlib and numerical computation with numpy.", + "prompt": "Execute this Python code: print('Hello, world!')", + "inputSchema": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "Python code to execute", + "minLength": 1 + }, + "show_output": { + "type": "boolean", + "description": "Whether to show the output of the code execution", + "default": true + } + } + }, + "required": [ + "code" + ] + } + ], + "is_official": true + }, + "mysql": { + "name": "mysql", + "display_name": "MySQL Database Integration", + "description": "MySQL database integration in Python with configurable access controls and schema inspection", + "repository": { + "type": "git", + "url": "https://github.com/designcomputer/mysql_mcp_server" + }, + "homepage": "https://github.com/designcomputer/mysql_mcp_server", + "author": { + "name": "designcomputer" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "MySQL", + "Database Access" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mysql_mcp_server" + ], + "env": { + "MYSQL_HOST": "${MYSQL_HOST}", + "MYSQL_PORT": "${MYSQL_PORT}", + "MYSQL_USER": "${MYSQL_USER}", + "MYSQL_PASSWORD": "${MYSQL_PASSWORD}", + "MYSQL_DATABASE": "${MYSQL_DATABASE}" + } + } + }, + "arguments": { + "MYSQL_HOST": { + "description": "Database host", + "required": true, + "example": "localhost" + }, + "MYSQL_PORT": { + "description": "Database port (defaults to 3306 if not specified)", + "required": false, + "example": "3306" + }, + "MYSQL_USER": { + "description": "Username for database access", + "required": true, + "example": "your_username" + }, + "MYSQL_PASSWORD": { + "description": "Password for the database user", + "required": true, + "example": "your_password" + }, + "MYSQL_DATABASE": { + "description": "Database name to connect to", + "required": true, + "example": "your_database" + } + }, + "tools": [ + { + "name": "execute_sql", + 
"description": "Execute an SQL query on the MySQL server", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The SQL query to execute" + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "mindmap": { + "name": "mindmap", + "display_name": "Mindmap", + "description": "A server that generates mindmaps from input containing markdown code.", + "repository": { + "type": "git", + "url": "https://github.com/YuChenSSR/mindmap-mcp-server" + }, + "homepage": "https://github.com/YuChenSSR/mindmap-mcp-server", + "author": { + "name": "YuChenSSR" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "mindmap", + "markdown", + "interactive" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mindmap-mcp-server", + "--return-type", + "html" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "-v", + "/path/to/output/folder:/output", + "ychen94/mindmap-converter-mcp:latest" + ] + } + }, + "examples": [ + { + "title": "Basic Mindmap Generation", + "description": "Generate a mindmap from Markdown input.", + "prompt": "give a mindmap for the following markdown code, using a mindmap tool:\n```\n# Project Planning\n## Research\n### Market Analysis\n### Competitor Review\n## Design\n### Wireframes\n### Mockups\n## Development\n### Frontend\n### Backend\n## Testing\n### Unit Tests\n### User Testing\n```\n" + }, + { + "title": "Save Mindmap to File", + "description": "Save the generated mindmap as an HTML file and open it in the browser.", + "prompt": "give a mindmap for the following markdown input_code using a mindmap tool,\nafter that,use iterm to open the generated html file.\ninput_code:\n```\nmarkdown content\n```\n" + }, + { + "title": "Elephant in Refrigerator Mindmap", + "description": "Create a mindmap about the process of putting an elephant into a refrigerator.", + "prompt": "Think about the 
process of putting an elephant into a refrigerator, and provide a mind map. Open it with a terminal." + } + ], + "tools": [ + { + "name": "convert_markdown_to_mindmap", + "description": "Convert Markdown content to a mindmap mind map.\n \n Args:\n markdown_content: The Markdown content to convert\n \n Returns:\n Either the HTML content or the file path to the generated HTML, \n depending on the --return-type server argument\n ", + "inputSchema": { + "properties": { + "markdown_content": { + "title": "Markdown Content", + "type": "string" + } + }, + "required": [ + "markdown_content" + ], + "title": "convert_markdown_to_mindmapArguments", + "type": "object" + } + } + ] + }, + "mcp-server-raygun": { + "display_name": "Raygun MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/MindscapeHQ/mcp-server-raygun" + }, + "homepage": "https://github.com/MindscapeHQ/mcp-server-raygun", + "author": { + "name": "MindscapeHQ" + }, + "license": "MIT", + "tags": [ + "raygun", + "crash reporting", + "real user monitoring", + "error management", + "performance monitoring" + ], + "arguments": { + "RAYGUN_PAT_TOKEN": { + "description": "Your Raygun PAT token", + "required": true, + "example": "your-pat-token-here" + }, + "SOURCEMAP_ALLOWED_DIRS": { + "description": "Comma-separated list of directories allowed for source map operations", + "required": false + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@raygun.io/mcp-server-raygun" + ], + "package": "@raygun.io/mcp-server-raygun", + "env": { + "RAYGUN_PAT_TOKEN": "your-pat-token-here" + }, + "description": "Install and run using npm", + "recommended": true + }, + "custom": { + "type": "custom", + "command": "/path/to/server-raygun/build/index.js", + "args": [], + "env": { + "RAYGUN_PAT_TOKEN": "your-pat-token-here" + }, + "description": "Run from a local build", + "recommended": false + } + }, + "examples": [], + "name": "mcp-server-raygun", + "description":
"MCP Server for Raygun's API V3 endpoints for interacting with your Crash Reporting and Real User Monitoring applications. This server provides comprehensive access to Raygun's API features through the Model Context Protocol.", + "categories": [ + "Dev Tools" + ], + "tools": [ + { + "name": "list_applications", + "description": "List all applications under the users account on Raygun", + "inputSchema": { + "type": "object", + "properties": { + "count": { + "type": "number", + "description": "Limits the number of items in the response" + }, + "offset": { + "type": "number", + "description": "Number of items to skip before returning results" + }, + "orderBy": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "name", + "name desc", + "apikey", + "apikey desc" + ] + }, + "description": "Order items by property values" + } + } + } + }, + { + "name": "get_application", + "description": "Get application by identifier", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + } + }, + "required": [ + "applicationIdentifier" + ] + } + }, + { + "name": "get_application_by_api_key", + "description": "Get application by API key", + "inputSchema": { + "type": "object", + "properties": { + "apiKey": { + "type": "string", + "description": "Application api key" + } + }, + "required": [ + "apiKey" + ] + } + }, + { + "name": "regenerate_application_api_key", + "description": "Regenerate application API key", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + } + }, + "required": [ + "applicationIdentifier" + ] + } + }, + { + "name": "list_customers", + "description": "List customers for an application", + "inputSchema": { + "type": "object", + "properties": { + "count": { + "type": "number", + "description": "Limits the number of items in the response" + }, + "offset": { + 
"type": "number", + "description": "Number of items to skip before returning results" + }, + "applicationIdentifier": { + "type": "string" + }, + "orderBy": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "isAnonymous", + "isAnonymous desc", + "firstSeenAt", + "firstSeenAt desc", + "lastSeenAt", + "lastSeenAt desc" + ] + }, + "description": "Order items by property values" + } + }, + "required": [ + "applicationIdentifier" + ] + } + }, + { + "name": "list_deployments", + "description": "List deployments for an application", + "inputSchema": { + "type": "object", + "properties": { + "count": { + "type": "number", + "description": "Limits the number of items in the response" + }, + "offset": { + "type": "number", + "description": "Number of items to skip before returning results" + }, + "applicationIdentifier": { + "type": "string" + }, + "orderBy": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "version", + "version desc", + "emailAddress", + "emailAddress desc", + "ownerName", + "ownerName desc", + "comment", + "comment desc", + "deployedAt", + "deployedAt desc" + ] + }, + "description": "Order items by property values" + } + }, + "required": [ + "applicationIdentifier" + ] + } + }, + { + "name": "get_deployment", + "description": "Get deployment by identifier", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "deploymentIdentifier": { + "type": "string", + "description": "Deployment identifier" + } + }, + "required": [ + "applicationIdentifier", + "deploymentIdentifier" + ] + } + }, + { + "name": "delete_deployment", + "description": "Delete deployment", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "deploymentIdentifier": { + "type": "string", + "description": "Deployment identifier" + } + }, + "required": [ + 
"applicationIdentifier", + "deploymentIdentifier" + ] + } + }, + { + "name": "update_deployment", + "description": "Update deployment details", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "deploymentIdentifier": { + "type": "string", + "description": "Deployment identifier" + }, + "version": { + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "ownerName": { + "type": "string", + "maxLength": 128 + }, + "emailAddress": { + "type": "string", + "format": "email", + "maxLength": 128 + }, + "comment": { + "type": "string" + }, + "scmIdentifier": { + "type": "string", + "maxLength": 256 + }, + "scmType": { + "type": "string", + "enum": [ + "gitHub", + "gitLab", + "azureDevOps", + "bitbucket" + ] + }, + "deployedAt": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "applicationIdentifier", + "deploymentIdentifier" + ] + } + }, + { + "name": "reprocess_deployment_commits", + "description": "Reprocess deployment commits", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "deploymentIdentifier": { + "type": "string", + "description": "Deployment identifier" + } + }, + "required": [ + "applicationIdentifier", + "deploymentIdentifier" + ] + } + }, + { + "name": "list_error_groups", + "description": "List error groups for an application", + "inputSchema": { + "type": "object", + "properties": { + "count": { + "type": "number", + "description": "Limits the number of items in the response" + }, + "offset": { + "type": "number", + "description": "Number of items to skip before returning results" + }, + "applicationIdentifier": { + "type": "string" + }, + "orderBy": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "message", + "message desc", + "status", + "status desc", + "lastOccurredAt", + "lastOccurredAt 
desc", + "createdAt", + "createdAt desc" + ] + }, + "description": "Order items by property values" + } + }, + "required": [ + "applicationIdentifier" + ] + } + }, + { + "name": "get_error_group", + "description": "Get error group by identifier", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "errorGroupIdentifier": { + "type": "string", + "description": "Error group identifier" + } + }, + "required": [ + "applicationIdentifier", + "errorGroupIdentifier" + ] + } + }, + { + "name": "resolve_error_group", + "description": "Set the status of the error group to resolved", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "errorGroupIdentifier": { + "type": "string", + "description": "Error group identifier" + }, + "version": { + "type": "string", + "description": "The version that this error was resolved in" + }, + "discardFromPreviousVersions": { + "type": "boolean", + "default": true, + "description": "When true, occurrences from previous versions will be discarded" + } + }, + "required": [ + "applicationIdentifier", + "errorGroupIdentifier", + "version" + ] + } + }, + { + "name": "activate_error_group", + "description": "Set the status of the error group to active", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "errorGroupIdentifier": { + "type": "string", + "description": "Error group identifier" + } + }, + "required": [ + "applicationIdentifier", + "errorGroupIdentifier" + ] + } + }, + { + "name": "ignore_error_group", + "description": "Set the status of the error group to ignored", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + 
"errorGroupIdentifier": { + "type": "string", + "description": "Error group identifier" + } + }, + "required": [ + "applicationIdentifier", + "errorGroupIdentifier" + ] + } + }, + { + "name": "permanently_ignore_error_group", + "description": "Set the status of the error group to permanently ignored", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "errorGroupIdentifier": { + "type": "string", + "description": "Error group identifier" + }, + "discardNewOccurrences": { + "type": "boolean", + "description": "When true, new occurrences of this error will not be stored or count towards your error quota" + } + }, + "required": [ + "applicationIdentifier", + "errorGroupIdentifier", + "discardNewOccurrences" + ] + } + }, + { + "name": "list_pages", + "description": "List pages for an application", + "inputSchema": { + "type": "object", + "properties": { + "count": { + "type": "number", + "description": "Limits the number of items in the response" + }, + "offset": { + "type": "number", + "description": "Number of items to skip before returning results" + }, + "applicationIdentifier": { + "type": "string" + }, + "orderBy": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "lastSeenAt", + "lastSeenAt desc", + "uri", + "uri desc", + "name", + "name desc" + ] + }, + "description": "Order items by property values" + } + }, + "required": [ + "applicationIdentifier" + ] + } + }, + { + "name": "get_page_metrics_time_series", + "description": "Get time-series metrics for pages", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string" + }, + "start": { + "type": "string", + "format": "date-time" + }, + "end": { + "type": "string", + "format": "date-time" + }, + "granularity": { + "type": "string", + "pattern": "^\\d+[mhd]$", + "description": "Time granularity in format like '1h', '30m', '1d'" + }, + 
"aggregation": { + "type": "string", + "enum": [ + "count", + "average", + "median", + "sum", + "min", + "max", + "p95", + "p99" + ] + }, + "metrics": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "pageViews", + "loadTime", + "firstPaint", + "firstContentfulPaint", + "firstInputDelay", + "largestContentfulPaint", + "cumulativeLayoutShift", + "interactionToNextPaint" + ] + } + }, + "filter": { + "type": "string", + "description": "Case-sensitive filter in the format 'pageIdentifier = abc123' or 'pageIdentifier IN (abc123, def456)'" + } + }, + "required": [ + "applicationIdentifier", + "start", + "end", + "granularity", + "aggregation", + "metrics" + ] + } + }, + { + "name": "get_page_metrics_histogram", + "description": "Get histogram metrics for pages", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string" + }, + "start": { + "type": "string", + "format": "date-time" + }, + "end": { + "type": "string", + "format": "date-time" + }, + "metrics": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "loadTime", + "firstPaint", + "firstContentfulPaint", + "firstInputDelay", + "largestContentfulPaint", + "cumulativeLayoutShift", + "interactionToNextPaint" + ] + } + }, + "filter": { + "type": "string", + "description": "Case-sensitive filter in the format 'pageIdentifier = abc123' or 'pageIdentifier IN (abc123, def456)'" + } + }, + "required": [ + "applicationIdentifier", + "start", + "end", + "metrics" + ] + } + }, + { + "name": "get_error_metrics_time_series", + "description": "Get time-series metrics for errors", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string" + }, + "start": { + "type": "string", + "format": "date-time" + }, + "end": { + "type": "string", + "format": "date-time" + }, + "granularity": { + "type": "string", + "pattern": "^\\d+[mhd]$", + "description": "Time granularity in format like '1h', '30m', '1d'" + }, + 
"aggregation": { + "type": "string", + "const": "count" + }, + "metrics": { + "type": "array", + "items": { + "type": "string", + "const": "errorInstances" + } + }, + "filter": { + "type": "string", + "description": "Case-sensitive filter in the format 'errorGroupIdentifier = abc123' or 'errorGroupIdentifier IN (abc123, def456)'" + } + }, + "required": [ + "applicationIdentifier", + "start", + "end", + "granularity", + "aggregation", + "metrics" + ] + } + }, + { + "name": "list_sessions", + "description": "List sessions for an application", + "inputSchema": { + "type": "object", + "properties": { + "count": { + "type": "number", + "description": "Limits the number of items in the response" + }, + "offset": { + "type": "number", + "description": "Number of items to skip before returning results" + }, + "applicationIdentifier": { + "type": "string" + }, + "filter": { + "type": "string", + "description": "Filter items by an expression. Currently only supports filtering by `xhr.uri`. Example: xhr.uri eq https://example.com" + }, + "orderBy": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "customerIdentifier", + "customerIdentifier desc", + "startedAt", + "startedAt desc", + "updatedAt", + "updatedAt desc", + "endedAt", + "endedAt desc", + "countryCode", + "countryCode desc", + "platformName", + "platformName desc", + "operatingSystemName", + "operatingSystemName desc", + "operatingSystemVersion", + "operatingSystemVersion desc", + "browserName", + "browserName desc", + "browserVersion", + "browserVersion desc", + "viewportWidth", + "viewportWidth desc", + "viewportHeight", + "viewportHeight desc", + "deploymentVersion", + "deploymentVersion desc" + ] + }, + "description": "Order items by property values" + } + }, + "required": [ + "applicationIdentifier" + ] + } + }, + { + "name": "get_session", + "description": "Get session by identifier", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string" + }, 
+ "sessionIdentifier": { + "type": "string" + }, + "include": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "pageViews", + "errors" + ] + }, + "description": "Include additional information for the session" + } + }, + "required": [ + "applicationIdentifier", + "sessionIdentifier" + ] + } + }, + { + "name": "list_invitations", + "description": "Returns a list invitations that the token and token owner has access to", + "inputSchema": { + "type": "object", + "properties": { + "count": { + "type": "number", + "description": "Limits the number of items in the response" + }, + "offset": { + "type": "number", + "description": "Number of items to skip before returning results" + }, + "orderBy": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "emailAddress", + "emailAddress desc", + "createdAt", + "createdAt desc" + ] + }, + "description": "Order items by property values" + } + } + } + }, + { + "name": "send_invitation", + "description": "Send an invitation to a user", + "inputSchema": { + "type": "object", + "properties": { + "emailAddress": { + "type": "string", + "format": "email", + "description": "Email address to send the invitation to" + } + }, + "required": [ + "emailAddress" + ] + } + }, + { + "name": "get_invitation", + "description": "Get an invitation by identifier", + "inputSchema": { + "type": "object", + "properties": { + "invitationIdentifier": { + "type": "string", + "description": "Invitation identifier" + } + }, + "required": [ + "invitationIdentifier" + ] + } + }, + { + "name": "revoke_invitation", + "description": "Revoke a sent invitation", + "inputSchema": { + "type": "object", + "properties": { + "invitationIdentifier": { + "type": "string", + "description": "Invitation identifier" + } + }, + "required": [ + "invitationIdentifier" + ] + } + }, + { + "name": "list_source_maps", + "description": "Returns a list of source maps for the specified application", + "inputSchema": { + "type": "object", + 
"properties": { + "count": { + "type": "number", + "description": "Limits the number of items in the response" + }, + "offset": { + "type": "number", + "description": "Number of items to skip before returning results" + }, + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "orderBy": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "uri", + "uri desc", + "fileName", + "fileName desc", + "fileSizeBytes", + "fileSizeBytes desc", + "uploadedAt", + "uploadedAt desc", + "createdAt", + "createdAt desc", + "updatedAt", + "updatedAt desc" + ] + }, + "description": "Order items by property values" + } + }, + "required": [ + "applicationIdentifier" + ] + } + }, + { + "name": "get_source_map", + "description": "Returns a single source map by identifier", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "sourceMapIdentifier": { + "type": "string", + "description": "Source map identifier" + } + }, + "required": [ + "applicationIdentifier", + "sourceMapIdentifier" + ] + } + }, + { + "name": "update_source_map", + "description": "Update the details of a source map", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "sourceMapIdentifier": { + "type": "string", + "description": "Source map identifier" + }, + "uri": { + "type": "string", + "format": "uri", + "description": "New URI for the source map" + } + }, + "required": [ + "applicationIdentifier", + "sourceMapIdentifier", + "uri" + ] + } + }, + { + "name": "delete_source_map", + "description": "Delete a source map", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "sourceMapIdentifier": { + "type": "string", + "description": "Source map identifier" + } 
+ }, + "required": [ + "applicationIdentifier", + "sourceMapIdentifier" + ] + } + }, + { + "name": "upload_source_map", + "description": "Uploads a source map to the specified application", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + }, + "filePath": { + "type": "string", + "description": "Path to the source map file" + }, + "uri": { + "type": "string", + "format": "uri", + "description": "URI to associate with the source map" + } + }, + "required": [ + "applicationIdentifier", + "filePath", + "uri" + ] + } + }, + { + "name": "delete_all_source_maps", + "description": "Deletes all source maps", + "inputSchema": { + "type": "object", + "properties": { + "applicationIdentifier": { + "type": "string", + "description": "Application identifier" + } + }, + "required": [ + "applicationIdentifier" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "mcp-zenml": { + "display_name": "ZenML MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/zenml-io/mcp-zenml" + }, + "homepage": "https://zenml.io", + "author": { + "name": "zenml-io" + }, + "license": "MIT", + "tags": [ + "zenml", + "mcp", + "ai", + "ml", + "pipelines" + ], + "arguments": { + "ZENML_STORE_URL": { + "description": "URL of your ZenML server", + "required": true, + "example": "https://d534d987a-zenml.cloudinfra.zenml.io" + }, + "ZENML_STORE_API_KEY": { + "description": "API key for your ZenML server", + "required": true, + "example": "your-api-key-here" + }, + "LOGLEVEL": { + "description": "Logging level", + "required": false, + "example": "INFO" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "uv", + "args": [ + "run", + "path/to/zenml_server.py" + ], + "env": { + "LOGLEVEL": "INFO", + "NO_COLOR": "1", + "PYTHONUNBUFFERED": "1", + "PYTHONIOENCODING": "UTF-8", + "ZENML_STORE_URL": "https://your-zenml-server-goes-here.com", + 
"ZENML_STORE_API_KEY": "your-api-key-here" + } + } + }, + "examples": [ + { + "title": "Query ZenML Information", + "description": "Ask about ZenML pipelines, runs, and other resources", + "prompt": "Can you show me the latest pipeline runs in my ZenML server?" + } + ], + "name": "mcp-zenml", + "description": "Interact with your MLOps and LLMOps pipelines through your ZenML MCP server", + "categories": [ + "Dev Tools" + ], + "is_official": true + }, + "travel-planner": { + "name": "travel-planner", + "display_name": "Travel Planner", + "description": "Travel planning and itinerary management server integrating with Google Maps API for location search, place details, and route calculations.", + "repository": { + "type": "git", + "url": "https://github.com/GongRzhe/TRAVEL-PLANNER-MCP-Server" + }, + "homepage": "https://github.com/GongRzhe/TRAVEL-PLANNER-MCP-Server", + "author": { + "name": "GongRzhe" + }, + "license": "MIT", + "categories": [ + "Professional Apps" + ], + "tags": [ + "google-maps", + "travel-planning" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@gongrzhe/server-travelplanner-mcp" + ], + "env": { + "GOOGLE_MAPS_API_KEY": "${GOOGLE_MAPS_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Search Places", + "description": "Search for places using Google Places API", + "prompt": "searchPlaces({ query: 'restaurants', location: '34.0522,-118.2437', radius: 5000 });" + }, + { + "title": "Get Place Details", + "description": "Get detailed information about a specific place", + "prompt": "getPlaceDetails({ placeId: 'ChIJN1t_tDeuEmsRUcIa02j2sDE' });" + }, + { + "title": "Calculate Route", + "description": "Calculate route between two locations", + "prompt": "calculateRoute({ origin: 'Los Angeles, CA', destination: 'San Francisco, CA', mode: 'driving' });" + }, + { + "title": "Get Time Zone", + "description": "Get timezone information for a location", + "prompt": "getTimeZone({ location: '34.0522,-118.2437' });" + } 
+ ], + "arguments": { + "GOOGLE_MAPS_API_KEY": { + "description": "Your Google Maps API key with the following APIs enabled: Places API, Directions API, Geocoding API, Time Zone API", + "required": true, + "example": "your_google_maps_api_key" + } + }, + "tools": [ + { + "name": "create_itinerary", + "description": "Creates a personalized travel itinerary based on user preferences", + "inputSchema": { + "type": "object", + "properties": { + "origin": { + "type": "string", + "description": "Starting location" + }, + "destination": { + "type": "string", + "description": "Destination location" + }, + "startDate": { + "type": "string", + "description": "Start date (YYYY-MM-DD)" + }, + "endDate": { + "type": "string", + "description": "End date (YYYY-MM-DD)" + }, + "budget": { + "type": "number", + "description": "Budget in USD" + }, + "preferences": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Travel preferences" + } + }, + "required": [ + "origin", + "destination", + "startDate", + "endDate" + ] + } + }, + { + "name": "optimize_itinerary", + "description": "Optimizes an existing itinerary based on specified criteria", + "inputSchema": { + "type": "object", + "properties": { + "itineraryId": { + "type": "string", + "description": "ID of the itinerary to optimize" + }, + "optimizationCriteria": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Criteria for optimization (time, cost, etc.)" + } + }, + "required": [ + "itineraryId", + "optimizationCriteria" + ] + } + }, + { + "name": "search_attractions", + "description": "Searches for attractions and points of interest in a specified location", + "inputSchema": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "Location to search attractions" + }, + "radius": { + "type": "number", + "description": "Search radius in meters" + }, + "categories": { + "type": "array", + "items": { + "type": "string" + }, + "description": 
"Categories of attractions" + } + }, + "required": [ + "location" + ] + } + }, + { + "name": "get_transport_options", + "description": "Retrieves available transportation options between two points", + "inputSchema": { + "type": "object", + "properties": { + "origin": { + "type": "string", + "description": "Starting point" + }, + "destination": { + "type": "string", + "description": "Destination point" + }, + "date": { + "type": "string", + "description": "Travel date (YYYY-MM-DD)" + } + }, + "required": [ + "origin", + "destination", + "date" + ] + } + }, + { + "name": "get_accommodations", + "description": "Searches for accommodation options in a specified location", + "inputSchema": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "Location to search" + }, + "checkIn": { + "type": "string", + "description": "Check-in date (YYYY-MM-DD)" + }, + "checkOut": { + "type": "string", + "description": "Check-out date (YYYY-MM-DD)" + }, + "budget": { + "type": "number", + "description": "Maximum price per night" + } + }, + "required": [ + "location", + "checkIn", + "checkOut" + ] + } + } + ] + }, + "postgresql": { + "name": "postgresql", + "display_name": "PostgreSQL", + "description": "Read-only database access with schema inspection", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "PostgreSQL", + "Database", + "Read-Only" + ], + "author": { + "name": "modelcontextprotocol" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/postgres", + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-postgres", + "postgresql://localhost/mydb" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "mcp/postgres", + "postgresql://host.docker.internal:5432/mydb" + ] + } + }, 
+ "tools": [ + { + "name": "query", + "description": "Run a read-only SQL query", + "inputSchema": { + "type": "object", + "properties": { + "sql": { + "type": "string" + } + } + } + } + ], + "is_official": true + }, + "todoist": { + "name": "todoist", + "display_name": "Todoist", + "description": "Interact with Todoist to manage your tasks.", + "repository": { + "type": "git", + "url": "https://github.com/abhiz123/todoist-mcp-server" + }, + "homepage": "https://github.com/abhiz123/todoist-mcp-server", + "author": { + "name": "abhiz123" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@abhiz123/todoist-mcp-server" + ], + "env": { + "TODOIST_API_TOKEN": "${TODOIST_API_TOKEN}" + } + } + }, + "tags": [ + "task management", + "todoist", + "natural language processing" + ], + "examples": [ + { + "title": "Creating Tasks", + "description": "Example commands for creating tasks", + "prompt": "\"Create task 'Team Meeting'\"" + }, + { + "title": "Getting Tasks", + "description": "Example commands for retrieving tasks", + "prompt": "\"Show all my tasks\"" + }, + { + "title": "Updating Tasks", + "description": "Example commands for updating tasks", + "prompt": "\"Update documentation task to be due next week\"" + }, + { + "title": "Completing Tasks", + "description": "Example commands for completing tasks", + "prompt": "\"Mark the PR review task as complete\"" + }, + { + "title": "Deleting Tasks", + "description": "Example commands for deleting tasks", + "prompt": "\"Delete the PR review task\"" + } + ], + "arguments": { + "TODOIST_API_TOKEN": { + "description": "API token to authenticate with the Todoist service", + "required": true, + "example": "your_api_token_here" + } + }, + "tools": [ + { + "name": "todoist_create_task", + "description": "Create a new task in Todoist with optional description, due date, and priority", + "inputSchema": { + "type": "object", + 
"properties": { + "content": { + "type": "string", + "description": "The content/title of the task" + }, + "description": { + "type": "string", + "description": "Detailed description of the task (optional)" + }, + "due_string": { + "type": "string", + "description": "Natural language due date like 'tomorrow', 'next Monday', 'Jan 23' (optional)" + }, + "priority": { + "type": "number", + "description": "Task priority from 1 (normal) to 4 (urgent) (optional)", + "enum": [ + 1, + 2, + 3, + 4 + ] + } + }, + "required": [ + "content" + ] + } + }, + { + "name": "todoist_get_tasks", + "description": "Get a list of tasks from Todoist with various filters", + "inputSchema": { + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "Filter tasks by project ID (optional)" + }, + "filter": { + "type": "string", + "description": "Natural language filter like 'today', 'tomorrow', 'next week', 'priority 1', 'overdue' (optional)" + }, + "priority": { + "type": "number", + "description": "Filter by priority level (1-4) (optional)", + "enum": [ + 1, + 2, + 3, + 4 + ] + }, + "limit": { + "type": "number", + "description": "Maximum number of tasks to return (optional)", + "default": 10 + } + } + } + }, + { + "name": "todoist_update_task", + "description": "Update an existing task in Todoist by searching for it by name and then updating it", + "inputSchema": { + "type": "object", + "properties": { + "task_name": { + "type": "string", + "description": "Name/content of the task to search for and update" + }, + "content": { + "type": "string", + "description": "New content/title for the task (optional)" + }, + "description": { + "type": "string", + "description": "New description for the task (optional)" + }, + "due_string": { + "type": "string", + "description": "New due date in natural language like 'tomorrow', 'next Monday' (optional)" + }, + "priority": { + "type": "number", + "description": "New priority level from 1 (normal) to 4 (urgent) 
(optional)", + "enum": [ + 1, + 2, + 3, + 4 + ] + } + }, + "required": [ + "task_name" + ] + } + }, + { + "name": "todoist_delete_task", + "description": "Delete a task from Todoist by searching for it by name", + "inputSchema": { + "type": "object", + "properties": { + "task_name": { + "type": "string", + "description": "Name/content of the task to search for and delete" + } + }, + "required": [ + "task_name" + ] + } + }, + { + "name": "todoist_complete_task", + "description": "Mark a task as complete by searching for it by name", + "inputSchema": { + "type": "object", + "properties": { + "task_name": { + "type": "string", + "description": "Name/content of the task to search for and complete" + } + }, + "required": [ + "task_name" + ] + } + } + ] + }, + "ntfy-mcp": { + "name": "ntfy-mcp", + "display_name": "Your Friendly Task Completion Notifier", + "description": "The MCP server that keeps you informed by sending the notification on phone using ntfy", + "repository": { + "type": "git", + "url": "https://github.com/teddyzxcv/ntfy-mcp" + }, + "homepage": "https://github.com/teddyzxcv/ntfy-mcp", + "author": { + "name": "teddyzxcv" + }, + "license": "Apache License 2.0", + "categories": [ + "Messaging" + ], + "tags": [ + "ntfy", + "notifications" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/teddyzxcv/ntfy-mcp" + ], + "env": { + "NTFY_TOPIC": "${NTFY_TOPIC}" + } + } + }, + "examples": [ + { + "title": "Python Hello World", + "description": "Write a prompt to execute a task and receive a notification upon completion.", + "prompt": "Write me a hello world in python, notify me when the task is done" + } + ], + "arguments": { + "NTFY_TOPIC": { + "description": "Environment variable representing the topic name for notifications to be sent to.", + "required": true, + "example": "your_topic_name" + } + } + }, + "everart": { + "name": "everart", + "display_name": "EverArt", + "description": "AI image 
generation using various models", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/everart", + "author": { + "name": "modelcontextprotocol" + }, + "license": "[NOT FOUND]", + "categories": [ + "Media Creation" + ], + "tags": [ + "EverArt", + "API", + "Claude Desktop" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-everart" + ], + "env": { + "EVERART_API_KEY": "${EVERART_API_KEY}" + } + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "EVERART_API_KEY", + "mcp/everart" + ], + "env": { + "EVERART_API_KEY": "${EVERART_API_KEY}" + } + } + }, + "arguments": { + "EVERART_API_KEY": { + "description": "API key to access the EverArt API", + "required": true, + "example": "your_key_here" + } + }, + "tools": [ + { + "name": "generate_image", + "description": "Generate images using EverArt Models and returns a clickable link to view the generated image. The tool will return a URL that can be clicked to view the image in a browser. 
Available models:\n- 5000:FLUX1.1: Standard quality\n- 9000:FLUX1.1-ultra: Ultra high quality\n- 6000:SD3.5: Stable Diffusion 3.5\n- 7000:Recraft-Real: Photorealistic style\n- 8000:Recraft-Vector: Vector art style\n\nThe response will contain a direct link to view the generated image.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Text description of desired image" + }, + "model": { + "type": "string", + "description": "Model ID (5000:FLUX1.1, 9000:FLUX1.1-ultra, 6000:SD3.5, 7000:Recraft-Real, 8000:Recraft-Vector)", + "default": "5000" + }, + "image_count": { + "type": "number", + "description": "Number of images to generate", + "default": 1 + } + }, + "required": [ + "prompt" + ] + } + } + ], + "is_official": true + }, + "pushover": { + "name": "pushover", + "display_name": "Pushover Notifications", + "description": "Send instant notifications to your devices using [Pushover.net](https://pushover.net/)", + "repository": { + "type": "git", + "url": "https://github.com/ashiknesin/pushover-mcp" + }, + "homepage": "https://github.com/ashiknesin/pushover-mcp", + "author": { + "name": "ashiknesin" + }, + "license": "MIT", + "categories": [ + "Messaging" + ], + "tags": [ + "pushover", + "notifications" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "pushover-mcp@latest", + "start", + "--token", + "${YOUR_TOKEN}", + "--user", + "${YOUR_USER}" + ] + } + }, + "arguments": { + "YOUR_TOKEN": { + "description": "Application token required for authenticating with Pushover.net", + "required": true, + "example": "abcdef123456" + }, + "YOUR_USER": { + "description": "User key associated with your Pushover.net account", + "required": true, + "example": "1234567890:abcdef123456" + } + }, + "tools": [ + { + "name": "send", + "description": "Send a notification via Pushover", + "inputSchema": { + "type": "object", + "properties": { + "message": { + "type": "string", + 
"minLength": 1 + }, + "title": { + "type": "string" + }, + "priority": { + "type": "number", + "minimum": -2, + "maximum": 2 + }, + "sound": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + }, + "url_title": { + "type": "string" + }, + "device": { + "type": "string" + } + }, + "required": [ + "message" + ] + } + } + ] + }, + "memory": { + "name": "memory", + "display_name": "Knowledge Graph Memory", + "description": "Knowledge graph-based persistent memory system", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/memory", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "knowledge graph", + "memory", + "persistent memory" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-memory" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "-v", + "claude-memory:/app/dist", + "--rm", + "mcp/memory" + ] + } + }, + "examples": [ + { + "title": "Basic Memory Interaction", + "description": "A simple interaction with memory where user details are remembered.", + "prompt": "Remembering..." 
+ } + ], + "arguments": { + "MEMORY_FILE_PATH": { + "description": "Path to the memory storage JSON file (default: memory.json in the server directory)", + "required": false, + "example": "/path/to/custom/memory.json" + } + }, + "tools": [ + { + "name": "create_entities", + "description": "Create multiple new entities in the knowledge graph", + "inputSchema": { + "type": "object", + "properties": { + "entities": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name of the entity" + }, + "entityType": { + "type": "string", + "description": "The type of the entity" + }, + "observations": { + "type": "array", + "items": { + "type": "string" + }, + "description": "An array of observation contents associated with the entity" + } + }, + "required": [ + "name", + "entityType", + "observations" + ] + } + } + }, + "required": [ + "entities" + ] + } + }, + { + "name": "create_relations", + "description": "Create multiple new relations between entities in the knowledge graph. 
Relations should be in active voice", + "inputSchema": { + "type": "object", + "properties": { + "relations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "from": { + "type": "string", + "description": "The name of the entity where the relation starts" + }, + "to": { + "type": "string", + "description": "The name of the entity where the relation ends" + }, + "relationType": { + "type": "string", + "description": "The type of the relation" + } + }, + "required": [ + "from", + "to", + "relationType" + ] + } + } + }, + "required": [ + "relations" + ] + } + }, + { + "name": "add_observations", + "description": "Add new observations to existing entities in the knowledge graph", + "inputSchema": { + "type": "object", + "properties": { + "observations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "entityName": { + "type": "string", + "description": "The name of the entity to add the observations to" + }, + "contents": { + "type": "array", + "items": { + "type": "string" + }, + "description": "An array of observation contents to add" + } + }, + "required": [ + "entityName", + "contents" + ] + } + } + }, + "required": [ + "observations" + ] + } + }, + { + "name": "delete_entities", + "description": "Delete multiple entities and their associated relations from the knowledge graph", + "inputSchema": { + "type": "object", + "properties": { + "entityNames": { + "type": "array", + "items": { + "type": "string" + }, + "description": "An array of entity names to delete" + } + }, + "required": [ + "entityNames" + ] + } + }, + { + "name": "delete_observations", + "description": "Delete specific observations from entities in the knowledge graph", + "inputSchema": { + "type": "object", + "properties": { + "deletions": { + "type": "array", + "items": { + "type": "object", + "properties": { + "entityName": { + "type": "string", + "description": "The name of the entity containing the observations" + }, + "observations": { + 
"type": "array", + "items": { + "type": "string" + }, + "description": "An array of observations to delete" + } + }, + "required": [ + "entityName", + "observations" + ] + } + } + }, + "required": [ + "deletions" + ] + } + }, + { + "name": "delete_relations", + "description": "Delete multiple relations from the knowledge graph", + "inputSchema": { + "type": "object", + "properties": { + "relations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "from": { + "type": "string", + "description": "The name of the entity where the relation starts" + }, + "to": { + "type": "string", + "description": "The name of the entity where the relation ends" + }, + "relationType": { + "type": "string", + "description": "The type of the relation" + } + }, + "required": [ + "from", + "to", + "relationType" + ] + }, + "description": "An array of relations to delete" + } + }, + "required": [ + "relations" + ] + } + }, + { + "name": "read_graph", + "description": "Read the entire knowledge graph", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "search_nodes", + "description": "Search for nodes in the knowledge graph based on a query", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The search query to match against entity names, types, and observation content" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "open_nodes", + "description": "Open specific nodes in the knowledge graph by their names", + "inputSchema": { + "type": "object", + "properties": { + "names": { + "type": "array", + "items": { + "type": "string" + }, + "description": "An array of entity names to retrieve" + } + }, + "required": [ + "names" + ] + } + } + ], + "is_official": true + }, + "elevenlabs": { + "name": "elevenlabs", + "display_name": "ElevenLabs", + "description": "A server that integrates with ElevenLabs text-to-speech API capable of generating full voiceovers with multiple 
voices.", + "repository": { + "type": "git", + "url": "https://github.com/mamertofabian/elevenlabs-mcp-server" + }, + "homepage": "https://github.com/mamertofabian/elevenlabs-mcp-server", + "author": { + "name": "mamertofabian" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "ElevenLabs", + "Text-to-Speech", + "SvelteKit", + "TTS" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "elevenlabs-mcp-server" + ], + "env": { + "ELEVENLABS_API_KEY": "${ELEVENLABS_API_KEY}", + "ELEVENLABS_VOICE_ID": "${ELEVENLABS_VOICE_ID}", + "ELEVENLABS_MODEL_ID": "${ELEVENLABS_MODEL_ID}", + "ELEVENLABS_STABILITY": "${ELEVENLABS_STABILITY}", + "ELEVENLABS_SIMILARITY_BOOST": "${ELEVENLABS_SIMILARITY_BOOST}", + "ELEVENLABS_STYLE": "${ELEVENLABS_STYLE}", + "ELEVENLABS_OUTPUT_DIR": "${ELEVENLABS_OUTPUT_DIR}" + } + } + }, + "arguments": { + "ELEVENLABS_API_KEY": { + "description": "Your API key for ElevenLabs to access the text-to-speech services.", + "required": true, + "example": "sk-12345abcd" + }, + "ELEVENLABS_VOICE_ID": { + "description": "The ID of the voice you want to use for synthesis.", + "required": true, + "example": "voice-12345" + }, + "ELEVENLABS_MODEL_ID": { + "description": "The model ID to be used, indicating the version of the ElevenLabs API to utilize.", + "required": false, + "example": "eleven_flash_v2" + }, + "ELEVENLABS_STABILITY": { + "description": "Stability of the voice generation; controls variations in the output voice.", + "required": false, + "example": "0.5" + }, + "ELEVENLABS_SIMILARITY_BOOST": { + "description": "Boosting similarity for the voices; affects how closely the output mimics the selected voice.", + "required": false, + "example": "0.75" + }, + "ELEVENLABS_STYLE": { + "description": "Style parameter to adjust the expression in the generated speech.", + "required": false, + "example": "0.1" + }, + "ELEVENLABS_OUTPUT_DIR": { + "description": "Directory path where the generated 
audio files will be saved.", + "required": false, + "example": "output" + } + }, + "tools": [ + { + "name": "generate_audio_simple", + "description": "Generate audio from plain text using default voice settings", + "inputSchema": { + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "Plain text to convert to audio" + }, + "voice_id": { + "type": "string", + "description": "Optional voice ID to use for generation" + } + }, + "required": [ + "text" + ] + } + }, + { + "name": "generate_audio_script", + "description": "Generate audio from a structured script with multiple voices and actors. \n Accepts either:\n 1. Plain text string\n 2. JSON string with format: {\n \"script\": [\n {\n \"text\": \"Text to speak\",\n \"voice_id\": \"optional-voice-id\",\n \"actor\": \"optional-actor-name\"\n },\n ...\n ]\n }", + "inputSchema": { + "type": "object", + "properties": { + "script": { + "type": "string", + "description": "JSON string containing script array or plain text. For JSON format, provide an object with a 'script' array containing objects with 'text' (required), 'voice_id' (optional), and 'actor' (optional) fields." 
+ } + }, + "required": [ + "script" + ] + } + }, + { + "name": "delete_job", + "description": "Delete a voiceover job and its associated files", + "inputSchema": { + "type": "object", + "properties": { + "job_id": { + "type": "string", + "description": "ID of the job to delete" + } + }, + "required": [ + "job_id" + ] + } + }, + { + "name": "get_audio_file", + "description": "Get the audio file content for a specific job", + "inputSchema": { + "type": "object", + "properties": { + "job_id": { + "type": "string", + "description": "ID of the job to get audio file for" + } + }, + "required": [ + "job_id" + ] + } + }, + { + "name": "list_voices", + "description": "Get a list of all available ElevenLabs voices with metadata", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "get_voiceover_history", + "description": "Get voiceover job history. Optionally specify a job ID for a specific job.", + "inputSchema": { + "type": "object", + "properties": { + "job_id": { + "type": "string", + "description": "Optional job ID to get details for a specific job" + } + }, + "required": [] + } + } + ] + }, + "airbnb": { + "name": "airbnb", + "display_name": "Airbnb", + "description": "Provides tools to search Airbnb and get listing details.", + "repository": { + "type": "git", + "url": "https://github.com/openbnb-org/mcp-server-airbnb" + }, + "homepage": "https://github.com/openbnb-org/mcp-server-airbnb", + "author": { + "name": "openbnb-org" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "Airbnb", + "search", + "listings" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@openbnb/mcp-server-airbnb" + ], + "description": "Run with npx (requires npm install)" + } + }, + "examples": [ + { + "title": "Search for Airbnb Listings", + "description": "Search for listings in a specified location.", + "prompt": "Search for listings in New York" + }, + { + "title": "Get 
Listing Details", + "description": "Retrieve details for a specific listing.", + "prompt": "Get details for listing 12345" + } + ], + "arguments": { + "location": { + "description": "The location where you want to search for Airbnb listings", + "required": true, + "example": "New York City" + }, + "placeId": { + "description": "The unique identifier for a specific place or location", + "required": false, + "example": "ChIJN1t_tDeuEmsRUsoyG83frY4" + }, + "checkin": { + "description": "The check-in date for your stay in YYYY-MM-DD format", + "required": false, + "example": "2023-10-01" + }, + "checkout": { + "description": "The check-out date for your stay in YYYY-MM-DD format", + "required": false, + "example": "2023-10-05" + }, + "adults": { + "description": "The number of adults staying", + "required": false, + "example": "2" + }, + "children": { + "description": "The number of children staying", + "required": false, + "example": "1" + }, + "infants": { + "description": "The number of infants staying", + "required": false, + "example": "1" + }, + "pets": { + "description": "The number of pets allowed in the listing", + "required": false, + "example": "2" + }, + "minPrice": { + "description": "The minimum price per night for the listings", + "required": false, + "example": "50" + }, + "maxPrice": { + "description": "The maximum price per night for the listings", + "required": false, + "example": "300" + }, + "cursor": { + "description": "A cursor for paginating through results", + "required": false, + "example": "next-page-token" + }, + "ignoreRobotsText": { + "description": "Set to true to disregard Airbnb's robots.txt rules for all requests", + "required": false, + "example": "true" + } + }, + "tools": [ + { + "name": "airbnb_search", + "description": "Search for Airbnb listings with various filters and pagination. 
Provide direct links to the user", + "inputSchema": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "Location to search for (city, state, etc.)" + }, + "placeId": { + "type": "string", + "description": "Google Maps Place ID (overrides the location parameter)" + }, + "checkin": { + "type": "string", + "description": "Check-in date (YYYY-MM-DD)" + }, + "checkout": { + "type": "string", + "description": "Check-out date (YYYY-MM-DD)" + }, + "adults": { + "type": "number", + "description": "Number of adults" + }, + "children": { + "type": "number", + "description": "Number of children" + }, + "infants": { + "type": "number", + "description": "Number of infants" + }, + "pets": { + "type": "number", + "description": "Number of pets" + }, + "minPrice": { + "type": "number", + "description": "Minimum price for the stay" + }, + "maxPrice": { + "type": "number", + "description": "Maximum price for the stay" + }, + "cursor": { + "type": "string", + "description": "Base64-encoded string used for Pagination" + }, + "ignoreRobotsText": { + "type": "boolean", + "description": "Ignore robots.txt rules for this request" + } + }, + "required": [ + "location" + ] + } + }, + { + "name": "airbnb_listing_details", + "description": "Get detailed information about a specific Airbnb listing. 
Provide direct links to the user", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The Airbnb listing ID" + }, + "checkin": { + "type": "string", + "description": "Check-in date (YYYY-MM-DD)" + }, + "checkout": { + "type": "string", + "description": "Check-out date (YYYY-MM-DD)" + }, + "adults": { + "type": "number", + "description": "Number of adults" + }, + "children": { + "type": "number", + "description": "Number of children" + }, + "infants": { + "type": "number", + "description": "Number of infants" + }, + "pets": { + "type": "number", + "description": "Number of pets" + }, + "ignoreRobotsText": { + "type": "boolean", + "description": "Ignore robots.txt rules for this request" + } + }, + "required": [ + "id" + ] + } + } + ] + }, + "prometheus": { + "name": "prometheus", + "display_name": "Prometheus", + "description": "Query and analyze Prometheus - open-source monitoring system.", + "repository": { + "type": "git", + "url": "https://github.com/pab1it0/prometheus-mcp-server" + }, + "homepage": "https://github.com/pab1it0/prometheus-mcp-server", + "author": { + "name": "pab1it0" + }, + "license": "MIT", + "categories": [ + "Analytics" + ], + "tags": [ + "Prometheus", + "Metrics", + "AI" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/pab1it0/prometheus-mcp-server", + "prometheus-mcp-server" + ], + "env": { + "PROMETHEUS_URL": "${PROMETHEUS_URL}", + "PROMETHEUS_USERNAME": "${PROMETHEUS_USERNAME}", + "PROMETHEUS_PASSWORD": "${PROMETHEUS_PASSWORD}" + } + } + }, + "examples": [ + { + "title": "Execute Query", + "description": "Execute a PromQL instant query against Prometheus", + "prompt": "execute_query({ query: \"up\" })" + }, + { + "title": "List Metrics", + "description": "Get a list of metrics from Prometheus", + "prompt": "list_metrics()" + } + ], + "arguments": { + "PROMETHEUS_URL": { + "description": "The URL of the 
Prometheus server you want to connect to.", + "required": true, + "example": "http://your-prometheus-server:9090" + }, + "PROMETHEUS_USERNAME": { + "description": "The username for basic authentication when accessing the Prometheus server.", + "required": false, + "example": "your_username" + }, + "PROMETHEUS_PASSWORD": { + "description": "The password for basic authentication when accessing the Prometheus server.", + "required": false, + "example": "your_password" + } + }, + "tools": [ + { + "name": "execute_query", + "description": "Execute a PromQL instant query against Prometheus", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + }, + "time": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Time" + } + }, + "required": [ + "query" + ], + "title": "execute_queryArguments", + "type": "object" + } + }, + { + "name": "execute_range_query", + "description": "Execute a PromQL range query with start time, end time, and step interval", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + }, + "start": { + "title": "Start", + "type": "string" + }, + "end": { + "title": "End", + "type": "string" + }, + "step": { + "title": "Step", + "type": "string" + } + }, + "required": [ + "query", + "start", + "end", + "step" + ], + "title": "execute_range_queryArguments", + "type": "object" + } + }, + { + "name": "list_metrics", + "description": "List all available metrics in Prometheus", + "inputSchema": { + "properties": {}, + "title": "list_metricsArguments", + "type": "object" + } + }, + { + "name": "get_metric_metadata", + "description": "Get metadata for a specific metric", + "inputSchema": { + "properties": { + "metric": { + "title": "Metric", + "type": "string" + } + }, + "required": [ + "metric" + ], + "title": "get_metric_metadataArguments", + "type": "object" + } + }, + { + "name": "get_targets", + "description": "Get information about all 
scrape targets", + "inputSchema": { + "properties": {}, + "title": "get_targetsArguments", + "type": "object" + } + } + ] + }, + "searxng": { + "name": "searxng", + "display_name": "SearXNG", + "description": "A Model Context Protocol Server for [SearXNG](https://docs.searxng.org/)", + "repository": { + "type": "git", + "url": "https://github.com/ihor-sokoliuk/mcp-searxng" + }, + "homepage": "https://github.com/ihor-sokoliuk/mcp-searxng", + "author": { + "name": "ihor-sokoliuk" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "search", + "searxng", + "api" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/ihor-sokoliuk/mcp-searxng" + ], + "env": { + "SEARXNG_URL": "${SEARXNG_URL}" + } + } + }, + "arguments": { + "SEARXNG_URL": { + "description": "Environment variable to set the URL of the SearXNG instance that will be used for search queries.", + "required": true, + "example": "http://localhost:8080" + } + }, + "tools": [ + { + "name": "searxng_web_search", + "description": "Execute web searches with pagination.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search terms" + }, + "count": { + "type": "number", + "description": "Results per page (default: 20)", + "optional": true + }, + "offset": { + "type": "number", + "description": "Pagination offset (default: 0)", + "optional": true + } + }, + "required": [ + "query" + ] + } + ] + }, + "greptimedb-mcp-server": { + "display_name": "GreptimeDB MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/GreptimeTeam/greptimedb-mcp-server" + }, + "homepage": "https://github.com/GreptimeTeam/greptimedb-mcp-server", + "author": { + "name": "GreptimeTeam" + }, + "license": "MIT", + "tags": [ + "database", + "sql", + "greptimedb", + "mcp" + ], + "arguments": { + "GREPTIMEDB_HOST": { + "description": "Database host", + "required": true, + "example": "localhost" + }, + "GREPTIMEDB_PORT": 
{ + "description": "Database port", + "required": false, + "example": "4002" + }, + "GREPTIMEDB_USER": { + "description": "Database username", + "required": true, + "example": "root" + }, + "GREPTIMEDB_PASSWORD": { + "description": "Database password", + "required": true, + "example": "" + }, + "GREPTIMEDB_DATABASE": { + "description": "Database name", + "required": true, + "example": "public" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "greptimedb-mcp-server" + ], + "env": { + "GREPTIMEDB_HOST": "localhost", + "GREPTIMEDB_PORT": "4002", + "GREPTIMEDB_USER": "root", + "GREPTIMEDB_PASSWORD": "", + "GREPTIMEDB_DATABASE": "public" + } + } + }, + "examples": [ + { + "title": "Basic Usage", + "description": "Connect to GreptimeDB and explore tables", + "prompt": "Connect to my GreptimeDB instance and list all available tables." + } + ], + "name": "greptimedb-mcp-server", + "description": "A Model Context Protocol (MCP) server implementation for [GreptimeDB](https://github.com/GreptimeTeam/greptimedb).", + "categories": [ + "Databases" + ], + "is_official": true, + "tools": [ + { + "name": "execute_sql", + "description": "Execute an SQL query on the GreptimeDB server", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The SQL query to execute" + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "pinecone": { + "name": "pinecone", + "display_name": "Pinecone Model Context Protocol for Claude Desktop", + "description": "MCP server for searching and uploading records to Pinecone. 
Allows for simple RAG features, leveraging Pinecone's Inference API.", + "repository": { + "type": "git", + "url": "https://github.com/sirmews/mcp-pinecone" + }, + "homepage": "https://github.com/sirmews/mcp-pinecone", + "author": { + "name": "sirmews" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "pinecone" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-pinecone", + "--index-name", + "${your-index-name}", + "--api-key", + "${your-secret-api-key}" + ] + } + }, + "tools": [ + { + "name": "semantic_search", + "description": "Search Pinecone for documents.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query" + }, + "top_k": { + "type": "integer", + "description": "Number of top results to return (default: 10)", + "default": 10 + }, + "namespace": { + "type": "string", + "description": "Optional namespace to search in", + "optional": true + }, + "category": { + "type": "string", + "description": "Category for search" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Tags for search" + }, + "date_range": { + "type": "object", + "properties": { + "start": { + "type": "string", + "format": "date", + "description": "Start date of the range" + }, + "end": { + "type": "string", + "format": "date", + "description": "End date of the range" + } + } + } + }, + "required": [ + "query" + ] + }, + { + "name": "read_document", + "description": "Read a document from Pinecone.", + "inputSchema": { + "document_id": { + "type": "string", + "description": "ID of the document to read" + }, + "namespace": { + "type": "string", + "description": "Optional namespace to read from", + "optional": true + } + }, + "required": [ + "document_id" + ] + }, + { + "name": "process_document", + "description": "Process a document. 
This will optionally chunk, then embed, and upsert the document into Pinecone.", + "inputSchema": { + "document_id": { + "type": "string", + "description": "ID of the document to process" + }, + "text": { + "type": "string", + "description": "Text content of the document" + }, + "metadata": { + "type": "object", + "description": "Metadata for the document" + }, + "namespace": { + "type": "string", + "description": "Optional namespace to store the document in", + "optional": true + } + }, + "required": [ + "document_id", + "text", + "metadata" + ] + }, + { + "name": "list_documents", + "description": "List all documents in the knowledge base by namespace.", + "inputSchema": { + "namespace": { + "type": "string", + "description": "Namespace to list documents in" + } + }, + "required": [ + "namespace" + ] + }, + { + "name": "pinecone_stats", + "description": "Get stats about the Pinecone index specified in this server.", + "inputSchema": {}, + "required": [] + } + ] + }, + "atlassian": { + "name": "atlassian", + "display_name": "Atlassian", + "description": "Interact with Atlassian Cloud products (Confluence and Jira) including searching/reading Confluence spaces/pages, accessing Jira issues, and project metadata.", + "repository": { + "type": "git", + "url": "https://github.com/sooperset/mcp-atlassian" + }, + "homepage": "https://github.com/sooperset/mcp-atlassian", + "author": { + "name": "sooperset" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "Atlassian", + "Confluence", + "Jira" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-atlassian", + "--confluence-url=${CONFLUENCE_URL}", + "--confluence-username=${CONFLUENCE_USERNAME}", + "--confluence-token=${CONFLUENCE_TOKEN}", + "--jira-url=${JIRA_URL}", + "--jira-username=${JIRA_USERNAME}", + "--jira-token=${JIRA_TOKEN}" + ], + "description": "Run with uvx (requires uv install)" + }, + "python": { + "type": "python", + "command": "python", + 
"args": [ + "-m", + "mcp-atlassian", + "--confluence-url=${CONFLUENCE_URL}", + "--confluence-username=${CONFLUENCE_USERNAME}", + "--confluence-token=${CONFLUENCE_TOKEN}", + "--jira-url=${JIRA_URL}", + "--jira-username=${JIRA_USERNAME}", + "--jira-token=${JIRA_TOKEN}" + ], + "description": "Run with Python module (requires pip install)" + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "mcp/atlassian", + "--confluence-url=${CONFLUENCE_URL}", + "--confluence-username=${CONFLUENCE_USERNAME}", + "--confluence-token=${CONFLUENCE_TOKEN}", + "--jira-url=${JIRA_URL}", + "--jira-username=${JIRA_USERNAME}", + "--jira-token=${JIRA_TOKEN}" + ] + } + }, + "arguments": { + "CONFLUENCE_URL": { + "description": "The URL of the Confluence site to connect to. Required for both Cloud and Server/Data Center deployments.", + "required": true, + "example": "https://your-company.atlassian.net/wiki or https://confluence.your-company.com" + }, + "CONFLUENCE_USERNAME": { + "description": "The username for the Confluence account (email for Cloud). Required to authenticate with Confluence.", + "required": true, + "example": "your.email@company.com" + }, + "CONFLUENCE_TOKEN": { + "description": "The API token or personal access token for the Confluence account. Required for authentication with Confluence.", + "required": true, + "example": "your_api_token or your_token" + }, + "JIRA_URL": { + "description": "The URL of the Jira site to connect to. Required for both Cloud and Server/Data Center deployments.", + "required": true, + "example": "https://your-company.atlassian.net or https://jira.your-company.com" + }, + "JIRA_USERNAME": { + "description": "The username for the Jira account (email for Cloud). Required to authenticate with Jira.", + "required": true, + "example": "your.email@company.com" + }, + "JIRA_TOKEN": { + "description": "The API token or personal access token for the Jira account. 
Required for authentication with Jira.", + "required": true, + "example": "your_api_token or your_token" + } + }, + "tools": [] + }, + "mcp-server-browserbase": { + "display_name": "Browserbase MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/browserbase/mcp-server-browserbase" + }, + "license": "[NOT GIVEN]", + "homepage": "https://www.browserbase.com/", + "author": { + "name": "browserbase" + }, + "tags": [ + "browser automation", + "puppeteer", + "stagehand", + "web interaction", + "screenshots", + "javascript" + ], + "installations": { + "custom": { + "type": "custom", + "command": "node", + "args": [ + "src/build/dist/index.js" + ], + "description": "Run using Node.js" + } + }, + "name": "mcp-server-browserbase", + "description": "Automate browser interactions in the cloud (e.g. web navigation, data extraction, form filling, and more)", + "categories": [ + "Dev Tools" + ], + "is_official": true + }, + "open-strategy-partners-marketing-tools": { + "name": "open-strategy-partners-marketing-tools", + "display_name": "Open Strategy Partners Marketing Tools", + "description": "Content editing codes, value map, and positioning tools for product marketing.", + "repository": { + "type": "git", + "url": "https://github.com/open-strategy-partners/osp_marketing_tools" + }, + "homepage": "https://github.com/open-strategy-partners/osp_marketing_tools", + "author": { + "name": "open-strategy-partners" + }, + "license": "CC-BY-SA-4.0", + "categories": [ + "Productivity" + ], + "tags": [ + "LLM", + "Technical Writing", + "Optimization" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/open-strategy-partners/osp_marketing_tools@main", + "osp_marketing_tools" + ] + } + }, + "examples": [ + { + "title": "Value Map Generation", + "description": "Generate an OSP value map for a product with specified features for a target audience.", + "prompt": "Generate an OSP value map for 
CloudDeploy, focusing on DevOps engineers with these key features: - Automated deployment pipeline - Infrastructure as code support - Real-time monitoring - Multi-cloud compatibility." + }, + { + "title": "Meta Information Creation", + "description": "Create optimized metadata for an article based on a specific topic and audience.", + "prompt": "Use the OSP meta tool to generate metadata for an article about containerization best practices. Primary keyword: 'Docker containers', audience: system administrators, content type: technical guide." + }, + { + "title": "Content Editing", + "description": "Review technical content using OSP editing codes for improvements.", + "prompt": "Review this technical content using OSP editing codes: Kubernetes helps you manage containers. It's really good at what it does. You can use it to deploy your apps and make them run better." + }, + { + "title": "Technical Writing", + "description": "Apply the OSP writing guide to create a document for a specific audience.", + "prompt": "Apply the OSP writing guide to create a tutorial about setting up a CI/CD pipeline for junior developers." 
+ } + ], + "tools": [ + { + "name": "health_check", + "description": "Check if the server is running and can access its resources", + "inputSchema": { + "properties": {}, + "title": "health_checkArguments", + "type": "object" + } + }, + { + "name": "get_editing_codes", + "description": "Get the Open Strategy Partners (OSP) editing codes documentation and usage protocol for editing texts.", + "inputSchema": { + "properties": {}, + "title": "get_editing_codesArguments", + "type": "object" + } + }, + { + "name": "get_writing_guide", + "description": "Get the Open Strategy Partners (OSP) writing guide and usage protocol for editing texts.", + "inputSchema": { + "properties": {}, + "title": "get_writing_guideArguments", + "type": "object" + } + }, + { + "name": "get_meta_guide", + "description": "Get the Open Strategy Partners (OSP) Web Content Meta Information Generation System (titles, meta-titles, slugs).", + "inputSchema": { + "properties": {}, + "title": "get_meta_guideArguments", + "type": "object" + } + }, + { + "name": "get_value_map_positioning_guide", + "description": "Get the Open Strategy Partners (OSP) Product Communications Value Map Generation System for Product Positioning (value cases, feature extraction, taglines).", + "inputSchema": { + "properties": {}, + "title": "get_value_map_positioning_guideArguments", + "type": "object" + } + }, + { + "name": "get_on_page_seo_guide", + "description": "Get the Open Strategy Partners (OSP) On-Page SEO Optimization Guide.", + "inputSchema": { + "properties": {}, + "title": "get_on_page_seo_guideArguments", + "type": "object" + } + } + ] + }, + "mongodb-lens": { + "name": "mongodb-lens", + "display_name": "MongoDB Lens", + "description": "Full Featured MCP Server for MongoDB Databases.", + "repository": { + "type": "git", + "url": "https://github.com/furey/mongodb-lens" + }, + "homepage": "https://github.com/furey/mongodb-lens", + "author": { + "name": "furey" + }, + "license": "MIT", + "categories": [ + 
"Databases" + ], + "tags": [ + "mongodb", + "server" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "mongodb-lens@latest", + "${MONGODB_URI}" + ], + "env": { + "CONFIG_LOG_LEVEL": "${CONFIG_LOG_LEVEL}" + } + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "--network=host", + "--pull=always", + "-e", + "CONFIG_LOG_LEVEL='verbose'", + "furey/mongodb-lens", + "${MONGODB_URI}" + ], + "env": { + "CONFIG_LOG_LEVEL": "${CONFIG_LOG_LEVEL}" + } + } + }, + "arguments": { + "CONFIG_LOG_LEVEL": { + "description": "Sets the logging level of MongoDB Lens, controlling the verbosity of log output.", + "required": false, + "example": "verbose" + }, + "MONGODB_URI": { + "description": "The connection string for the MongoDB database.", + "required": true, + "example": "mongodb://your-connection-string" + } + }, + "tools": [ + { + "name": "connect-mongodb", + "description": "Connect to a different MongoDB URI or alias", + "inputSchema": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "minLength": 1, + "description": "MongoDB connection URI or alias to connect to" + }, + "validateConnection": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "true", + "description": "Whether to validate the connection" + } + }, + "required": [ + "uri" + ] + } + }, + { + "name": "connect-original", + "description": "Connect back to the original MongoDB URI used at startup", + "inputSchema": { + "type": "object", + "properties": { + "validateConnection": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "true", + "description": "Whether to validate the connection" + } + } + } + }, + { + "name": "add-connection-alias", + "description": "Add a new MongoDB connection alias", + "inputSchema": { + "type": "object", + "properties": { + 
"alias": { + "type": "string", + "minLength": 1, + "description": "Alias name for the connection" + }, + "uri": { + "type": "string", + "minLength": 1, + "description": "MongoDB connection URI" + } + }, + "required": [ + "alias", + "uri" + ] + } + }, + { + "name": "list-connections", + "description": "List all configured MongoDB connection aliases", + "inputSchema": { + "type": "object" + } + }, + { + "name": "list-databases", + "description": "List all accessible MongoDB databases", + "inputSchema": { + "type": "object" + } + }, + { + "name": "current-database", + "description": "Get the name of the current database", + "inputSchema": { + "type": "object" + } + }, + { + "name": "create-database", + "description": "Create a new MongoDB database with option to switch", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "minLength": 1, + "description": "Database name to create" + }, + "switch": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "false", + "description": "Whether to switch to the new database after creation" + }, + "validateName": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "true", + "description": "Whether to validate database name" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "use-database", + "description": "Switch to a specific database", + "inputSchema": { + "type": "object", + "properties": { + "database": { + "type": "string", + "minLength": 1, + "description": "Database name to use" + } + }, + "required": [ + "database" + ] + } + }, + { + "name": "drop-database", + "description": "Drop a database (requires confirmation)", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "minLength": 1, + "description": "Database name to drop" + }, + "token": { + "type": "string", + "description": "Confirmation 
token from previous request" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "create-user", + "description": "Create a new database user", + "inputSchema": { + "type": "object", + "properties": { + "username": { + "type": "string", + "minLength": 1, + "description": "Username" + }, + "password": { + "type": "string", + "minLength": 1, + "description": "Password" + }, + "roles": { + "type": "string", + "description": "Roles as JSON array, e.g. [{\"role\": \"readWrite\", \"db\": \"mydb\"}]" + } + }, + "required": [ + "username", + "password", + "roles" + ] + } + }, + { + "name": "drop-user", + "description": "Drop an existing database user", + "inputSchema": { + "type": "object", + "properties": { + "username": { + "type": "string", + "minLength": 1, + "description": "Username to drop" + }, + "token": { + "type": "string", + "description": "Confirmation token from previous request" + } + }, + "required": [ + "username" + ] + } + }, + { + "name": "list-collections", + "description": "List collections in the current database", + "inputSchema": { + "type": "object" + } + }, + { + "name": "create-collection", + "description": "Create a new collection with options", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "options": { + "type": "string", + "default": "{}", + "description": "Collection options as JSON string (capped, size, etc.)" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "drop-collection", + "description": "Drop a collection (requires confirmation)", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "minLength": 1, + "description": "Collection name to drop" + }, + "token": { + "type": "string", + "description": "Confirmation token from previous request" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "rename-collection", + "description": "Rename an existing collection", + 
"inputSchema": { + "type": "object", + "properties": { + "oldName": { + "type": "string", + "minLength": 1, + "description": "Current collection name" + }, + "newName": { + "type": "string", + "minLength": 1, + "description": "New collection name" + }, + "dropTarget": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "false", + "description": "Whether to drop target collection if it exists" + }, + "token": { + "type": "string", + "description": "Confirmation token from previous request" + } + }, + "required": [ + "oldName", + "newName" + ] + } + }, + { + "name": "validate-collection", + "description": "Run validation on a collection to check for inconsistencies", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "full": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "false", + "description": "Perform full validation (slower but more thorough)" + } + }, + "required": [ + "collection" + ] + } + }, + { + "name": "distinct-values", + "description": "Get unique values for a field", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "field": { + "type": "string", + "minLength": 1, + "description": "Field name to get distinct values for" + }, + "filter": { + "type": "string", + "default": "{}", + "description": "Optional filter as JSON string" + } + }, + "required": [ + "collection", + "field" + ] + } + }, + { + "name": "find-documents", + "description": "Run queries with filters and projections", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "filter": { + "type": "string", + "default": "{}", + "description": 
"MongoDB query filter (JSON string)" + }, + "projection": { + "type": "string", + "description": "Fields to include/exclude (JSON string)" + }, + "limit": { + "type": "integer", + "minimum": 1, + "default": 10, + "description": "Maximum number of documents to return" + }, + "skip": { + "type": "integer", + "minimum": 0, + "default": 0, + "description": "Number of documents to skip" + }, + "sort": { + "type": "string", + "description": "Sort specification (JSON string)" + }, + "streaming": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "false", + "description": "Enable streaming for large result sets" + } + }, + "required": [ + "collection" + ] + } + }, + { + "name": "count-documents", + "description": "Count documents with optional filter", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "filter": { + "type": "string", + "default": "{}", + "description": "MongoDB query filter (JSON string)" + } + }, + "required": [ + "collection" + ] + } + }, + { + "name": "insert-document", + "description": "Insert one or multiple documents into a collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "document": { + "type": "string", + "description": "Document as JSON string or array of documents" + }, + "options": { + "type": "string", + "description": "Options as JSON string (including \"ordered\" for multiple documents)" + } + }, + "required": [ + "collection", + "document" + ] + } + }, + { + "name": "update-document", + "description": "Update specific documents in a collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "filter": { + "type": "string", + "description": 
"Filter as JSON string" + }, + "update": { + "type": "string", + "description": "Update operations as JSON string" + }, + "options": { + "type": "string", + "description": "Options as JSON string" + } + }, + "required": [ + "collection", + "filter", + "update" + ] + } + }, + { + "name": "delete-document", + "description": "Delete document(s) (requires confirmation)", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "filter": { + "type": "string", + "minLength": 1, + "description": "Filter as JSON string" + }, + "many": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "false", + "description": "Delete multiple documents if true" + }, + "token": { + "type": "string", + "description": "Confirmation token from previous request" + } + }, + "required": [ + "collection", + "filter" + ] + } + }, + { + "name": "aggregate-data", + "description": "Run aggregation pipelines", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "pipeline": { + "type": "string", + "description": "Aggregation pipeline as JSON string array" + }, + "streaming": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "false", + "description": "Enable streaming results for large datasets" + }, + "limit": { + "type": "integer", + "minimum": 1, + "default": 1000, + "description": "Maximum number of results to return when streaming" + } + }, + "required": [ + "collection", + "pipeline" + ] + } + }, + { + "name": "create-index", + "description": "Create new index on collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "keys": { + "type": 
"string", + "description": "Index keys as JSON object" + }, + "options": { + "type": "string", + "description": "Index options as JSON object" + } + }, + "required": [ + "collection", + "keys" + ] + } + }, + { + "name": "drop-index", + "description": "Drop an existing index from a collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "indexName": { + "type": "string", + "minLength": 1, + "description": "Name of the index to drop" + }, + "token": { + "type": "string", + "description": "Confirmation token from previous request" + } + }, + "required": [ + "collection", + "indexName" + ] + } + }, + { + "name": "get-stats", + "description": "Get database or collection statistics", + "inputSchema": { + "type": "object", + "properties": { + "target": { + "type": "string", + "enum": [ + "database", + "collection" + ], + "description": "Target type" + }, + "name": { + "type": "string", + "description": "Collection name (for collection stats)" + } + }, + "required": [ + "target" + ] + } + }, + { + "name": "analyze-schema", + "description": "Automatically infer schema from collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "sampleSize": { + "type": "integer", + "minimum": 1, + "default": 100, + "description": "Number of documents to sample" + } + }, + "required": [ + "collection" + ] + } + }, + { + "name": "generate-schema-validator", + "description": "Generate a JSON Schema validator for a collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "strictness": { + "type": "string", + "enum": [ + "strict", + "moderate", + "relaxed" + ], + "default": "moderate", + "description": "Validation strictness level" + } + }, + "required": [ + 
"collection" + ] + } + }, + { + "name": "compare-schemas", + "description": "Compare schemas between two collections", + "inputSchema": { + "type": "object", + "properties": { + "sourceCollection": { + "type": "string", + "minLength": 1, + "description": "Source collection name" + }, + "targetCollection": { + "type": "string", + "minLength": 1, + "description": "Target collection name" + }, + "sampleSize": { + "type": "integer", + "minimum": 1, + "default": 100, + "description": "Number of documents to sample" + } + }, + "required": [ + "sourceCollection", + "targetCollection" + ] + } + }, + { + "name": "explain-query", + "description": "Analyze query performance", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "filter": { + "type": "string", + "description": "MongoDB query filter (JSON string)" + }, + "verbosity": { + "type": "string", + "enum": [ + "queryPlanner", + "executionStats", + "allPlansExecution" + ], + "default": "executionStats", + "description": "Explain verbosity level" + } + }, + "required": [ + "collection", + "filter" + ] + } + }, + { + "name": "analyze-query-patterns", + "description": "Analyze query patterns and suggest optimizations", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name to analyze" + }, + "duration": { + "type": "integer", + "minimum": 1, + "maximum": 60, + "default": 10, + "description": "Duration to analyze in seconds" + } + }, + "required": [ + "collection" + ] + } + }, + { + "name": "bulk-operations", + "description": "Perform bulk inserts, updates, or deletes", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "operations": { + "type": "string", + "description": "Array of operations as JSON string" + }, + "ordered": { + 
"allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "true", + "description": "Whether operations should be performed in order" + }, + "token": { + "type": "string", + "description": "Confirmation token from previous request" + } + }, + "required": [ + "collection", + "operations" + ] + } + }, + { + "name": "create-timeseries", + "description": "Create a time series collection for temporal data", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "timeField": { + "type": "string", + "minLength": 1, + "description": "Field that contains the time value" + }, + "metaField": { + "type": "string", + "description": "Field that contains metadata for grouping" + }, + "granularity": { + "type": "string", + "enum": [ + "seconds", + "minutes", + "hours" + ], + "default": "seconds", + "description": "Time series granularity" + }, + "expireAfterSeconds": { + "type": "integer", + "description": "Optional TTL in seconds" + } + }, + "required": [ + "name", + "timeField" + ] + } + }, + { + "name": "collation-query", + "description": "Find documents with language-specific collation rules", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "filter": { + "type": "string", + "default": "{}", + "description": "Query filter as JSON string" + }, + "locale": { + "type": "string", + "minLength": 2, + "description": "Locale code (e.g., \"en\", \"fr\", \"de\")" + }, + "strength": { + "type": "integer", + "minimum": 1, + "maximum": 5, + "default": 3, + "description": "Collation strength (1-5)" + }, + "caseLevel": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "false", + "description": "Consider case in first-level differences" + }, + "sort": { + 
"type": "string", + "description": "Sort specification as JSON string" + } + }, + "required": [ + "collection", + "locale" + ] + } + }, + { + "name": "text-search", + "description": "Perform full-text search across text-indexed fields", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "searchText": { + "type": "string", + "minLength": 1, + "description": "Text to search for" + }, + "language": { + "type": "string", + "description": "Optional language for text search" + }, + "caseSensitive": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "false", + "description": "Case sensitive search" + }, + "diacriticSensitive": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "false", + "description": "Diacritic sensitive search" + }, + "limit": { + "type": "integer", + "minimum": 1, + "default": 10, + "description": "Maximum results to return" + } + }, + "required": [ + "collection", + "searchText" + ] + } + }, + { + "name": "geo-query", + "description": "Run geospatial queries with various operators", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "operator": { + "type": "string", + "enum": [ + "near", + "geoWithin", + "geoIntersects" + ], + "description": "Geospatial operator type" + }, + "field": { + "type": "string", + "minLength": 1, + "description": "Geospatial field name" + }, + "geometry": { + "type": "string", + "description": "GeoJSON geometry as JSON string" + }, + "maxDistance": { + "type": "number", + "description": "Maximum distance in meters (for near queries)" + }, + "limit": { + "type": "integer", + "minimum": 1, + "default": 10, + "description": "Maximum number of documents to return" + } + }, + 
"required": [ + "collection", + "operator", + "field", + "geometry" + ] + } + }, + { + "name": "transaction", + "description": "Execute multiple operations in a single transaction", + "inputSchema": { + "type": "object", + "properties": { + "operations": { + "type": "string", + "description": "JSON array of operations with collection, operation type, and parameters" + } + }, + "required": [ + "operations" + ] + } + }, + { + "name": "map-reduce", + "description": "Run Map-Reduce operations (note: Map-Reduce deprecated as of MongoDB 5.0)", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "map": { + "type": "string", + "description": "Map function as string e.g. \"function() { emit(this.field, 1); }\"" + }, + "reduce": { + "type": "string", + "description": "Reduce function as string e.g. \"function(key, values) { return Array.sum(values); }\"" + }, + "options": { + "type": "string", + "description": "Options as JSON string (query, limit, etc.)" + } + }, + "required": [ + "collection", + "map", + "reduce" + ] + } + }, + { + "name": "watch-changes", + "description": "Watch for changes in a collection using change streams", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "operations": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "insert", + "update", + "delete", + "replace" + ] + }, + "default": [ + "insert", + "update", + "delete" + ], + "description": "Operations to watch" + }, + "duration": { + "type": "integer", + "minimum": 1, + "maximum": 60, + "default": 10, + "description": "Duration to watch in seconds" + }, + "fullDocument": { + "allOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "true", + "false" + ] + } + ], + "default": "false", + "description": "Include full document in update events" + } + }, + 
"required": [ + "collection" + ] + } + }, + { + "name": "gridfs-operation", + "description": "Manage large files with GridFS", + "inputSchema": { + "type": "object", + "properties": { + "operation": { + "type": "string", + "enum": [ + "list", + "info", + "delete" + ], + "description": "GridFS operation type" + }, + "bucket": { + "type": "string", + "default": "fs", + "description": "GridFS bucket name" + }, + "filename": { + "type": "string", + "description": "Filename for info/delete operations" + }, + "limit": { + "type": "integer", + "minimum": 1, + "default": 20, + "description": "Maximum files to list" + } + }, + "required": [ + "operation" + ] + } + }, + { + "name": "clear-cache", + "description": "Clear memory caches to ensure fresh data", + "inputSchema": { + "type": "object", + "properties": { + "target": { + "type": "string", + "enum": [ + "all", + "collections", + "schemas", + "indexes", + "stats", + "fields", + "serverStatus" + ], + "default": "all", + "description": "Cache type to clear (default: all)" + } + } + } + }, + { + "name": "shard-status", + "description": "Get sharding status for database or collections", + "inputSchema": { + "type": "object", + "properties": { + "target": { + "type": "string", + "enum": [ + "database", + "collection" + ], + "default": "database", + "description": "Target type" + }, + "collection": { + "type": "string", + "description": "Collection name (if target is collection)" + } + } + } + }, + { + "name": "export-data", + "description": "Export query results to formatted JSON or CSV", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "minLength": 1, + "description": "Collection name" + }, + "filter": { + "type": "string", + "default": "{}", + "description": "Filter as JSON string" + }, + "format": { + "type": "string", + "enum": [ + "json", + "csv" + ], + "default": "json", + "description": "Export format" + }, + "fields": { + "type": "string", + "description": 
"Comma-separated list of fields to include (for CSV)" + }, + "limit": { + "type": "integer", + "minimum": 1, + "default": 1000, + "description": "Maximum documents to export" + }, + "sort": { + "type": "string", + "description": "Sort specification as JSON string (e.g. {\"date\": -1} for descending)" + } + }, + "required": [ + "collection" + ] + } + } + ] + }, + "devrev": { + "name": "devrev", + "display_name": "DevRev", + "description": "An MCP server to integrate with DevRev APIs to search through your DevRev Knowledge Graph where objects can be imported from diff. sources listed [here](https://devrev.ai/docs/import#available-sources).", + "repository": { + "type": "git", + "url": "https://github.com/kpsunil97/devrev-mcp-server" + }, + "homepage": "https://github.com/kpsunil97/devrev-mcp-server", + "author": { + "name": "kpsunil97" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "devrev", + "server", + "search" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "devrev-mcp" + ], + "env": { + "DEVREV_API_KEY": "${DEVREV_API_KEY}" + } + } + }, + "arguments": { + "DEVREV_API_KEY": { + "description": "Your DevRev API key required to authenticate requests to the DevRev API.", + "required": true, + "example": "YOUR_DEVREV_API_KEY" + } + }, + "tools": [ + { + "name": "search", + "description": "Search DevRev using the provided query", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string" + }, + "namespace": { + "type": "string", + "enum": [ + "article", + "issue", + "ticket" + ] + } + }, + "required": [ + "query", + "namespace" + ] + } + }, + { + "name": "get_object", + "description": "Get all information about a DevRev object using its ID", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string" + } + }, + "required": [ + "id" + ] + } + } + ] + }, + "eunomia": { + "name": "eunomia", + "display_name": "Eunomia", + "description": 
"Extension of the Eunomia framework that connects Eunomia instruments with MCP servers", + "repository": { + "type": "git", + "url": "https://github.com/whataboutyou-ai/eunomia-MCP-server" + }, + "homepage": "https://github.com/whataboutyou-ai/eunomia-MCP-server", + "author": { + "name": "whataboutyou-ai" + }, + "license": "Apache-2.0", + "categories": [ + "AI Systems" + ], + "tags": [ + "Eunomia", + "Data Governance" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/whataboutyou-ai/eunomia-MCP-server", + "orchestra_server" + ] + } + }, + "arguments": { + "APP_NAME": { + "description": "Name of the application", + "required": true, + "example": "mcp-server_orchestra" + }, + "APP_VERSION": { + "description": "Current version of the application", + "required": true, + "example": "0.1.0" + }, + "LOG_LEVEL": { + "description": "Logging level to control the verbosity of logs (default: 'info')", + "required": false, + "example": "info" + }, + "REQUEST_TIMEOUT": { + "description": "Environment variable that sets the request timeout duration in seconds", + "required": false, + "example": "30" + } + } + }, + "amap": { + "name": "amap", + "display_name": "Amap / \u9ad8\u5fb7\u5730\u56fe", + "description": "MCP Server for the AMap Map API.", + "repository": { + "type": "npm", + "url": "https://www.npmjs.com/package/@amap/amap-maps-mcp-server" + }, + "homepage": "https://lbs.amap.com/api/mcp-server/summary", + "author": { + "name": "amap" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "amap", + "map" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@amap/amap-maps-mcp-server" + ], + "env": { + "AMAP_MAPS_API_KEY": "${AMAP_MAPS_API_KEY}" + } + } + }, + "arguments": { + "AMAP_MAPS_API_KEY": { + "description": "The API key to access the AMap service.", + "required": true, + "example": "YOUR_API_KEY_HERE" + } + }, + 
"is_official": true, + "tools": [ + { + "name": "maps_regeocode", + "description": "\u5c06\u7ecf\u7eac\u5ea6\u5750\u6807\u8f6c\u6362\u4e3a\u5546\u5708\u533a\u57df\u4fe1\u606f", + "inputSchema": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "\u7ecf\u7eac\u5ea6\u5750\u6807" + } + }, + "required": [ + "location" + ] + } + }, + { + "name": "maps_geo", + "description": "\u5c06\u5730\u5740\u8f6c\u6362\u4e3a\u7ecf\u7eac\u5ea6\u5750\u6807", + "inputSchema": { + "type": "object", + "properties": { + "address": { + "type": "string", + "description": "\u5730\u5740" + }, + "city": { + "type": "string", + "description": "\u6307\u5b9a\u67e5\u8be2\u7684\u57ce\u5e02" + } + }, + "required": [ + "address" + ] + } + }, + { + "name": "maps_ip_location", + "description": "\u6839\u636e\u7528\u6237\u8f93\u5165\u7684 IP \u5730\u5740\u786e\u5b9a IP \u7684\u4f4d\u7f6e", + "inputSchema": { + "type": "object", + "properties": { + "ip": { + "type": "string", + "description": "IP\u5730\u5740" + } + }, + "required": [ + "ip" + ] + } + }, + { + "name": "maps_weather", + "description": "\u6839\u636e\u57ce\u5e02\u540d\u79f0\u6216 adcode \u67e5\u8be2\u6307\u5b9a\u57ce\u5e02\u7684\u5929\u6c14", + "inputSchema": { + "type": "object", + "properties": { + "city": { + "type": "string", + "description": "\u57ce\u5e02\u540d\u79f0\u6216 adcode" + } + }, + "required": [ + "city" + ] + } + }, + { + "name": "maps_search_detail", + "description": "\u6839\u636e\u5173\u952e\u8bcd\u641c\u7d22\u6216\u5468\u8fb9\u641c\u7d22\u83b7\u53d6\u7684POI ID\u7684\u8be6\u7ec6\u4fe1\u606f", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "POI ID" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "maps_bicycling", + "description": 
"\u6839\u636e\u8d77\u70b9\u548c\u7ec8\u70b9\u7684\u7ecf\u7eac\u5ea6\u5750\u6807\u89c4\u5212\u81ea\u884c\u8f66\u8def\u7ebf\uff0c\u89c4\u5212\u65f6\u4f1a\u8003\u8651\u4ea4\u901a\u3001\u5355\u884c\u7ebf\u3001\u5c01\u95ed\u8def\u6bb5\u7b49\u60c5\u51b5\uff0c\u6700\u591a\u652f\u6301500\u516c\u91cc\u7684\u81ea\u884c\u8f66\u8def\u7ebf\u89c4\u5212", + "inputSchema": { + "type": "object", + "properties": { + "origin": { + "type": "string", + "description": "\u8d77\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\uff0c\u5750\u6807\u683c\u5f0f\u4e3a\uff1a\u7ecf\u5ea6,\u7eac\u5ea6" + }, + "destination": { + "type": "string", + "description": "\u7ec8\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\uff0c\u5750\u6807\u683c\u5f0f\u4e3a\uff1a\u7ecf\u5ea6,\u7eac\u5ea6" + } + }, + "required": [ + "origin", + "destination" + ] + } + }, + { + "name": "maps_direction_walking", + "description": "\u6839\u636e\u8d77\u70b9\u548c\u7ec8\u70b9\u7684\u7ecf\u7eac\u5ea6\u5750\u6807\u89c4\u5212\u6b65\u884c\u8def\u7ebf\uff0c\u6700\u591a\u652f\u6301100\u516c\u91cc\u7684\u6b65\u884c\u8def\u7ebf\u89c4\u5212", + "inputSchema": { + "type": "object", + "properties": { + "origin": { + "type": "string", + "description": "\u8d77\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\uff0c\u5750\u6807\u683c\u5f0f\u4e3a\uff1a\u7ecf\u5ea6,\u7eac\u5ea6" + }, + "destination": { + "type": "string", + "description": "\u7ec8\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\uff0c\u5750\u6807\u683c\u5f0f\u4e3a\uff1a\u7ecf\u5ea6,\u7eac\u5ea6" + } + }, + "required": [ + "origin", + "destination" + ] + } + }, + { + "name": "maps_direction_driving", + "description": "\u6839\u636e\u8d77\u70b9\u548c\u7ec8\u70b9\u7684\u7ecf\u7eac\u5ea6\u5750\u6807\u89c4\u5212\u6c7d\u8f66\u8def\u7ebf\uff0c\u6700\u591a\u652f\u6301500\u516c\u91cc\u7684\u6c7d\u8f66\u8def\u7ebf\u89c4\u5212", + "inputSchema": { + "type": "object", + "properties": { + "origin": { + "type": "string", + "description": 
"\u8d77\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\uff0c\u5750\u6807\u683c\u5f0f\u4e3a\uff1a\u7ecf\u5ea6,\u7eac\u5ea6" + }, + "destination": { + "type": "string", + "description": "\u7ec8\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\uff0c\u5750\u6807\u683c\u5f0f\u4e3a\uff1a\u7ecf\u5ea6,\u7eac\u5ea6" + } + }, + "required": [ + "origin", + "destination" + ] + } + }, + { + "name": "maps_direction_transit_integrated", + "description": "\u6839\u636e\u8d77\u70b9\u548c\u7ec8\u70b9\u7684\u7ecf\u7eac\u5ea6\u5750\u6807\u89c4\u5212\u516c\u5171\u4ea4\u901a\u8def\u7ebf\uff0c\u6700\u591a\u652f\u6301500\u516c\u91cc\u7684\u516c\u5171\u4ea4\u901a\u8def\u7ebf\u89c4\u5212", + "inputSchema": { + "type": "object", + "properties": { + "origin": { + "type": "string", + "description": "\u8d77\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\uff0c\u5750\u6807\u683c\u5f0f\u4e3a\uff1a\u7ecf\u5ea6,\u7eac\u5ea6" + }, + "destination": { + "type": "string", + "description": "\u7ec8\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\uff0c\u5750\u6807\u683c\u5f0f\u4e3a\uff1a\u7ecf\u5ea6,\u7eac\u5ea6" + }, + "city": { + "type": "string", + "description": "\u8d77\u70b9\u6240\u5728\u57ce\u5e02" + }, + "cityd": { + "type": "string", + "description": "\u7ec8\u70b9\u6240\u5728\u57ce\u5e02" + } + }, + "required": [ + "origin", + "destination", + "city", + "cityd" + ] + } + }, + { + "name": "maps_distance", + "description": "\u6839\u636e\u4e24\u4e2a\u7ecf\u7eac\u5ea6\u5750\u6807\u8ba1\u7b97\u8ddd\u79bb\uff0c\u652f\u6301\u516c\u4ea4\u3001\u6b65\u884c\u3001\u5730\u94c1\u8ddd\u79bb\u8ba1\u7b97", + "inputSchema": { + "type": "object", + "properties": { + "origins": { + "type": "string", + "description": "\u8d77\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\uff0c\u5750\u6807\u683c\u5f0f\u4e3a\uff1a\u7ecf\u5ea6,\u7eac\u5ea6\uff0c\u53ef\u4ee5\u8f93\u5165\u591a\u4e2a\u5750\u6807\uff0c\u7528\u5206\u53f7\u5206\u9694\uff0c\u4f8b\u5982120,30;120,31" + }, + "destination": { + "type": "string", + "description": 
"\u7ec8\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\uff0c\u5750\u6807\u683c\u5f0f\u4e3a\uff1a\u7ecf\u5ea6,\u7eac\u5ea6" + }, + "type": { + "type": "string", + "description": "\u8ddd\u79bb\u7c7b\u578b\uff0c1\u8868\u793a\u516c\u4ea4\u8ddd\u79bb\u8ba1\u7b97\uff0c0\u8868\u793a\u76f4\u7ebf\u8ddd\u79bb\u8ba1\u7b97\uff0c3\u8868\u793a\u6b65\u884c\u8ddd\u79bb\u8ba1\u7b97" + } + }, + "required": [ + "origins", + "destination" + ] + } + }, + { + "name": "maps_text_search", + "description": "\u5173\u952e\u8bcd\u641c\u7d22\uff0c\u6839\u636e\u7528\u6237\u8f93\u5165\u7684\u5173\u952e\u8bcd\u641c\u7d22\u76f8\u5173\u7684POI", + "inputSchema": { + "type": "object", + "properties": { + "keywords": { + "type": "string", + "description": "\u5173\u952e\u8bcd" + }, + "city": { + "type": "string", + "description": "\u67e5\u8be2\u57ce\u5e02" + }, + "types": { + "type": "string", + "description": "POI\u7c7b\u578b\uff0c\u4f8b\u5982\u516c\u4ea4\u7ad9" + } + }, + "required": [ + "keywords" + ] + } + }, + { + "name": "maps_around_search", + "description": "\u5468\u8fb9\u641c\u7d22\uff0c\u6839\u636e\u7528\u6237\u8f93\u5165\u7684\u5173\u952e\u8bcd\u548c\u4e2d\u5fc3\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\u641c\u7d22\u5468\u56f4\u7684POI", + "inputSchema": { + "type": "object", + "properties": { + "keywords": { + "type": "string", + "description": "\u5173\u952e\u8bcd" + }, + "location": { + "type": "string", + "description": "\u4e2d\u5fc3\u70b9\u7ecf\u7eac\u5ea6\u5750\u6807\uff0c\u5750\u6807\u683c\u5f0f\u4e3a\uff1a\u7ecf\u5ea6,\u7eac\u5ea6" + }, + "radius": { + "type": "string", + "description": "\u641c\u7d22\u534a\u5f84" + } + }, + "required": [ + "location" + ] + } + } + ] + }, + "google-custom-search": { + "name": "google-custom-search", + "display_name": "Google Custom Search", + "description": "Provides Google Search results via the Google Custom Search API", + "repository": { + "type": "git", + "url": "https://github.com/adenot/mcp-google-search" + }, + "homepage": 
"https://github.com/adenot/mcp-google-search", + "author": { + "name": "adenot" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "Google", + "Custom Search", + "Webpage Reading" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@adenot/mcp-google-search" + ], + "env": { + "GOOGLE_API_KEY": "your-api-key-here", + "GOOGLE_SEARCH_ENGINE_ID": "your-search-engine-id-here" + } + } + }, + "examples": [ + { + "title": "Search Tool", + "description": "Perform web searches using Google Custom Search API.", + "prompt": "{\"name\":\"search\",\"arguments\":{\"query\":\"your search query\",\"num\":5}}" + }, + { + "title": "Webpage Reader Tool", + "description": "Extract content from any webpage.", + "prompt": "{\"name\":\"read_webpage\",\"arguments\":{\"url\":\"https://example.com\"}}" + } + ], + "arguments": { + "GOOGLE_API_KEY": { + "description": "Your Google API key for accessing the Google Custom Search API.", + "required": true, + "example": "AIzaSyA-xxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + "GOOGLE_SEARCH_ENGINE_ID": { + "description": "The unique identifier for your Custom Search Engine that you created on Google.", + "required": true, + "example": "012345678901234567890:abcdefghijk" + } + }, + "tools": [ + { + "name": "search", + "description": "Perform a web search query", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query" + }, + "num": { + "type": "number", + "description": "Number of results (1-10)", + "minimum": 1, + "maximum": 10 + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "bigquery": { + "name": "bigquery", + "display_name": "BigQuery", + "description": "Server implementation for Google BigQuery integration that enables direct BigQuery database access and querying capabilities", + "repository": { + "type": "git", + "url": "https://github.com/ergut/mcp-bigquery-server" + }, + "homepage": 
"https://github.com/ergut/mcp-bigquery-server", + "author": { + "name": "ergut" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "BigQuery", + "AI", + "LLM" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@ergut/mcp-bigquery-server", + "--project-id", + "${PROJECT_ID}", + "--location", + "${LOCATION}" + ] + } + }, + "arguments": { + "PROJECT_ID": { + "description": "Your Google Cloud project ID", + "required": true, + "example": "your-project-id" + }, + "LOCATION": { + "description": "BigQuery location, defaults to 'us-central1'.", + "required": false, + "example": "us-central1" + } + }, + "tools": [ + { + "name": "query", + "description": "Run a read-only BigQuery SQL query", + "inputSchema": { + "type": "object", + "properties": { + "sql": { + "type": "string" + }, + "maximumBytesBilled": { + "type": "string", + "description": "Maximum bytes billed (default: 1GB)", + "optional": true + } + } + } + } + ] + }, + "e2b-mcp-server": { + "display_name": "E2B MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/e2b-dev/mcp-server" + }, + "homepage": "https://e2b.dev", + "author": { + "name": "e2b-dev" + }, + "license": "[NOT GIVEN]", + "tags": [ + "code-interpreter", + "claude", + "sandbox" + ], + "arguments": { + "e2bApiKey": { + "description": "E2B API key", + "required": true + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@e2b/mcp-server" + ], + "env": { + "E2B_API_KEY": "${e2bApiKey}" + } + }, + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "e2b-mcp-server" + ], + "env": { + "E2B_API_KEY": "${e2bApiKey}" + } + } + }, + "name": "e2b-mcp-server", + "description": "This repository contains the source code for the [E2B](https://e2b.dev) MCP server.", + "categories": [ + "MCP Tools" + ], + "is_official": true, + "tools": [ + { + "name": "run_code", + "description": "Run python code in a secure sandbox by 
E2B. Using the Jupyter Notebook syntax.", + "inputSchema": { + "type": "object", + "properties": { + "code": { + "type": "string" + } + }, + "required": [ + "code" + ] + } + } + ] + }, + "bitable-mcp": { + "name": "bitable-mcp", + "display_name": "Bitable", + "description": "MCP server provides access to Lark Bitable through the Model Context Protocol. It allows users to interact with Bitable tables using predefined tools.", + "repository": { + "type": "git", + "url": "https://github.com/lloydzhou/bitable-mcp" + }, + "homepage": "https://github.com/lloydzhou/bitable-mcp", + "author": { + "name": "lloydzhou" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "Bitable", + "Lark" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "bitable-mcp" + ], + "env": { + "PERSONAL_BASE_TOKEN": "${PERSONAL_BASE_TOKEN}", + "APP_TOKEN": "${APP_TOKEN}" + } + }, + "python": { + "type": "python", + "command": "python", + "args": [ + "-m", + "bitable_mcp" + ], + "env": { + "PERSONAL_BASE_TOKEN": "${PERSONAL_BASE_TOKEN}", + "APP_TOKEN": "${APP_TOKEN}" + }, + "description": "Run with Python module (requires pip install)" + } + }, + "examples": [ + { + "title": "List Tables", + "description": "Lists all tables available in Bitable.", + "prompt": "list_table" + } + ], + "arguments": { + "PERSONAL_BASE_TOKEN": { + "description": "Personal base token required for authentication with the Bitable API.", + "required": true, + "example": "your_personal_base_token" + }, + "APP_TOKEN": { + "description": "Application token required for the Bitable server to function properly.", + "required": true, + "example": "your_app_token" + } + }, + "tools": [ + { + "name": "list_table", + "description": "list table for current bitable", + "inputSchema": { + "properties": {}, + "title": "list_tableArguments", + "type": "object" + } + }, + { + "name": "describe_table", + "description": "describe_table by table name", + "inputSchema": { + 
"properties": { + "name": { + "title": "Name", + "type": "string" + } + }, + "required": [ + "name" + ], + "title": "describe_tableArguments", + "type": "object" + } + }, + { + "name": "read_query", + "description": "read_query by sql", + "inputSchema": { + "properties": { + "sql": { + "title": "Sql", + "type": "string" + } + }, + "required": [ + "sql" + ], + "title": "read_queryArguments", + "type": "object" + } + } + ] + }, + "openapi-anyapi": { + "name": "openapi-anyapi", + "display_name": "Scalable OpenAPI Endpoint Discovery Tool", + "description": "Interact with large [OpenAPI](https://www.openapis.org/) docs using built-in semantic search for endpoints. Allows for customizing the MCP server prefix.", + "repository": { + "type": "git", + "url": "https://github.com/baryhuang/mcp-server-any-openapi" + }, + "homepage": "https://github.com/baryhuang/mcp-server-any-openapi", + "author": { + "name": "baryhuang" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "installations": { + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "OPENAPI_JSON_DOCS_URL=${OPENAPI_JSON_DOCS_URL}", + "-e", + "API_REQUEST_BASE_URL=${API_REQUEST_BASE_URL}", + "-e", + "MCP_API_PREFIX=${MCP_API_PREFIX}", + "buryhuang/mcp-server-any-openapi:latest" + ], + "env": { + "OPENAPI_JSON_DOCS_URL": "${OPENAPI_JSON_DOCS_URL}", + "API_REQUEST_BASE_URL": "${API_REQUEST_BASE_URL}", + "MCP_API_PREFIX": "${MCP_API_PREFIX}" + } + }, + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/baryhuang/mcp-server-any-openapi", + "src/mcp_server_any_openapi/server.py" + ], + "env": { + "OPENAPI_JSON_DOCS_URL": "${OPENAPI_JSON_DOCS_URL}", + "API_REQUEST_BASE_URL": "${API_REQUEST_BASE_URL}", + "MCP_API_PREFIX": "${MCP_API_PREFIX}" + } + } + }, + "tags": [ + "OpenAPI", + "API Discovery", + "Semantic Search", + "FastAPI" + ], + "examples": [ + { + "title": "Get API Endpoints", + "description": "Use this 
tool to find relevant API endpoints by describing your intent.", + "prompt": "Get prices for all stocks" + } + ], + "arguments": { + "OPENAPI_JSON_DOCS_URL": { + "description": "URL to the OpenAPI specification JSON (defaults to https://api.staging.readymojo.com/openapi.json)", + "required": false, + "example": "https://api.example.com/openapi.json" + }, + "API_REQUEST_BASE_URL": { + "description": "Optional base URL to override the default URL extracted from the OpenAPI document.", + "required": false, + "example": "https://api.finance.com" + }, + "MCP_API_PREFIX": { + "description": "Customizable tool namespace (default 'any_openapi'). Allows for control over tool naming.", + "required": false, + "example": "finance" + } + }, + "tools": [ + { + "name": "${MCP_API_PREFIX}_api_request_schema", + "description": "Get API endpoint schemas that match your intent. Returns endpoint details including path, method, parameters, and response formats.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Describe what you want to do with the API (e.g., 'Get user profile information', 'Create a new job posting')" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "${MCP_API_PREFIX}_make_request", + "description": "Make an actual REST API request with full control over method, headers, body, and parameters.", + "inputSchema": { + "type": "object", + "properties": { + "method": { + "type": "string", + "description": "HTTP method (GET, POST, PUT, DELETE, PATCH)", + "enum": [ + "GET", + "POST", + "PUT", + "DELETE", + "PATCH" + ] + }, + "url": { + "type": "string", + "description": "Fully qualified API URL (e.g., https://api.example.com/users/123)" + }, + "headers": { + "type": "object", + "description": "Request headers", + "additionalProperties": { + "type": "string" + } + }, + "query_params": { + "type": "object", + "description": "Query parameters", + "additionalProperties": { + "type": "string" + } + }, + 
"body": { + "type": "object", + "description": "Request body (for POST, PUT, PATCH)", + "additionalProperties": true + } + }, + "required": [ + "method", + "url" + ] + } + } + ] + }, + "blender": { + "name": "blender", + "display_name": "Blender", + "description": "Blender integration allowing prompt enabled 3D scene creation, modeling and manipulation.", + "repository": { + "type": "git", + "url": "https://github.com/ahujasid/blender-mcp" + }, + "homepage": "https://github.com/ahujasid/blender-mcp", + "author": { + "name": "ahujasid" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "Blender", + "Claude AI", + "3D Modeling" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "blender-mcp" + ] + } + }, + "tools": [ + { + "name": "get_scene_info", + "description": "Get detailed information about the current Blender scene", + "inputSchema": { + "properties": {}, + "title": "get_scene_infoArguments", + "type": "object" + } + }, + { + "name": "get_object_info", + "description": "\n Get detailed information about a specific object in the Blender scene.\n \n Parameters:\n - object_name: The name of the object to get information about\n ", + "inputSchema": { + "properties": { + "object_name": { + "title": "Object Name", + "type": "string" + } + }, + "required": [ + "object_name" + ], + "title": "get_object_infoArguments", + "type": "object" + } + }, + { + "name": "create_object", + "description": "\n Create a new object in the Blender scene.\n \n Parameters:\n - type: Object type (CUBE, SPHERE, CYLINDER, PLANE, CONE, TORUS, EMPTY, CAMERA, LIGHT)\n - name: Optional name for the object\n - location: Optional [x, y, z] location coordinates\n - rotation: Optional [x, y, z] rotation in radians\n - scale: Optional [x, y, z] scale factors (not used for TORUS)\n \n Torus-specific parameters (only used when type == \"TORUS\"):\n - align: How to align the torus ('WORLD', 'VIEW', or 'CURSOR')\n - major_segments: 
Number of segments for the main ring\n - minor_segments: Number of segments for the cross-section\n - mode: Dimension mode ('MAJOR_MINOR' or 'EXT_INT')\n - major_radius: Radius from the origin to the center of the cross sections\n - minor_radius: Radius of the torus' cross section\n - abso_major_rad: Total exterior radius of the torus\n - abso_minor_rad: Total interior radius of the torus\n - generate_uvs: Whether to generate a default UV map\n \n Returns:\n A message indicating the created object name.\n ", + "inputSchema": { + "properties": { + "type": { + "default": "CUBE", + "title": "Type", + "type": "string" + }, + "name": { + "default": null, + "title": "Name", + "type": "string" + }, + "location": { + "default": null, + "items": { + "type": "number" + }, + "title": "Location", + "type": "array" + }, + "rotation": { + "default": null, + "items": { + "type": "number" + }, + "title": "Rotation", + "type": "array" + }, + "scale": { + "default": null, + "items": { + "type": "number" + }, + "title": "Scale", + "type": "array" + }, + "align": { + "default": "WORLD", + "title": "Align", + "type": "string" + }, + "major_segments": { + "default": 48, + "title": "Major Segments", + "type": "integer" + }, + "minor_segments": { + "default": 12, + "title": "Minor Segments", + "type": "integer" + }, + "mode": { + "default": "MAJOR_MINOR", + "title": "Mode", + "type": "string" + }, + "major_radius": { + "default": 1.0, + "title": "Major Radius", + "type": "number" + }, + "minor_radius": { + "default": 0.25, + "title": "Minor Radius", + "type": "number" + }, + "abso_major_rad": { + "default": 1.25, + "title": "Abso Major Rad", + "type": "number" + }, + "abso_minor_rad": { + "default": 0.75, + "title": "Abso Minor Rad", + "type": "number" + }, + "generate_uvs": { + "default": true, + "title": "Generate Uvs", + "type": "boolean" + } + }, + "title": "create_objectArguments", + "type": "object" + } + }, + { + "name": "modify_object", + "description": "\n Modify an existing 
object in the Blender scene.\n \n Parameters:\n - name: Name of the object to modify\n - location: Optional [x, y, z] location coordinates\n - rotation: Optional [x, y, z] rotation in radians\n - scale: Optional [x, y, z] scale factors\n - visible: Optional boolean to set visibility\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "location": { + "default": null, + "items": { + "type": "number" + }, + "title": "Location", + "type": "array" + }, + "rotation": { + "default": null, + "items": { + "type": "number" + }, + "title": "Rotation", + "type": "array" + }, + "scale": { + "default": null, + "items": { + "type": "number" + }, + "title": "Scale", + "type": "array" + }, + "visible": { + "default": null, + "title": "Visible", + "type": "boolean" + } + }, + "required": [ + "name" + ], + "title": "modify_objectArguments", + "type": "object" + } + }, + { + "name": "delete_object", + "description": "\n Delete an object from the Blender scene.\n \n Parameters:\n - name: Name of the object to delete\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + } + }, + "required": [ + "name" + ], + "title": "delete_objectArguments", + "type": "object" + } + }, + { + "name": "set_material", + "description": "\n Set or create a material for an object.\n \n Parameters:\n - object_name: Name of the object to apply the material to\n - material_name: Optional name of the material to use or create\n - color: Optional [R, G, B] color values (0.0-1.0)\n ", + "inputSchema": { + "properties": { + "object_name": { + "title": "Object Name", + "type": "string" + }, + "material_name": { + "default": null, + "title": "Material Name", + "type": "string" + }, + "color": { + "default": null, + "items": { + "type": "number" + }, + "title": "Color", + "type": "array" + } + }, + "required": [ + "object_name" + ], + "title": "set_materialArguments", + "type": "object" + } + }, + { + "name": "execute_blender_code", 
+ "description": "\n Execute arbitrary Python code in Blender.\n \n Parameters:\n - code: The Python code to execute\n ", + "inputSchema": { + "properties": { + "code": { + "title": "Code", + "type": "string" + } + }, + "required": [ + "code" + ], + "title": "execute_blender_codeArguments", + "type": "object" + } + }, + { + "name": "get_polyhaven_categories", + "description": "\n Get a list of categories for a specific asset type on Polyhaven.\n \n Parameters:\n - asset_type: The type of asset to get categories for (hdris, textures, models, all)\n ", + "inputSchema": { + "properties": { + "asset_type": { + "default": "hdris", + "title": "Asset Type", + "type": "string" + } + }, + "title": "get_polyhaven_categoriesArguments", + "type": "object" + } + }, + { + "name": "search_polyhaven_assets", + "description": "\n Search for assets on Polyhaven with optional filtering.\n \n Parameters:\n - asset_type: Type of assets to search for (hdris, textures, models, all)\n - categories: Optional comma-separated list of categories to filter by\n \n Returns a list of matching assets with basic information.\n ", + "inputSchema": { + "properties": { + "asset_type": { + "default": "all", + "title": "Asset Type", + "type": "string" + }, + "categories": { + "default": null, + "title": "Categories", + "type": "string" + } + }, + "title": "search_polyhaven_assetsArguments", + "type": "object" + } + }, + { + "name": "download_polyhaven_asset", + "description": "\n Download and import a Polyhaven asset into Blender.\n \n Parameters:\n - asset_id: The ID of the asset to download\n - asset_type: The type of asset (hdris, textures, models)\n - resolution: The resolution to download (e.g., 1k, 2k, 4k)\n - file_format: Optional file format (e.g., hdr, exr for HDRIs; jpg, png for textures; gltf, fbx for models)\n \n Returns a message indicating success or failure.\n ", + "inputSchema": { + "properties": { + "asset_id": { + "title": "Asset Id", + "type": "string" + }, + "asset_type": { + 
"title": "Asset Type", + "type": "string" + }, + "resolution": { + "default": "1k", + "title": "Resolution", + "type": "string" + }, + "file_format": { + "default": null, + "title": "File Format", + "type": "string" + } + }, + "required": [ + "asset_id", + "asset_type" + ], + "title": "download_polyhaven_assetArguments", + "type": "object" + } + }, + { + "name": "set_texture", + "description": "\n Apply a previously downloaded Polyhaven texture to an object.\n \n Parameters:\n - object_name: Name of the object to apply the texture to\n - texture_id: ID of the Polyhaven texture to apply (must be downloaded first)\n \n Returns a message indicating success or failure.\n ", + "inputSchema": { + "properties": { + "object_name": { + "title": "Object Name", + "type": "string" + }, + "texture_id": { + "title": "Texture Id", + "type": "string" + } + }, + "required": [ + "object_name", + "texture_id" + ], + "title": "set_textureArguments", + "type": "object" + } + }, + { + "name": "get_polyhaven_status", + "description": "\n Check if PolyHaven integration is enabled in Blender.\n Returns a message indicating whether PolyHaven features are available.\n ", + "inputSchema": { + "properties": {}, + "title": "get_polyhaven_statusArguments", + "type": "object" + } + }, + { + "name": "get_hyper3d_status", + "description": "\n Check if Hyper3D Rodin integration is enabled in Blender.\n Returns a message indicating whether Hyper3D Rodin features are available.\n\n Don't emphasize the key type in the returned message, but sliently remember it. 
\n ", + "inputSchema": { + "properties": {}, + "title": "get_hyper3d_statusArguments", + "type": "object" + } + }, + { + "name": "generate_hyper3d_model_via_text", + "description": "\n Generate 3D asset using Hyper3D by giving description of the desired asset, and import the asset into Blender.\n The 3D asset has built-in materials.\n The generated model has a normalized size, so re-scaling after generation can be useful.\n \n Parameters:\n - text_prompt: A short description of the desired model in **English**.\n - bbox_condition: Optional. If given, it has to be a list of floats of length 3. Controls the ratio between [Length, Width, Height] of the model.\n\n Returns a message indicating success or failure.\n ", + "inputSchema": { + "properties": { + "text_prompt": { + "title": "Text Prompt", + "type": "string" + }, + "bbox_condition": { + "default": null, + "items": { + "type": "number" + }, + "title": "Bbox Condition", + "type": "array" + } + }, + "required": [ + "text_prompt" + ], + "title": "generate_hyper3d_model_via_textArguments", + "type": "object" + } + }, + { + "name": "generate_hyper3d_model_via_images", + "description": "\n Generate 3D asset using Hyper3D by giving images of the wanted asset, and import the generated asset into Blender.\n The 3D asset has built-in materials.\n The generated model has a normalized size, so re-scaling after generation can be useful.\n \n Parameters:\n - input_image_paths: The **absolute** paths of input images. Even if only one image is provided, wrap it into a list. Required if Hyper3D Rodin in MAIN_SITE mode.\n - input_image_urls: The URLs of input images. Even if only one image is provided, wrap it into a list. Required if Hyper3D Rodin in FAL_AI mode.\n - bbox_condition: Optional. If given, it has to be a list of ints of length 3. 
Controls the ratio between [Length, Width, Height] of the model.\n\n Only one of {input_image_paths, input_image_urls} should be given at a time, depending on the Hyper3D Rodin's current mode.\n Returns a message indicating success or failure.\n ", + "inputSchema": { + "properties": { + "input_image_paths": { + "default": null, + "items": { + "type": "string" + }, + "title": "Input Image Paths", + "type": "array" + }, + "input_image_urls": { + "default": null, + "items": { + "type": "string" + }, + "title": "Input Image Urls", + "type": "array" + }, + "bbox_condition": { + "default": null, + "items": { + "type": "number" + }, + "title": "Bbox Condition", + "type": "array" + } + }, + "title": "generate_hyper3d_model_via_imagesArguments", + "type": "object" + } + }, + { + "name": "poll_rodin_job_status", + "description": "\n Check if the Hyper3D Rodin generation task is completed.\n\n For Hyper3D Rodin mode MAIN_SITE:\n Parameters:\n - subscription_key: The subscription_key given in the generate model step.\n\n Returns a list of status. The task is done if all status are \"Done\".\n If \"Failed\" showed up, the generating process failed.\n This is a polling API, so only proceed if the status are finally determined (\"Done\" or \"Canceled\").\n\n For Hyper3D Rodin mode FAL_AI:\n Parameters:\n - request_id: The request_id given in the generate model step.\n\n Returns the generation task status. 
The task is done if status is \"COMPLETED\".\n The task is in progress if status is \"IN_PROGRESS\".\n If status other than \"COMPLETED\", \"IN_PROGRESS\", \"IN_QUEUE\" showed up, the generating process might be failed.\n This is a polling API, so only proceed if the status are finally determined (\"COMPLETED\" or some failed state).\n ", + "inputSchema": { + "properties": { + "subscription_key": { + "default": null, + "title": "Subscription Key", + "type": "string" + }, + "request_id": { + "default": null, + "title": "Request Id", + "type": "string" + } + }, + "title": "poll_rodin_job_statusArguments", + "type": "object" + } + }, + { + "name": "import_generated_asset", + "description": "\n Import the asset generated by Hyper3D Rodin after the generation task is completed.\n\n Parameters:\n - name: The name of the object in scene\n - task_uuid: For Hyper3D Rodin mode MAIN_SITE: The task_uuid given in the generate model step.\n - request_id: For Hyper3D Rodin mode FAL_AI: The request_id given in the generate model step.\n\n Only give one of {task_uuid, request_id} based on the Hyper3D Rodin Mode!\n Return if the asset has been imported successfully.\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "task_uuid": { + "default": null, + "title": "Task Uuid", + "type": "string" + }, + "request_id": { + "default": null, + "title": "Request Id", + "type": "string" + } + }, + "required": [ + "name" + ], + "title": "import_generated_assetArguments", + "type": "object" + } + } + ] + }, + "virtual-location-google-street-view-etc": { + "name": "virtual-location-google-street-view-etc", + "display_name": "Virtual Traveling Bot", + "description": "Integrates Google Map, Google Street View, PixAI, Stability.ai, ComfyUI API and Bluesky to provide a virtual location simulation in LLM (written in Effect.ts)", + "repository": { + "type": "git", + "url": "https://github.com/mfukushim/map-traveler-mcp" + }, + "homepage": 
"https://github.com/mfukushim/map-traveler-mcp", + "author": { + "name": "mfukushim" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "Google Maps", + "Avatar", + "Virtual Travel" + ], + "tools": [ + { + "name": "tips", + "description": "Inform you of recommended actions for your device", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get_setting", + "description": "Get current setting", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get_traveler_info", + "description": "get a traveler's setting.For example, traveler's name, the language traveler speak, Personality and speaking habits, etc.", + "inputSchema": { + "type": "object", + "properties": { + "settings": {} + } + } + }, + { + "name": "set_traveler_info", + "description": "set a traveler's setting.For example, traveler's name, the language traveler speak, Personality and speaking habits, etc.", + "inputSchema": { + "type": "object", + "properties": { + "settings": { + "type": "string", + "description": "traveler's setting. traveler's name, the language traveler speak, etc." + } + }, + "required": [ + "settings" + ] + } + }, + { + "name": "set_avatar_prompt", + "description": "set a traveler's avatar prompt. A prompt for AI image generation to specify the appearance of a traveler's avatar", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "traveler's avatar AI image generation prompt." 
+ } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "reset_avatar_prompt", + "description": "reset to default traveler's avatar prompt.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "start_journey", + "description": "Start the journey to destination", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "stop_journey", + "description": "Stop the journey", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "reach_a_percentage_of_destination", + "description": "Reach a specified percentage of the destination", + "inputSchema": { + "type": "object", + "properties": { + "timeElapsedPercentage": { + "type": "number", + "description": "Percent progress towards destination. (0~100)" + } + }, + "required": [ + "timeElapsedPercentage" + ] + } + }, + { + "name": "get_current_view_info", + "description": "Get the address of the current location and information on nearby facilities,view snapshot", + "inputSchema": { + "type": "object", + "properties": { + "includePhoto": { + "type": "boolean", + "description": "Get scenery photos of current location" + }, + "includeNearbyFacilities": { + "type": "boolean", + "description": "Get information on nearby facilities" + } + } + } + }, + { + "name": "get_traveler_location", + "description": "Get the address of the current traveler's location", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get_sns_mentions", + "description": "Get recent social media mentions", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get_sns_feeds", + "description": "Get recent social media posts from fellow travelers feeds", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "post_sns_writer", + "description": "Post your recent travel experiences to social media for fellow travelers and readers.", + "inputSchema": { + "type": "object", + "properties": { + 
"message": { + "type": "string", + "description": "A description of the journey. important: Do not use offensive language." + } + }, + "required": [ + "message" + ] + } + }, + { + "name": "reply_sns_writer", + "description": "Write a reply to the article with the specified ID.", + "inputSchema": { + "type": "object", + "properties": { + "message": { + "type": "string", + "description": "A description of the reply article. important: Do not use offensive language." + }, + "id": { + "type": "string", + "description": "The ID of the original post to which you want to add a reply." + } + }, + "required": [ + "message", + "id" + ] + } + }, + { + "name": "add_like", + "description": "Add a like to the specified post", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the post to like." + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "set_current_location", + "description": "Set my current address", + "inputSchema": { + "type": "object", + "properties": { + "address": { + "type": "string", + "description": "address to set" + } + }, + "required": [ + "address" + ] + } + }, + { + "name": "get_destination_address", + "description": "get a address of destination location", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "set_destination_address", + "description": "set a address of destination", + "inputSchema": { + "type": "object", + "properties": { + "address": { + "type": "string", + "description": "address of destination" + } + }, + "required": [ + "address" + ] + } + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@mfukushim/map-traveler-mcp" + ], + "env": { + "GoogleMapApi_key": "${GoogleMapApi_key}", + "mapApi_url": "${mapApi_url}", + "time_scale": "${time_scale}", + "sqlite_path": "${sqlite_path}", + "rembg_path": "${rembg_path}", + "remBgUrl": "${remBgUrl}", + "pixAi_key": "${pixAi_key}", + "sd_key": 
"${sd_key}", + "pixAi_modelId": "${pixAi_modelId}", + "comfy_url": "${comfy_url}", + "comfy_workflow_t2i": "${comfy_workflow_t2i}", + "comfy_workflow_i2i": "${comfy_workflow_i2i}", + "comfy_params": "${comfy_params}", + "fixed_model_prompt": "${fixed_model_prompt}", + "bodyAreaRatio": "${bodyAreaRatio}", + "bodyHWRatio": "${bodyHWRatio}", + "bodyWindowRatioW": "${bodyWindowRatioW}", + "bodyWindowRatioH": "${bodyWindowRatioH}", + "bs_id": "${bs_id}", + "bs_pass": "${bs_pass}", + "bs_handle": "${bs_handle}", + "filter_tools": "${filter_tools}", + "moveMode": "${moveMode}", + "image_width": "${image_width}", + "DATABASE_URL": "${DATABASE_URL}" + } + } + }, + "examples": [ + { + "title": "Travel to Tokyo", + "description": "Instruct the avatar to travel to Tokyo Station.", + "prompt": "Go to Tokyo Station." + }, + { + "title": "Current Location Info", + "description": "Get the current location information of the avatar.", + "prompt": "Where are you now?" + } + ], + "arguments": { + "GoogleMapApi_key": { + "description": "API key for accessing Google Maps services.", + "required": true, + "example": "YOUR_GOOGLE_MAP_API_KEY" + }, + "mapApi_url": { + "description": "Custom endpoint for the Map API, if any; otherwise, the default endpoint is used.", + "required": false, + "example": "https://your-custom-map-api.com" + }, + "time_scale": { + "description": "Scale factor to adjust the travel time based on real roads duration; default is 4.", + "required": false, + "example": "5" + }, + "sqlite_path": { + "description": "Path for saving the SQLite database file. 
It determines where the travel log will be stored.", + "required": true, + "example": "%USERPROFILE%/Desktop/traveler.sqlite" + }, + "rembg_path": { + "description": "Absolute path of the installed rembg command line interface for removing backgrounds from images.", + "required": true, + "example": "C:\\path\\to\\your\\rembg.exe" + }, + "remBgUrl": { + "description": "URL for the rembg API service if used; this is an alternative to the command line interface.", + "required": false, + "example": "http://rembg:7000" + }, + "pixAi_key": { + "description": "API key for accessing PixAI image generation services; either this or sd_key must be set to use image generation.", + "required": true, + "example": "YOUR_PIXAI_API_KEY" + }, + "sd_key": { + "description": "API key for accessing Stability.ai image generation services; either this or pixAi_key must be set.", + "required": true, + "example": "YOUR_STABILITY_AI_API_KEY" + }, + "pixAi_modelId": { + "description": "ID for the PixAI model to be used, if not set, the default model will be used.", + "required": false, + "example": "1648918127446573124" + }, + "comfy_url": { + "description": "URL to the ComfyUI API for image generation; must be set if using ComfyUI for this purpose.", + "required": false, + "example": "http://192.168.1.100:8188" + }, + "comfy_workflow_t2i": { + "description": "Path to the workflow JSON file for text-to-image conversion in ComfyUI.", + "required": false, + "example": "C:\\path\\to\\workflow\\t2i.json" + }, + "comfy_workflow_i2i": { + "description": "Path to the workflow JSON file for image-to-image conversion in ComfyUI.", + "required": false, + "example": "C:\\path\\to\\workflow\\i2i.json" + }, + "comfy_params": { + "description": "Parameters for the ComfyUI workflow in key-value format, received during the request.", + "required": false, + "example": "key1=value1,key2=value2" + }, + "fixed_model_prompt": { + "description": "A fixed prompt for avatar generation that prevents changes during 
conversations.", + "required": false, + "example": "Generate a friendly avatar." + }, + "bodyAreaRatio": { + "description": "Acceptable ratio for the avatar image area; affects how much of the image is used for the avatar.", + "required": false, + "example": "0.042" + }, + "bodyHWRatio": { + "description": "Acceptable aspect ratios for the avatar image; ensures correct proportions for the avatar.", + "required": false, + "example": "1.5~2.3" + }, + "bodyWindowRatioW": { + "description": "Horizontal ratio for the avatar composite window; affects layout.", + "required": false, + "example": "0.5" + }, + "bodyWindowRatioH": { + "description": "Aspect ratio for the avatar composite window; also affects layout.", + "required": false, + "example": "0.75" + }, + "bs_id": { + "description": "Bluesky SNS registration address for posting travel updates.", + "required": false, + "example": "YOUR_BSKY_ID" + }, + "bs_pass": { + "description": "Bluesky SNS password for the dedicated account used for posting.", + "required": false, + "example": "YOUR_BSKY_PASSWORD" + }, + "bs_handle": { + "description": "Bluesky SNS handle name for the account; used in the posts.", + "required": false, + "example": "myusername.bsky.social" + }, + "filter_tools": { + "description": "Settings to filter the tools available for use; all tools will be available by default.", + "required": false, + "example": "tips,set_traveler_location" + }, + "moveMode": { + "description": "Indicates whether the movement mode is realtime or skip; default is realtime.", + "required": false, + "example": "realtime" + }, + "image_width": { + "description": "Width of the generated output image in pixels; the default is 512.", + "required": false, + "example": "512" + }, + "DATABASE_URL": { + "description": "Database URL for persistent storage; used if a different database should be connected.", + "required": false, + "example": "mysql://user:password@host/dbname" + } + } + }, + "multicluster-mcp-sever": { + "name": 
"multicluster-mcp-sever", + "display_name": "Multi-Cluster Server", + "description": "The gateway for GenAI systems to interact with multiple Kubernetes clusters.", + "repository": { + "type": "git", + "url": "https://github.com/yanmxa/multicluster-mcp-server" + }, + "homepage": "https://github.com/yanmxa/multicluster-mcp-server", + "author": { + "name": "yanmxa" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "Generative AI", + "Kubernetes" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/yanmxa/multicluster-mcp-server" + ] + } + }, + "tools": [ + { + "name": "kubectl_executor", + "description": "Securely execute a kubectl command.", + "inputSchema": { + "command": { + "type": "string", + "description": "The full kubectl command to execute. Must start with 'kubectl'." + }, + "cluster": { + "type": "string", + "description": "Optional cluster name for multi-cluster environments. Specify only if explicitly provided." + } + }, + "required": [ + "command" + ] + }, + { + "name": "clusters", + "description": "Retrieves a list of Kubernetes clusters (also known as managed clusters or spoke clusters).", + "inputSchema": {}, + "required": [] + }, + { + "name": "connect_cluster_via_admin", + "description": "Generates the KUBECONFIG for the cluster using the ServiceAccount and binds it to the cluster-admin role.", + "inputSchema": { + "cluster": { + "type": "string", + "description": "The target cluster where the ServiceAccount will be created." + } + }, + "required": [ + "cluster" + ] + }, + { + "name": "apply_service_account_with_cluster_role", + "description": "Creates a ServiceAccount in the specified cluster and optionally binds it to a ClusterRole. If no ClusterRole is provided, only the ServiceAccount and kubeconfig are created.", + "inputSchema": { + "cluster": { + "type": "string", + "description": "The cluster where the ServiceAccount will be created." 
+ }, + "clusterRole": { + "type": "object", + "description": "Optional ClusterRole object defining permissions for the ServiceAccount." + } + }, + "required": [ + "cluster" + ] + } + ] + }, + "txyz-search": { + "name": "txyz-search", + "description": "A Model Context Protocol (MCP) server for TXYZ Search API. Provides tools for academic and scholarly search, general web search, and smart search.", + "display_name": "TXYZ Search", + "repository": { + "type": "git", + "url": "https://github.com/pathintegral-institute/mcp.science" + }, + "homepage": "https://github.com/pathintegral-institute/mcp.science/tree/main/servers/txyz-search", + "author": { + "name": "pathintegral-institute" + }, + "license": "MIT", + "tags": [ + "search", + "academic", + "scholarly", + "web search" + ], + "arguments": { + "TXYZ_API_KEY": { + "description": "API key from [TXYZ Platform](https://platform.txyz.ai/console)", + "required": true, + "example": "your-txyz-api-key" + } + }, + "tools": [ + { + "name": "txyz_search_scholar", + "description": "Academic and scholarly search for papers, articles, and other academic materials", + "prompt": "Find recent research papers about quantum computing" + }, + { + "name": "txyz_search_web", + "description": "General web search functionality for resources from web pages", + "prompt": "Find information about the latest smartphone releases" + }, + { + "name": "txyz_search_smart", + "description": "Automatically selects the best search type based on the query (may include either scholarly materials or web pages)", + "prompt": "What are the latest developments in climate change research?" 
+ } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/pathintegral-institute/mcp.science#subdirectory=servers/txyz-search", + "mcp-txyz-search" + ], + "env": { + "TXYZ_API_KEY": "${TXYZ_API_KEY}" + }, + "description": "Run using uvx" + } + }, + "examples": [ + { + "title": "Academic Search", + "description": "Search for academic papers on a topic", + "prompt": "Find recent research papers about quantum computing" + }, + { + "title": "Web Search", + "description": "Search the web for information", + "prompt": "Find information about the latest smartphone releases" + }, + { + "title": "Smart Search", + "description": "Let the system choose the best search type", + "prompt": "What are the latest developments in climate change research?" + } + ], + "categories": [ + "Web Services" + ], + "is_official": true + }, + "google-maps": { + "name": "google-maps", + "display_name": "Google Maps", + "description": "Location services, directions, and place details", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/google-maps", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "Google Maps", + "Geolocation" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-google-maps" + ], + "env": { + "GOOGLE_MAPS_API_KEY": "${GOOGLE_MAPS_API_KEY}" + } + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GOOGLE_MAPS_API_KEY", + "mcp/google-maps" + ], + "env": { + "GOOGLE_MAPS_API_KEY": "${GOOGLE_MAPS_API_KEY}" + } + } + }, + "arguments": { + "GOOGLE_MAPS_API_KEY": { + "description": "Your Google Maps API key obtained from the Google Developers Console.", + "required": true, + "example": 
"AIzaSyD..." + } + }, + "tools": [ + { + "name": "maps_geocode", + "description": "Convert an address into geographic coordinates", + "inputSchema": { + "type": "object", + "properties": { + "address": { + "type": "string", + "description": "The address to geocode" + } + }, + "required": [ + "address" + ] + } + }, + { + "name": "maps_reverse_geocode", + "description": "Convert coordinates into an address", + "inputSchema": { + "type": "object", + "properties": { + "latitude": { + "type": "number", + "description": "Latitude coordinate" + }, + "longitude": { + "type": "number", + "description": "Longitude coordinate" + } + }, + "required": [ + "latitude", + "longitude" + ] + } + }, + { + "name": "maps_search_places", + "description": "Search for places using Google Places API", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query" + }, + "location": { + "type": "object", + "properties": { + "latitude": { + "type": "number" + }, + "longitude": { + "type": "number" + } + }, + "description": "Optional center point for the search" + }, + "radius": { + "type": "number", + "description": "Search radius in meters (max 50000)" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "maps_place_details", + "description": "Get detailed information about a specific place", + "inputSchema": { + "type": "object", + "properties": { + "place_id": { + "type": "string", + "description": "The place ID to get details for" + } + }, + "required": [ + "place_id" + ] + } + }, + { + "name": "maps_distance_matrix", + "description": "Calculate travel distance and time for multiple origins and destinations", + "inputSchema": { + "type": "object", + "properties": { + "origins": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Array of origin addresses or coordinates" + }, + "destinations": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Array of destination addresses 
or coordinates" + }, + "mode": { + "type": "string", + "description": "Travel mode (driving, walking, bicycling, transit)", + "enum": [ + "driving", + "walking", + "bicycling", + "transit" + ] + } + }, + "required": [ + "origins", + "destinations" + ] + } + }, + { + "name": "maps_elevation", + "description": "Get elevation data for locations on the earth", + "inputSchema": { + "type": "object", + "properties": { + "locations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "latitude": { + "type": "number" + }, + "longitude": { + "type": "number" + } + }, + "required": [ + "latitude", + "longitude" + ] + }, + "description": "Array of locations to get elevation for" + } + }, + "required": [ + "locations" + ] + } + }, + { + "name": "maps_directions", + "description": "Get directions between two points", + "inputSchema": { + "type": "object", + "properties": { + "origin": { + "type": "string", + "description": "Starting point address or coordinates" + }, + "destination": { + "type": "string", + "description": "Ending point address or coordinates" + }, + "mode": { + "type": "string", + "description": "Travel mode (driving, walking, bicycling, transit)", + "enum": [ + "driving", + "walking", + "bicycling", + "transit" + ] + } + }, + "required": [ + "origin", + "destination" + ] + } + } + ], + "is_official": true + }, + "mcp-server-starrocks": { + "display_name": "StarRocks Official MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/StarRocks/mcp-server-starrocks" + }, + "homepage": "https://github.com/StarRocks/mcp-server-starrocks", + "author": { + "name": "StarRocks" + }, + "license": "Apache-2.0", + "tags": [ + "database", + "sql", + "starrocks" + ], + "arguments": { + "STARROCKS_HOST": { + "description": "StarRocks database host", + "required": false, + "example": "localhost" + }, + "STARROCKS_PORT": { + "description": "StarRocks database port", + "required": false, + "example": "9030" + }, + "STARROCKS_USER": { + 
"description": "StarRocks database user", + "required": false, + "example": "root" + }, + "STARROCKS_PASSWORD": { + "description": "StarRocks database password", + "required": false, + "example": "" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-starrocks" + ], + "env": { + "STARROCKS_HOST": "localhost", + "STARROCKS_PORT": "9030", + "STARROCKS_USER": "root", + "STARROCKS_PASSWORD": "" + }, + "description": "Run using Python with uv package manager", + "recommended": true + } + }, + "examples": [], + "name": "mcp-server-starrocks", + "description": "The StarRocks MCP Server acts as a bridge between AI assistants and StarRocks databases, allowing for direct SQL execution and database exploration without requiring complex setup or configuration.", + "categories": [ + "Databases" + ], + "tools": [ + { + "name": "read_query", + "description": "Execute a SELECT query or commands that return a ResultSet", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "SQL query to execute" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "write_query", + "description": "Execute an DDL/DML or other StarRocks command that do not have a ResultSet", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "SQL to execute" + } + }, + "required": [ + "query" + ] + } + } + ], + "prompts": [], + "resources": [ + { + "uri": "starrocks:///databases", + "name": "All Databases", + "description": "List all databases in StarRocks", + "mimeType": "text/plain", + "annotations": null + } + ], + "is_official": true + }, + "mcp-gitee": { + "display_name": "Gitee MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/oschina/mcp-gitee" + }, + "homepage": "https://gitee.com/oschina/mcp-gitee", + "author": { + "name": "oschina" + }, + "license": "MIT", + "tags": [ + "gitee", + "mcp", + "repository", + 
"issues", + "pull requests" + ], + "arguments": { + "GITEE_ACCESS_TOKEN": { + "description": "Gitee access token", + "required": true, + "example": "" + }, + "api-base": { + "description": "Gitee API base URL", + "required": false, + "example": "https://gitee.com/api/v5" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "go", + "args": [ + "install", + "gitee.com/oschina/mcp-gitee@latest" + ], + "description": "Install using Go", + "recommended": false + } + }, + "examples": [ + { + "title": "Get repository Issues", + "description": "Retrieve issues from a Gitee repository", + "prompt": "Use the list_repo_issues tool to get all issues from my repository" + }, + { + "title": "Create Pull Request", + "description": "Implement code and create a Pull Request based on Issue details", + "prompt": "Create a pull request to fix issue #123 in my repository" + }, + { + "title": "Comment & Close Issue", + "description": "Add a comment to an issue and close it", + "prompt": "Comment on issue #123 saying the fix is complete and close the issue" + } + ], + "name": "mcp-gitee", + "description": "Gitee MCP Server is a Model Context Protocol (MCP) server implementation for Gitee. 
It provides a set of tools for interacting with Gitee's API, allowing AI assistants to manage repositories, issues, pull requests, and more.", + "categories": [ + "Dev Tools" + ], + "is_official": true + }, + "chronulus-mcp": { + "display_name": "Chronulus MCP", + "repository": { + "type": "git", + "url": "https://github.com/ChronulusAI/chronulus-mcp" + }, + "license": "[NOT GIVEN]", + "homepage": "https://www.chronulus.com", + "author": { + "name": "ChronulusAI" + }, + "tags": [ + "forecasting", + "prediction", + "AI agents" + ], + "arguments": { + "CHRONULUS_API_KEY": { + "description": "API key for Chronulus services", + "required": true, + "example": "" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "chronulus-mcp" + ], + "env": { + "CHRONULUS_API_KEY": "" + }, + "description": "Install and run using uvx" + }, + "pip": { + "type": "python", + "command": "python", + "args": [ + "-m", + "chronulus_mcp" + ], + "package": "chronulus-mcp", + "env": { + "CHRONULUS_API_KEY": "" + }, + "description": "Install using pip from PyPI" + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "CHRONULUS_API_KEY", + "chronulus-mcp" + ], + "env": { + "CHRONULUS_API_KEY": "" + }, + "description": "Run using Docker" + } + }, + "name": "chronulus-mcp", + "description": "Chronulus AI forecasting and prediction agents.", + "categories": [ + "MCP Tools" + ], + "is_official": true, + "tools": [ + { + "name": "create_chronulus_session", + "description": "\nA tool that creates a new Chronulus Session and returns a session_id\n\nWhen to use this tool:\n- Use this tool when a user has requested a forecast or prediction for a new use case\n- Before calling this tool make sure you have enough information to write a well-defined situation and task.
You might\nneed to ask clarifying questions in order to get this from the user.\n- The same session_id can be reused as long as the situation and task remain the same\n- If user wants to forecast a different use case, create a new session and then use that\n\nHow to use this tool:\n- To create a session, you need to provide a situation and task that describe the forecasting use case \n- If the user has not provided enough detail for you to decompose the use case into a \n situation (broad or background context) and task (specific requirements for the forecast), \n ask them to elaborate since more detail will result in a better / more accurate forecast.\n- Once created, this will generate a unique session_id that can be used to when calling other tools about this use case.\n", + "inputSchema": { + "properties": { + "name": { + "description": "A short descriptive name for the use case defined in the session.", + "title": "Name", + "type": "string" + }, + "situation": { + "description": "The broader context for the use case", + "title": "Situation", + "type": "string" + }, + "task": { + "description": "Specific details on the forecasting or prediction task.", + "title": "Task", + "type": "string" + } + }, + "required": [ + "name", + "situation", + "task" + ], + "title": "create_chronulus_sessionArguments", + "type": "object" + } + }, + { + "name": "create_forecasting_agent_and_get_forecast", + "description": "\nThis tool creates a NormalizedForecaster agent with your session and input data model and then provides a forecast input \ndata to the agent and returns the prediction data and text explanation from the agent.\n\nWhen to use this tool:\n- Use this tool to request a forecast from Chronulus\n- This tool is specifically made to forecast values between 0 and 1 and does not require historical data\n- The prediction can be thought of as seasonal weights, probabilities, or shares of something as in the decimal representation of a percent\n\nHow to use this tool:\n- 
First, make sure you have a session_id for the forecasting or prediction use case.\n- Next, think about the features / characteristics most suitable for producing the requested forecast and then \ncreate an input_data_model that corresponds to the input_data you will provide for the thing being forecasted.\n- Remember to pass all relevant information to Chronulus including text and images provided by the user. \n- If a user gives you files about a thing you are forecasting or predicting, you should pass these as inputs to the \nagent using one of the following types: \n - ImageFromFile\n - List[ImageFromFile]\n - TextFromFile\n - List[TextFromFile]\n - PdfFromFile\n - List[PdfFromFile]\n- If you have a large amount of text (over 500 words) to pass to the agent, you should use the Text or List[Text] field types\n- Finally, add information about the forecasting horizon and time scale requested by the user\n- Assume the dates and datetimes in the prediction results are already converted to the appropriate local timezone if location is a factor in the use case. So do not try to convert from UTC to local time when plotting.\n- When plotting the predictions, use a Rechart time series with the appropriate axes labeled and with the prediction explanation displayed as a caption below the plot\n", + "inputSchema": { + "$defs": { + "InputField": { + "properties": { + "name": { + "description": "Field name. Should be a valid python variable name.", + "title": "Name", + "type": "string" + }, + "description": { + "description": "A description of the value you will pass in the field.", + "title": "Description", + "type": "string" + }, + "type": { + "default": "str", + "description": "The type of the field. \n ImageFromFile takes a single named-argument, 'file_path' as input which should be absolute path to the image to be included. So you should provide this input as json, eg. 
{'file_path': '/path/to/image'}.\n ", + "enum": [ + "str", + "Text", + "List[Text]", + "TextFromFile", + "List[TextFromFile]", + "PdfFromFile", + "List[PdfFromFile]", + "ImageFromFile", + "List[ImageFromFile]" + ], + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "description" + ], + "title": "InputField", + "type": "object" + } + }, + "properties": { + "session_id": { + "description": "The session_id for the forecasting or prediction use case", + "title": "Session Id", + "type": "string" + }, + "input_data_model": { + "description": "Metadata on the fields you will include in the input_data.", + "items": { + "$ref": "#/$defs/InputField" + }, + "title": "Input Data Model", + "type": "array" + }, + "input_data": { + "additionalProperties": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + } + ] + }, + "description": "The forecast inputs that you will pass to the chronulus agent to make the prediction. The keys of the dict should correspond to the InputField name you provided in input_fields.", + "title": "Input Data", + "type": "object" + }, + "forecast_start_dt_str": { + "description": "The datetime str in '%Y-%m-%d %H:%M:%S' format of the first value in the forecast horizon.", + "title": "Forecast Start Dt Str", + "type": "string" + }, + "time_scale": { + "default": "days", + "description": "The times scale of the forecast horizon. Valid time scales are 'hours', 'days', and 'weeks'.", + "title": "Time Scale", + "type": "string" + }, + "horizon_len": { + "default": 60, + "description": "The integer length of the forecast horizon. 
Eg., 60 if a 60 day forecast was requested.", + "title": "Horizon Len", + "type": "integer" + } + }, + "required": [ + "session_id", + "input_data_model", + "input_data", + "forecast_start_dt_str" + ], + "title": "create_forecasting_agent_and_get_forecastArguments", + "type": "object" + } + }, + { + "name": "reuse_forecasting_agent_and_get_forecast", + "description": "\nThis tool creates a NormalizedForecaster agent with your session and input data model and then provides a forecast input \ndata to the agent and returns the prediction data and text explanation from the agent.\n\nWhen to use this tool:\n- Use this tool to request a forecast from Chronulus\n- This tool is specifically made to forecast values between 0 and 1 and does not require historical data\n- The prediction can be thought of as seasonal weights, probabilities, or shares of something as in the decimal representation of a percent\n\nHow to use this tool:\n- First, make sure you have a session_id for the forecasting or prediction use case.\n- Next, think about the features / characteristics most suitable for producing the requested forecast and then \ncreate an input_data_model that corresponds to the input_data you will provide for the thing being forecasted.\n- Remember to pass all relevant information to Chronulus including text and images provided by the user. 
\n- If a user gives you files about a thing you are forecasting or predicting, you should pass these as inputs to the \nagent using one of the following types: \n - ImageFromFile\n - List[ImageFromFile]\n - TextFromFile\n - List[TextFromFile]\n - PdfFromFile\n - List[PdfFromFile]\n- If you have a large amount of text (over 500 words) to pass to the agent, you should use the Text or List[Text] field types\n- Finally, add information about the forecasting horizon and time scale requested by the user\n- Assume the dates and datetimes in the prediction results are already converted to the appropriate local timezone if location is a factor in the use case. So do not try to convert from UTC to local time when plotting.\n- When plotting the predictions, use a Rechart time series with the appropriate axes labeled and with the prediction explanation displayed as a caption below the plot\n", + "inputSchema": { + "properties": { + "agent_id": { + "description": "The agent_id for the forecasting or prediction use case and previously defined input_data_model", + "title": "Agent Id", + "type": "string" + }, + "input_data": { + "additionalProperties": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + } + ] + }, + "description": "The forecast inputs that you will pass to the chronulus agent to make the prediction. The keys of the dict should correspond to the InputField name you provided in input_fields.", + "title": "Input Data", + "type": "object" + }, + "forecast_start_dt_str": { + "description": "The datetime str in '%Y-%m-%d %H:%M:%S' format of the first value in the forecast horizon.", + "title": "Forecast Start Dt Str", + "type": "string" + }, + "time_scale": { + "default": "days", + "description": "The times scale of the forecast horizon. 
Valid time scales are 'hours', 'days', and 'weeks'.", + "title": "Time Scale", + "type": "string" + }, + "horizon_len": { + "default": 60, + "description": "The integer length of the forecast horizon. Eg., 60 if a 60 day forecast was requested.", + "title": "Horizon Len", + "type": "integer" + } + }, + "required": [ + "agent_id", + "input_data", + "forecast_start_dt_str" + ], + "title": "reuse_forecasting_agent_and_get_forecastArguments", + "type": "object" + } + }, + { + "name": "rescale_forecast", + "description": "\nA tool that rescales the prediction data (values between 0 and 1) from the NormalizedForecaster agent to scale required for a use case\n\nWhen to use this tool:\n- Use this tool when there is enough information from the user or use cases to determine a reasonable min and max for the forecast predictions\n- Do not attempt to rescale or denormalize the predictions on your own without using this tool.\n- Also, if the best min and max for the use case is 0 and 1, then no rescaling is needed since that is already the scale of the predictions.\n- If a user requests to convert from probabilities to a unit in levels, be sure to caveat your use of this tool by noting that\n probabilities do not always scale uniformly to levels. Rescaling can be used as a rough first-pass estimate. But for best results, \n it would be better to start a new Chronulus forecasting use case predicting in levels from the start.\n \nHow to use this tool:\n- To use this tool present prediction_id from the normalized prediction and the min and max as floats\n- If the user is also changing units, consider if the units will be inverted and set the inverse scale to True if needed.\n- When plotting the rescaled predictions, use a Rechart time series plot with the appropriate axes labeled and include the chronulus \n prediction explanation as a caption below the plot. \n- If you would like to add additional notes about the scaled series, put these below the original prediction explanation. 
\n", + "inputSchema": { + "properties": { + "prediction_id": { + "description": "The prediction_id from a prediction result", + "title": "Prediction Id", + "type": "string" + }, + "y_min": { + "description": "The expected smallest value for the use case. E.g., for product sales, 0 would be the least possible value for sales.", + "title": "Y Min", + "type": "number" + }, + "y_max": { + "description": "The expected largest value for the use case. E.g., for product sales, 0 would be the largest possible value would be given by the user or determined from this history of sales for the product in question or a similar product.", + "title": "Y Max", + "type": "number" + }, + "invert_scale": { + "default": false, + "description": "Set this flag to true if the scale of the new units will run in the opposite direction from the inputs.", + "title": "Invert Scale", + "type": "boolean" + } + }, + "required": [ + "prediction_id", + "y_min", + "y_max" + ], + "title": "rescale_forecastArguments", + "type": "object" + } + }, + { + "name": "save_forecast", + "description": "\nA tool that saves a Chronulus forecast from NormalizedForecaster to separate CSV and TXT files\n\nWhen to use this tool:\n- Use this tool when you need to save both the forecast data and its explanation to files\n- The forecast data will be saved as a CSV file for data analysis\n- The forecast explanation will be saved as a TXT file for reference\n- Both files will be saved in the same directory specified by output_path\n- This tool can also be used to directly save rescaled predictions without first calling the rescaling tool\n\nHow to use this tool:\n- Provide the prediction_id from a previous forecast\n- Specify the output_path where both files should be saved\n- Provide csv_name for the forecast data file (must end in .csv)\n- Provide txt_name for the explanation file (must end in .txt)\n- Optionally provide y_min and y_max to rescale the predictions (defaults to 0)\n- Set invert_scale to True if the 
target units run in the opposite direction\n- The tool will provide status updates through the MCP context\n", + "inputSchema": { + "properties": { + "prediction_id": { + "description": "The prediction_id from a prediction result", + "title": "Prediction Id", + "type": "string" + }, + "output_path": { + "description": "The directory path where both output files should be saved.", + "title": "Output Path", + "type": "string" + }, + "csv_name": { + "description": "The name of the CSV file to be saved. Should end in .csv", + "title": "Csv Name", + "type": "string" + }, + "txt_name": { + "description": "The name of the TXT file to be saved. Should end in .txt", + "title": "Txt Name", + "type": "string" + }, + "y_min": { + "default": 0.0, + "description": "The expected smallest value for the use case. E.g., for product sales, 0 would be the least possible value for sales.", + "title": "Y Min", + "type": "number" + }, + "y_max": { + "default": 1.0, + "description": "The expected largest value for the use case. E.g., for product sales, the largest possible value would be given by the user or determined from the history of sales for the product in question or a similar product.", + "title": "Y Max", + "type": "number" + }, + "invert_scale": { + "default": false, + "description": "Set this flag to true if the scale of the new units will run in the opposite direction from the inputs.", + "title": "Invert Scale", + "type": "boolean" + } + }, + "required": [ + "prediction_id", + "output_path", + "csv_name", + "txt_name" + ], + "title": "save_forecastArguments", + "type": "object" + } + }, + { + "name": "create_prediction_agent_and_get_predictions", + "description": "\nThis tool creates a BinaryPredictor agent with your session and input data model and then provides prediction input \ndata to the agent and returns the consensus prediction from a panel of experts along with their individual estimates\nand text explanations.
The agent also returns the alpha and beta parameters for a Beta distribution that allows you to\nestimate the confidence interval of its consensus probability estimate.\n\nWhen to use this tool:\n- Use this tool to request a probability estimate from Chronulus in situation when there is a binary outcome\n- This tool is specifically made to estimate the probability of an event occurring and not occurring and does not \nrequire historical data\n\nHow to use this tool:\n- First, make sure you have a session_id for the prediction use case.\n- Next, think about the features / characteristics most suitable for producing the requested prediction and then \ncreate an input_data_model that corresponds to the input_data you will provide for the thing or event being predicted.\n- Remember to pass all relevant information to Chronulus including text and images provided by the user. \n- If a user gives you files about a thing you are forecasting or predicting, you should pass these as inputs to the \nagent using one of the following types: \n - ImageFromFile\n - List[ImageFromFile]\n - TextFromFile\n - List[TextFromFile]\n - PdfFromFile\n - List[PdfFromFile]\n- If you have a large amount of text (over 500 words) to pass to the agent, you should use the Text or List[Text] field types\n- Finally, provide the number of experts to consult. The minimum and default number is 2, but users may request up to 30\n30 opinions in situations where reproducibility and risk sensitively is of the utmost importance. In most cases, 2 to 5 \nexperts is sufficient. \n", + "inputSchema": { + "$defs": { + "InputField": { + "properties": { + "name": { + "description": "Field name. Should be a valid python variable name.", + "title": "Name", + "type": "string" + }, + "description": { + "description": "A description of the value you will pass in the field.", + "title": "Description", + "type": "string" + }, + "type": { + "default": "str", + "description": "The type of the field. 
\n ImageFromFile takes a single named-argument, 'file_path' as input which should be absolute path to the image to be included. So you should provide this input as json, eg. {'file_path': '/path/to/image'}.\n ", + "enum": [ + "str", + "Text", + "List[Text]", + "TextFromFile", + "List[TextFromFile]", + "PdfFromFile", + "List[PdfFromFile]", + "ImageFromFile", + "List[ImageFromFile]" + ], + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "description" + ], + "title": "InputField", + "type": "object" + } + }, + "properties": { + "session_id": { + "description": "The session_id for the forecasting or prediction use case", + "title": "Session Id", + "type": "string" + }, + "input_data_model": { + "description": "Metadata on the fields you will include in the input_data.", + "items": { + "$ref": "#/$defs/InputField" + }, + "title": "Input Data Model", + "type": "array" + }, + "input_data": { + "additionalProperties": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + } + ] + }, + "description": "The forecast inputs that you will pass to the chronulus agent to make the prediction. 
The keys of the dict should correspond to the InputField name you provided in input_fields.", + "title": "Input Data", + "type": "object" + }, + "num_experts": { + "description": "The number of experts to consult when forming consensus", + "title": "Num Experts", + "type": "integer" + } + }, + "required": [ + "session_id", + "input_data_model", + "input_data", + "num_experts" + ], + "title": "create_prediction_agent_and_get_predictionsArguments", + "type": "object" + } + }, + { + "name": "reuse_prediction_agent_and_get_prediction", + "description": "\nThis tool provides prediction input data to a previously created Chronulus BinaryPredictor agent and returns the \nconsensus a prediction from a panel of experts along with their individual estimates and text explanations. The agent \nalso returns the alpha and beta parameters for a Beta distribution that allows you to estimate the confidence interval \nof its consensus probability estimate.\n\nWhen to use this tool:\n- Use this tool to request a prediction from a Chronulus prediction agent that you have already created and when your \ninput data model is unchanged\n- Use this tool to request a probability estimate from an existing prediction agent in a situation when there is a binary outcome\n- This tool is specifically made to estimate the probability of an event occurring and not occurring and does not \nrequire historical data\n\nHow to use this tool:\n- First, make sure you have a session_id for the prediction use case.\n- Next, think about the features / characteristics most suitable for producing the requested prediction and then \ncreate an input_data_model that corresponds to the input_data you will provide for the thing or event being predicted.\n- Remember to pass all relevant information to Chronulus including text and images provided by the user. 
\n- If a user gives you files about a thing you are forecasting or predicting, you should pass these as inputs to the \nagent using one of the following types: \n - ImageFromFile\n - List[ImageFromFile]\n - TextFromFile\n - List[TextFromFile]\n - PdfFromFile\n - List[PdfFromFile]\n- If you have a large amount of text (over 500 words) to pass to the agent, you should use the Text or List[Text] field types\n- Finally, provide the number of experts to consult. The minimum and default number is 2, but users may request up to 30\n30 opinions in situations where reproducibility and risk sensitively is of the utmost importance. In most cases, 2 to 5 \nexperts is sufficient. \n\nHow to use this tool:\n- First, make sure you have an agent_id for the prediction agent. The agent is already attached to the correct session. \nSo you do not need to provide a session_id.\n- Next, reference the input data model that you previously used with the agent and create new input data for the item \nbeing predicted that aligns with the previously specified input data model\n- Remember to pass all relevant information to Chronulus including text and images provided by the user. \n- If a user gives you files about a thing you are forecasting or predicting, you should pass these as inputs to the \nagent using one of the following types: \n - ImageFromFile\n - List[ImageFromFile]\n - TextFromFile\n - List[TextFromFile]\n - PdfFromFile\n - List[PdfFromFile]\n- If you have a large amount of text (over 500 words) to pass to the agent, you should use the Text or List[Text] field types\n- Finally, provide the number of experts to consult. The minimum and default number is 2, but users may request up to 30\n30 opinions in situations where reproducibility and risk sensitively is of the utmost importance. In most cases, 2 to 5 \nexperts is sufficient. 
\n", + "inputSchema": { + "properties": { + "agent_id": { + "description": "The agent_id for the forecasting or prediction use case and previously defined input_data_model", + "title": "Agent Id", + "type": "string" + }, + "input_data": { + "additionalProperties": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + } + ] + }, + "description": "The forecast inputs that you will pass to the chronulus agent to make the prediction. The keys of the dict should correspond to the InputField name you provided in input_fields.", + "title": "Input Data", + "type": "object" + }, + "num_experts": { + "description": "The number of experts to consult when forming consensus", + "title": "Num Experts", + "type": "integer" + } + }, + "required": [ + "agent_id", + "input_data", + "num_experts" + ], + "title": "reuse_prediction_agent_and_get_predictionArguments", + "type": "object" + } + }, + { + "name": "save_prediction_analysis_html", + "description": "\nA tool that saves an analysis of a BinaryPredictor prediction to HTML. 
\n\nThe analysis includes a plot of the theoretical and empirical beta distribution estimated by Chronulus and also\nlists the opinions provided by each expert.\n\nWhen to use this tool:\n- Use this tool when you need to save the BinaryPredictor estimates for the user\n\nHow to use this tool:\n- Provide the request_id from a previous prediction response\n- Specify the output_path where the html should be saved\n- Provide html_name for the file (must end in .html)\n- The tool will provide status updates through the MCP context\n", + "inputSchema": { + "properties": { + "request_id": { + "description": "The request_id from the BinaryPredictor result", + "title": "Request Id", + "type": "string" + }, + "output_path": { + "description": "The path where the HTML file should be saved.", + "title": "Output Path", + "type": "string" + }, + "html_name": { + "description": "The name of the HTML file (must end in .html).", + "title": "Html Name", + "type": "string" + }, + "title": { + "description": "Title of analysis", + "title": "Title", + "type": "string" + }, + "plot_label": { + "description": "Label for the Beta plot", + "title": "Plot Label", + "type": "string" + }, + "chronulus_prediction_summary": { + "description": "A summary paragraph distilling prediction results and expert opinions provided by Chronulus", + "title": "Chronulus Prediction Summary", + "type": "string" + }, + "dist_shape": { + "description": "A one line description of the shape of the distribution of predictions", + "title": "Dist Shape", + "type": "string" + }, + "dist_shape_interpretation": { + "description": "2-3 sentences interpreting the shape of the distribution of predictions in layman's terms", + "title": "Dist Shape Interpretation", + "type": "string" + } + }, + "required": [ + "request_id", + "output_path", + "html_name", + "title", + "plot_label", + "chronulus_prediction_summary", + "dist_shape", + "dist_shape_interpretation" + ], + "title": "save_prediction_analysis_htmlArguments", + 
"type": "object" + } + }, + { + "name": "get_risk_assessment_scorecard", + "description": "\nA tool that retrieves the risk assessment scorecard for the Chronulus Session in Markdown format\n\nWhen to use this tool:\n- Use this tool when the use asks about the risk level or safety concerns of a forecasting use case\n- You may also use this tool to provide justification to a user if you would like to warn them of the implications of \n what they are asking you to forecasting or predict.\n\nHow to use this tool:\n- Make sure you have a session_id for the forecasting or prediction use case\n- When displaying the scorecard markdown for the user, you should use an MDX-style React component\n", + "inputSchema": { + "properties": { + "session_id": { + "description": "The session_id for the forecasting or prediction use case", + "title": "Session Id", + "type": "string" + }, + "as_json": { + "description": "If true, returns the scorecard in JSON format, otherwise returns a markdown formatted scorecard", + "title": "As Json", + "type": "boolean" + } + }, + "required": [ + "session_id", + "as_json" + ], + "title": "get_risk_assessment_scorecardArguments", + "type": "object" + } + } + ] + }, + "spotify": { + "name": "spotify", + "display_name": "Spotify", + "description": "This MCP allows an LLM to play and use Spotify.", + "repository": { + "type": "git", + "url": "https://github.com/varunneal/spotify-mcp" + }, + "homepage": "https://github.com/varunneal/spotify-mcp", + "author": { + "name": "varunneal" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "spotify", + "audio" + ], + "examples": [ + { + "title": "Basic Playback Control", + "description": "Use the MCP to start, pause, or skip songs on Spotify.", + "prompt": "Start playing a song on Spotify." + }, + { + "title": "Search for Tracks", + "description": "Search for tracks, albums, artists, or playlists using the Spotify API.", + "prompt": "Search for the album 'Thriller'." 
+ } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/varunneal/spotify-mcp", + "spotify-mcp" + ], + "env": { + "SPOTIFY_CLIENT_ID": "${SPOTIFY_CLIENT_ID}", + "SPOTIFY_CLIENT_SECRET": "${SPOTIFY_CLIENT_SECRET}", + "SPOTIFY_REDIRECT_URI": "${SPOTIFY_REDIRECT_URI}" + } + } + }, + "arguments": { + "SPOTIFY_CLIENT_ID": { + "description": "The client ID for your Spotify application, required to authenticate with the Spotify API.", + "required": true, + "example": "your_spotify_client_id_here" + }, + "SPOTIFY_CLIENT_SECRET": { + "description": "The client secret for your Spotify application, needed for secure authentication with the API.", + "required": true, + "example": "your_spotify_client_secret_here" + }, + "SPOTIFY_REDIRECT_URI": { + "description": "The redirect URI you specified when creating the Spotify application, needed for the OAuth authentication process.", + "required": false, + "example": "http://localhost:8888" + } + }, + "tools": [ + { + "name": "SpotifyPlayback", + "description": "Manages the current playback with the following actions:\n - get: Get information about user's current track.\n - start: Starts playing new item or resumes current playback if called with no uri.\n - pause: Pauses current playback.\n - skip: Skips current track.\n ", + "inputSchema": { + "description": "Manages the current playback with the following actions:\n- get: Get information about user's current track.\n- start: Starts playing new item or resumes current playback if called with no uri.\n- pause: Pauses current playback.\n- skip: Skips current track.", + "properties": { + "action": { + "description": "Action to perform: 'get', 'start', 'pause' or 'skip'.", + "title": "Action", + "type": "string" + }, + "spotify_uri": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Spotify uri of item to play for 'start' action. 
If omitted, resumes current playback.", + "title": "Spotify Uri" + }, + "num_skips": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "Number of tracks to skip for `skip` action.", + "title": "Num Skips" + } + }, + "required": [ + "action" + ], + "title": "Playback", + "type": "object" + } + }, + { + "name": "SpotifySearch", + "description": "Search for tracks, albums, artists, or playlists on Spotify.", + "inputSchema": { + "description": "Search for tracks, albums, artists, or playlists on Spotify.", + "properties": { + "query": { + "description": "query term", + "title": "Query", + "type": "string" + }, + "qtype": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "track", + "description": "Type of items to search for (track, album, artist, playlist, or comma-separated combination)", + "title": "Qtype" + }, + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 10, + "description": "Maximum number of items to return", + "title": "Limit" + } + }, + "required": [ + "query" + ], + "title": "Search", + "type": "object" + } + }, + { + "name": "SpotifyQueue", + "description": "Manage the playback queue - get the queue or add tracks.", + "inputSchema": { + "description": "Manage the playback queue - get the queue or add tracks.", + "properties": { + "action": { + "description": "Action to perform: 'add' or 'get'.", + "title": "Action", + "type": "string" + }, + "track_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Track ID to add to queue (required for add action)", + "title": "Track Id" + } + }, + "required": [ + "action" + ], + "title": "Queue", + "type": "object" + } + }, + { + "name": "SpotifyGetInfo", + "description": "Get detailed information about a Spotify item (track, album, artist, or playlist).", + "inputSchema": { + "description": "Get detailed information 
about a Spotify item (track, album, artist, or playlist).", + "properties": { + "item_uri": { + "description": "URI of the item to get information about. If 'playlist' or 'album', returns its tracks. If 'artist', returns albums and top tracks.", + "title": "Item Uri", + "type": "string" + } + }, + "required": [ + "item_uri" + ], + "title": "GetInfo", + "type": "object" + } + } + ] + }, + "any-chat-completions": { + "name": "any-chat-completions", + "display_name": "Any Chat Completions", + "description": "Interact with any OpenAI SDK Compatible Chat Completions API like OpenAI, Perplexity, Groq, xAI and many more.", + "repository": { + "type": "git", + "url": "https://github.com/pyroprompts/any-chat-completions-mcp" + }, + "homepage": "https://github.com/pyroprompts/any-chat-completions-mcp", + "author": { + "name": "pyroprompts" + }, + "license": "MIT", + "categories": [ + "AI Systems" + ], + "tags": [ + "Claude", + "OpenAI", + "API", + "Chat Completion" + ], + "examples": [ + { + "title": "OpenAI Integration", + "description": "Integrate OpenAI into Claude Desktop", + "prompt": "Configure the MCP server to use OpenAI's API." 
+ } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/pyroprompts/any-chat-completions-mcp" + ], + "env": { + "AI_CHAT_KEY": "${AI_CHAT_KEY}", + "AI_CHAT_NAME": "${AI_CHAT_NAME}", + "AI_CHAT_MODEL": "${AI_CHAT_MODEL}", + "AI_CHAT_BASE_URL": "${AI_CHAT_BASE_URL}" + } + } + }, + "arguments": { + "AI_CHAT_KEY": { + "description": "API key for authentication with the chat service provider.", + "required": true, + "example": "your_openai_secret_key_here" + }, + "AI_CHAT_NAME": { + "description": "The name of the AI chat provider to use, like 'OpenAI' or 'PyroPrompts'.", + "required": true, + "example": "OpenAI" + }, + "AI_CHAT_MODEL": { + "description": "Specifies which model to be used for the chat service, e.g., 'gpt-4o'.", + "required": true, + "example": "gpt-4o" + }, + "AI_CHAT_BASE_URL": { + "description": "The base URL for the API service of the chat provider.", + "required": true, + "example": "https://api.openai.com/v1" + } + }, + "tools": [ + { + "name": "chat-with-${AI_CHAT_NAME_CLEAN}", + "description": "Text chat with ${AI_CHAT_NAME}", + "inputSchema": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The content of the chat to send to ${AI_CHAT_NAME}" + } + }, + "required": [ + "content" + ] + } + } + ] + }, + "google-tasks": { + "name": "google-tasks", + "display_name": "Google Tasks", + "description": "Google Tasks API Model Context Protocol Server.", + "repository": { + "type": "git", + "url": "https://github.com/zcaceres/gtasks-mcp" + }, + "homepage": "https://github.com/zcaceres/gtasks-mcp", + "author": { + "name": "zcaceres" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "google", + "tasks", + "productivity" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/zcaceres/gtasks-mcp" + ] + } + }, + "tools": [ + { + "name": "search", + "description": 
"Search for tasks in Google Tasks.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for tasks" + } + }, + "required": [ + "query" + ] + }, + { + "name": "list", + "description": "List all tasks in Google Tasks.", + "inputSchema": { + "cursor": { + "type": "string", + "description": "Cursor for pagination", + "optional": true + } + }, + "required": [] + }, + { + "name": "create", + "description": "Create a new task in Google Tasks.", + "inputSchema": { + "taskListId": { + "type": "string", + "description": "Task list ID", + "optional": true + }, + "title": { + "type": "string", + "description": "Task title" + }, + "notes": { + "type": "string", + "description": "Task notes", + "optional": true + }, + "due": { + "type": "string", + "description": "Due date", + "optional": true + } + }, + "required": [ + "title" + ] + }, + { + "name": "update", + "description": "Update an existing task in Google Tasks.", + "inputSchema": { + "taskListId": { + "type": "string", + "description": "Task list ID", + "optional": true + }, + "id": { + "type": "string", + "description": "Task ID" + }, + "uri": { + "type": "string", + "description": "Task URI" + }, + "title": { + "type": "string", + "description": "New task title", + "optional": true + }, + "notes": { + "type": "string", + "description": "New task notes", + "optional": true + }, + "status": { + "type": "string", + "description": "New task status ('needsAction' or 'completed')", + "optional": true + }, + "due": { + "type": "string", + "description": "New due date", + "optional": true + } + }, + "required": [ + "id", + "uri" + ] + }, + { + "name": "delete", + "description": "Delete a task in Google Tasks.", + "inputSchema": { + "taskListId": { + "type": "string", + "description": "Task list ID" + }, + "id": { + "type": "string", + "description": "Task ID" + } + }, + "required": [ + "taskListId", + "id" + ] + }, + { + "name": "clear", + "description": "Clear completed tasks from a Google Tasks 
task list.", + "inputSchema": { + "taskListId": { + "type": "string", + "description": "Task list ID" + } + }, + "required": [ + "taskListId" + ] + } + ] + }, + "greptimedb": { + "display_name": "GreptimeDB", + "repository": { + "type": "git", + "url": "https://github.com/GreptimeTeam/greptimedb" + }, + "homepage": "https://greptime.com", + "author": { + "name": "GreptimeTeam" + }, + "license": "Apache License 2.0", + "tags": [ + "database", + "timeseries", + "observability", + "metrics", + "logs", + "events" + ], + "arguments": { + "http-addr": { + "description": "HTTP address to bind to", + "required": true, + "example": "0.0.0.0:4000" + }, + "rpc-bind-addr": { + "description": "RPC address to bind to", + "required": true, + "example": "0.0.0.0:4001" + }, + "mysql-addr": { + "description": "MySQL protocol address to bind to", + "required": true, + "example": "0.0.0.0:4002" + }, + "postgres-addr": { + "description": "PostgreSQL protocol address to bind to", + "required": true, + "example": "0.0.0.0:4003" + } + }, + "installations": { + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-p", + "127.0.0.1:4000-4003:4000-4003", + "-v", + "$(pwd)/greptimedb:./greptimedb_data", + "--name", + "greptime", + "--rm", + "greptime/greptimedb:latest", + "standalone", + "start", + "--http-addr", + "0.0.0.0:4000", + "--rpc-bind-addr", + "0.0.0.0:4001", + "--mysql-addr", + "0.0.0.0:4002", + "--postgres-addr", + "0.0.0.0:4003" + ], + "recommended": true, + "description": "Run GreptimeDB in a Docker container" + }, + "source": { + "type": "custom", + "command": "cargo", + "args": [ + "run", + "--", + "standalone", + "start" + ], + "description": "Build and run GreptimeDB from source" + } + }, + "examples": [ + { + "title": "Start a standalone server", + "description": "Run a standalone GreptimeDB server", + "prompt": "cargo run -- standalone start" + } + ], + "name": "greptimedb", + "description": "", + "categories": [ + "Databases" + ], + 
"is_official": true + }, + "chroma-mcp": { + "display_name": "Chroma MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/chroma-core/chroma-mcp" + }, + "homepage": "https://www.trychroma.com/", + "author": { + "name": "chroma-core" + }, + "license": "Apache 2.0", + "tags": [ + "vector database", + "embeddings", + "LLM", + "retrieval", + "MCP" + ], + "arguments": { + "client-type": { + "description": "Type of client to use (ephemeral, persistent, http, cloud)", + "required": false, + "example": "persistent" + }, + "data-dir": { + "description": "Directory to store data for persistent client", + "required": false, + "example": "/full/path/to/your/data/directory" + }, + "host": { + "description": "Host for HTTP client", + "required": false, + "example": "your-host" + }, + "port": { + "description": "Port for HTTP client", + "required": false, + "example": "your-port" + }, + "tenant": { + "description": "Tenant ID for cloud client", + "required": false, + "example": "your-tenant-id" + }, + "database": { + "description": "Database name for cloud client", + "required": false, + "example": "your-database-name" + }, + "api-key": { + "description": "API key for cloud client", + "required": false, + "example": "your-api-key" + }, + "custom-auth-credentials": { + "description": "Custom authentication credentials for HTTP client", + "required": false, + "example": "your-custom-auth-credentials" + }, + "ssl": { + "description": "Whether to use SSL for HTTP client", + "required": false, + "example": "true" + }, + "dotenv-path": { + "description": "Path to .env file", + "required": false, + "example": "/custom/path/.env" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "chroma-mcp" + ], + "description": "Install using uvx", + "recommended": true + } + }, + "examples": [ + { + "title": "List Collections", + "description": "List all collections with pagination support", + "prompt": "Use chroma_list_collections to 
show me all available collections." + }, + { + "title": "Create Collection", + "description": "Create a new collection with optional HNSW configuration", + "prompt": "Use chroma_create_collection to create a new collection named 'my_documents'." + }, + { + "title": "Query Documents", + "description": "Query documents using semantic search with advanced filtering", + "prompt": "Use chroma_query_documents to find documents in the 'my_documents' collection that are similar to 'machine learning concepts'." + } + ], + "name": "chroma-mcp", + "description": "Embeddings, vector search, document storage, and full-text search with the open-source AI application database", + "categories": [ + "Databases" + ], + "is_official": true + }, + "xmind": { + "name": "xmind", + "display_name": "XMind", + "description": "Read and search through your XMind directory containing XMind files.", + "repository": { + "type": "git", + "url": "https://github.com/apeyroux/mcp-xmind" + }, + "homepage": "https://github.com/apeyroux/mcp-xmind", + "license": "MIT", + "author": { + "name": "apeyroux" + }, + "categories": [ + "Knowledge Base" + ], + "tags": [ + "XMind", + "Mind Mapping", + "Analysis", + "Productivity" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@41px/mcp-xmind", + "${USER_XMIND_DIRECTORY}" + ] + } + }, + "examples": [ + { + "title": "Search for Nodes", + "description": "Searches through the mind map for specific nodes based on the query parameters.", + "prompt": "{\"name\": \"search_nodes\", \"arguments\": {\"path\": \"/path/to/file.xmind\", \"query\": \"project\", \"searchIn\": [\"title\", \"notes\"], \"caseSensitive\": false}}" + }, + { + "title": "Extract Node", + "description": "Extracts a node from the mind map based on a search query.", + "prompt": "{\"name\": \"extract_node\", \"arguments\": {\"path\": \"/path/to/file.xmind\", \"searchQuery\": \"Feature > API\"}}" + }, + { + "title": "List Tasks", + "description": "Lists 
TODO tasks from the mind map.", + "prompt": "{\"name\": \"get_todo_tasks\", \"arguments\": {\"path\": \"/path/to/file.xmind\"}}" + } + ], + "arguments": { + "USER_XMIND_DIRECTORY": { + "description": "The path to the directory containing XMind files that should be processed by the server.", + "required": true, + "example": "/Users/alex/XMind" + } + }, + "tools": [ + { + "name": "read_xmind", + "description": "Parse and analyze XMind files with multiple capabilities:\n - Extract complete mind map structure in JSON format\n - Include all relationships between nodes with their IDs and titles\n - Extract callouts attached to topics\n - Generate text or markdown summaries\n - Search for specific content\n - Get hierarchical path to any node\n - Filter content by labels, task status, or node depth\n - Extract all URLs and external references\n - Analyze relationships and connections between topics\n Input: File path to .xmind file\n Output: JSON structure containing nodes, relationships, and callouts", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + }, + "required": [ + "path" + ] + } + }, + { + "name": "list_xmind_directory", + "description": "Comprehensive XMind file discovery and analysis tool:\n - Recursively scan directories for .xmind files\n - Filter files by creation/modification date\n - Search for files containing specific content\n - Group files by project or category\n - Detect duplicate mind maps\n - Generate directory statistics and summaries\n - Verify file integrity and structure\n - Monitor changes in mind map files\n Input: Directory path to scan\n Output: List of XMind files with optional metadata", + "inputSchema": { + "type": "object", + "properties": { + "directory": { + "type": "string" + } + } + } + }, + { + "name": "read_multiple_xmind_files", + "description": "Advanced multi-file analysis and correlation tool:\n - Process multiple XMind files simultaneously\n - Compare content across different mind 
maps\n - Identify common themes and patterns\n - Merge related content from different files\n - Generate cross-reference reports\n - Find content duplications across files\n - Create consolidated summaries\n - Track changes across multiple versions\n - Generate comparative analysis\n Input: Array of file paths to .xmind files\n Output: Combined analysis results in JSON format with per-file details", + "inputSchema": { + "type": "object", + "properties": { + "paths": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "paths" + ] + } + }, + { + "name": "search_xmind_files", + "description": "Advanced file search tool with recursive capabilities:\n - Search for files and directories by partial name matching\n - Case-insensitive pattern matching\n - Searches through all subdirectories recursively\n - Returns full paths to all matching items\n - Includes both files and directories in results\n - Safe searching within allowed directories only\n - Handles special characters in names\n - Continues searching even if some directories are inaccessible\n Input: {\n directory: Starting directory path,\n pattern: Search text to match in names\n }\n Output: Array of full paths to matching items", + "inputSchema": { + "type": "object", + "properties": { + "pattern": { + "type": "string" + }, + "directory": { + "type": "string" + } + }, + "required": [ + "pattern" + ] + } + }, + { + "name": "extract_node", + "description": "Smart node extraction with fuzzy path matching:\n - Flexible search using partial or complete node paths\n - Returns multiple matching nodes ranked by relevance\n - Supports approximate matching for better results\n - Includes full context and hierarchy information\n - Returns complete subtree for each match\n - Best tool for exploring and navigating complex mind maps\n - Perfect for finding nodes when exact path is unknown\n Usage examples:\n - \"Project > Backend\" : finds nodes in any path containing these terms\n - \"Feature 
API\" : finds nodes containing these words in any order\n Input: {\n path: Path to .xmind file,\n searchQuery: Text to search in node paths (flexible matching)\n }\n Output: Ranked list of matching nodes with their full subtrees", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "searchQuery": { + "type": "string" + } + }, + "required": [ + "path", + "searchQuery" + ] + } + }, + { + "name": "extract_node_by_id", + "description": "Extract a specific node and its subtree using its unique ID:\n - Find and extract node using its XMind ID\n - Return complete subtree structure\n - Preserve all node properties and relationships\n - Fast direct access without path traversal\n Note: For a more detailed view with fuzzy matching, use \"extract_node\" with the node's path\n Input: {\n path: Path to .xmind file,\n nodeId: Unique identifier of the node\n }\n Output: JSON structure of the found node and its subtree", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "nodeId": { + "type": "string" + } + }, + "required": [ + "path", + "nodeId" + ] + } + }, + { + "name": "search_nodes", + "description": "Advanced node search with multiple criteria:\n - Search through titles, notes, labels, callouts and tasks\n - Filter by task status (todo/done)\n - Find nodes by their relationships\n - Configure which fields to search in\n - Case-sensitive or insensitive search\n - Get full context including task status\n - Returns all matching nodes with their IDs\n - Includes relationship information and task status\n Input: {\n path: Path to .xmind file,\n query: Search text,\n searchIn: Array of fields to search in ['title', 'notes', 'labels', 'callouts', 'tasks'],\n taskStatus: 'todo' | 'done' (optional),\n caseSensitive: Boolean (optional)\n }\n Output: Detailed search results with task status and context", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + 
"query": { + "type": "string" + }, + "searchIn": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "title", + "notes", + "labels", + "callouts", + "tasks" + ] + } + }, + "caseSensitive": { + "type": "boolean" + }, + "taskStatus": { + "type": "string", + "enum": [ + "todo", + "done" + ] + } + }, + "required": [ + "path", + "query" + ] + } + } + ] + }, + "search1api-mcp": { + "display_name": "Search1API MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/fatwang2/search1api-mcp" + }, + "homepage": "https://www.search1api.com/?utm_source=mcp", + "author": { + "name": "fatwang2" + }, + "license": "MIT", + "tags": [ + "search", + "web", + "news", + "crawl", + "sitemap", + "reasoning", + "trending" + ], + "arguments": { + "SEARCH1API_KEY": { + "description": "Your Search1API API key", + "required": true, + "example": "your_api_key_here" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "search1api-mcp" + ], + "env": { + "SEARCH1API_KEY": "YOUR_SEARCH1API_KEY" + }, + "description": "Run directly using npx", + "recommended": true + } + }, + "examples": [ + { + "title": "Web Search", + "description": "Search the web for information", + "prompt": "Search for the latest news about artificial intelligence" + }, + { + "title": "News Search", + "description": "Search for news articles", + "prompt": "Find news articles about climate change from the past month" + }, + { + "title": "Web Crawling", + "description": "Extract content from a specific URL", + "prompt": "Crawl the content from https://example.com" + } + ], + "name": "search1api-mcp", + "description": "A Model Context Protocol (MCP) server that provides search and crawl functionality using Search1API.", + "categories": [ + "Web Services" + ], + "tools": [ + { + "name": "search", + "description": "Web search tool", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search 
query, be simple and concise" + }, + "max_results": { + "type": "number", + "description": "Maximum number of results to return", + "default": 10 + }, + "search_service": { + "type": "string", + "description": "Specify the search engine to use. Choose based on your specific needs", + "default": "google", + "enum": [ + "google", + "bing", + "duckduckgo", + "yahoo", + "x", + "reddit", + "github", + "youtube", + "arxiv", + "wechat", + "bilibili", + "imdb", + "wikipedia" + ] + }, + "crawl_results": { + "type": "number", + "description": "Number of results to crawl for full webpage content, useful when search result summaries are insufficient for complex queries", + "default": 0 + }, + "include_sites": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of sites to include in search. Only use when you need special results from sites not available in search_service", + "default": [] + }, + "exclude_sites": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of sites to exclude from search. Only use when you need to explicitly filter out specific domains from results", + "default": [] + }, + "time_range": { + "type": "string", + "description": "Time range for search results, only use when specific time constraints are required", + "enum": [ + "day", + "month", + "year" + ] + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "news", + "description": "News search tool", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query, be simple and concise" + }, + "max_results": { + "type": "number", + "description": "Maximum number of results to return", + "default": 10 + }, + "search_service": { + "type": "string", + "description": "Specify the news engine to use. 
Choose based on your specific needs", + "default": "bing", + "enum": [ + "google", + "bing", + "duckduckgo", + "yahoo", + "hackernews" + ] + }, + "crawl_results": { + "type": "number", + "description": "Number of results to crawl for full webpage content, useful when search result summaries are insufficient for complex queries", + "default": 0 + }, + "include_sites": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of sites to include in search. Only use when you need special results from sites not available in search_service", + "default": [] + }, + "exclude_sites": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of sites to exclude from search. Only use when you need to explicitly filter out specific domains from results", + "default": [] + }, + "time_range": { + "type": "string", + "description": "Time range for search results, only use when specific time constraints are required", + "enum": [ + "day", + "month", + "year" + ] + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "crawl", + "description": "Extract content from URL", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL to crawl" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "sitemap", + "description": "Get all related links from a URL", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL to get sitemap" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "reasoning", + "description": "Deep thinking and complex problem solving", + "inputSchema": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The question or problem that needs deep thinking" + } + }, + "required": [ + "content" + ] + } + }, + { + "name": "trending", + "description": "Get trending topics from popular platforms", + "inputSchema": { + "type": "object", + "properties": { + 
"search_service": { + "type": "string", + "description": "Specify the platform to get trending topics from", + "enum": [ + "github", + "hackernews" + ], + "default": "github" + }, + "max_results": { + "type": "number", + "description": "Maximum number of trending items to return", + "default": 10 + } + }, + "required": [ + "search_service" + ] + } + } + ], + "prompts": [], + "resources": [ + { + "uri": "search1api://info", + "name": "Search1API Information", + "description": "Basic information about Search1API capabilities", + "mimeType": "application/json" + } + ], + "is_official": true + }, + "influxdb": { + "name": "influxdb", + "display_name": "InfluxDB", + "description": "Run queries against InfluxDB OSS API v2.", + "repository": { + "type": "git", + "url": "https://github.com/idoru/influxdb-mcp-server" + }, + "homepage": "https://github.com/idoru/influxdb-mcp-server", + "author": { + "name": "idoru" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "InfluxDB", + "API", + "server", + "time-series" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "${INFLUXDB_TOKEN}", + "${INFLUXDB_URL}", + "${INFLUXDB_ORG}" + ], + "env": { + "INFLUXDB_TOKEN": "your_token", + "INFLUXDB_URL": "http://localhost:8086", + "INFLUXDB_ORG": "your_org" + } + } + }, + "arguments": { + "INFLUXDB_TOKEN": { + "description": "Authentication token for the InfluxDB API", + "required": true, + "example": "your_token" + }, + "INFLUXDB_URL": { + "description": "URL of the InfluxDB instance", + "required": false, + "example": "http://localhost:8086" + }, + "INFLUXDB_ORG": { + "description": "Default organization name for certain operations", + "required": false, + "example": "your_org" + } + }, + "tools": [ + { + "name": "write-data", + "description": "Write data to InfluxDB in line protocol format.", + "inputSchema": { + "org": { + "type": "string", + "description": "The organization name" + }, + "bucket": { + "type": "string", + 
"description": "The bucket name" + }, + "data": { + "type": "string", + "description": "Data in InfluxDB line protocol format" + }, + "precision": { + "type": "string", + "enum": [ + "ns", + "us", + "ms", + "s" + ], + "description": "Timestamp precision (ns, us, ms, s)" + } + }, + "required": [ + "org", + "bucket", + "data" + ] + }, + { + "name": "query-data", + "description": "Execute a Flux query on InfluxDB data.", + "inputSchema": { + "org": { + "type": "string", + "description": "The organization name" + }, + "query": { + "type": "string", + "description": "Flux query string" + } + }, + "required": [ + "org", + "query" + ] + }, + { + "name": "create-bucket", + "description": "Create a new bucket in InfluxDB.", + "inputSchema": { + "name": { + "type": "string", + "description": "The bucket name" + }, + "orgID": { + "type": "string", + "description": "The organization ID" + }, + "retentionPeriodSeconds": { + "type": "number", + "description": "Retention period in seconds (optional)" + } + }, + "required": [ + "name", + "orgID" + ] + }, + { + "name": "create-org", + "description": "Create a new organization in InfluxDB.", + "inputSchema": { + "name": { + "type": "string", + "description": "The organization name" + }, + "description": { + "type": "string", + "description": "Organization description (optional)" + } + }, + "required": [ + "name" + ] + } + ] + }, + "mssql": { + "name": "mssql", + "display_name": "MSSQL", + "description": "MCP Server for MSSQL database in Python", + "repository": { + "type": "git", + "url": "https://github.com/JexinSam/mssql_mcp_server" + }, + "homepage": "https://github.com/JexinSam/mssql_mcp_server", + "author": { + "name": "JexinSam" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "MSSQL", + "AI", + "Database Access" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mssql_mcp_server" + ], + "env": { + "MSSQL_DRIVER": "${MSSQL_DRIVER}", + "MSSQL_HOST": 
"${MSSQL_HOST}", + "MSSQL_USER": "${MSSQL_USER}", + "MSSQL_PASSWORD": "${MSSQL_PASSWORD}", + "MSSQL_DATABASE": "${MSSQL_DATABASE}" + } + } + }, + "arguments": { + "MSSQL_DRIVER": { + "description": "Environment variable that specifies the driver to connect to the MSSQL database.", + "required": true, + "example": "mssql_driver" + }, + "MSSQL_HOST": { + "description": "Environment variable that specifies the hostname or IP address of the MSSQL server.", + "required": true, + "example": "localhost" + }, + "MSSQL_USER": { + "description": "Environment variable that defines the username for connecting to the MSSQL database.", + "required": true, + "example": "your_username" + }, + "MSSQL_PASSWORD": { + "description": "Environment variable that stores the password for the MSSQL user.", + "required": true, + "example": "your_password" + }, + "MSSQL_DATABASE": { + "description": "Environment variable that specifies the name of the MSSQL database to connect to.", + "required": true, + "example": "your_database" + } + }, + "tools": [ + { + "name": "execute_sql", + "description": "Execute an SQL query on the MSSQL server", + "inputSchema": { + "query": { + "type": "string", + "description": "The SQL query to execute" + } + }, + "required": [ + "query" + ] + } + ] + }, + "n8n": { + "name": "n8n", + "display_name": "n8n", + "description": "This MCP server provides tools and resources for AI assistants to manage n8n workflows and executions, including listing, creating, updating, and deleting workflows, as well as monitoring their execution status.", + "repository": { + "type": "git", + "url": "https://github.com/leonardsellem/n8n-mcp-server" + }, + "homepage": "https://github.com/leonardsellem/n8n-mcp-server", + "author": { + "name": "leonardsellem" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "n8n", + "server", + "AI" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@anaisbetts/mcp-installer" + ], 
+ "env": { + "N8N_API_URL": "${N8N_API_URL}", + "N8N_API_KEY": "${N8N_API_KEY}" + } + } + }, + "arguments": { + "N8N_API_URL": { + "description": "URL of the n8n API", + "required": true, + "example": "http://localhost:5678/api/v1" + }, + "N8N_API_KEY": { + "description": "API key for authenticating with n8n", + "required": true, + "example": "n8n_api_..." + } + }, + "tools": [ + { + "name": "install_repo_mcp_server", + "description": "Install an MCP server via npx or uvx", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The package name of the MCP server" + }, + "args": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The arguments to pass along" + }, + "env": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The environment variables to set, delimited by =" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "install_local_mcp_server", + "description": "Install an MCP server whose code is cloned locally on your computer", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "The path to the MCP server code cloned on your computer" + }, + "args": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The arguments to pass along" + }, + "env": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The environment variables to set, delimited by =" + } + }, + "required": [ + "path" + ] + } + } + ] + }, + "bing-web-search-api": { + "name": "bing-web-search-api", + "display_name": "Bing Search API", + "description": "Server implementation for Microsoft Bing Web Search API.", + "repository": { + "type": "git", + "url": "https://github.com/leehanchung/bing-search-mcp" + }, + "homepage": "https://github.com/leehanchung/bing-search-mcp", + "author": { + "name": "leehanchung" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "Bing", 
+ "Search", + "Web", + "News", + "Images" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/leehanchung/bing-search-mcp", + "mcp-server-bing" + ], + "env": { + "BING_API_KEY": "${BING_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Web Search Example", + "description": "Search the web for various queries.", + "prompt": "Search for 'latest technology news'." + }, + { + "title": "News Search Example", + "description": "Search for the latest news articles.", + "prompt": "Search for 'global warming'." + }, + { + "title": "Image Search Example", + "description": "Find images related to a query.", + "prompt": "Search for 'sunsets'." + } + ], + "arguments": { + "BING_API_KEY": { + "description": "API key required for authenticating requests to the Microsoft Bing Search API.", + "required": true, + "example": "your-bing-api-key" + } + }, + "tools": [ + { + "name": "bing_web_search", + "description": "Performs a web search using the Bing Search API for general information\n and websites.\n\n Args:\n query: Search query (required)\n count: Number of results (1-50, default 10)\n offset: Pagination offset (default 0)\n market: Market code like en-US, en-GB, etc.\n ", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + }, + "count": { + "default": 10, + "title": "Count", + "type": "integer" + }, + "offset": { + "default": 0, + "title": "Offset", + "type": "integer" + }, + "market": { + "default": "en-US", + "title": "Market", + "type": "string" + } + }, + "required": [ + "query" + ], + "title": "bing_web_searchArguments", + "type": "object" + } + }, + { + "name": "bing_news_search", + "description": "Searches for news articles using Bing News Search API for current\n events and timely information.\n\n Args:\n query: News search query (required)\n count: Number of results (1-50, default 10)\n market: Market code like en-US, en-GB, etc.\n freshness: Time period 
of news (Day, Week, Month)\n ", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + }, + "count": { + "default": 10, + "title": "Count", + "type": "integer" + }, + "market": { + "default": "en-US", + "title": "Market", + "type": "string" + }, + "freshness": { + "default": "Day", + "title": "Freshness", + "type": "string" + } + }, + "required": [ + "query" + ], + "title": "bing_news_searchArguments", + "type": "object" + } + }, + { + "name": "bing_image_search", + "description": "Searches for images using Bing Image Search API for visual content.\n\n Args:\n query: Image search query (required)\n count: Number of results (1-50, default 10)\n market: Market code like en-US, en-GB, etc.\n ", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + }, + "count": { + "default": 10, + "title": "Count", + "type": "integer" + }, + "market": { + "default": "en-US", + "title": "Market", + "type": "string" + } + }, + "required": [ + "query" + ], + "title": "bing_image_searchArguments", + "type": "object" + } + } + ] + }, + "image-generation": { + "name": "image-generation", + "display_name": "Image Generation", + "description": "This MCP server provides image generation capabilities using the Replicate Flux model.", + "repository": { + "type": "git", + "url": "https://github.com/GongRzhe/Image-Generation-MCP-Server" + }, + "homepage": "https://github.com/GongRzhe/Image-Generation-MCP-Server", + "author": { + "name": "GongRzhe" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "image", + "generation", + "flux", + "Replicate" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@gongrzhe/image-gen-server" + ], + "env": { + "REPLICATE_API_TOKEN": "${REPLICATE_API_TOKEN}", + "MODEL": "${MODEL}", + "your-replicate-api-token": "${your_replicate_api_token}", + "alternative-model-name": "${alternative_model_name}" + } + } + }, + "arguments": { + 
"REPLICATE_API_TOKEN": { + "description": "Your Replicate API token for authentication", + "required": true, + "example": "your-replicate-api-token" + }, + "MODEL": { + "description": "The Replicate model to use for image generation. Defaults to \"black-forest-labs/flux-schnell\"", + "required": false, + "example": "alternative-model-name" + } + }, + "tools": [ + { + "name": "generate_image", + "description": "Generate an image using the Flux model", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Prompt for generated image" + }, + "seed": { + "type": "integer", + "description": "Random seed for reproducible generation" + }, + "aspect_ratio": { + "type": "string", + "enum": [ + "1:1", + "16:9", + "21:9", + "3:2", + "2:3", + "4:5", + "5:4", + "3:4", + "4:3", + "9:16", + "9:21" + ], + "description": "Aspect ratio for the generated image", + "default": "1:1" + }, + "output_format": { + "type": "string", + "enum": [ + "webp", + "jpg", + "png" + ], + "description": "Format of the output images", + "default": "webp" + }, + "num_outputs": { + "type": "integer", + "description": "Number of outputs to generate (1-4)", + "default": 1, + "minimum": 1, + "maximum": 4 + } + }, + "required": [ + "prompt" + ] + } + } + ] + }, + "aws-s3": { + "name": "aws-s3", + "display_name": "Sample S3 Model Context Protocol", + "description": "A sample MCP server for AWS S3 that flexibly fetches objects from S3 such as PDF documents.", + "repository": { + "type": "git", + "url": "https://github.com/aws-samples/sample-mcp-server-s3" + }, + "homepage": "https://github.com/aws-samples/sample-mcp-server-s3", + "author": { + "name": "aws-samples" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "S3", + "PDF", + "aws" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "s3-mcp-server" + ] + } + } + }, + "markdownify": { + "name": "markdownify", + "display_name": 
"Markdownify", + "description": "MCP to convert almost anything to Markdown (PPTX, HTML, PDF, Youtube Transcripts and more)", + "repository": { + "type": "git", + "url": "https://github.com/zcaceres/mcp-markdownify-server" + }, + "homepage": "https://github.com/zcaceres/mcp-markdownify-server", + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "markdown", + "conversion" + ], + "author": { + "name": "zcaceres" + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/zcaceres/mcp-markdownify-server" + ], + "env": { + "UV_PATH": "${UV_PATH}" + } + } + }, + "arguments": { + "UV_PATH": { + "description": "Environment variable specifying the installation location of the `uv` dependency.", + "required": false, + "example": "/path/to/uv" + } + } + }, + "openapi-schema": { + "name": "openapi-schema", + "display_name": "OpenAPI Schema Model Context Protocol", + "description": "Allow LLMs to explore large [OpenAPI](https://www.openapis.org/) schemas without bloating the context.", + "repository": { + "type": "git", + "url": "https://github.com/hannesj/mcp-openapi-schema" + }, + "homepage": "https://github.com/hannesj/mcp-openapi-schema", + "author": { + "name": "hannesj" + }, + "license": "[NOT FOUND]", + "categories": [ + "Dev Tools" + ], + "tags": [ + "OpenAPI", + "LLM" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "mcp-openapi-schema", + "${ABSOLUTE_PATH_TO_OPENAPI_YAML}" + ] + } + }, + "arguments": { + "ABSOLUTE_PATH_TO_OPENAPI_YAML": { + "description": "The absolute path to the OpenAPI YAML file that the MCP server will use to load the schema.", + "required": true, + "example": "/absolute/path/to/openapi.yaml" + } + }, + "tools": [ + { + "name": "list-endpoints", + "description": "Lists all API paths and their HTTP methods with summaries, organized by path", + "inputSchema": { + "type": "object" + } + }, + { + "name": "get-endpoint", + 
"description": "Gets detailed information about a specific API endpoint", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "method": { + "type": "string" + } + }, + "required": [ + "path", + "method" + ] + } + }, + { + "name": "get-request-body", + "description": "Gets the request body schema for a specific endpoint", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "method": { + "type": "string" + } + }, + "required": [ + "path", + "method" + ] + } + }, + { + "name": "get-response-schema", + "description": "Gets the response schema for a specific endpoint, method, and status code", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "method": { + "type": "string" + }, + "statusCode": { + "type": "string", + "default": "200" + } + }, + "required": [ + "path", + "method" + ] + } + }, + { + "name": "get-path-parameters", + "description": "Gets the parameters for a specific path", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "method": { + "type": "string" + } + }, + "required": [ + "path" + ] + } + }, + { + "name": "list-components", + "description": "Lists all schema components (schemas, parameters, responses, etc.)", + "inputSchema": { + "type": "object" + } + }, + { + "name": "get-component", + "description": "Gets detailed definition for a specific component", + "inputSchema": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "Component type (e.g., schemas, parameters, responses)" + }, + "name": { + "type": "string", + "description": "Component name" + } + }, + "required": [ + "type", + "name" + ] + } + }, + { + "name": "list-security-schemes", + "description": "Lists all available security schemes", + "inputSchema": { + "type": "object" + } + }, + { + "name": "get-examples", + "description": "Gets examples for a specific component or 
endpoint", + "inputSchema": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "request", + "response", + "component" + ], + "description": "Type of example to retrieve" + }, + "path": { + "type": "string", + "description": "API path (required for request/response examples)" + }, + "method": { + "type": "string", + "description": "HTTP method (required for request/response examples)" + }, + "statusCode": { + "type": "string", + "description": "Status code (for response examples)" + }, + "componentType": { + "type": "string", + "description": "Component type (required for component examples)" + }, + "componentName": { + "type": "string", + "description": "Component name (required for component examples)" + } + }, + "required": [ + "type" + ] + } + }, + { + "name": "search-schema", + "description": "Searches across paths, operations, and schemas", + "inputSchema": { + "type": "object", + "properties": { + "pattern": { + "type": "string", + "description": "Search pattern (case-insensitive)" + } + }, + "required": [ + "pattern" + ] + } + } + ] + }, + "xcodebuild": { + "name": "xcodebuild", + "display_name": "Xcode Build", + "description": "\ud83c\udf4e Build iOS Xcode workspace/project and feed back errors to llm.", + "repository": { + "type": "git", + "url": "https://github.com/ShenghaiWang/xcodebuild" + }, + "homepage": "https://github.com/ShenghaiWang/xcodebuild", + "author": { + "name": "ShenghaiWang" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "xcode", + "mcpxcodebuild" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcpxcodebuild" + ] + }, + "python": { + "type": "python", + "command": "python", + "args": [ + "-m", + "mcpxcodebuild" + ] + } + }, + "examples": [ + { + "title": "Build iOS Project", + "description": "Builds the iOS Xcode workspace/project located at a specified folder.", + "prompt": "build --folder /path/to/your/project" + } + ], + "tools": [ 
+ { + "name": "build", + "description": "Build the iOS Xcode workspace/project in the folder", + "inputSchema": { + "description": "Parameters", + "properties": { + "folder": { + "description": "The full path of the current folder that the iOS Xcode workspace/project sits", + "title": "Folder", + "type": "string" + } + }, + "required": [ + "folder" + ], + "title": "Folder", + "type": "object" + } + }, + { + "name": "test", + "description": "Run test for the iOS Xcode workspace/project in the folder", + "inputSchema": { + "description": "Parameters", + "properties": { + "folder": { + "description": "The full path of the current folder that the iOS Xcode workspace/project sits", + "title": "Folder", + "type": "string" + } + }, + "required": [ + "folder" + ], + "title": "Folder", + "type": "object" + } + } + ] + }, + "azure-adx": { + "name": "azure-adx", + "display_name": "Azure Data Explorer", + "description": "Query and analyze Azure Data Explorer databases.", + "repository": { + "type": "git", + "url": "https://github.com/pab1it0/adx-mcp-server" + }, + "homepage": "https://github.com/pab1it0/adx-mcp-server", + "author": { + "name": "pab1it0" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "Azure", + "KQL", + "Data Explorer" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/pab1it0/adx-mcp-server", + "adx-mcp-server" + ], + "env": { + "ADX_CLUSTER_URL": "${ADX_CLUSTER_URL}", + "ADX_DATABASE": "${ADX_DATABASE}" + } + } + }, + "arguments": { + "ADX_CLUSTER_URL": { + "description": "The URL of the Azure Data Explorer cluster.", + "required": true, + "example": "https://yourcluster.region.kusto.windows.net" + }, + "ADX_DATABASE": { + "description": "The name of the Azure Data Explorer database to connect to.", + "required": true, + "example": "your_database" + } + }, + "tools": [ + { + "name": "execute_query", + "description": "Executes a Kusto Query Language (KQL) 
query against the configured Azure Data Explorer database and returns the results as a list of dictionaries.", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + } + }, + "required": [ + "query" + ], + "title": "execute_queryArguments", + "type": "object" + } + }, + { + "name": "list_tables", + "description": "Retrieves a list of all tables available in the configured Azure Data Explorer database, including their names, folders, and database associations.", + "inputSchema": { + "properties": {}, + "title": "list_tablesArguments", + "type": "object" + } + }, + { + "name": "get_table_schema", + "description": "Retrieves the schema information for a specified table in the Azure Data Explorer database, including column names, data types, and other schema-related metadata.", + "inputSchema": { + "properties": { + "table_name": { + "title": "Table Name", + "type": "string" + } + }, + "required": [ + "table_name" + ], + "title": "get_table_schemaArguments", + "type": "object" + } + }, + { + "name": "sample_table_data", + "description": "Retrieves a random sample of rows from the specified table in the Azure Data Explorer database. 
The sample_size parameter controls how many rows to return (default: 10).", + "inputSchema": { + "properties": { + "table_name": { + "title": "Table Name", + "type": "string" + }, + "sample_size": { + "default": 10, + "title": "Sample Size", + "type": "integer" + } + }, + "required": [ + "table_name" + ], + "title": "sample_table_dataArguments", + "type": "object" + } + } + ] + }, + "llm-context": { + "name": "llm-context", + "display_name": "LLM Context", + "description": "Provides a repo-packing MCP tool with configurable profiles that specify file inclusion/exclusion patterns and optional prompts.", + "repository": { + "type": "git", + "url": "https://github.com/cyberchitta/llm-context.py" + }, + "homepage": "https://github.com/cyberchitta/llm-context.py", + "author": { + "name": "cyberchitta" + }, + "license": "Apache 2.0", + "categories": [ + "Dev Tools" + ], + "tags": [ + "LLM", + "Context Injection", + "Development", + "ChatGPT", + "Productivity" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "llm-context", + "lc-mcp" + ] + } + }, + "arguments": { + "mcp": { + "description": "Indicates the model context protocol that should be used for communication.", + "required": true, + "example": "lc-mcp" + } + }, + "tools": [ + { + "name": "lc-project-context", + "description": "IMPORTANT: First check if project context is already available in the conversation before making any new requests. Use lc-get-files for retrieving specific files, and only use this tool when a broad repository overview is needed.\n\nGenerates a structured repository overview including: 1) Directory tree with file status (\u2713 full, \u25cb outline, \u2717 excluded) 2) Complete contents of key files 3) Smart outlines highlighting important definitions in supported languages. The output is customizable via profiles that control file inclusion rules and presentation format. 
The assistant tracks previously retrieved project context in the conversation and checks this history before making new requests.", + "inputSchema": { + "properties": { + "root_path": { + "description": "Root directory path (e.g. '/home/user/projects/myproject')", + "format": "path", + "title": "Root Path", + "type": "string" + }, + "rule_name": { + "default": "lc-code", + "description": "Rule to use (e.g. 'code', 'copy', 'full') - defines file inclusion and presentation rules", + "pattern": "^[a-zA-Z0-9_-]+$", + "title": "Rule Name", + "type": "string" + } + }, + "required": [ + "root_path" + ], + "title": "ContextRequest", + "type": "object" + } + }, + { + "name": "lc-get-files", + "description": "IMPORTANT: Check previously retrieved file contents before making new requests. Retrieves (read-only) complete contents of specified files from the project. For this project, this is the preferred method for all file content analysis and text searches - simply retrieve the relevant files and examine their contents. The assistant cannot modify files with this tool - it only reads their contents.", + "inputSchema": { + "properties": { + "root_path": { + "description": "Root directory path (e.g. '/home/user/projects/myproject')", + "format": "path", + "title": "Root Path", + "type": "string" + }, + "paths": { + "description": "File paths relative to root_path, starting with a forward slash and including the root directory name. For example, if root_path is '/home/user/projects/myproject', then a valid path would be '/myproject/src/main.py", + "items": { + "type": "string" + }, + "title": "Paths", + "type": "array" + } + }, + "required": [ + "root_path", + "paths" + ], + "title": "FilesRequest", + "type": "object" + } + }, + { + "name": "lc-list-modified-files", + "description": "IMPORTANT: First get the generation timestamp from the project context. Returns a list of paths to files that have been modified since a given timestamp. 
This is typically used to track which files have changed during the conversation. After getting the list, use lc-get-files to examine the contents of any modified files of interest.", + "inputSchema": { + "properties": { + "root_path": { + "description": "Root directory path (e.g. '/home/user/projects/myproject')", + "format": "path", + "title": "Root Path", + "type": "string" + }, + "rule_name": { + "default": "lc-code", + "description": "Rule to use (e.g. 'code', 'copy', 'full') - defines file inclusion and presentation rules", + "pattern": "^[a-zA-Z0-9_-]+$", + "title": "Rule Name", + "type": "string" + }, + "timestamp": { + "description": "Unix timestamp to check modifications since", + "title": "Timestamp", + "type": "number" + } + }, + "required": [ + "root_path", + "timestamp" + ], + "title": "ListModifiedFilesRequest", + "type": "object" + } + }, + { + "name": "lc-code-outlines", + "description": "Returns smart outlines highlighting important definitions in all supported code files. This provides a high-level overview of code structure without retrieving full file contents. Outlines show key definitions (classes, functions, methods) in the codebase. Use lc-get-implementations to retrieve the full implementation of any definition shown in these outlines.", + "inputSchema": { + "properties": { + "root_path": { + "description": "Root directory path (e.g. '/home/user/projects/myproject')", + "format": "path", + "title": "Root Path", + "type": "string" + }, + "rule_name": { + "default": "lc-code", + "description": "Rule to use for file selection rules", + "pattern": "^[a-zA-Z0-9_-]+$", + "title": "Rule Name", + "type": "string" + } + }, + "required": [ + "root_path" + ], + "title": "OutlinesRequest", + "type": "object" + } + }, + { + "name": "lc-get-implementations", + "description": "Retrieves complete code implementations of definitions identified in code outlines. Provide a list of file paths and definition names to get their full implementations. 
This tool works with all supported languages except C and C++.", + "inputSchema": { + "properties": { + "root_path": { + "description": "Root directory path (e.g. '/home/user/projects/myproject')", + "format": "path", + "title": "Root Path", + "type": "string" + }, + "queries": { + "description": "List of (file_path, definition_name) tuples to fetch implementations for", + "items": { + "maxItems": 2, + "minItems": 2, + "prefixItems": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "title": "Queries", + "type": "array" + } + }, + "required": [ + "root_path", + "queries" + ], + "title": "ImplementationsRequest", + "type": "object" + } + } + ] + }, + "gmail-headless": { + "name": "gmail-headless", + "display_name": "Headless Gmail Server", + "description": "Remote hostable MCP server that can get and send Gmail messages without local credential or file system setup.", + "repository": { + "type": "git", + "url": "https://github.com/baryhuang/mcp-headless-gmail" + }, + "homepage": "https://github.com/baryhuang/mcp-headless-gmail", + "author": { + "name": "baryhuang" + }, + "license": "MIT", + "categories": [ + "Messaging" + ], + "tags": [ + "Gmail", + "Headless", + "Docker", + "API" + ], + "installations": { + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "buryhuang/mcp-headless-gmail:latest" + ] + } + }, + "tools": [ + { + "name": "gmail_refresh_token", + "description": "Refresh the access token using the refresh token and client credentials", + "inputSchema": { + "google_access_token": { + "type": "string", + "description": "Google OAuth2 access token (optional if expired)" + }, + "google_refresh_token": { + "type": "string", + "description": "Google OAuth2 refresh token" + }, + "google_client_id": { + "type": "string", + "description": "Google OAuth2 client ID for token refresh" + }, + "google_client_secret": { + "type": "string", + "description": "Google OAuth2 client secret for 
token refresh" + } + }, + "required": [ + "google_refresh_token", + "google_client_id", + "google_client_secret" + ] + }, + { + "name": "gmail_get_recent_emails", + "description": "Get the most recent emails from Gmail (returns metadata, snippets, and first 1k chars of body)", + "inputSchema": { + "google_access_token": { + "type": "string", + "description": "Google OAuth2 access token" + }, + "max_results": { + "type": "integer", + "description": "Maximum number of emails to return (default: 10)" + }, + "unread_only": { + "type": "boolean", + "description": "Whether to return only unread emails (default: False)" + } + }, + "required": [ + "google_access_token" + ] + }, + { + "name": "gmail_get_email_body_chunk", + "description": "Get a 1k character chunk of an email body starting from the specified offset", + "inputSchema": { + "google_access_token": { + "type": "string", + "description": "Google OAuth2 access token" + }, + "message_id": { + "type": "string", + "description": "ID of the message to retrieve" + }, + "thread_id": { + "type": "string", + "description": "ID of the thread to retrieve (will get the first message if multiple exist)" + }, + "offset": { + "type": "integer", + "description": "Offset in characters to start from (default: 0)" + } + }, + "required": [ + "google_access_token" + ] + }, + { + "name": "gmail_send_email", + "description": "Send an email via Gmail", + "inputSchema": { + "google_access_token": { + "type": "string", + "description": "Google OAuth2 access token" + }, + "to": { + "type": "string", + "description": "Recipient email address" + }, + "subject": { + "type": "string", + "description": "Email subject" + }, + "body": { + "type": "string", + "description": "Email body content (plain text)" + }, + "html_body": { + "type": "string", + "description": "Email body content in HTML format (optional)" + } + }, + "required": [ + "google_access_token", + "to", + "subject", + "body" + ] + } + ] + }, + "graphlit-mcp-server": { + 
"display_name": "Graphlit MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/graphlit/graphlit-mcp-server" + }, + "homepage": "https://www.graphlit.com/blog/graphlit-mcp-server", + "author": { + "name": "graphlit" + }, + "license": "MIT", + "tags": [ + "mcp", + "graphlit", + "retrieval", + "extraction", + "ingestion", + "web", + "notifications" + ], + "arguments": { + "GRAPHLIT_ORGANIZATION_ID": { + "description": "Your organization ID from Graphlit Platform", + "required": true, + "example": "your-organization-id" + }, + "GRAPHLIT_ENVIRONMENT_ID": { + "description": "Your environment ID from Graphlit Platform", + "required": true, + "example": "your-environment-id" + }, + "GRAPHLIT_JWT_SECRET": { + "description": "Your JWT secret for signing the JWT token", + "required": true, + "example": "your-jwt-secret" + } + }, + "installations": { + "npx": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "graphlit-mcp-server" + ], + "env": { + "GRAPHLIT_ORGANIZATION_ID": "${input:organization_id}", + "GRAPHLIT_ENVIRONMENT_ID": "${input:environment_id}", + "GRAPHLIT_JWT_SECRET": "${input:jwt_secret}" + }, + "description": "Run using NPX", + "recommended": true + } + }, + "examples": [ + { + "title": "Query Contents", + "description": "Retrieve relevant content from your Graphlit project", + "prompt": "Use the Graphlit MCP Server to search for information about machine learning in my project" + } + ], + "name": "graphlit-mcp-server", + "description": "The Model Context Protocol (MCP) Server enables integration between MCP clients and the Graphlit service. This document outlines the setup process and provides a basic example of using the client.", + "categories": [ + "Knowledge Base" + ], + "tools": [ + { + "name": "configureProject", + "description": "Configures the default content workflow for the Graphlit project. 
Only needed if user asks to configure the default workflow.\n Optionally accepts whether to enable high-quality document and web page preparation using a vision LLM. Defaults to using Azure AI Document Intelligence for document preparation, if not assigned.\n Optionally accepts whether to enable entity extraction using LLM into the knowledge graph. Defaults to no entity extraction, if not assigned.\n Optionally accepts the preferred model provider service type, i.e. Anthropic, OpenAI, Google. Defaults to Anthropic if not provided.\n Returns the project identifier.", + "inputSchema": { + "type": "object", + "properties": { + "enablePreparation": { + "type": "boolean", + "default": false, + "description": "Whether to enable high-quality document and web page preparation using vision LLM. Defaults to False." + }, + "enableExtraction": { + "type": "boolean", + "default": false, + "description": "Whether to enable entity extraction using LLM into the knowledge graph. Defaults to False." + }, + "serviceType": { + "type": "string", + "enum": [ + "ANTHROPIC", + "AZURE_AI", + "AZURE_OPEN_AI", + "CEREBRAS", + "COHERE", + "DEEPSEEK", + "GOOGLE", + "GROQ", + "JINA", + "MISTRAL", + "OPEN_AI", + "REPLICATE", + "VOYAGE" + ], + "default": "ANTHROPIC", + "description": "Preferred model provider service type, i.e. Anthropic, OpenAI, Google. Defaults to Anthropic if not provided." + } + } + } + }, + { + "name": "askGraphlit", + "description": "Ask questions about the Graphlit API or SDKs. Can create code samples for any API call.\n Accepts an LLM user prompt for code generation.\n Returns the LLM prompt completion in Markdown format.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "LLM user prompt for code generation." + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "retrieveSources", + "description": "Retrieve relevant content sources from Graphlit knowledge base. 
Do *not* use for retrieving content by content identifier - retrieve content resource instead, with URI 'contents://{id}'.\n Accepts an LLM user prompt for content retrieval. For best retrieval quality, provide only key words or phrases from the user prompt, which will be used to create text embeddings for a vector search query.\n Only use when there is a valid LLM user prompt for content retrieval, otherwise use queryContents. For example 'recent content' is not a useful user prompt, since it doesn't reference the text in the content.\n Accepts an optional ingestion recency filter (defaults to null, meaning all time), and optional content type and file type filters.\n Also accepts optional feed and collection identifiers to filter content by.\n Returns the ranked content sources, including their content resource URI to retrieve the complete Markdown text.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "LLM user prompt for content retrieval." + }, + "inLast": { + "type": "string", + "description": "Recency filter for content ingested 'in last' timespan, optional. Should be ISO 8601 format, for example, 'PT1H' for last hour, 'P1D' for last day, 'P7D' for last week, 'P30D' for last month. Doesn't support weeks or months explicitly." + }, + "contentType": { + "type": "string", + "enum": [ + "EMAIL", + "EVENT", + "FILE", + "ISSUE", + "MESSAGE", + "PAGE", + "POST", + "TEXT" + ], + "description": "Content type filter, optional. One of: Email, Event, File, Issue, Message, Page, Post, Text." + }, + "fileType": { + "type": "string", + "enum": [ + "ANIMATION", + "AUDIO", + "CODE", + "DATA", + "DOCUMENT", + "DRAWING", + "EMAIL", + "GEOMETRY", + "IMAGE", + "MANIFEST", + "PACKAGE", + "POINT_CLOUD", + "SHAPE", + "UNKNOWN", + "VIDEO" + ], + "description": "File type filter, optional. One of: Animation, Audio, Code, Data, Document, Drawing, Email, Geometry, Image, Package, PointCloud, Shape, Video." 
+ }, + "feeds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Feed identifiers to filter content by, optional." + }, + "collections": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Collection identifiers to filter content by, optional." + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "retrieveImages", + "description": "Retrieve images from Graphlit knowledge base. Provides image-specific retrieval when image similarity search is desired.\n Do *not* use for retrieving content by content identifier - retrieve content resource instead, with URI 'contents://{id}'.\n Accepts image URL. Image will be used for similarity search using image embeddings.\n Accepts optional geo-location filter for search by latitude, longitude and optional distance radius. Images taken with GPS enabled are searchable by geo-location.\n Also accepts optional recency filter (defaults to null, meaning all time), and optional feed and collection identifiers to filter images by.\n Returns the matching images, including their content resource URI to retrieve the complete Markdown text.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL of image which will be used for similarity search using image embeddings." + }, + "inLast": { + "type": "string", + "description": "Recency filter for images ingested 'in last' timespan, optional. Should be ISO 8601 format, for example, 'PT1H' for last hour, 'P1D' for last day, 'P7D' for last week, 'P30D' for last month. Doesn't support weeks or months explicitly." + }, + "feeds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Feed identifiers to filter images by, optional." + }, + "collections": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Collection identifiers to filter images by, optional." 
+ }, + "location": { + "type": "object", + "properties": { + "latitude": { + "type": "number", + "minimum": -90, + "maximum": 90, + "description": "The latitude, must be between -90 and 90." + }, + "longitude": { + "type": "number", + "minimum": -180, + "maximum": 180, + "description": "The longitude, must be between -180 and 180." + }, + "distance": { + "type": "number", + "description": "The distance radius (in meters)." + } + }, + "required": [ + "latitude", + "longitude" + ], + "additionalProperties": false, + "description": "Geo-location filter for search by latitude, longitude and optional distance radius." + }, + "limit": { + "type": "number", + "default": 100, + "description": "Limit the number of images to be returned. Defaults to 100." + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "extractText", + "description": "Extracts JSON data from text using LLM.\n Accepts text to be extracted, and JSON schema which describes the data which will be extracted. JSON schema needs be of type 'object' and include 'properties' and 'required' fields.\n Optionally accepts text prompt which is provided to LLM to guide data extraction. Defaults to 'Extract data using the tools provided'.\n Returns extracted JSON from text.", + "inputSchema": { + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "Text to be extracted with LLM." + }, + "schema": { + "type": "string", + "description": "JSON schema which describes the data which will be extracted. JSON schema needs be of type 'object' and include 'properties' and 'required' fields." + }, + "prompt": { + "type": "string", + "description": "Text prompt which is provided to LLM to guide data extraction, optional." 
+ } + }, + "required": [ + "text", + "schema" + ] + } + }, + { + "name": "createCollection", + "description": "Create a collection.\n Accepts a collection name, and optional list of content identifiers to add to collection.\n Returns the collection identifier", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Collection name." + }, + "contents": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Content identifiers to add to collection, optional." + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "addContentsToCollection", + "description": "Add contents to a collection.\n Accepts a collection identifier and a list of content identifiers to add to collection.\n Returns the collection identifier.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Collection identifier." + }, + "contents": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Content identifiers to add to collection." + } + }, + "required": [ + "id", + "contents" + ] + } + }, + { + "name": "removeContentsFromCollection", + "description": "Remove contents from collection.\n Accepts a collection identifier and a list of content identifiers to remove from collection.\n Returns the collection identifier.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Collection identifier." + }, + "contents": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Content identifiers to remove from collection." + } + }, + "required": [ + "id", + "contents" + ] + } + }, + { + "name": "deleteContent", + "description": "Deletes content from Graphlit knowledge base.\n Accepts content identifier.\n Returns the content identifier and content state, i.e. 
Deleted.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Content identifier." + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "deleteCollection", + "description": "Deletes collection from Graphlit knowledge base.\n Does *not* delete the contents in the collection, only the collection itself.\n Accepts collection identifier.\n Returns the collection identifier and collection state, i.e. Deleted.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Collection identifier." + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "deleteFeed", + "description": "Deletes feed from Graphlit knowledge base.\n *Does* delete the contents in the feed, in addition to the feed itself.\n Accepts feed identifier.\n Returns the feed identifier and feed state, i.e. Deleted.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Feed identifier." + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "deleteFeeds", + "description": "Deletes feeds from Graphlit knowledge base.\n *Does* delete the contents in the feed, in addition to the feed itself.\n Accepts optional feed type filter to limit the feeds which will be deleted.\n Also accepts optional limit of how many feeds to delete, defaults to 100.\n Returns the feed identifiers and feed state, i.e. Deleted.", + "inputSchema": { + "type": "object", + "properties": { + "feedType": { + "type": "string", + "enum": [ + "DISCORD", + "EMAIL", + "INTERCOM", + "ISSUE", + "MICROSOFT_TEAMS", + "NOTION", + "REDDIT", + "RSS", + "SEARCH", + "SITE", + "SLACK", + "TWITTER", + "WEB", + "YOU_TUBE", + "ZENDESK" + ], + "description": "Feed type filter, optional. One of: Discord, Email, Intercom, Issue, MicrosoftTeams, Notion, Reddit, Rss, Search, Site, Slack, Web, YouTube, Zendesk." 
+ }, + "limit": { + "type": "number", + "default": 100, + "description": "Limit the number of feeds to be deleted. Defaults to 100." + } + } + } + }, + { + "name": "deleteCollections", + "description": "Deletes collections from Graphlit knowledge base.\n Does *not* delete the contents in the collections, only the collections themselves.\n Accepts optional limit of how many collections to delete, defaults to 100.\n Returns the collection identifiers and collection state, i.e. Deleted.", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "number", + "default": 100, + "description": "Limit the number of collections to be deleted. Defaults to 100." + } + } + } + }, + { + "name": "deleteContents", + "description": "Deletes contents from Graphlit knowledge base.\n Accepts optional content type and file type filters to limit the contents which will be deleted.\n Also accepts optional limit of how many contents to delete, defaults to 1000.\n Returns the content identifiers and content state, i.e. Deleted.", + "inputSchema": { + "type": "object", + "properties": { + "contentType": { + "type": "string", + "enum": [ + "EMAIL", + "EVENT", + "FILE", + "ISSUE", + "MESSAGE", + "PAGE", + "POST", + "TEXT" + ], + "description": "Content type filter, optional. One of: Email, Event, File, Issue, Message, Page, Post, Text." + }, + "fileType": { + "type": "string", + "enum": [ + "ANIMATION", + "AUDIO", + "CODE", + "DATA", + "DOCUMENT", + "DRAWING", + "EMAIL", + "GEOMETRY", + "IMAGE", + "MANIFEST", + "PACKAGE", + "POINT_CLOUD", + "SHAPE", + "UNKNOWN", + "VIDEO" + ], + "description": "File type filter, optional. One of: Animation, Audio, Code, Data, Document, Drawing, Email, Geometry, Image, Package, PointCloud, Shape, Video." + }, + "limit": { + "type": "number", + "default": 1000, + "description": "Limit the number of contents to be deleted. Defaults to 1000." 
+ } + } + } + }, + { + "name": "queryContents", + "description": "Query contents from Graphlit knowledge base. Do *not* use for retrieving content by content identifier - retrieve content resource instead, with URI 'contents://{id}'.\n Accepts optional content name, content type and file type for metadata filtering.\n Accepts optional recency filter (defaults to null, meaning all time), and optional feed and collection identifiers to filter images by.\n Accepts optional geo-location filter for search by latitude, longitude and optional distance radius. Images and videos taken with GPS enabled are searchable by geo-location.\n Returns the matching contents, including their content resource URI to retrieve the complete Markdown text.", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Textual match on content name." + }, + "type": { + "type": "string", + "enum": [ + "EMAIL", + "EVENT", + "FILE", + "ISSUE", + "MESSAGE", + "PAGE", + "POST", + "TEXT" + ], + "description": "Filter by content type." + }, + "fileType": { + "type": "string", + "enum": [ + "ANIMATION", + "AUDIO", + "CODE", + "DATA", + "DOCUMENT", + "DRAWING", + "EMAIL", + "GEOMETRY", + "IMAGE", + "MANIFEST", + "PACKAGE", + "POINT_CLOUD", + "SHAPE", + "UNKNOWN", + "VIDEO" + ], + "description": "Filter by file type." + }, + "inLast": { + "type": "string", + "description": "Recency filter for content ingested 'in last' timespan, optional. Should be ISO 8601 format, for example, 'PT1H' for last hour, 'P1D' for last day, 'P7D' for last week, 'P30D' for last month. Doesn't support weeks or months explicitly." + }, + "feeds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Feed identifiers to filter contents by, optional." + }, + "collections": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Collection identifiers to filter contents by, optional." 
+ }, + "location": { + "type": "object", + "properties": { + "latitude": { + "type": "number", + "minimum": -90, + "maximum": 90, + "description": "The latitude, must be between -90 and 90." + }, + "longitude": { + "type": "number", + "minimum": -180, + "maximum": 180, + "description": "The longitude, must be between -180 and 180." + }, + "distance": { + "type": "number", + "description": "The distance radius (in meters)." + } + }, + "required": [ + "latitude", + "longitude" + ], + "additionalProperties": false, + "description": "Geo-location filter for search by latitude, longitude and optional distance radius." + }, + "limit": { + "type": "number", + "default": 100, + "description": "Limit the number of contents to be returned. Defaults to 100." + } + } + } + }, + { + "name": "queryCollections", + "description": "Query collections from Graphlit knowledge base. Do *not* use for retrieving collection by collection identifier - retrieve collection resource instead, with URI 'collections://{id}'.\n Accepts optional collection name for metadata filtering.\n Returns the matching collections, including their collection resource URI to retrieve the collection contents.", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Textual match on collection name." + }, + "limit": { + "type": "number", + "default": 100, + "description": "Limit the number of collections to be returned. Defaults to 100." + } + } + } + }, + { + "name": "queryFeeds", + "description": "Query feeds from Graphlit knowledge base. Do *not* use for retrieving feed by feed identifier - retrieve feed resource instead, with URI 'feeds://{id}'.\n Accepts optional feed name and feed type for metadata filtering.\n Returns the matching feeds, including their feed resource URI to retrieve the feed contents.", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Textual match on feed name." 
+ }, + "type": { + "type": "string", + "enum": [ + "DISCORD", + "EMAIL", + "INTERCOM", + "ISSUE", + "MICROSOFT_TEAMS", + "NOTION", + "REDDIT", + "RSS", + "SEARCH", + "SITE", + "SLACK", + "TWITTER", + "WEB", + "YOU_TUBE", + "ZENDESK" + ], + "description": "Filter by feed type." + }, + "limit": { + "type": "number", + "default": 100, + "description": "Limit the number of feeds to be returned. Defaults to 100." + } + } + } + }, + { + "name": "isContentDone", + "description": "Check if content has completed asynchronous ingestion.\n Accepts a content identifier which was returned from one of the non-feed ingestion tools, like ingestUrl.\n Returns whether the content is done or not.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Content identifier." + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "isFeedDone", + "description": "Check if an asynchronous feed has completed ingesting all the available content.\n Accepts a feed identifier which was returned from one of the ingestion tools, like ingestGoogleDriveFiles.\n Returns whether the feed is done or not.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Feed identifier." 
+ } + }, + "required": [ + "id" + ] + } + }, + { + "name": "listNotionDatabases", + "description": "Lists available Notion databases.\n Returns a list of Notion databases, where the database identifier can be used with ingestNotionPages to ingest pages into Graphlit knowledge base.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "listLinearProjects", + "description": "Lists available Linear projects.\n Returns a list of Linear projects, where the project name can be used with ingestLinearIssues to ingest issues into Graphlit knowledge base.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "listSlackChannels", + "description": "Lists available Slack channels.\n Returns a list of Slack channels, where the channel name can be used with ingestSlackMessages to ingest messages into Graphlit knowledge base.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "listSharePointLibraries", + "description": "Lists available SharePoint libraries.\n Returns a list of SharePoint libraries, where the selected libraryId can be used with listSharePointFolders to enumerate SharePoint folders in a library.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "listSharePointFolders", + "description": "Lists available SharePoint folders.\n Returns a list of SharePoint folders, which can be used with ingestSharePointFiles to ingest files into Graphlit knowledge base.", + "inputSchema": { + "type": "object", + "properties": { + "libraryId": { + "type": "string", + "description": "SharePoint library identifier." 
+ } + }, + "required": [ + "libraryId" + ] + } + }, + { + "name": "ingestSharePointFiles", + "description": "Ingests files from SharePoint library into Graphlit knowledge base.\n Accepts a SharePoint libraryId and an optional folderId to ingest files from a specific SharePoint folder.\n Libraries can be enumerated with listSharePointLibraries and library folders with listSharePointFolders.\n Accepts an optional read limit for the number of files to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "libraryId": { + "type": "string", + "description": "SharePoint library identifier." + }, + "folderId": { + "type": "string", + "description": "SharePoint folder identifier, optional." + }, + "readLimit": { + "type": "number", + "description": "Number of files to ingest, optional. Defaults to 100." + } + }, + "required": [ + "libraryId" + ] + } + }, + { + "name": "ingestOneDriveFiles", + "description": "Ingests files from OneDrive folder into Graphlit knowledge base.\n Accepts an optional read limit for the number of files to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "readLimit": { + "type": "number", + "description": "Number of files to ingest, optional. Defaults to 100." + } + } + } + }, + { + "name": "ingestGoogleDriveFiles", + "description": "Ingests files from Google Drive folder into Graphlit knowledge base.\n Accepts an optional read limit for the number of files to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "readLimit": { + "type": "number", + "description": "Number of files to ingest, optional. Defaults to 100." + } + } + } + }, + { + "name": "ingestDropboxFiles", + "description": "Ingests files from Dropbox folder into Graphlit knowledge base.\n Accepts optional relative path to Dropbox folder (i.e. 
/Pictures), and an optional read limit for the number of files to ingest.\n If no path provided, ingests files from root Dropbox folder.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "Relative path to Dropbox folder, optional." + }, + "readLimit": { + "type": "number", + "description": "Number of files to ingest, optional. Defaults to 100." + } + } + } + }, + { + "name": "ingestBoxFiles", + "description": "Ingests files from Box folder into Graphlit knowledge base.\n Accepts optional Box folder identifier, and an optional read limit for the number of files to ingest.\n If no folder identifier provided, ingests files from root Box folder (i.e. \"0\").\n Folder identifier can be inferred from Box URL. https://app.box.com/folder/123456 -> folder identifier is \"123456\".\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "folderId": { + "type": "string", + "default": "0", + "description": "Box folder identifier, optional. Defaults to root folder." + }, + "readLimit": { + "type": "number", + "description": "Number of files to ingest, optional. Defaults to 100." + } + } + } + }, + { + "name": "ingestGitHubFiles", + "description": "Ingests files from GitHub repository into Graphlit knowledge base.\n Accepts GitHub repository owner and repository name and an optional read limit for the number of files to ingest.\n For example, for GitHub repository (https://github.com/openai/tiktoken), 'openai' is the repository owner, and 'tiktoken' is the repository name.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "repositoryName": { + "type": "string", + "description": "GitHub repository name." + }, + "repositoryOwner": { + "type": "string", + "description": "GitHub repository owner." 
+ }, + "readLimit": { + "type": "number", + "description": "Number of files to ingest, optional. Defaults to 100." + } + }, + "required": [ + "repositoryName", + "repositoryOwner" + ] + } + }, + { + "name": "ingestNotionPages", + "description": "Ingests pages from Notion database into Graphlit knowledge base.\n Accepts Notion database identifier and an optional read limit for the number of pages to ingest.\n You can list the available Notion database identifiers with listNotionDatabases.\n Or, for a Notion URL, https://www.notion.so/Example/Engineering-Wiki-114abc10cb38487e91ec906fc6c6f350, 'Engineering-Wiki-114abc10cb38487e91ec906fc6c6f350' is an example of a Notion database identifier.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "databaseId": { + "type": "string", + "description": "Notion database identifier." + }, + "readLimit": { + "type": "number", + "description": "Number of pages to ingest, optional. Defaults to 100." + } + }, + "required": [ + "databaseId" + ] + } + }, + { + "name": "ingestMicrosoftTeamsMessages", + "description": "Ingests messages from Microsoft Teams channel into Graphlit knowledge base.\n Accepts Microsoft Teams team identifier and channel identifier, and an optional read limit for the number of messages to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "teamId": { + "type": "string", + "description": "Microsoft Teams team identifier." + }, + "channelId": { + "type": "string", + "description": "Microsoft Teams channel identifier." + }, + "readLimit": { + "type": "number", + "description": "Number of messages to ingest, optional. Defaults to 100." 
+ } + }, + "required": [ + "teamId", + "channelId" + ] + } + }, + { + "name": "ingestSlackMessages", + "description": "Ingests messages from Slack channel into Graphlit knowledge base.\n Accepts Slack channel name and an optional read limit for the number of messages to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "channelName": { + "type": "string", + "description": "Slack channel name." + }, + "readLimit": { + "type": "number", + "description": "Number of messages to ingest, optional. Defaults to 100." + } + }, + "required": [ + "channelName" + ] + } + }, + { + "name": "ingestDiscordMessages", + "description": "Ingests messages from Discord channel into Graphlit knowledge base.\n Accepts Discord channel name and an optional read limit for the number of messages to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "channelName": { + "type": "string", + "description": "Discord channel name." + }, + "readLimit": { + "type": "number", + "description": "Number of messages to ingest, optional. Defaults to 100." + } + }, + "required": [ + "channelName" + ] + } + }, + { + "name": "ingestTwitterPosts", + "description": "Ingests posts by user from Twitter/X into Graphlit knowledge base.\n Accepts Twitter/X user name, without the leading @ symbol, and an optional read limit for the number of posts to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "userName": { + "type": "string", + "description": "Twitter/X user name, without the leading @ symbol, i.e. 'graphlit'." + }, + "readLimit": { + "type": "number", + "description": "Number of posts to ingest, optional. Defaults to 100." 
+ } + }, + "required": [ + "userName" + ] + } + }, + { + "name": "ingestTwitterSearch", + "description": "Searches for recent posts from Twitter/X, and ingests them into Graphlit knowledge base.\n Accepts search query, and an optional read limit for the number of posts to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query" + }, + "readLimit": { + "type": "number", + "description": "Number of posts to ingest, optional. Defaults to 100." + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "ingestRedditPosts", + "description": "Ingests posts from Reddit subreddit into Graphlit knowledge base.\n Accepts a subreddit name and an optional read limit for the number of posts to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "subredditName": { + "type": "string", + "description": "Subreddit name." + }, + "readLimit": { + "type": "number", + "description": "Number of posts to ingest, optional. Defaults to 100." + } + }, + "required": [ + "subredditName" + ] + } + }, + { + "name": "ingestGoogleEmail", + "description": "Ingests emails from Google Email account into Graphlit knowledge base.\n Accepts an optional read limit for the number of emails to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "readLimit": { + "type": "number", + "description": "Number of emails to ingest, optional. Defaults to 100." 
+ } + } + } + }, + { + "name": "ingestMicrosoftEmail", + "description": "Ingests emails from Microsoft Email account into Graphlit knowledge base.\n Accepts an optional read limit for the number of emails to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "readLimit": { + "type": "number", + "description": "Number of emails to ingest, optional. Defaults to 100." + } + } + } + }, + { + "name": "ingestLinearIssues", + "description": "Ingests issues from Linear project into Graphlit knowledge base.\n Accepts Linear project name and an optional read limit for the number of issues to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "projectName": { + "type": "string", + "description": "Linear project name." + }, + "readLimit": { + "type": "number", + "description": "Number of issues to ingest, optional. Defaults to 100." + } + }, + "required": [ + "projectName" + ] + } + }, + { + "name": "ingestGitHubIssues", + "description": "Ingests issues from GitHub repository into Graphlit knowledge base.\n Accepts GitHub repository owner and repository name and an optional read limit for the number of issues to ingest.\n For example, for GitHub repository (https://github.com/openai/tiktoken), 'openai' is the repository owner, and 'tiktoken' is the repository name.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "repositoryName": { + "type": "string", + "description": "GitHub repository name." + }, + "repositoryOwner": { + "type": "string", + "description": "GitHub repository owner." + }, + "readLimit": { + "type": "number", + "description": "Number of issues to ingest, optional. Defaults to 100." 
+ } + }, + "required": [ + "repositoryName", + "repositoryOwner" + ] + } + }, + { + "name": "ingestJiraIssues", + "description": "Ingests issues from Atlassian Jira repository into Graphlit knowledge base.\n Accepts Atlassian Jira server URL and project name, and an optional read limit for the number of issues to ingest.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "Atlassian Jira server URL." + }, + "projectName": { + "type": "string", + "description": "Atlassian Jira project name." + }, + "readLimit": { + "type": "number", + "description": "Number of issues to ingest, optional. Defaults to 100." + } + }, + "required": [ + "url", + "projectName" + ] + } + }, + { + "name": "webCrawl", + "description": "Crawls web pages from web site into Graphlit knowledge base.\n Accepts a URL and an optional read limit for the number of pages to crawl.\n Uses sitemap.xml to discover pages to be crawled from website.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "Web site URL." + }, + "readLimit": { + "type": "number", + "description": "Number of web pages to ingest, optional. Defaults to 100." + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "webMap", + "description": "Enumerates the web pages at or beneath the provided URL using web sitemap. \n Does *not* ingest web pages into Graphlit knowledge base.\n Accepts web site URL as string.\n Returns list of mapped URIs from web site.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "Web site URL." + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "webSearch", + "description": "Performs web or podcast search based on search query. Can search for web pages or podcasts/podcast episodes. 
\n Format the search query as what would be entered into a Google search. You can use site filtering in the search query, like 'site:twitter.com'. \n Accepts search query as string, and optional search service type. \n Prefer calling this tool over using 'curl' directly for any web search.\n *Only* use Podscan search service type to search for podcasts or podcast episodes.\n Does *not* ingest pages into Graphlit knowledge base. *Does* ingest podcast episodes as transcribed audio files into Graphlit knowledge base. \n When searching for podcasts or podcast episodes, *don't* include the term 'podcast' or 'episode' in the search query - that would be redundant.\n Search service types: Tavily (web pages), Exa (web pages) and Podscan (podcast episodes). Defaults to Exa.\n Returns URL, title and relevant Markdown text from resulting web pages or podcast episode transcripts.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query." + }, + "searchService": { + "type": "string", + "enum": [ + "EXA", + "PODSCAN", + "TAVILY" + ], + "default": "EXA", + "description": "Search service type (Tavily, Exa, Podscan). Defaults to Exa." + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "ingestRSS", + "description": "Ingests posts from RSS feed into Graphlit knowledge base.\n For podcast RSS feeds, audio will be downloaded, transcribed and ingested into Graphlit knowledge base.\n Accepts RSS URL and an optional read limit for the number of posts to read.\n Executes asynchronously and returns the feed identifier.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "RSS URL." + }, + "readLimit": { + "type": "number", + "description": "Number of posts to ingest, optional. Defaults to 25." 
+ } + }, + "required": [ + "url" + ] + } + }, + { + "name": "ingestUrl", + "description": "Ingests content from URL into Graphlit knowledge base.\n Can scrape web pages, and can ingest individual Word documents, PDFs, audio recordings, videos, images, or any other unstructured data.\n Executes asynchronously and returns the content identifier.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL to ingest content from." + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "ingestText", + "description": "Ingests text as content into Graphlit knowledge base.\n Accepts a name for the content object, the text itself, and an optional text type (Plain, Markdown, Html). Defaults to Markdown text type.\n Optionally accepts an identifier for an existing content object. Will overwrite existing content, if provided.\n Can use for storing long-term textual memories or the output from LLM or other tools as content resources, which can be later searched or retrieved.\n Executes *synchronously* and returns the content identifier.", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name for the content object." + }, + "text": { + "type": "string", + "description": "Text content to ingest." + }, + "textType": { + "type": "string", + "enum": [ + "HTML", + "MARKDOWN", + "PLAIN" + ], + "default": "MARKDOWN", + "description": "Text type (Plain, Markdown, Html). Defaults to Markdown." + }, + "id": { + "type": "string", + "description": "Optional identifier for the content object. Will overwrite existing content, if provided." 
+ } + }, + "required": [ + "name", + "text" + ] + } + }, + { + "name": "ingestFile", + "description": "Ingests local file into Graphlit knowledge base.\n Accepts the path to the file in the local filesystem.\n Can use for storing *large* long-term textual memories or the output from LLM or other tools as content resources, which can be later searched or retrieved.\n Executes asynchronously and returns the content identifier.", + "inputSchema": { + "type": "object", + "properties": { + "filePath": { + "type": "string", + "description": "Path to the file in the local filesystem." + } + }, + "required": [ + "filePath" + ] + } + }, + { + "name": "screenshotPage", + "description": "Screenshots web page from URL.\n Executes *synchronously* and returns the content identifier.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "describeImageUrl", + "description": "Prompts vision LLM and returns completion. \n Does *not* ingest image into Graphlit knowledge base.\n Accepts image URL as string.\n Returns Markdown text from LLM completion.", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string" + }, + "url": { + "type": "string" + } + }, + "required": [ + "prompt", + "url" + ] + } + }, + { + "name": "describeImageContent", + "description": "Prompts vision LLM and returns description of image content. \n Accepts content identifier as string, and optional prompt for image description.\n Returns Markdown text from LLM completion.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "prompt": { + "type": "string" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "publishAudio", + "description": "Publishes text as audio format, and ingests into Graphlit knowledge base.\n Accepts a name for the content object, the text itself, and an optional text type (Plain, Markdown, Html). 
Defaults to Markdown text type.\n Optionally accepts an ElevenLabs voice identifier.\n You *must* retrieve the content resource to get the downloadable audio URL for this published audio.\n Executes *synchronously* and returns the content identifier.", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "text": { + "type": "string" + }, + "textType": { + "type": "string", + "enum": [ + "HTML", + "MARKDOWN", + "PLAIN" + ], + "default": "MARKDOWN" + }, + "voice": { + "type": "string", + "default": "HqW11As4VRPkApNPkAZp" + } + }, + "required": [ + "name", + "text" + ] + } + }, + { + "name": "sendWebHookNotification", + "description": "Sends a webhook notification to the provided URL.\n Accepts the webhook URL.\n Also accepts the text to be sent with the webhook, and an optional text type (Plain, Markdown, Html). Defaults to Markdown text type.\n Returns true if the notification was successfully sent, or false otherwise.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string" + }, + "text": { + "type": "string" + }, + "textType": { + "type": "string", + "enum": [ + "HTML", + "MARKDOWN", + "PLAIN" + ], + "default": "MARKDOWN" + } + }, + "required": [ + "url", + "text" + ] + } + }, + { + "name": "sendSlackNotification", + "description": "Sends a Slack notification to the provided Slack channel.\n Accepts the Slack channel name.\n Also accepts the text for the Slack message, and an optional text type (Plain, Markdown, Html). Defaults to Markdown text type.\n Hint: In Slack Markdown, images are displayed by simply putting the URL in angle brackets like <url> instead of using the traditional Markdown image syntax ![alt text](url). 
\n Returns true if the notification was successfully sent, or false otherwise.", + "inputSchema": { + "type": "object", + "properties": { + "channelName": { + "type": "string" + }, + "text": { + "type": "string" + }, + "textType": { + "type": "string", + "enum": [ + "HTML", + "MARKDOWN", + "PLAIN" + ], + "default": "MARKDOWN" + } + }, + "required": [ + "channelName", + "text" + ] + } + }, + { + "name": "sendTwitterNotification", + "description": "Posts a tweet from the configured user account.\n Accepts the plain text for the tweet.\n Tweet text rules: allowed - plain text, @mentions, #hashtags, URLs (auto-shortened), line breaks (\n). \n Not allowed - markdown, HTML tags, rich text, or custom styles.\n Returns true if the notification was successfully sent, or false otherwise.", + "inputSchema": { + "type": "object", + "properties": { + "text": { + "type": "string" + } + }, + "required": [ + "text" + ] + } + }, + { + "name": "sendEmailNotification", + "description": "Sends an email notification to the provided email address(es).\n Accepts the email subject and a list of email 'to' addresses.\n Email addresses should be in RFC 5322 format. i.e. Alice Wonderland <alice@wonderland.net>, or alice@wonderland.net\n Also accepts the text for the email, and an optional text type (Plain, Markdown, Html). 
Defaults to Markdown text type.\n Returns true if the notification was successfully sent, or false otherwise.", + "inputSchema": { + "type": "object", + "properties": { + "subject": { + "type": "string" + }, + "to": { + "type": "array", + "items": { + "type": "string" + } + }, + "text": { + "type": "string" + }, + "textType": { + "type": "string", + "enum": [ + "HTML", + "MARKDOWN", + "PLAIN" + ], + "default": "MARKDOWN" + } + }, + "required": [ + "subject", + "to", + "text" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "mac-messages-mcp": { + "name": "mac-messages-mcp", + "display_name": "Mac Messages", + "description": "An MCP server that securely interfaces with your iMessage database via the Model Context Protocol (MCP), allowing LLMs to query and analyze iMessage conversations. It includes robust phone number validation, attachment processing, contact management, group chat handling, and full support for sending and receiving messages.", + "repository": { + "type": "git", + "url": "https://github.com/carterlasalle/mac_messages_mcp" + }, + "homepage": "https://github.com/carterlasalle/mac_messages_mcp", + "author": { + "name": "carterlasalle" + }, + "license": "MIT", + "categories": [ + "Messaging" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mac-messages-mcp" + ] + } + }, + "tags": [ + "python", + "mac", + "messages" + ], + "tools": [ + { + "name": "tool_get_recent_messages", + "description": "\n Get recent messages from the Messages app.\n \n Args:\n hours: Number of hours to look back (default: 24)\n contact: Filter by contact name, phone number, or email (optional)\n Use \"contact:N\" to select a specific contact from previous matches\n ", + "inputSchema": { + "properties": { + "hours": { + "default": 24, + "title": "Hours", + "type": "integer" + }, + "contact": { + "default": null, + "title": "Contact", + "type": "string" + } + }, + "title": "tool_get_recent_messagesArguments", 
+ "type": "object" + } + }, + { + "name": "tool_send_message", + "description": "\n Send a message using the Messages app.\n \n Args:\n recipient: Phone number, email, contact name, or \"contact:N\" to select from matches\n For example, \"contact:1\" selects the first contact from a previous search\n message: Message text to send\n group_chat: Whether to send to a group chat (uses chat ID instead of buddy)\n ", + "inputSchema": { + "properties": { + "recipient": { + "title": "Recipient", + "type": "string" + }, + "message": { + "title": "Message", + "type": "string" + }, + "group_chat": { + "default": false, + "title": "Group Chat", + "type": "boolean" + } + }, + "required": [ + "recipient", + "message" + ], + "title": "tool_send_messageArguments", + "type": "object" + } + }, + { + "name": "tool_find_contact", + "description": "\n Find a contact by name using fuzzy matching.\n \n Args:\n name: The name to search for\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + } + }, + "required": [ + "name" + ], + "title": "tool_find_contactArguments", + "type": "object" + } + }, + { + "name": "tool_check_db_access", + "description": "\n Diagnose database access issues.\n ", + "inputSchema": { + "properties": {}, + "title": "tool_check_db_accessArguments", + "type": "object" + } + }, + { + "name": "tool_check_contacts", + "description": "\n List available contacts in the address book.\n ", + "inputSchema": { + "properties": {}, + "title": "tool_check_contactsArguments", + "type": "object" + } + }, + { + "name": "tool_check_addressbook", + "description": "\n Diagnose AddressBook access issues.\n ", + "inputSchema": { + "properties": {}, + "title": "tool_check_addressbookArguments", + "type": "object" + } + }, + { + "name": "tool_get_chats", + "description": "\n List available group chats from the Messages app.\n ", + "inputSchema": { + "properties": {}, + "title": "tool_get_chatsArguments", + "type": "object" + } + } + ] + }, + 
"llamacloud": { + "name": "llamacloud", + "display_name": "LlamaCloud", + "description": "Integrate the data stored in a managed index on [LlamaCloud](https://cloud.llamaindex.ai/)", + "repository": { + "type": "git", + "url": "https://github.com/run-llama/mcp-server-llamacloud" + }, + "homepage": "https://github.com/run-llama/mcp-server-llamacloud", + "author": { + "name": "run-llama" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "LlamaCloud", + "TypeScript" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@llamaindex/mcp-server-llamacloud", + "--index", + "10k-SEC-Tesla", + "--description", + "10k SEC documents from 2023 for Tesla", + "--index", + "10k-SEC-Apple", + "--description", + "10k SEC documents from 2023 for Apple" + ], + "env": { + "LLAMA_CLOUD_PROJECT_NAME": "", + "LLAMA_CLOUD_API_KEY": "" + } + } + }, + "arguments": { + "LLAMA_CLOUD_PROJECT_NAME": { + "description": "The name of your LlamaCloud project that you want to use with the transfer tools.", + "required": true, + "example": "MyProject" + }, + "LLAMA_CLOUD_API_KEY": { + "description": "Your API key for accessing LlamaCloud services, which is necessary for authentication.", + "required": true, + "example": "1234567890abcdef" + } + }, + "tools": [ + { + "name": "get_information_10k_sec_tesla", + "description": "Get information from the 10k-SEC-Tesla index. The index contains 10k SEC documents from 2023 for Tesla", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The query used to get information from the 10k-SEC-Tesla index." + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "get_information_10k_sec_apple", + "description": "Get information from the 10k-SEC-Apple index. 
The index contains 10k SEC documents from 2023 for Apple", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The query used to get information from the 10k-SEC-Apple index." + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "mcp-server-motherduck": { + "name": "mcp-server-motherduck", + "description": "Query and analyze data with MotherDuck and local DuckDB", + "display_name": "MotherDuck MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/motherduckdb/mcp-server-motherduck" + }, + "homepage": "https://motherduck.com", + "author": { + "name": "motherduckdb" + }, + "license": "MIT", + "tags": [ + "SQL", + "DuckDB", + "MotherDuck", + "analytics", + "database" + ], + "arguments": { + "db-path": { + "description": "Path to the database to connect to (md: for MotherDuck, :memory: for in-memory, or path to local file)", + "required": true, + "example": "md:" + }, + "motherduck-token": { + "description": "MotherDuck access token for authentication", + "required": true, + "example": "" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-motherduck", + "--db-path", + "md:", + "--motherduck-token", + "${input:motherduck_token}" + ], + "recommended": true + } + }, + "examples": [ + { + "title": "Create a new database and table", + "description": "Create a new database and table in MotherDuck", + "prompt": "Create a new database and table in MotherDuck" + }, + { + "title": "Query local CSV file", + "description": "Query data from a local CSV file", + "prompt": "Query data from my local CSV file" + }, + { + "title": "Join data across sources", + "description": "Join data from local DuckDB with MotherDuck tables", + "prompt": "Join data from my local DuckDB database with a table in MotherDuck" + }, + { + "title": "Analyze S3 data", + "description": "Analyze data stored in Amazon S3", + "prompt": "Analyze data stored in Amazon S3" + } + ], 
+ "categories": [ + "Databases" + ], + "is_official": true + }, + "replicate": { + "name": "replicate", + "display_name": "Replicate", + "description": "Search, run and manage machine learning models on Replicate through a simple tool-based interface. Browse models, create predictions, track their status, and handle generated images.", + "repository": { + "type": "git", + "url": "https://github.com/deepfates/mcp-replicate" + }, + "homepage": "https://github.com/deepfates/mcp-replicate", + "author": { + "name": "deepfates" + }, + "license": "MIT", + "categories": [ + "AI Systems" + ], + "tags": [ + "Replicate", + "API" + ], + "examples": [ + { + "title": "Run a model prediction", + "description": "Creates a prediction using a specified model and input parameters.", + "prompt": "create_prediction(model_id='model_id_here', input_params='input_params_here')" + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "mcp-replicate" + ], + "env": { + "REPLICATE_API_TOKEN": "${REPLICATE_API_TOKEN}" + } + } + }, + "arguments": { + "REPLICATE_API_TOKEN": { + "description": "Your Replicate API token to authenticate requests to the Replicate API. 
Needed for the server to function and fetch models or execute predictions.", + "required": true, + "example": "your_token_here" + } + }, + "tools": [ + { + "name": "search_models", + "description": "Search for models using semantic search", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "list_models", + "description": "List available models with optional filtering", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Filter by model owner" + }, + "cursor": { + "type": "string", + "description": "Pagination cursor" + } + } + } + }, + { + "name": "list_collections", + "description": "List available model collections", + "inputSchema": { + "type": "object", + "properties": { + "cursor": { + "type": "string", + "description": "Pagination cursor" + } + } + } + }, + { + "name": "get_collection", + "description": "Get details of a specific collection", + "inputSchema": { + "type": "object", + "properties": { + "slug": { + "type": "string", + "description": "Collection slug" + } + }, + "required": [ + "slug" + ] + } + }, + { + "name": "create_prediction", + "description": "Create a new prediction using either a model version (for community models) or model name (for official models)", + "inputSchema": { + "type": "object", + "properties": { + "version": { + "type": "string", + "description": "Model version ID to use (for community models)" + }, + "model": { + "type": "string", + "description": "Model name to use (for official models)" + }, + "input": { + "type": "object", + "description": "Input parameters for the model", + "additionalProperties": true + }, + "webhook_url": { + "type": "string", + "description": "Optional webhook URL for notifications" + } + }, + "oneOf": [ + { + "required": [ + "version", + "input" + ] + }, + { + "required": [ + "model", + "input" + ] + } + ] + } + 
}, + { + "name": "cancel_prediction", + "description": "Cancel a running prediction", + "inputSchema": { + "type": "object", + "properties": { + "prediction_id": { + "type": "string", + "description": "ID of the prediction to cancel" + } + }, + "required": [ + "prediction_id" + ] + } + }, + { + "name": "get_prediction", + "description": "Get details about a specific prediction", + "inputSchema": { + "type": "object", + "properties": { + "prediction_id": { + "type": "string", + "description": "ID of the prediction to get details for" + } + }, + "required": [ + "prediction_id" + ] + } + }, + { + "name": "list_predictions", + "description": "List recent predictions", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "number", + "description": "Maximum number of predictions to return", + "default": 10 + }, + "cursor": { + "type": "string", + "description": "Cursor for pagination" + } + } + } + }, + { + "name": "get_model", + "description": "Get details of a specific model including available versions", + "inputSchema": { + "type": "object", + "properties": { + "owner": { + "type": "string", + "description": "Model owner" + }, + "name": { + "type": "string", + "description": "Model name" + } + }, + "required": [ + "owner", + "name" + ] + } + }, + { + "name": "view_image", + "description": "Display an image in the system's default web browser", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL of the image to display" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "clear_image_cache", + "description": "Clear the image viewer cache", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get_image_cache_stats", + "description": "Get statistics about the image cache", + "inputSchema": { + "type": "object", + "properties": {} + } + } + ] + }, + "metoro-mcp-server": { + "display_name": "Metoro MCP Server", + "repository": { + "type": "git", + 
"url": "https://github.com/metoro-io/metoro-mcp-server" + }, + "homepage": "https://metoro.io/", + "author": { + "name": "metoro-io" + }, + "license": "MIT", + "tags": [ + "kubernetes", + "observability", + "eBPF", + "microservices" + ], + "arguments": { + "METORO_AUTH_TOKEN": { + "description": "Authentication token for Metoro API access", + "required": true, + "example": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjdXN0b21lcklkIjoiOThlZDU1M2QtYzY4ZC00MDRhLWFhZjItNDM2ODllNWJiMGUzIiwiZW1haWwiOiJ0ZXN0QGNocmlzYmF0dGFyYmVlLmNvbSIsImV4cCI6MTgyMTI0NzIzN30.7G6alDpcZh_OThYj293Jce5rjeOBqAhOlANR_Fl5auw" + }, + "METORO_API_URL": { + "description": "URL for the Metoro API", + "required": true, + "example": "https://us-east.metoro.io" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "metoro-mcp-server", + "args": [], + "description": "Run the Metoro MCP server executable built from Go", + "env": { + "METORO_AUTH_TOKEN": "", + "METORO_API_URL": "https://us-east.metoro.io" + } + } + }, + "examples": [ + { + "title": "Kubernetes Cluster Interaction", + "description": "Ask questions about your Kubernetes cluster through Claude Desktop App", + "prompt": "What services are running in my Kubernetes cluster?" 
+ } + ], + "name": "metoro-mcp-server", + "description": "This MCP Server allows you to interact with your Kubernetes cluster via the Claude Desktop App!", + "categories": [ + "MCP Tools" + ], + "is_official": true + }, + "brave-search": { + "name": "brave-search", + "display_name": "Brave Search", + "description": "Web and local search using Brave's Search API", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/brave-search", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "brave", + "search", + "web", + "local" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-brave-search" + ], + "env": { + "BRAVE_API_KEY": "${BRAVE_API_KEY}" + } + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "BRAVE_API_KEY", + "mcp/brave-search" + ], + "env": { + "BRAVE_API_KEY": "${BRAVE_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Web Search Example", + "description": "Execute a web search with pagination and filtering.", + "prompt": "brave_web_search(query=\"example search\", count=10, offset=0)" + }, + { + "title": "Local Search Example", + "description": "Search for local businesses and services.", + "prompt": "brave_local_search(query=\"restaurants near me\", count=5)" + } + ], + "arguments": { + "BRAVE_API_KEY": { + "description": "The API key required to authenticate requests to the Brave Search API.", + "required": true, + "example": "YOUR_API_KEY_HERE" + } + }, + "tools": [ + { + "name": "brave_web_search", + "description": "Performs a web search using the Brave Search API, ideal for general queries, news, articles, and online content. Use this for broad information gathering, recent events, or when you need diverse web sources. 
Supports pagination, content filtering, and freshness controls. Maximum 20 results per request, with offset for pagination. ", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query (max 400 chars, 50 words)" + }, + "count": { + "type": "number", + "description": "Number of results (1-20, default 10)", + "default": 10 + }, + "offset": { + "type": "number", + "description": "Pagination offset (max 9, default 0)", + "default": 0 + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "brave_local_search", + "description": "Searches for local businesses and places using Brave's Local Search API. Best for queries related to physical locations, businesses, restaurants, services, etc. Returns detailed information including:\n- Business names and addresses\n- Ratings and review counts\n- Phone numbers and opening hours\nUse this when the query implies 'near me' or mentions specific locations. Automatically falls back to web search if no local results are found.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Local search query (e.g. 
'pizza near Central Park')" + }, + "count": { + "type": "number", + "description": "Number of results (1-20, default 5)", + "default": 5 + } + }, + "required": [ + "query" + ] + } + } + ], + "is_official": true + }, + "naver": { + "name": "naver", + "display_name": "Naver", + "description": "This MCP server provides tools to interact with various Naver services, such as searching blogs, news, books, and more.", + "repository": { + "type": "git", + "url": "https://github.com/pfldy2850/py-mcp-naver" + }, + "homepage": "https://github.com/pfldy2850/py-mcp-naver", + "author": { + "name": "pfldy2850" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "Naver", + "API", + "OpenAPI", + "Search" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/pfldy2850/py-mcp-naver.git", + "src/server.py" + ], + "env": { + "NAVER_CLIENT_ID": "${NAVER_CLIENT_ID}", + "NAVER_CLIENT_SECRET": "${NAVER_CLIENT_SECRET}" + } + } + }, + "examples": [ + { + "title": "Search Blog Posts", + "description": "Search blog posts on Naver using a query.", + "prompt": "search_blog('your query here')" + }, + { + "title": "Search News Articles", + "description": "Search news articles on Naver using a query.", + "prompt": "search_news('your query here')" + }, + { + "title": "Search Books", + "description": "Search books on Naver using a query.", + "prompt": "search_book('your query here')" + } + ], + "arguments": { + "NAVER_CLIENT_ID": { + "description": "The Client ID for accessing the Naver Open API, obtained from the Naver developer portal.", + "required": true, + "example": "your_naver_client_id" + }, + "NAVER_CLIENT_SECRET": { + "description": "The Client Secret for accessing the Naver Open API, obtained from the Naver developer portal.", + "required": true, + "example": "your_naver_client_secret" + } + }, + "tools": [ + { + "name": "search_blog", + "description": "Search blog posts on Naver.", + 
"inputSchema": { + "query": { + "type": "string", + "description": "Search query for blog posts" + }, + "display": { + "type": "integer", + "description": "Number of results to display (default: 10)" + }, + "start": { + "type": "integer", + "description": "Starting index for pagination (default: 1)" + }, + "sort": { + "type": "string", + "description": "Sorting method (default: 'sim')" + } + }, + "required": [ + "query" + ] + }, + { + "name": "search_news", + "description": "Search news articles on Naver.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for news articles" + }, + "display": { + "type": "integer", + "description": "Number of results to display (default: 10)" + }, + "start": { + "type": "integer", + "description": "Starting index for pagination (default: 1)" + }, + "sort": { + "type": "string", + "description": "Sorting method (default: 'sim')" + } + }, + "required": [ + "query" + ] + }, + { + "name": "search_book", + "description": "Search books on Naver.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for books" + }, + "display": { + "type": "integer", + "description": "Number of results to display (default: 10)" + }, + "start": { + "type": "integer", + "description": "Starting index for pagination (default: 1)" + }, + "sort": { + "type": "string", + "description": "Sorting method (default: 'sim')" + } + }, + "required": [ + "query" + ] + }, + { + "name": "get_book_adv", + "description": "Get detailed book information using title or ISBN.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for book title or ISBN (optional)" + }, + "d_titl": { + "type": "string", + "description": "Book title (optional)" + }, + "d_isbn": { + "type": "string", + "description": "Book ISBN (optional)" + } + }, + "required": [] + }, + { + "name": "adult_check", + "description": "Check if a search term is adult content.", + "inputSchema": { + "query": { + "type": 
"string", + "description": "Search term to check for adult content" + } + }, + "required": [ + "query" + ] + }, + { + "name": "search_encyc", + "description": "Search encyclopedia entries on Naver.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for encyclopedia entries" + }, + "display": { + "type": "integer", + "description": "Number of results to display (default: 10)" + }, + "start": { + "type": "integer", + "description": "Starting index for pagination (default: 1)" + } + }, + "required": [ + "query" + ] + }, + { + "name": "search_cafe_article", + "description": "Search articles in Naver cafes.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for cafe articles" + }, + "display": { + "type": "integer", + "description": "Number of results to display (default: 10)" + }, + "start": { + "type": "integer", + "description": "Starting index for pagination (default: 1)" + }, + "sort": { + "type": "string", + "description": "Sorting method (default: 'sim')" + } + }, + "required": [ + "query" + ] + }, + { + "name": "search_kin", + "description": "Search questions and answers on Naver.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for Q&A" + }, + "display": { + "type": "integer", + "description": "Number of results to display (default: 10)" + }, + "start": { + "type": "integer", + "description": "Starting index for pagination (default: 1)" + }, + "sort": { + "type": "string", + "description": "Sorting method (default: 'sim')" + } + }, + "required": [ + "query" + ] + }, + { + "name": "search_local", + "description": "Search local information on Naver.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for local information" + }, + "display": { + "type": "integer", + "description": "Number of results to display (default: 10)" + }, + "start": { + "type": "integer", + "description": "Starting index for pagination (default: 1)" + }, 
+ "sort": { + "type": "string", + "description": "Sorting method (default: 'random')" + } + }, + "required": [ + "query" + ] + }, + { + "name": "fix_spelling", + "description": "Correct spelling errors in a given text.", + "inputSchema": { + "query": { + "type": "string", + "description": "Text to correct spelling errors" + } + }, + "required": [ + "query" + ] + }, + { + "name": "search_webkr", + "description": "Search web pages on Naver.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for web pages" + }, + "display": { + "type": "integer", + "description": "Number of results to display (default: 10)" + }, + "start": { + "type": "integer", + "description": "Starting index for pagination (default: 1)" + } + }, + "required": [ + "query" + ] + }, + { + "name": "search_image", + "description": "Search images on Naver with filters.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for images" + }, + "display": { + "type": "integer", + "description": "Number of results to display (default: 10)" + }, + "start": { + "type": "integer", + "description": "Starting index for pagination (default: 1)" + }, + "sort": { + "type": "string", + "description": "Sorting method (default: 'sim')" + }, + "filter": { + "type": "string", + "description": "Filter for image search (default: 'all')" + } + }, + "required": [ + "query" + ] + }, + { + "name": "search_shop", + "description": "Search shopping items on Naver with filters.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for shopping items" + }, + "display": { + "type": "integer", + "description": "Number of results to display (default: 10)" + }, + "start": { + "type": "integer", + "description": "Starting index for pagination (default: 1)" + }, + "sort": { + "type": "string", + "description": "Sorting method (default: 'sim')" + }, + "filter": { + "type": "string", + "description": "Filter for shopping search (optional)" + }, + 
"exclude": { + "type": "string", + "description": "Exclude filter for shopping search (optional)" + } + }, + "required": [ + "query" + ] + }, + { + "name": "search_doc", + "description": "Search documents on Naver.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query for documents" + }, + "display": { + "type": "integer", + "description": "Number of results to display (default: 10)" + }, + "start": { + "type": "integer", + "description": "Starting index for pagination (default: 1)" + } + }, + "required": [ + "query" + ] + } + ] + }, + "forevervm": { + "display_name": "ForeverVM MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/jamsocket/forevervm" + }, + "homepage": "https://forevervm.com/docs/guides/forevervm-mcp-server/", + "author": { + "name": "jamsocket" + }, + "license": "MIT", + "tags": [ + "python", + "repl", + "claude" + ], + "arguments": { + "client": { + "description": "Client to use", + "required": true, + "example": "claude" + } + }, + "installations": { + "cli": { + "type": "cli", + "command": "npx", + "args": [ + "forevervm-mcp", + "install", + "--client", + "${client}" + ] + } + }, + "examples": [ + { + "title": "Create a Python REPL", + "description": "Create a new Python REPL environment", + "prompt": "create-python-repl" + }, + { + "title": "Run Python code", + "description": "Execute Python code in an existing REPL", + "prompt": "run-python-in-repl" + } + ], + "name": "forevervm", + "description": "MCP server for ForeverVM: create Python REPLs and execute Python code in them.", + "categories": [ + "System Tools" + ], + "is_official": true + }, + "kibela": { + "name": "kibela", + "display_name": "Kibela", + "description": "Interact with Kibela API.", + "repository": { + "type": "git", + "url": "https://github.com/kiwamizamurai/mcp-kibela-server" + }, + "homepage": "https://github.com/kiwamizamurai/mcp-kibela-server", + "author": { + "name": "kiwamizamurai" + }, + "license": "MIT", +
"categories": [ + "Knowledge Base" + ], + "tags": [ + "Kibela", + "Integration" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/kiwamizamurai/mcp-kibela-server" + ], + "env": { + "KIBELA_TEAM": "${KIBELA_TEAM}", + "KIBELA_TOKEN": "${KIBELA_TOKEN}" + } + } + }, + "examples": [ + { + "title": "Search Kibela notes", + "description": "Search through your Kibela notes using a query.", + "prompt": "kibela_search_notes(\"my search query\")" + }, + { + "title": "Get latest notes", + "description": "Retrieve your latest notes from Kibela.", + "prompt": "kibela_get_my_notes()" + }, + { + "title": "Get note content", + "description": "Fetch content of a specific note by ID.", + "prompt": "kibela_get_note_content(\"note-id\")" + } + ], + "arguments": { + "KIBELA_TEAM": { + "description": "Your Kibela team name", + "required": true, + "example": "your-team" + }, + "KIBELA_TOKEN": { + "description": "Your Kibela API token", + "required": true, + "example": "your-token" + } + }, + "tools": [ + { + "name": "kibela_search_notes", + "description": "Search Kibela notes with given query", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "kibela_get_my_notes", + "description": "Get your latest notes from Kibela", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "number", + "description": "Number of notes to fetch (max 50)", + "default": 15 + } + } + } + }, + { + "name": "kibela_get_note_content", + "description": "Get content and comments of a specific note", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Note ID" + } + }, + "required": [ + "id" + ] + } + } + ] + }, + "whale-tracker-mcp": { + "name": "whale-tracker-mcp", + "display_name": "Whale Tracker", + "description": "An MCP server for tracking
cryptocurrency whale transactions.", + "repository": { + "type": "git", + "url": "https://github.com/kukapay/whale-tracker-mcp" + }, + "homepage": "https://github.com/kukapay/whale-tracker-mcp", + "author": { + "name": "kukapay" + }, + "license": "MIT", + "categories": [ + "Finance" + ], + "tags": [ + "whale tracker", + "cryptocurrency", + "API" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/kukapay/whale-tracker-mcp", + "whale-tracker-mcp" + ], + "env": { + "WHALE_TRACKER_API_KEY": "your_api_key_here" + } + } + }, + "examples": [ + { + "title": "Fetch Recent Transactions", + "description": "What are the latest whale transactions on Ethereum with a minimum value of $1,000,000?", + "prompt": "What are the latest whale transactions on Ethereum with a minimum value of $1,000,000?" + }, + { + "title": "Get Transaction Details", + "description": "Tell me about transaction ID 123456789.", + "prompt": "Tell me about transaction ID 123456789." + }, + { + "title": "Analyze Whale Activity", + "description": "Analyze recent whale transactions on Bitcoin.", + "prompt": "Analyze recent whale transactions on Bitcoin." + } + ], + "arguments": { + "WHALE_TRACKER_API_KEY": { + "description": "Environment variable to load the Whale Alert API key for the server.", + "required": true, + "example": "your_api_key_here" + } + } + }, + "flightradar24": { + "name": "flightradar24", + "display_name": "Flightradar24", + "description": "A Claude Desktop MCP server that helps you track flights in real-time using Flightradar24 data.", + "repository": { + "type": "git", + "url": "https://github.com/sunsetcoder/flightradar24-mcp-server" + }, + "author": { + "name": "sunsetcoder" + }, + "license": "MIT", + "examples": [ + { + "title": "Check Flight Status", + "description": "Ask for the status of a specific flight.", + "prompt": "What's the status of flight UA123?" 
+ }, + { + "title": "Show Current Flights at Airport", + "description": "Request to see all flights currently at an airport.", + "prompt": "Show me all flights currently at SFO" + }, + { + "title": "Emergency Flights Query", + "description": "Ask if there are emergency flights in the area.", + "prompt": "Are there any emergency flights in the area?" + }, + { + "title": "International Flights Arrival", + "description": "Request information on international flights arriving within a timeframe.", + "prompt": "Show me all international flights arriving at SFO in the next 2 hours" + } + ], + "categories": [ + "Web Services" + ], + "tags": [ + "Flightradar24", + "Flight Tracking" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/sunsetcoder/flightradar24-mcp-server" + ], + "env": { + "FR24_API_KEY": "${FR24_API_KEY}", + "FR24_API_URL": "${FR24_API_URL}" + } + } + }, + "arguments": { + "FR24_API_KEY": { + "description": "Flightradar24 API key required for accessing flight data from the Flightradar24 API.", + "required": true, + "example": "your_actual_api_key_here" + }, + "FR24_API_URL": { + "description": "The base URL for calling the Flightradar24 API for fetching real-time flight data.", + "required": false, + "example": "https://fr24api.flightradar24.com" + } + } + }, + "fantasy-pl": { + "name": "fantasy-pl", + "display_name": "Fantasy Premier League", + "description": "Give your coding agent direct access to up-to date Fantasy Premier League data", + "repository": { + "type": "git", + "url": "https://github.com/rishijatia/fantasy-pl-mcp" + }, + "homepage": "https://github.com/rishijatia/fantasy-pl-mcp", + "author": { + "name": "rishijatia" + }, + "license": "MIT", + "categories": [ + "Analytics" + ], + "tags": [ + "FPL", + "fantasy", + "football" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "fpl-mcp" + ] + } + }, + "examples": [ + { + "title": "Compare 
Players", + "description": "This example shows how to compare the statistics of two players.", + "prompt": "Compare Mohamed Salah and Erling Haaland over the last 5 gameweeks." + }, + { + "title": "Find Players", + "description": "This example demonstrates how to find players of a specific team.", + "prompt": "Find all Arsenal midfielders." + }, + { + "title": "Current Gameweek Status", + "description": "This example prompts for the current gameweek status.", + "prompt": "What's the current gameweek status?" + }, + { + "title": "Top Forwards", + "description": "This example retrieves the top 5 forwards by points.", + "prompt": "Show me the top 5 forwards by points." + } + ] + }, + "claudepost": { + "name": "claudepost", + "display_name": "Claude Post Email Management", + "description": "ClaudePost enables seamless email management for Gmail, offering secure features like email search, reading, and sending.", + "repository": { + "type": "git", + "url": "https://github.com/ZilongXue/claude-post" + }, + "homepage": "https://github.com/ZilongXue/claude-post", + "author": { + "name": "Zilong Xue" + }, + "license": "MIT", + "categories": [ + "Messaging" + ], + "tags": [ + "Email Management", + "Natural Language Processing" + ], + "examples": [ + { + "title": "Search Emails", + "description": "Search for emails using natural language commands.", + "prompt": "Show me emails from last week." + }, + { + "title": "Read Email Content", + "description": "Request to read specific email content.", + "prompt": "Show me the content of email #12345." + }, + { + "title": "Send Emails", + "description": "Send emails using voice commands.", + "prompt": "I want to send an email to john@example.com." 
+ } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/ZilongXue/claude-post", + "email-client" + ] + } + }, + "tools": [ + { + "name": "search-emails", + "description": "Search emails within a date range and/or with specific keywords", + "inputSchema": { + "type": "object", + "properties": { + "start_date": { + "type": "string", + "description": "Start date in YYYY-MM-DD format (optional)" + }, + "end_date": { + "type": "string", + "description": "End date in YYYY-MM-DD format (optional)" + }, + "keyword": { + "type": "string", + "description": "Keyword to search in email subject and body (optional)" + }, + "folder": { + "type": "string", + "description": "Folder to search in ('inbox' or 'sent', defaults to 'inbox')", + "enum": [ + "inbox", + "sent" + ] + } + } + } + }, + { + "name": "get-email-content", + "description": "Get the full content of a specific email by its ID", + "inputSchema": { + "type": "object", + "properties": { + "email_id": { + "type": "string", + "description": "The ID of the email to retrieve" + } + }, + "required": [ + "email_id" + ] + } + }, + { + "name": "count-daily-emails", + "description": "Count emails received for each day in a date range", + "inputSchema": { + "type": "object", + "properties": { + "start_date": { + "type": "string", + "description": "Start date in YYYY-MM-DD format" + }, + "end_date": { + "type": "string", + "description": "End date in YYYY-MM-DD format" + } + }, + "required": [ + "start_date", + "end_date" + ] + } + }, + { + "name": "send-email", + "description": "CONFIRMATION STEP: Actually send the email after user confirms the details. Before calling this, first show the email details to the user for confirmation. Required fields: recipients (to), subject, and content. 
Optional: CC recipients.", + "inputSchema": { + "type": "object", + "properties": { + "to": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of recipient email addresses (confirmed)" + }, + "subject": { + "type": "string", + "description": "Confirmed email subject" + }, + "content": { + "type": "string", + "description": "Confirmed email content" + }, + "cc": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of CC recipient email addresses (optional, confirmed)" + } + }, + "required": [ + "to", + "subject", + "content" + ] + } + } + ] + }, + "quickchart": { + "name": "quickchart", + "display_name": "Quickchart", + "description": "A Model Context Protocol server for generating charts using QuickChart.io", + "repository": { + "type": "git", + "url": "https://github.com/GongRzhe/Quickchart-MCP-Server" + }, + "homepage": "https://github.com/GongRzhe/Quickchart-MCP-Server", + "author": { + "name": "GongRzhe" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "quickchart", + "chart generation", + "data visualization" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@gongrzhe/quickchart-mcp-server" + ] + } + }, + "examples": [ + { + "title": "Basic bar chart", + "description": "Generate a bar chart using Chart.js configuration.", + "prompt": "{\"type\":\"bar\",\"data\":{\"labels\":[\"January\",\"February\",\"March\"],\"datasets\":[{\"label\":\"Sales\",\"data\":[65,59,80],\"backgroundColor\":\"rgb(75,192,192)\"}]},\"options\":{\"title\":{\"display\":true,\"text\":\"Monthly Sales\"}}}" + } + ], + "arguments": { + "client": { + "description": "Specifies the client type for which the QuickChart Server is installed. 
In this case, it's for Claude.", + "required": true, + "example": "claude" + } + }, + "tools": [ + { + "name": "generate_chart", + "description": "Generate a chart using QuickChart", + "inputSchema": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "Chart type (bar, line, pie, doughnut, radar, polarArea, scatter, bubble, radialGauge, speedometer)" + }, + "labels": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Labels for data points" + }, + "datasets": { + "type": "array", + "items": { + "type": "object", + "properties": { + "label": { + "type": "string" + }, + "data": { + "type": "array" + }, + "backgroundColor": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ] + }, + "borderColor": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ] + }, + "additionalConfig": { + "type": "object" + } + }, + "required": [ + "data" + ] + } + }, + "title": { + "type": "string" + }, + "options": { + "type": "object" + } + }, + "required": [ + "type", + "datasets" + ] + } + }, + { + "name": "download_chart", + "description": "Download a chart image to a local file", + "inputSchema": { + "type": "object", + "properties": { + "config": { + "type": "object", + "description": "Chart configuration object" + }, + "outputPath": { + "type": "string", + "description": "Path where the chart image should be saved" + } + }, + "required": [ + "config", + "outputPath" + ] + } + } + ] + }, + "mcp-grafana": { + "display_name": "Grafana MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/grafana/mcp-grafana" + }, + "license": "Apache License, Version 2.0", + "homepage": "https://github.com/grafana/mcp-grafana", + "author": { + "name": "grafana" + }, + "tags": [ + "grafana", + "mcp", + "model context protocol" + ], + "arguments": { + "GRAFANA_URL": { + "description": "URL of your Grafana 
instance", + "required": true, + "example": "http://localhost:3000" + }, + "GRAFANA_API_KEY": { + "description": "Service account token for Grafana authentication", + "required": true, + "example": "" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "go", + "args": [ + "install", + "github.com/grafana/mcp-grafana/cmd/mcp-grafana@latest" + ], + "env": { + "GOBIN": "$HOME/go/bin" + }, + "description": "Install from source using Go", + "recommended": false + } + }, + "examples": [ + { + "title": "Search for dashboards", + "description": "Search for dashboards in your Grafana instance", + "prompt": "Find dashboards related to Kubernetes in my Grafana instance" + }, + { + "title": "Query Prometheus metrics", + "description": "Execute a Prometheus query against a datasource", + "prompt": "Show me the CPU usage for the last hour from my Prometheus datasource" + }, + { + "title": "Check current on-call users", + "description": "Find out who is currently on-call", + "prompt": "Who is currently on-call according to Grafana OnCall?" 
+ } + ], + "name": "mcp-grafana", + "description": "A Model Context Protocol (MCP) server for Grafana.", + "categories": [ + "Analytics" + ], + "is_official": true + }, + "puppeteer": { + "name": "puppeteer", + "display_name": "Puppeteer Browser Automation", + "description": "Browser automation and web scraping", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/puppeteer", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "puppeteer", + "automation", + "javascript", + "screenshots", + "web" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-puppeteer" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "--init", + "-e", + "DOCKER_CONTAINER=true", + "mcp/puppeteer" + ] + } + }, + "tools": [ + { + "name": "puppeteer_navigate", + "description": "Navigate to a URL", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string" + } + }, + "required": [ + "url" + ] + } + }, + { + "name": "puppeteer_screenshot", + "description": "Take a screenshot of the current page or a specific element", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name for the screenshot" + }, + "selector": { + "type": "string", + "description": "CSS selector for element to screenshot" + }, + "width": { + "type": "number", + "description": "Width in pixels (default: 800)" + }, + "height": { + "type": "number", + "description": "Height in pixels (default: 600)" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "puppeteer_click", + "description": "Click an element on the page", + "inputSchema": { + "type": "object", + "properties": { + "selector": { + "type": "string", +
"description": "CSS selector for element to click" + } + }, + "required": [ + "selector" + ] + } + }, + { + "name": "puppeteer_fill", + "description": "Fill out an input field", + "inputSchema": { + "type": "object", + "properties": { + "selector": { + "type": "string", + "description": "CSS selector for input field" + }, + "value": { + "type": "string", + "description": "Value to fill" + } + }, + "required": [ + "selector", + "value" + ] + } + }, + { + "name": "puppeteer_select", + "description": "Select an element on the page with Select tag", + "inputSchema": { + "type": "object", + "properties": { + "selector": { + "type": "string", + "description": "CSS selector for element to select" + }, + "value": { + "type": "string", + "description": "Value to select" + } + }, + "required": [ + "selector", + "value" + ] + } + }, + { + "name": "puppeteer_hover", + "description": "Hover an element on the page", + "inputSchema": { + "type": "object", + "properties": { + "selector": { + "type": "string", + "description": "CSS selector for element to hover" + } + }, + "required": [ + "selector" + ] + } + }, + { + "name": "puppeteer_evaluate", + "description": "Execute JavaScript in the browser console", + "inputSchema": { + "type": "object", + "properties": { + "script": { + "type": "string", + "description": "JavaScript code to execute" + } + }, + "required": [ + "script" + ] + } + } + ], + "is_official": true + }, + "sqlite": { + "name": "sqlite", + "display_name": "SQLite", + "description": "Database interaction and business intelligence capabilities", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/sqlite", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "sqlite", + "database", + "business insights" + ], + "installations": { + "docker": { + "type": "docker", + "command": 
"docker", + "args": [ + "run", + "--rm", + "-i", + "-v", + "mcp-test:/mcp", + "mcp/sqlite", + "--db-path", + "/mcp/test.db" + ] + } + }, + "examples": [ + { + "title": "Interactive SQL Analysis", + "description": "Guides users through database operations and insights generation.", + "prompt": "mcp-demo -topic [business_domain]" + } + ], + "tools": [ + { + "name": "read_query", + "description": "Execute a SELECT query on the SQLite database", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "SELECT SQL query to execute" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "write_query", + "description": "Execute an INSERT, UPDATE, or DELETE query on the SQLite database", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "SQL query to execute" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "create_table", + "description": "Create a new table in the SQLite database", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "CREATE TABLE SQL statement" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "list_tables", + "description": "List all tables in the SQLite database", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "describe_table", + "description": "Get the schema information for a specific table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the table to describe" + } + }, + "required": [ + "table_name" + ] + } + }, + { + "name": "append_insight", + "description": "Add a business insight to the memo", + "inputSchema": { + "type": "object", + "properties": { + "insight": { + "type": "string", + "description": "Business insight discovered from data analysis" + } + }, + "required": [ + "insight" + ] + } + } + ], + "is_official": true + }, + "dbhub": { + 
"name": "dbhub", + "display_name": "DBHub - Universal Database Gateway", + "description": "Universal database MCP server connecting to MySQL, PostgreSQL, SQLite, DuckDB and etc.", + "repository": { + "type": "git", + "url": "https://github.com/bytebase/dbhub" + }, + "homepage": "https://github.com/bytebase/dbhub/", + "author": { + "name": "bytebase" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "Database Gateway", + "PostgreSQL", + "MySQL", + "SQL Server", + "SQLite" + ], + "installations": { + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "bytebase/dbhub", + "--transport", + "stdio", + "--dsn", + "${DATABASE_URL}" + ] + }, + "npx": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@bytebase/dbhub", + "--transport", + "stdio", + "--dsn", + "${DATABASE_URL}" + ] + } + }, + "arguments": { + "DATABASE_URL": { + "description": "The database connection string which includes the user, password, host, port, and database name.", + "required": true, + "example": "postgres://user:password@localhost:5432/dbname?sslmode=disable" + } + }, + "tools": [ + { + "name": "list_connectors", + "description": "Lists all available database connectors and their sample DSNs. 
Indicates which connector is active based on the current DSN.", + "inputSchema": {}, + "required": [] + }, + { + "name": "run_query", + "description": "Executes a SQL query and returns the results.", + "inputSchema": { + "query": { + "type": "string", + "description": "SQL query to execute" + } + }, + "required": [ + "query" + ] + } + ] + }, + "obsidian-mcp": { + "name": "obsidian-mcp", + "display_name": "Obsidian", + "description": "(by Steven Stavrakis) An MCP server for Obsidian.md with tools for searching, reading, writing, and organizing notes.", + "repository": { + "type": "git", + "url": "https://github.com/StevenStavrakis/obsidian-mcp" + }, + "homepage": "https://github.com/StevenStavrakis/obsidian-mcp", + "author": { + "name": "StevenStavrakis" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "Obsidian", + "AI", + "Notes", + "Productivity" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "obsidian-mcp", + "${OBSIDIAN_VAULT_PATH}", + "${OBSIDIAN_VAULT_PATH2}" + ] + } + }, + "arguments": { + "OBSIDIAN_VAULT_PATH": { + "description": "Path to your Obsidian vault", + "required": true + }, + "OBSIDIAN_VAULT_PATH2": { + "description": "Path to your second Obsidian vault", + "required": false + } + }, + "examples": [ + { + "title": "Read a note", + "description": "Read the contents of a note.", + "prompt": "read-note('note-id')" + }, + { + "title": "Create a new note", + "description": "Create a new note in the vault.", + "prompt": "create-note('note-name', 'note-content')" + } + ], + "tools": [ + { + "name": "create-note", + "description": "Create a new note in the specified vault with markdown content.\n\nExamples:\n- Root note: { \"vault\": \"vault1\", \"filename\": \"note.md\" }\n- Subfolder note: { \"vault\": \"vault2\", \"filename\": \"note.md\", \"folder\": \"journal/2024\" }\n- INCORRECT: { \"filename\": \"journal/2024/note.md\" } (don't put path in filename)", + "inputSchema": 
{ + "type": "object", + "properties": { + "vault": { + "type": "string", + "minLength": 1, + "description": "Name of the vault to create the note in" + }, + "filename": { + "type": "string", + "minLength": 1, + "description": "Just the note name without any path separators (e.g. 'my-note.md', NOT 'folder/my-note.md'). Will add .md extension if missing" + }, + "content": { + "type": "string", + "minLength": 1, + "description": "Content of the note in markdown format" + }, + "folder": { + "type": "string", + "description": "Optional subfolder path relative to vault root (e.g. 'journal/subfolder'). Use this for the path instead of including it in filename" + } + }, + "required": [ + "vault", + "filename", + "content" + ] + } + }, + { + "name": "list-available-vaults", + "description": "Lists all available vaults that can be used with other tools", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "edit-note", + "description": "Edit an existing note in the specified vault.\n\n There is a limited and discrete list of supported operations:\n - append: Appends content to the end of the note\n - prepend: Prepends content to the beginning of the note\n - replace: Replaces the entire content of the note\n\nExamples:\n- Root note: { \"vault\": \"vault1\", \"filename\": \"note.md\", \"operation\": \"append\", \"content\": \"new content\" }\n- Subfolder note: { \"vault\": \"vault2\", \"filename\": \"note.md\", \"folder\": \"journal/2024\", \"operation\": \"append\", \"content\": \"new content\" }\n- INCORRECT: { \"filename\": \"journal/2024/note.md\" } (don't put path in filename)", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "search-vault", + "description": "Search for specific content within vault notes (NOT for listing available vaults - use the list-vaults prompt for that).\n\nThis tool searches through note contents and filenames for specific text or tags:\n- Content 
search: { \"vault\": \"vault1\", \"query\": \"hello world\", \"searchType\": \"content\" }\n- Filename search: { \"vault\": \"vault2\", \"query\": \"meeting-notes\", \"searchType\": \"filename\" }\n- Search both: { \"vault\": \"vault1\", \"query\": \"project\", \"searchType\": \"both\" }\n- Tag search: { \"vault\": \"vault2\", \"query\": \"tag:status/active\" }\n- Search in subfolder: { \"vault\": \"vault1\", \"query\": \"hello\", \"path\": \"journal/2024\" }\n\nNote: To get a list of available vaults, use the list-vaults prompt instead of this search tool.", + "inputSchema": { + "type": "object", + "properties": { + "vault": { + "type": "string", + "minLength": 1, + "description": "Name of the vault to search in" + }, + "query": { + "type": "string", + "minLength": 1, + "description": "Search query (required). For text search use the term directly, for tag search use tag: prefix" + }, + "path": { + "type": "string", + "description": "Optional subfolder path within the vault to limit search scope" + }, + "caseSensitive": { + "type": "boolean", + "default": false, + "description": "Whether to perform case-sensitive search (default: false)" + }, + "searchType": { + "type": "string", + "enum": [ + "content", + "filename", + "both" + ], + "default": "content", + "description": "Type of search to perform (default: content)" + } + }, + "required": [ + "vault", + "query" + ] + } + }, + { + "name": "move-note", + "description": "Move/rename a note while preserving links", + "inputSchema": { + "type": "object", + "properties": { + "vault": { + "type": "string", + "minLength": 1, + "description": "Name of the vault containing the note" + }, + "source": { + "type": "string", + "minLength": 1, + "description": "Source path of the note relative to vault root (e.g., 'folder/note.md')" + }, + "destination": { + "type": "string", + "minLength": 1, + "description": "Destination path relative to vault root (e.g., 'new-folder/new-name.md')" + } + }, + "required": [ + "vault", + 
"source", + "destination" + ] + } + }, + { + "name": "create-directory", + "description": "Create a new directory in the specified vault", + "inputSchema": { + "type": "object", + "properties": { + "vault": { + "type": "string", + "minLength": 1, + "description": "Name of the vault where the directory should be created" + }, + "path": { + "type": "string", + "minLength": 1, + "description": "Path of the directory to create (relative to vault root)" + }, + "recursive": { + "type": "boolean", + "default": true, + "description": "Create parent directories if they don't exist" + } + }, + "required": [ + "vault", + "path" + ] + } + }, + { + "name": "delete-note", + "description": "Delete a note, moving it to .trash by default or permanently deleting if specified", + "inputSchema": { + "type": "object", + "properties": { + "vault": { + "type": "string", + "minLength": 1, + "description": "Name of the vault containing the note" + }, + "path": { + "type": "string", + "minLength": 1, + "description": "Path of the note relative to vault root (e.g., 'folder/note.md')" + }, + "reason": { + "type": "string", + "description": "Optional reason for deletion (stored in trash metadata)" + }, + "permanent": { + "type": "boolean", + "default": false, + "description": "Whether to permanently delete instead of moving to trash (default: false)" + } + }, + "required": [ + "vault", + "path" + ] + } + }, + { + "name": "add-tags", + "description": "Add tags to notes in frontmatter and/or content.\n\nExamples:\n- Add to both locations: { \"files\": [\"note.md\"], \"tags\": [\"status/active\"] }\n- Add to frontmatter only: { \"files\": [\"note.md\"], \"tags\": [\"project/docs\"], \"location\": \"frontmatter\" }\n- Add to start of content: { \"files\": [\"note.md\"], \"tags\": [\"type/meeting\"], \"location\": \"content\", \"position\": \"start\" }", + "inputSchema": { + "type": "object", + "properties": { + "vault": { + "type": "string", + "minLength": 1, + "description": "Name of the vault 
containing the notes" + }, + "files": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "description": "Array of note filenames to process (must have .md extension)" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "description": "Array of tags to add (e.g., 'status/active', 'project/docs')" + }, + "location": { + "type": "string", + "enum": [ + "frontmatter", + "content", + "both" + ], + "description": "Where to add tags (default: both)" + }, + "normalize": { + "type": "boolean", + "description": "Whether to normalize tag format (e.g., ProjectActive -> project-active) (default: true)" + }, + "position": { + "type": "string", + "enum": [ + "start", + "end" + ], + "description": "Where to add inline tags in content (default: end)" + } + }, + "required": [ + "vault", + "files", + "tags" + ] + } + }, + { + "name": "remove-tags", + "description": "Remove tags from notes in frontmatter and/or content.\n\nExamples:\n- Simple: { \"files\": [\"note.md\"], \"tags\": [\"project\", \"status\"] }\n- With hierarchy: { \"files\": [\"note.md\"], \"tags\": [\"work/active\", \"priority/high\"] }\n- With options: { \"files\": [\"note.md\"], \"tags\": [\"status\"], \"options\": { \"location\": \"frontmatter\" } }\n- Pattern matching: { \"files\": [\"note.md\"], \"options\": { \"patterns\": [\"status/*\"] } }\n- INCORRECT: { \"tags\": [\"#project\"] } (don't include # symbol)", + "inputSchema": { + "type": "object", + "properties": { + "vault": { + "type": "string", + "minLength": 1, + "description": "Name of the vault containing the notes" + }, + "files": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "description": "Array of note filenames to process (must have .md extension)" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "description": "Array of tags to remove (without # symbol). 
Example: ['project', 'work/active']" + }, + "options": { + "type": "object", + "properties": { + "location": { + "type": "string", + "enum": [ + "frontmatter", + "content", + "both" + ], + "default": "both", + "description": "Where to remove tags from (default: both)" + }, + "normalize": { + "type": "boolean", + "default": true, + "description": "Whether to normalize tag format (e.g., ProjectActive -> project-active) (default: true)" + }, + "preserveChildren": { + "type": "boolean", + "default": false, + "description": "Whether to preserve child tags when removing parent tags (default: false)" + }, + "patterns": { + "type": "array", + "items": { + "type": "string" + }, + "default": [], + "description": "Tag patterns to match for removal (supports * wildcard) (default: [])" + } + }, + "additionalProperties": false, + "default": { + "location": "both", + "normalize": true, + "preserveChildren": false, + "patterns": [] + } + } + }, + "required": [ + "vault", + "files", + "tags" + ] + } + }, + { + "name": "rename-tag", + "description": "Safely renames tags throughout the vault while preserving hierarchies.\n\nExamples:\n- Simple rename: { \"oldTag\": \"project\", \"newTag\": \"projects\" }\n- Rename with hierarchy: { \"oldTag\": \"work/active\", \"newTag\": \"projects/current\" }\n- With options: { \"oldTag\": \"status\", \"newTag\": \"state\", \"normalize\": true, \"createBackup\": true }\n- INCORRECT: { \"oldTag\": \"#project\" } (don't include # symbol)", + "inputSchema": { + "type": "object", + "properties": { + "vault": { + "type": "string", + "minLength": 1, + "description": "Name of the vault containing the tags" + }, + "oldTag": { + "type": "string", + "minLength": 1, + "description": "The tag to rename (without #). Example: 'project' or 'work/active'" + }, + "newTag": { + "type": "string", + "minLength": 1, + "description": "The new tag name (without #). 
Example: 'projects' or 'work/current'" + }, + "createBackup": { + "type": "boolean", + "default": true, + "description": "Whether to create a backup before making changes (default: true)" + }, + "normalize": { + "type": "boolean", + "default": true, + "description": "Whether to normalize tag names (e.g., ProjectActive -> project-active) (default: true)" + }, + "batchSize": { + "type": "number", + "minimum": 1, + "maximum": 100, + "default": 50, + "description": "Number of files to process in each batch (1-100) (default: 50)" + } + }, + "required": [ + "vault", + "oldTag", + "newTag" + ] + } + }, + { + "name": "read-note", + "description": "Read the content of an existing note in the vault.\n\nExamples:\n- Root note: { \"vault\": \"vault1\", \"filename\": \"note.md\" }\n- Subfolder note: { \"vault\": \"vault1\", \"filename\": \"note.md\", \"folder\": \"journal/2024\" }\n- INCORRECT: { \"filename\": \"journal/2024/note.md\" } (don't put path in filename)", + "inputSchema": { + "type": "object", + "properties": { + "vault": { + "type": "string", + "minLength": 1, + "description": "Name of the vault containing the note" + }, + "filename": { + "type": "string", + "minLength": 1, + "description": "Just the note name without any path separators (e.g. 
'my-note.md', NOT 'folder/my-note.md')" + }, + "folder": { + "type": "string", + "description": "Optional subfolder path relative to vault root" + } + }, + "required": [ + "vault", + "filename" + ] + } + } + ] + }, + "mcp-server-qdrant": { + "display_name": "Qdrant MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/qdrant/mcp-server-qdrant" + }, + "homepage": "https://github.com/qdrant/mcp-server-qdrant/", + "author": { + "name": "qdrant" + }, + "license": "Apache License 2.0", + "tags": [ + "vector-search", + "qdrant", + "memory", + "semantic-search" + ], + "arguments": { + "QDRANT_URL": { + "description": "URL of the Qdrant server", + "required": false, + "example": "http://localhost:6333" + }, + "QDRANT_API_KEY": { + "description": "API key for the Qdrant server", + "required": false, + "example": "your-api-key" + }, + "COLLECTION_NAME": { + "description": "Name of the collection to use", + "required": true, + "example": "my-collection" + }, + "QDRANT_LOCAL_PATH": { + "description": "Path to the local Qdrant database (alternative to QDRANT_URL)", + "required": false, + "example": "/path/to/qdrant/database" + }, + "EMBEDDING_PROVIDER": { + "description": "Embedding provider to use (currently only \"fastembed\" is supported)", + "required": false, + "example": "fastembed" + }, + "EMBEDDING_MODEL": { + "description": "Name of the embedding model to use", + "required": false, + "example": "sentence-transformers/all-MiniLM-L6-v2" + }, + "TOOL_STORE_DESCRIPTION": { + "description": "Custom description for the store tool", + "required": false, + "example": "Store reusable code snippets for later retrieval." + }, + "TOOL_FIND_DESCRIPTION": { + "description": "Custom description for the find tool", + "required": false, + "example": "Search for relevant code snippets based on natural language descriptions." 
+ } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-qdrant" + ], + "env": { + "QDRANT_URL": "http://localhost:6333", + "QDRANT_API_KEY": "your_api_key", + "COLLECTION_NAME": "my-collection", + "EMBEDDING_MODEL": "sentence-transformers/all-MiniLM-L6-v2" + }, + "description": "Run using uvx without specific installation", + "recommended": true + } + }, + "examples": [ + { + "title": "Basic Usage", + "description": "Store and retrieve information from Qdrant", + "prompt": "I want to store some information in Qdrant and then retrieve it later. Can you help me with that?" + }, + { + "title": "Code Snippet Storage", + "description": "Store and retrieve code snippets with descriptions", + "prompt": "I need to store this function that calculates Fibonacci numbers and retrieve it later when I need it." + } + ], + "name": "mcp-server-qdrant", + "description": "This repository is an example of how to create a MCP server for Qdrant, a vector search engine.", + "categories": [ + "Databases" + ], + "is_official": true, + "tools": [ + { + "name": "qdrant-store", + "description": "Keep the memory for later use, when you are asked to remember something.", + "inputSchema": { + "properties": { + "information": { + "title": "Information", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "default": null, + "title": "Metadata", + "type": "object" + } + }, + "required": [ + "information" + ], + "title": "storeArguments", + "type": "object" + } + }, + { + "name": "qdrant-find", + "description": "Look up memories in Qdrant. 
Use this tool when you need to: \n - Find memories by their content \n - Access memories for further analysis \n - Get some personal information about the user", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + } + }, + "required": [ + "query" + ], + "title": "findArguments", + "type": "object" + } + } + ] + }, + "scholarly": { + "name": "scholarly", + "display_name": "scholarly", + "description": "A MCP server to search for scholarly and academic articles.", + "repository": { + "type": "git", + "url": "https://github.com/adityak74/mcp-scholarly" + }, + "homepage": "https://github.com/adityak74/mcp-scholarly", + "author": { + "name": "adityak74" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "scholarly", + "academic" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-scholarly" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "mcp/scholarly" + ] + } + }, + "arguments": { + "keyword": { + "description": "The keyword to search for articles in arXiv.", + "required": true, + "example": "machine learning" + } + }, + "tools": [ + { + "name": "search-arxiv", + "description": "Search arxiv for articles related to the given keyword.", + "inputSchema": { + "type": "object", + "properties": { + "keyword": { + "type": "string" + } + }, + "required": [ + "keyword" + ] + } + }, + { + "name": "search-google-scholar", + "description": "Search google scholar for articles related to the given keyword.", + "inputSchema": { + "type": "object", + "properties": { + "keyword": { + "type": "string" + } + }, + "required": [ + "keyword" + ] + } + } + ] + }, + "fingertip": { + "name": "fingertip", + "display_name": "Fingertip", + "description": "MCP server for Fingertip.com to search and create new sites.", + "repository": { + "type": "git", + "url": "https://github.com/fingertip-com/fingertip-mcp" + }, + "homepage": 
"https://github.com/fingertip-com/fingertip-mcp", + "author": { + "name": "fingertip-com" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "Fingertip", + "AI Assistants" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@fingertip/mcp" + ] + } + } + }, + "mcp-server-esignatures": { + "display_name": "eSignatures MCP server", + "repository": { + "type": "git", + "url": "https://github.com/esignaturescom/mcp-server-esignatures" + }, + "homepage": "https://esignatures.com", + "author": { + "name": "esignaturescom" + }, + "license": "MIT", + "tags": [ + "contracts", + "templates", + "collaborators", + "esignatures" + ], + "arguments": { + "ESIGNATURES_SECRET_TOKEN": { + "description": "Your eSignatures API secret token", + "required": true, + "example": "your-esignatures-api-secret-token" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-esignatures" + ], + "env": { + "ESIGNATURES_SECRET_TOKEN": "your-esignatures-api-secret-token" + }, + "description": "Published server installation", + "recommended": true + } + }, + "examples": [ + { + "title": "Creating a Draft Contract", + "description": "Generate a draft NDA contract for review", + "prompt": "Generate a draft NDA contract for a publisher, which I can review and send. Signer: John Doe, ACME Corp, john@acme.com" + }, + { + "title": "Sending a Contract", + "description": "Send an NDA based on a template", + "prompt": "Send an NDA based on my template to John Doe, ACME Corp, john@acme.com. Set the term to 2 years." 
+ }, + { + "title": "Updating templates", + "description": "Review templates for legal compliance", + "prompt": "Review my templates for legal compliance, and ask me about updating each one individually" + }, + { + "title": "Inviting template collaborators", + "description": "Invite collaborators to edit templates", + "prompt": "Invite John Doe to edit the NDA template, email: john@acme.com" + } + ], + "name": "mcp-server-esignatures", + "description": "MCP server for eSignatures (https://esignatures.com)", + "categories": [ + "Productivity" + ], + "tools": [ + { + "name": "create_contract", + "description": "Creates a new contract. The contract can be a draft which the user can customize/send, or the contract can be sent instantly. So called 'signature fields' like Name/Date/signature-line must be left out, they are all handled automatically. Contract owners can customize the content by replacing {{placeholder fields}} inside the content, and the signers can fill in Signer fields when they sign the contract.", + "inputSchema": { + "type": "object", + "properties": { + "template_id": { + "type": "string", + "description": "GUID of a mobile-friendly contract template within eSignatures. The template provides content, title, and labels. Required unless document_elements is provided." + }, + "title": { + "type": "string", + "description": "Sets the contract's title, which appears as the first line in contracts and PDF files, in email subjects, and overrides the template's title." + }, + "locale": { + "type": "string", + "description": "Language for signer page and emails.", + "enum": [ + "es", + "hu", + "da", + "id", + "ro", + "sk", + "pt", + "hr", + "sl", + "de", + "it", + "pl", + "rs", + "sv", + "en", + "ja", + "en-GB", + "fr", + "cz", + "vi", + "no", + "zh-CN", + "nl" + ] + }, + "metadata": { + "type": "string", + "description": "Custom data for contract owners and webhook notifications; e.g. internal IDs." 
+ }, + "expires_in_hours": { + "type": "string", + "description": "Sets contract expiry time in hours; expired contracts can't be signed. Expiry period can be extended per contract in eSignatures." + }, + "custom_webhook_url": { + "type": "string", + "description": "Overrides default webhook HTTPS URL for this contract, defined on the API page in eSignatures. Retries 6 times with 1 hour delays, timeout is 20 seconds." + }, + "assigned_user_email": { + "type": "string", + "description": "Assigns an eSignatures user as contract owner with edit/view/send rights and notification settings. Contract owners get email notifications for signings and full contract completion if enabled on their Profile." + }, + "labels": { + "type": "array", + "description": "Assigns labels to the contract, overriding template labels. Labels assist in organizing contracts without using folders.", + "items": { + "type": "string" + } + }, + "test": { + "type": "string", + "description": "Marks contract as 'demo' with no fees; adds DEMO stamp, disables reminders.", + "enum": [ + "yes", + "no" + ] + }, + "save_as_draft": { + "type": "string", + "description": "Saves contract as draft for further editing; draft can be edited and sent via UI. URL: https://esignatures.com/contracts/contract_id/edit, where contract_id is in the API response.", + "enum": [ + "yes", + "no" + ] + }, + "signers": { + "type": "array", + "description": "List of individuals required to sign the contract. Only include specific persons with their contact details; do not add generic signers.", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Signer's name." + }, + "email": { + "type": "string", + "description": "Signer's email address." + }, + "mobile": { + "type": "string", + "description": "Signer's mobile number (E.123 format)." + }, + "company_name": { + "type": "string", + "description": "Signer's company name." 
+ }, + "signing_order": { + "type": "string", + "description": "Order in which signers receive the contract; same number signers are notified together. By default, sequential." + }, + "auto_sign": { + "type": "string", + "description": "Automatically signs document if 'yes'; only for your signature not for other signers." + }, + "signature_request_delivery_methods": { + "type": "array", + "description": "Methods for delivering signature request. Empty list skips sending. Default calculated. Requires contact details.", + "items": { + "type": "string", + "enum": [ + "email", + "sms" + ] + } + }, + "signed_document_delivery_method": { + "type": "string", + "description": "Method to deliver signed document (email, sms). Usually required by law. Default calculated.", + "enum": [ + "email", + "sms" + ] + }, + "multi_factor_authentications": { + "type": "array", + "description": "Authentication methods for signers (sms_verification_code, email_verification_code). Requires the relevant contact details.", + "items": { + "type": "string", + "enum": [ + "sms_verification_code", + "email_verification_code" + ] + } + }, + "redirect_url": { + "type": "string", + "description": "URL for signer redirection post-signing." + } + }, + "required": [ + "name" + ] + } + }, + "placeholder_fields": { + "type": "array", + "description": "Replaces text placeholders in templates when creating a contract. Example: {{interest_rate}}. Do not add placeholder values when creating a draft.", + "items": { + "type": "object", + "properties": { + "api_key": { + "type": "string", + "description": "The template's placeholder key, e.g., for {{interest_rate}}, api_key is 'interest_rate'." + }, + "value": { + "type": "string", + "description": "Text that replaces the placeholder." 
+ }, + "document_elements": { + "type": "array", + "description": "Allows insertion of custom elements like headers, text, images into placeholders.", + "items": { + "type": "object", + "oneOf": [ + { + "properties": { + "type": { + "type": "string", + "description": "Header lines. Do not add the title of the template/contract as the first line; it will already be included at the beginning of the contracts.", + "enum": [ + "text_header_one", + "text_header_two", + "text_header_three" + ] + }, + "text": { + "type": "string" + }, + "text_alignment": { + "type": "string", + "enum": [ + "center", + "right", + "justified" + ], + "default": "left" + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "For paragraphs and non-list text content.", + "enum": [ + "text_normal" + ] + }, + "text": { + "type": "string" + }, + "text_alignment": { + "type": "string", + "enum": [ + "center", + "right", + "justified" + ], + "default": "left" + }, + "text_styles": { + "type": "array", + "description": "An array defining text style ranges within the element. For Placeholder fields, ensure the moustache brackets around the placeholder also match the style. Example for '{{rate}} percent': [{offset:0, length:8, style:'bold'}]", + "items": { + "type": "object", + "properties": { + "offset": { + "type": "integer", + "description": "Start index of styled text (0-based)" + }, + "length": { + "type": "integer", + "description": "Number of characters in the styled range" + }, + "style": { + "type": "string", + "description": "Style to apply", + "enum": [ + "bold", + "italic", + "underline" + ] + } + } + } + }, + "depth": { + "type": "integer", + "default": 0, + "description": "Indentation level of text, defaults to 0." + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "For list items. 
Use ordered_list_item for sequential/numbered lists, unordered_list_item for bullet points. Lists continue at the same indentation level until interrupted by another element type which is not a list or indented paragraph.", + "enum": [ + "ordered_list_item", + "unordered_list_item" + ] + }, + "text": { + "type": "string" + }, + "depth": { + "type": "integer", + "default": 0, + "description": "Depth of list nesting, default 0. For ordered lists, numbering persists at the same or deeper indentation levels; paragraphs don't interrupt numbering." + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "Signer fields allow input or selection by signers. Do not add any signer fields for collecting signatures, names, dates, company names or titles or anything similar at the end of documents. Radio buttons group automatically, do not insert any other elements (like text) between radio buttons that should be grouped together. Instead, place descriptive text before or after the complete radio button group.", + "enum": [ + "signer_field_text", + "signer_field_text_area", + "signer_field_date", + "signer_field_dropdown", + "signer_field_checkbox", + "signer_field_radiobutton", + "signer_field_file_upload" + ] + }, + "text": { + "type": "string" + }, + "signer_field_assigned_to": { + "type": "string", + "description": "Specifies which signer(s) can interact with this field based on signing order. 'first_signer' means only the first signer to open and sign can fill the field; others with the same or later order cannot. The same rule applies for 'second_signer' and 'last_signer'. 'every_signer' shows the field to each signer, with separate values in the final PDF. 
Examples: 'Primary contact for property issues' (first signer) and 'My mobile number' (every signer).", + "enum": [ + "first_signer", + "second_signer", + "last_signer", + "every_signer" + ] + }, + "signer_field_required": { + "type": "string", + "enum": [ + "yes", + "no" + ] + }, + "signer_field_dropdown_options": { + "type": "string", + "description": "Options for dropdown fields, separated by newline \n characters" + }, + "signer_field_id": { + "type": "string", + "description": "Unique ID for the Signer field, used in Webhook notifications for value inclusion. If not specified, values are excluded from Webhook notifications and CSV exports." + } + }, + "required": [ + "type", + "text", + "signer_field_assigned_to" + ] + }, + { + "properties": { + "type": { + "type": "string", + "enum": [ + "image" + ] + }, + "image_base64": { + "type": "string", + "description": "The base64-encoded png or jpg image (max 0.5MB)." + }, + "image_alignment": { + "type": "string", + "enum": [ + "center", + "right" + ], + "default": "left" + }, + "image_height_rem": { + "type": "number", + "minimum": 2, + "maximum": 38 + } + }, + "required": [ + "type", + "image_base64" + ] + }, + { + "properties": { + "type": { + "type": "string", + "enum": [ + "table" + ] + }, + "table_cells": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "object", + "properties": { + "text": { + "type": "string" + }, + "styles": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "bold", + "italic" + ] + } + }, + "alignment": { + "type": "string", + "enum": [ + "center", + "right" + ], + "default": "left" + } + } + } + } + } + }, + "required": [ + "type", + "table_cells" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "Nested template inclusion. 
Maximum depth: 1 level", + "enum": [ + "template" + ] + }, + "template_id": { + "type": "string", + "description": "ID of the template to insert; Placeholder fields apply within this template too." + } + }, + "required": [ + "type", + "template_id" + ] + } + ] + } + } + } + } + }, + "document_elements": { + "type": "array", + "description": "Customize document content with headers, text, images, etc. Owners can manually replace {{placeholder fields}} in the eSignatures editor, and signers can fill in Signer fields. Use placeholders for signer names unless names are already provided. The contract title is automatically added as the first line.", + "items": { + "type": "object", + "oneOf": [ + { + "properties": { + "type": { + "type": "string", + "description": "Header lines. Do not add the title of the template/contract as the first line; it will already be included at the beginning of the contracts.", + "enum": [ + "text_header_one", + "text_header_two", + "text_header_three" + ] + }, + "text": { + "type": "string" + }, + "text_alignment": { + "type": "string", + "enum": [ + "center", + "right", + "justified" + ], + "default": "left" + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "For paragraphs and non-list text content.", + "enum": [ + "text_normal" + ] + }, + "text": { + "type": "string" + }, + "text_alignment": { + "type": "string", + "enum": [ + "center", + "right", + "justified" + ], + "default": "left" + }, + "text_styles": { + "type": "array", + "description": "An array defining text style ranges within the element. For Placeholder fields, ensure the moustache brackets around the placeholder also match the style. 
Example for '{{rate}} percent': [{offset:0, length:8, style:'bold'}]", + "items": { + "type": "object", + "properties": { + "offset": { + "type": "integer", + "description": "Start index of styled text (0-based)" + }, + "length": { + "type": "integer", + "description": "Number of characters in the styled range" + }, + "style": { + "type": "string", + "description": "Style to apply", + "enum": [ + "bold", + "italic", + "underline" + ] + } + } + } + }, + "depth": { + "type": "integer", + "default": 0, + "description": "Indentation level of text, defaults to 0." + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "For list items. Use ordered_list_item for sequential/numbered lists, unordered_list_item for bullet points. Lists continue at the same indentation level until interrupted by another element type which is not a list or indented paragraph.", + "enum": [ + "ordered_list_item", + "unordered_list_item" + ] + }, + "text": { + "type": "string" + }, + "depth": { + "type": "integer", + "default": 0, + "description": "Depth of list nesting, default 0. For ordered lists, numbering persists at the same or deeper indentation levels; paragraphs don't interrupt numbering." + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "Signer fields allow input or selection by signers. Do not add any signer fields for collecting signatures, names, dates, company names or titles or anything similar at the end of documents. Radio buttons group automatically, do not insert any other elements (like text) between radio buttons that should be grouped together. 
Instead, place descriptive text before or after the complete radio button group.", + "enum": [ + "signer_field_text", + "signer_field_text_area", + "signer_field_date", + "signer_field_dropdown", + "signer_field_checkbox", + "signer_field_radiobutton", + "signer_field_file_upload" + ] + }, + "text": { + "type": "string" + }, + "signer_field_assigned_to": { + "type": "string", + "description": "Specifies which signer(s) can interact with this field based on signing order. 'first_signer' means only the first signer to open and sign can fill the field; others with the same or later order cannot. The same rule applies for 'second_signer' and 'last_signer'. 'every_signer' shows the field to each signer, with separate values in the final PDF. Examples: 'Primary contact for property issues' (first signer) and 'My mobile number' (every signer).", + "enum": [ + "first_signer", + "second_signer", + "last_signer", + "every_signer" + ] + }, + "signer_field_required": { + "type": "string", + "enum": [ + "yes", + "no" + ] + }, + "signer_field_dropdown_options": { + "type": "string", + "description": "Options for dropdown fields, separated by newline \n characters" + }, + "signer_field_id": { + "type": "string", + "description": "Unique ID for the Signer field, used in Webhook notifications for value inclusion. If not specified, values are excluded from Webhook notifications and CSV exports." + } + }, + "required": [ + "type", + "text", + "signer_field_assigned_to" + ] + }, + { + "properties": { + "type": { + "type": "string", + "enum": [ + "image" + ] + }, + "image_base64": { + "type": "string", + "description": "The base64-encoded png or jpg image (max 0.5MB)." 
+ }, + "image_alignment": { + "type": "string", + "enum": [ + "center", + "right" + ], + "default": "left" + }, + "image_height_rem": { + "type": "number", + "minimum": 2, + "maximum": 38 + } + }, + "required": [ + "type", + "image_base64" + ] + }, + { + "properties": { + "type": { + "type": "string", + "enum": [ + "table" + ] + }, + "table_cells": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "object", + "properties": { + "text": { + "type": "string" + }, + "styles": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "bold", + "italic" + ] + } + }, + "alignment": { + "type": "string", + "enum": [ + "center", + "right" + ], + "default": "left" + } + } + } + } + } + }, + "required": [ + "type", + "table_cells" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "Nested template inclusion. Maximum depth: 1 level", + "enum": [ + "template" + ] + }, + "template_id": { + "type": "string", + "description": "ID of the template to insert; Placeholder fields apply within this template too." + } + }, + "required": [ + "type", + "template_id" + ] + } + ] + } + }, + "signer_fields": { + "type": "array", + "description": "Set default values for Signer fields.", + "items": { + "type": "object", + "properties": { + "signer_field_id": { + "type": "string", + "description": "Signer field ID of the Signer field, defined in the template or document_elements." + }, + "default_value": { + "type": "string", + "description": "Default input value (use '1' for checkboxes and radio buttons, 'YYYY-mm-dd' for dates)." + }, + "select_position": { + "type": "string", + "description": "Pre-selected option index for dropdowns (0-based)." 
+ } + }, + "required": [ + "signer_field_id" + ] + } + }, + "emails": { + "type": "object", + "description": "Customize email communications for signing and final documents.", + "properties": { + "signature_request_subject": { + "type": "string", + "description": "Email subject for signature request emails." + }, + "signature_request_text": { + "type": "string", + "description": "Email body of signature request email; use __FULL_NAME__ for personalization. First line is bold and larger." + }, + "final_contract_subject": { + "type": "string", + "description": "Email subject for the final contract email." + }, + "final_contract_text": { + "type": "string", + "description": "Body of final contract email; use __FULL_NAME__ for personalization. First line is bold and larger." + }, + "cc_email_addresses": { + "type": "array", + "description": "Email addresses CC'd when sending the signed contract PDF.", + "items": { + "type": "string" + } + }, + "reply_to": { + "type": "string", + "description": "Custom reply-to email address (defaults to support email if not set)." + } + } + }, + "custom_branding": { + "type": "object", + "description": "Customize branding for documents and emails.", + "properties": { + "company_name": { + "type": "string", + "description": "Custom company name shown as the sender." + }, + "logo_url": { + "type": "string", + "description": "URL for custom logo (PNG, recommended 400px size)." + } + } + }, + "contract_source": { + "type": "string", + "enum": [ + "mcpserver" + ], + "description": "Identifies the originating system. Currently only mcpserver supported for MCP requests." + }, + "mcp_query": { + "type": "string", + "description": "The original text query that the user typed which triggered this MCP command execution. Used for logging and debugging purposes." 
+ } + }, + "required": [ + "contract_source", + "mcp_query" + ] + } + }, + { + "name": "query_contract", + "description": "Responds with the contract details, contract_id, status, final PDF url if present, title, labels, metadata, expiry time if present, and signer details with all signer events (signer events are included only for recent contracts, with rate limiting).", + "inputSchema": { + "type": "object", + "properties": { + "contract_id": { + "type": "string", + "description": "GUID of the contract (draft contracts can't be queried, only sent contracts)." + } + }, + "required": [ + "contract_id" + ] + } + }, + { + "name": "withdraw_contract", + "description": "Withdraws a sent contract.", + "inputSchema": { + "type": "object", + "properties": { + "contract_id": { + "type": "string", + "description": "GUID of the contract to be withdrawn." + } + }, + "required": [ + "contract_id" + ] + } + }, + { + "name": "delete_contract", + "description": "Deletes a contract. The contract can only be deleted if it's a test contract or a draft contract.", + "inputSchema": { + "type": "object", + "properties": { + "contract_id": { + "type": "string", + "description": "GUID of the contract to be deleted." + } + }, + "required": [ + "contract_id" + ] + } + }, + { + "name": "list_recent_contracts", + "description": "Returns the details of the latest 100 contracts.", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "create_template", + "description": "Creates a reusable contract template for contracts to be based on.", + "inputSchema": { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "Title for the new template; used for contracts based on this template." 
+ }, + "labels": { + "type": "array", + "description": "Assign labels for organizing templates and contracts; labels are inherited by contracts.", + "items": { + "type": "string" + } + }, + "document_elements": { + "type": "array", + "description": "Customize template content with headers, text, images. Owners can manually replace {{placeholder fields}} in the eSignatures contract editor, and signers can fill in Signer fields when signing the document. Use placeholders for signer names if needed, instead of Signer fields. Contract title auto-inserts as the first line.", + "items": { + "type": "object", + "oneOf": [ + { + "properties": { + "type": { + "type": "string", + "description": "Header lines. Do not add the title of the template/contract as the first line; it will already be included at the beginning of the contracts.", + "enum": [ + "text_header_one", + "text_header_two", + "text_header_three" + ] + }, + "text": { + "type": "string" + }, + "text_alignment": { + "type": "string", + "enum": [ + "center", + "right", + "justified" + ], + "default": "left" + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "For paragraphs and non-list text content.", + "enum": [ + "text_normal" + ] + }, + "text": { + "type": "string" + }, + "text_alignment": { + "type": "string", + "enum": [ + "center", + "right", + "justified" + ], + "default": "left" + }, + "text_styles": { + "type": "array", + "description": "An array defining text style ranges within the element. For Placeholder fields, ensure the moustache brackets around the placeholder also match the style. 
Example for '{{rate}} percent': [{offset:0, length:8, style:'bold'}]", + "items": { + "type": "object", + "properties": { + "offset": { + "type": "integer", + "description": "Start index of styled text (0-based)" + }, + "length": { + "type": "integer", + "description": "Number of characters in the styled range" + }, + "style": { + "type": "string", + "description": "Style to apply", + "enum": [ + "bold", + "italic", + "underline" + ] + } + } + } + }, + "depth": { + "type": "integer", + "default": 0, + "description": "Indentation level of text, defaults to 0." + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "For list items. Use ordered_list_item for sequential/numbered lists, unordered_list_item for bullet points. Lists continue at the same indentation level until interrupted by another element type which is not a list or indented paragraph.", + "enum": [ + "ordered_list_item", + "unordered_list_item" + ] + }, + "text": { + "type": "string" + }, + "depth": { + "type": "integer", + "default": 0, + "description": "Depth of list nesting, default 0. For ordered lists, numbering persists at the same or deeper indentation levels; paragraphs don't interrupt numbering." + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "Signer fields allow input or selection by signers. Do not add any signer fields for collecting signatures, names, dates, company names or titles or anything similar at the end of documents. Radio buttons group automatically, do not insert any other elements (like text) between radio buttons that should be grouped together. 
Instead, place descriptive text before or after the complete radio button group.", + "enum": [ + "signer_field_text", + "signer_field_text_area", + "signer_field_date", + "signer_field_dropdown", + "signer_field_checkbox", + "signer_field_radiobutton", + "signer_field_file_upload" + ] + }, + "text": { + "type": "string" + }, + "signer_field_assigned_to": { + "type": "string", + "description": "Specifies which signer(s) can interact with this field based on signing order. 'first_signer' means only the first signer to open and sign can fill the field; others with the same or later order cannot. The same rule applies for 'second_signer' and 'last_signer'. 'every_signer' shows the field to each signer, with separate values in the final PDF. Examples: 'Primary contact for property issues' (first signer) and 'My mobile number' (every signer).", + "enum": [ + "first_signer", + "second_signer", + "last_signer", + "every_signer" + ] + }, + "signer_field_required": { + "type": "string", + "enum": [ + "yes", + "no" + ] + }, + "signer_field_dropdown_options": { + "type": "string", + "description": "Options for dropdown fields, separated by newline \n characters" + }, + "signer_field_id": { + "type": "string", + "description": "Unique ID for the Signer field, used in Webhook notifications for value inclusion. If not specified, values are excluded from Webhook notifications and CSV exports." + } + }, + "required": [ + "type", + "text", + "signer_field_assigned_to" + ] + }, + { + "properties": { + "type": { + "type": "string", + "enum": [ + "image" + ] + }, + "image_base64": { + "type": "string", + "description": "The base64-encoded png or jpg image (max 0.5MB)." 
+ }, + "image_alignment": { + "type": "string", + "enum": [ + "center", + "right" + ], + "default": "left" + }, + "image_height_rem": { + "type": "number", + "minimum": 2, + "maximum": 38 + } + }, + "required": [ + "type", + "image_base64" + ] + }, + { + "properties": { + "type": { + "type": "string", + "enum": [ + "table" + ] + }, + "table_cells": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "object", + "properties": { + "text": { + "type": "string" + }, + "styles": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "bold", + "italic" + ] + } + }, + "alignment": { + "type": "string", + "enum": [ + "center", + "right" + ], + "default": "left" + } + } + } + } + } + }, + "required": [ + "type", + "table_cells" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "Nested template inclusion. Maximum depth: 1 level", + "enum": [ + "template" + ] + }, + "template_id": { + "type": "string", + "description": "ID of the template to insert; Placeholder fields apply within this template too." + } + }, + "required": [ + "type", + "template_id" + ] + } + ] + } + } + }, + "required": [ + "title", + "document_elements" + ] + } + }, + { + "name": "update_template", + "description": "Updates the title, labels or the content of a contract template.", + "inputSchema": { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "The new title of the template." + }, + "labels": { + "type": "array", + "description": "List of labels to be assigned to the template.", + "items": { + "type": "string" + } + }, + "document_elements": { + "type": "array", + "description": "The content of the template like headers, text, and images for the document.", + "items": { + "type": "object", + "oneOf": [ + { + "properties": { + "type": { + "type": "string", + "description": "Header lines. 
Do not add the title of the template/contract as the first line; it will already be included at the beginning of the contracts.", + "enum": [ + "text_header_one", + "text_header_two", + "text_header_three" + ] + }, + "text": { + "type": "string" + }, + "text_alignment": { + "type": "string", + "enum": [ + "center", + "right", + "justified" + ], + "default": "left" + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "For paragraphs and non-list text content.", + "enum": [ + "text_normal" + ] + }, + "text": { + "type": "string" + }, + "text_alignment": { + "type": "string", + "enum": [ + "center", + "right", + "justified" + ], + "default": "left" + }, + "text_styles": { + "type": "array", + "description": "An array defining text style ranges within the element. For Placeholder fields, ensure the moustache brackets around the placeholder also match the style. Example for '{{rate}} percent': [{offset:0, length:8, style:'bold'}]", + "items": { + "type": "object", + "properties": { + "offset": { + "type": "integer", + "description": "Start index of styled text (0-based)" + }, + "length": { + "type": "integer", + "description": "Number of characters in the styled range" + }, + "style": { + "type": "string", + "description": "Style to apply", + "enum": [ + "bold", + "italic", + "underline" + ] + } + } + } + }, + "depth": { + "type": "integer", + "default": 0, + "description": "Indentation level of text, defaults to 0." + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "For list items. Use ordered_list_item for sequential/numbered lists, unordered_list_item for bullet points. 
Lists continue at the same indentation level until interrupted by another element type which is not a list or indented paragraph.", + "enum": [ + "ordered_list_item", + "unordered_list_item" + ] + }, + "text": { + "type": "string" + }, + "depth": { + "type": "integer", + "default": 0, + "description": "Depth of list nesting, default 0. For ordered lists, numbering persists at the same or deeper indentation levels; paragraphs don't interrupt numbering." + } + }, + "required": [ + "type", + "text" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "Signer fields allow input or selection by signers. Do not add any signer fields for collecting signatures, names, dates, company names or titles or anything similar at the end of documents. Radio buttons group automatically, do not insert any other elements (like text) between radio buttons that should be grouped together. Instead, place descriptive text before or after the complete radio button group.", + "enum": [ + "signer_field_text", + "signer_field_text_area", + "signer_field_date", + "signer_field_dropdown", + "signer_field_checkbox", + "signer_field_radiobutton", + "signer_field_file_upload" + ] + }, + "text": { + "type": "string" + }, + "signer_field_assigned_to": { + "type": "string", + "description": "Specifies which signer(s) can interact with this field based on signing order. 'first_signer' means only the first signer to open and sign can fill the field; others with the same or later order cannot. The same rule applies for 'second_signer' and 'last_signer'. 'every_signer' shows the field to each signer, with separate values in the final PDF. 
Examples: 'Primary contact for property issues' (first signer) and 'My mobile number' (every signer).", + "enum": [ + "first_signer", + "second_signer", + "last_signer", + "every_signer" + ] + }, + "signer_field_required": { + "type": "string", + "enum": [ + "yes", + "no" + ] + }, + "signer_field_dropdown_options": { + "type": "string", + "description": "Options for dropdown fields, separated by newline \n characters" + }, + "signer_field_id": { + "type": "string", + "description": "Unique ID for the Signer field, used in Webhook notifications for value inclusion. If not specified, values are excluded from Webhook notifications and CSV exports." + } + }, + "required": [ + "type", + "text", + "signer_field_assigned_to" + ] + }, + { + "properties": { + "type": { + "type": "string", + "enum": [ + "image" + ] + }, + "image_base64": { + "type": "string", + "description": "The base64-encoded png or jpg image (max 0.5MB)." + }, + "image_alignment": { + "type": "string", + "enum": [ + "center", + "right" + ], + "default": "left" + }, + "image_height_rem": { + "type": "number", + "minimum": 2, + "maximum": 38 + } + }, + "required": [ + "type", + "image_base64" + ] + }, + { + "properties": { + "type": { + "type": "string", + "enum": [ + "table" + ] + }, + "table_cells": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "object", + "properties": { + "text": { + "type": "string" + }, + "styles": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "bold", + "italic" + ] + } + }, + "alignment": { + "type": "string", + "enum": [ + "center", + "right" + ], + "default": "left" + } + } + } + } + } + }, + "required": [ + "type", + "table_cells" + ] + }, + { + "properties": { + "type": { + "type": "string", + "description": "Nested template inclusion. 
Maximum depth: 1 level", + "enum": [ + "template" + ] + }, + "template_id": { + "type": "string", + "description": "ID of the template to insert; Placeholder fields apply within this template too." + } + }, + "required": [ + "type", + "template_id" + ] + } + ] + } + } + } + } + }, + { + "name": "query_template", + "description": "Responds with the template details, template_id, title, labels, created_at, list of the Placeholder fields in the template, list of Signer fields in the template, and the full content inside document_elements", + "inputSchema": { + "type": "object", + "properties": { + "template_id": { + "type": "string", + "description": "GUID of the template." + } + }, + "required": [ + "template_id" + ] + } + }, + { + "name": "delete_template", + "description": "Deletes a contract template.", + "inputSchema": { + "type": "object", + "properties": { + "template_id": { + "type": "string", + "description": "GUID of the template to be deleted." + } + }, + "required": [ + "template_id" + ] + } + }, + { + "name": "list_templates", + "description": "Lists the templates.", + "inputSchema": { + "type": "object", + "properties": {} + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "keboola-mcp-server": { + "display_name": "Keboola MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/keboola/keboola-mcp-server" + }, + "license": "MIT", + "homepage": "https://github.com/keboola/keboola-mcp-server", + "author": { + "name": "keboola" + }, + "tags": [ + "keboola", + "data", + "storage", + "snowflake" + ], + "arguments": { + "api-url": { + "description": "Keboola Connection API URL", + "required": true, + "example": "https://connection.YOUR_REGION.keboola.com" + }, + "KBC_STORAGE_TOKEN": { + "description": "Keboola Storage API token", + "required": true, + "example": "your-keboola-storage-token" + }, + "KBC_WORKSPACE_USER": { + "description": "Snowflake workspace username", + "required": true, + "example": 
"your-workspace-user" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/keboola/keboola-mcp-server.git", + "keboola-mcp", + "--api-url", + "${api-url}" + ], + "env": { + "KBC_STORAGE_TOKEN": "your-keboola-storage-token", + "KBC_WORKSPACE_USER": "your-workspace-user" + }, + "description": "Run the server using Python", + "recommended": true + } + }, + "examples": [ + { + "title": "List buckets and tables", + "description": "Get a list of all buckets and tables in your Keboola project", + "prompt": "List all the buckets and tables in my Keboola project." + }, + { + "title": "Preview table data", + "description": "Preview data from a specific table", + "prompt": "Show me a preview of the data in table [table_id]." + } + ], + "name": "keboola-mcp-server", + "description": "\"Keboola", + "categories": [ + "Analytics" + ], + "is_official": true, + "tools": [ + { + "name": "list_bucket_info", + "description": "List information about all buckets in the project.", + "inputSchema": { + "properties": {}, + "title": "list_bucket_infoArguments", + "type": "object" + } + }, + { + "name": "get_bucket_metadata", + "description": "Get detailed information about a specific bucket.", + "inputSchema": { + "properties": { + "bucket_id": { + "description": "Unique ID of the bucket.", + "title": "Bucket Id", + "type": "string" + } + }, + "required": [ + "bucket_id" + ], + "title": "get_bucket_metadataArguments", + "type": "object" + } + }, + { + "name": "list_bucket_tables", + "description": "List all tables in a specific bucket with their basic information.", + "inputSchema": { + "properties": { + "bucket_id": { + "description": "Unique ID of the bucket.", + "title": "Bucket Id", + "type": "string" + } + }, + "required": [ + "bucket_id" + ], + "title": "list_bucket_tablesArguments", + "type": "object" + } + }, + { + "name": "get_table_metadata", + "description": "Get detailed information about a specific table 
including its DB identifier and column information.", + "inputSchema": { + "properties": { + "table_id": { + "description": "Unique ID of the table.", + "title": "Table Id", + "type": "string" + } + }, + "required": [ + "table_id" + ], + "title": "get_table_metadataArguments", + "type": "object" + } + }, + { + "name": "query_table", + "description": "\n Executes an SQL SELECT query to get the data from the underlying snowflake database.\n * When constructing the SQL SELECT query make sure to use the fully qualified table names\n that include the database name, schema name and the table name.\n * The fully qualified table name can be found in the table information, use a tool to get the information\n about tables. The fully qualified table name can be found in the response for that tool.\n * Snowflake is case-sensitive so always wrap the column names in double quotes.\n\n Examples:\n * SQL queries must include the fully qualified table names including the database name, e.g.:\n SELECT * FROM \"db_name\".\"db_schema_name\".\"table_name\";\n ", + "inputSchema": { + "properties": { + "sql_query": { + "description": "SQL SELECT query to run.", + "title": "Sql Query", + "type": "string" + } + }, + "required": [ + "sql_query" + ], + "title": "query_tableArguments", + "type": "object" + } + }, + { + "name": "list_components", + "description": "List all available components and their configurations.", + "inputSchema": { + "properties": {}, + "title": "list_componentsArguments", + "type": "object" + } + }, + { + "name": "list_component_configs", + "description": "List all configurations for a specific component.", + "inputSchema": { + "properties": { + "component_id": { + "title": "Component Id", + "type": "string" + } + }, + "required": [ + "component_id" + ], + "title": "list_component_configsArguments", + "type": "object" + } + } + ] + }, + "anki": { + "name": "anki", + "display_name": "Anki", + "description": "An MCP server for interacting with your 
[Anki](https://apps.ankiweb.net/) decks and cards.", + "repository": { + "type": "git", + "url": "https://github.com/scorzeth/anki-mcp-server" + }, + "homepage": "https://github.com/scorzeth/anki-mcp-server", + "author": { + "name": "scorzeth" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "Anki", + "Cards", + "Review" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/scorzeth/anki-mcp-server" + ], + "description": "Run with npx (requires npm install)" + } + }, + "tools": [ + { + "name": "update_cards", + "description": "After the user answers cards you've quizzed them on, use this tool to mark them answered and update their ease", + "inputSchema": { + "type": "object", + "properties": { + "answers": { + "type": "array", + "items": { + "type": "object", + "properties": { + "cardId": { + "type": "number", + "description": "Id of the card to answer" + }, + "ease": { + "type": "number", + "description": "Ease of the card between 1 (Again) and 4 (Easy)" + } + } + } + } + } + } + }, + { + "name": "add_card", + "description": "Create a new flashcard in Anki for the user. Must use HTML formatting only. IMPORTANT FORMATTING RULES:\n1. Must use HTML tags for ALL formatting - NO markdown\n2. Use
for ALL line breaks\n3. For code blocks, use
 with inline CSS styling\n4. Example formatting:\n   - Line breaks: 
\n - Code:
\n   - Lists: 
    and
  1. tags\n - Bold: \n - Italic: ", + "inputSchema": { + "type": "object", + "properties": { + "front": { + "type": "string", + "description": "The front of the card. Must use HTML formatting only." + }, + "back": { + "type": "string", + "description": "The back of the card. Must use HTML formatting only." + } + }, + "required": [ + "front", + "back" + ] + } + }, + { + "name": "get_due_cards", + "description": "Returns a given number (num) of cards due for review.", + "inputSchema": { + "type": "object", + "properties": { + "num": { + "type": "number", + "description": "Number of due cards to get" + } + }, + "required": [ + "num" + ] + } + }, + { + "name": "get_new_cards", + "description": "Returns a given number (num) of new and unseen cards.", + "inputSchema": { + "type": "object", + "properties": { + "num": { + "type": "number", + "description": "Number of new cards to get" + } + }, + "required": [ + "num" + ] + } + } + ] + }, + "obsidian-markdown-notes": { + "name": "obsidian-markdown-notes", + "display_name": "Obsidian Markdown Notes", + "description": "Read and search through your Obsidian vault or any directory containing Markdown notes", + "repository": { + "type": "git", + "url": "https://github.com/calclavia/mcp-obsidian" + }, + "homepage": "https://github.com/calclavia/mcp-obsidian", + "author": { + "name": "calclavia" + }, + "license": "APGL-3.0", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "obsidian" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/calclavia/mcp-obsidian.git", + "${OBSIDIAN_VAULT_PATH}" + ] + } + }, + "arguments": { + "OBSIDIAN_VAULT_PATH": { + "description": "Path to your Obsidian vault", + "required": true + } + }, + "tools": [ + { + "name": "read_notes", + "description": "Read the contents of multiple notes. Each note's content is returned with its path as a reference. Failed reads for individual notes won't stop the entire operation. 
Reading too many at once may result in an error.", + "inputSchema": { + "type": "object", + "properties": { + "paths": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "paths" + ] + } + }, + { + "name": "search_notes", + "description": "Searches for a note by its name. The search is case-insensitive and matches partial names. Queries can also be a valid regex. Returns paths of the notes that match the query.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string" + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "fireproof-mcp": { + "display_name": "Model Context Protocol and Fireproof Demo: JSON Document Server", + "repository": { + "type": "git", + "url": "https://github.com/fireproof-storage/mcp-database-server" + }, + "license": "MIT", + "installations": { + "custom": { + "type": "custom", + "command": "node", + "args": [ + "/path/to/fireproof-mcp/build/index.js" + ], + "description": "Run the server using Node.js after installing dependencies and building" + } + }, + "homepage": "https://github.com/fireproof-storage/mcp-database-server", + "author": { + "name": "fireproof-storage" + }, + "tags": [ + "fireproof", + "database", + "MCP", + "Model Context Protocol", + "JSON", + "document store" + ], + "examples": [ + { + "title": "Basic Usage", + "description": "Using the server with Claude Desktop", + "prompt": "Configure Claude Desktop to use the Fireproof MCP server by adding the server config to the appropriate location." 
+ } + ], + "name": "fireproof-mcp", + "description": "Immutable ledger database with live synchronization", + "categories": [ + "Databases" + ], + "is_official": true + }, + "lingo-dev": { + "display_name": "Lingo.dev MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/lingodotdev/lingo.dev" + }, + "homepage": "https://lingo.dev", + "author": { + "name": "lingodotdev" + }, + "license": "Apache-2.0", + "tags": [ + "translation", + "localization", + "mcp" + ], + "arguments": { + "api-key": { + "description": "Your Lingo.dev project API key", + "required": true, + "example": "" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "lingo.dev", + "mcp", + "${api-key}" + ], + "description": "Run the Lingo.dev MCP server using npx", + "recommended": true + } + }, + "examples": [ + { + "title": "Translate content", + "description": "Ask the AI tool to translate content using Lingo.dev", + "prompt": "Translate this text to Spanish: 'Hello world'" + } + ], + "name": "lingo-dev", + "description": "The [Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP) is a standard for connecting Large Language Models (LLMs) to external services. 
This guide will walk you through how to connect AI tools to Lingo.dev using MCP.", + "categories": [ + "Dev Tools" + ], + "is_official": true + }, + "veyrax-mcp": { + "display_name": "VeyraX MCP", + "repository": { + "type": "git", + "url": "https://github.com/VeyraX/veyrax-mcp" + }, + "homepage": "https://www.veyrax.com", + "author": { + "name": "VeyraX" + }, + "license": "[NOT GIVEN]", + "tags": [ + "MCP", + "Model Context Protocol", + "AI tools", + "LLM integration" + ], + "arguments": { + "VEYRAX_API_KEY": { + "description": "Your VeyraX API key found in your account settings", + "required": true + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "node", + "args": [ + "path/to/veyrax-mcp/build/src/index.js", + "--config", + "\"{\\\"VEYRAX_API_KEY\\\":\\\"${VEYRAX_API_KEY}\\\"}\"" + ] + } + }, + "examples": [ + { + "title": "Getting Started with VeyraX MCP", + "description": "Basic setup for VeyraX MCP", + "prompt": "How do I set up VeyraX MCP in my environment?" 
+ } + ], + "name": "veyrax-mcp", + "description": "Single tool to control all 100+ API integrations, and UI components", + "categories": [ + "MCP Tools" + ], + "is_official": true + }, + "mcp-server-neon": { + "display_name": "Neon MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/neondatabase/mcp-server-neon" + }, + "homepage": "https://neon.tech", + "author": { + "name": "neondatabase" + }, + "license": "MIT", + "tags": [ + "database", + "postgres", + "neon", + "mcp", + "llm" + ], + "arguments": { + "NEON_API_KEY": { + "description": "Neon API key - you can generate one through the Neon console", + "required": true + } + }, + "installations": { + "cli": { + "type": "cli", + "command": "npx", + "args": [ + "@neondatabase/mcp-server-neon", + "init", + "$NEON_API_KEY" + ], + "package": "@neondatabase/mcp-server-neon", + "env": {}, + "description": "Install via npm", + "recommended": true + } + }, + "examples": [ + { + "title": "List projects", + "description": "List all Neon projects", + "prompt": "List me all my Neon projects" + }, + { + "title": "Create database and table", + "description": "Create a new Postgres database and add a users table", + "prompt": "Let's create a new Postgres database, and call it \"my-database\". Let's then create a table called users with the following columns: id, name, email, and password." + }, + { + "title": "Run migration", + "description": "Run a migration to alter a table", + "prompt": "I want to run a migration on my project called \"my-project\" that alters the users table to add a new column called \"created_at\"." + }, + { + "title": "Project summary", + "description": "Get a summary of all projects and data", + "prompt": "Can you give me a summary of all of my Neon projects and what data is in each one?" 
+ } + ], + "name": "mcp-server-neon", + "description": "This lets you use Claude Desktop, or any MCP Client, to use natural language to accomplish things with Neon.", + "categories": [ + "Databases" + ], + "is_official": true + }, + "video-editor": { + "name": "video-editor", + "display_name": "Video Editor", + "description": "A Model Context Protocol Server to add, edit, and search videos with [Video Jungle](https://www.video-jungle.com/).", + "repository": { + "type": "git", + "url": "https://github.com/burningion/video-editing-mcp" + }, + "homepage": "https://github.com/burningion/video-editing-mcp", + "author": { + "name": "burningion" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "video", + "editing", + "API" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/burningion/video-editing-mcp", + "video-editor-mcp", + "${YOURAPIKEY}" + ] + } + }, + "examples": [ + { + "title": "Add Video Example", + "description": "Shows how to add a video from a URL.", + "prompt": "can you download the video at https://www.youtube.com/shorts/RumgYaH5XYw and name it fly traps?" + }, + { + "title": "Search Videos Example", + "description": "Example of searching videos with a keyword.", + "prompt": "can you search my videos for fly traps?" + }, + { + "title": "Generate Edit Example", + "description": "Creates an edit from found video segments.", + "prompt": "can you create an edit of all the times the video says \"fly trap\"?" + } + ], + "arguments": { + "YOURAPIKEY": { + "description": "API key required to authenticate and communicate with Video Jungle services.", + "required": true, + "example": "YOURAPIKEY" + } + }, + "tools": [ + { + "name": "add-video", + "description": "Upload video from URL. 
Begins analysis of video to allow for later information retrieval for automatic video editing and search.", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + } + }, + "required": [ + "name", + "url" + ] + } + }, + { + "name": "search-remote-videos", + "description": "Default method to search videos. Will return videos including video_ids, which allow for information retrieval and building video edits.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Text search query" + }, + "limit": { + "type": "integer", + "default": 10, + "minimum": 1, + "description": "Maximum number of results to return" + }, + "project_id": { + "type": "string", + "format": "uuid", + "description": "Project ID to scope the search" + }, + "duration_min": { + "type": "number", + "minimum": 0, + "description": "Minimum video duration in seconds" + }, + "duration_max": { + "type": "number", + "minimum": 0, + "description": "Maximum video duration in seconds" + } + }, + "created_after": { + "type": "string", + "format": "date-time", + "description": "Filter videos created after this datetime" + }, + "created_before": { + "type": "string", + "format": "date-time", + "description": "Filter videos created before this datetime" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true, + "description": "Set of tags to filter by" + }, + "include_segments": { + "type": "boolean", + "default": true, + "description": "Whether to include video segments in results" + }, + "include_related": { + "type": "boolean", + "default": false, + "description": "Whether to include related videos" + }, + "query_audio": { + "type": "string", + "description": "Audio search query" + }, + "query_img": { + "type": "string", + "description": "Image search query" + }, + "oneOf": [ + { + "required": [ + "query" + ] + } + ] + } + }, + { + "name": 
"search-local-videos", + "description": "Search user's local videos in Photos app by keyword", + "inputSchema": { + "type": "object", + "properties": { + "keyword": { + "type": "string" + }, + "start_date": { + "type": "string", + "description": "ISO 8601 formatted datetime string (e.g. 2024-01-21T15:30:00Z)" + }, + "end_date": { + "type": "string", + "description": "ISO 8601 formatted datetime string (e.g. 2024-01-21T15:30:00Z)" + } + }, + "required": [ + "keyword" + ] + } + }, + { + "name": "generate-edit-from-videos", + "description": "Generate an edit from videos, from within a specific project. Creates a new project to work within no existing project ID (UUID) is passed ", + "inputSchema": { + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "Either an existing Project UUID or String. A UUID puts the edit in an existing project, and a string creates a new project with that name." + }, + "name": { + "type": "string", + "description": "Video Edit name" + }, + "open_editor": { + "type": "boolean", + "description": "Open a live editor with the project's edit" + }, + "resolution": { + "type": "string", + "description": "Video resolution. 
Examples include '1920x1080', '1280x720'" + }, + "edit": { + "type": "array", + "cuts": { + "video_id": { + "type": "string", + "description": "Video UUID" + }, + "video_start_time": { + "type": "string", + "description": "Clip start time in 00:00:00.000 format" + }, + "video_end_time": { + "type": "string", + "description": "Clip end time in 00:00:00.000 format" + } + } + } + }, + "required": [ + "edit", + "cuts", + "name", + "project_id" + ] + } + }, + { + "name": "generate-edit-from-single-video", + "description": "Generate a compressed video edit from a single video.", + "inputSchema": { + "type": "object", + "properties": { + "project_id": { + "type": "string" + }, + "resolution": { + "type": "string" + }, + "video_id": { + "type": "string" + }, + "edit": { + "type": "array", + "cuts": { + "video_start_time": "time", + "video_end_time": "time" + } + } + }, + "required": [ + "edit", + "project_id", + "video_id", + "cuts" + ] + } + }, + { + "name": "update-video-edit", + "description": "Update an existing video edit within a specific project.", + "inputSchema": { + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "UUID of the project containing the edit" + }, + "edit_id": { + "type": "string", + "description": "UUID of the video edit to update" + }, + "name": { + "type": "string", + "description": "Video Edit name" + }, + "description": { + "type": "string", + "description": "Description of the video edit" + }, + "video_output_format": { + "type": "string", + "description": "Output format for the video (e.g., 'mp4', 'webm')" + }, + "video_output_resolution": { + "type": "string", + "description": "Video resolution. 
Examples include '1920x1080', '1280x720'" + }, + "video_output_fps": { + "type": "number", + "description": "Frames per second for the output video" + }, + "video_series_sequential": { + "type": "array", + "description": "Array of video clips in sequential order", + "items": { + "type": "object", + "properties": { + "video_id": { + "type": "string", + "description": "Video UUID" + }, + "video_start_time": { + "type": "string", + "description": "Clip start time in 00:00:00.000 format" + }, + "video_end_time": { + "type": "string", + "description": "Clip end time in 00:00:00.000 format" + } + } + } + }, + "audio_overlay": { + "type": "object", + "description": "Audio overlay settings and assets" + }, + "rendered": { + "type": "boolean", + "description": "Whether the edit has been rendered" + } + }, + "required": [ + "project_id", + "edit_id" + ] + } + }, + { + "name": "create-video-bar-chart-from-two-axis-data", + "description": "Create a video bar chart from two-axis data", + "inputSchema": { + "type": "object", + "properties": { + "x_values": { + "type": "array", + "items": { + "type": "string" + } + }, + "y_values": { + "type": "array", + "items": { + "type": "number" + } + }, + "x_label": { + "type": "string" + }, + "y_label": { + "type": "string" + }, + "title": { + "type": "string" + }, + "filename": { + "type": "string" + } + }, + "required": [ + "x_values", + "y_values", + "x_label", + "y_label", + "title" + ] + } + }, + { + "name": "create-video-line-chart-from-two-axis-data", + "description": "Create a video line chart from two-axis data", + "inputSchema": { + "type": "object", + "properties": { + "x_values": { + "type": "array", + "items": { + "type": "string" + } + }, + "y_values": { + "type": "array", + "items": { + "type": "number" + } + }, + "x_label": { + "type": "string" + }, + "y_label": { + "type": "string" + }, + "title": { + "type": "string" + }, + "filename": { + "type": "string" + } + }, + "required": [ + "x_values", + "y_values", + "x_label", 
+ "y_label", + "title" + ] + } + } + ] + }, + "mongodb": { + "name": "mongodb", + "display_name": "MongoDB", + "description": "A Model Context Protocol Server for MongoDB.", + "repository": { + "type": "git", + "url": "https://github.com/kiliczsh/mcp-mongo-server" + }, + "homepage": "https://github.com/kiliczsh/mcp-mongo-server", + "author": { + "name": "kiliczsh" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "MongoDB", + "LLM" + ], + "arguments": { + "MONGODB_URI": { + "description": "The connection string for the MongoDB database.", + "required": true, + "example": "mongodb://muhammed:kilic@mongodb.localhost/sample_namespace" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "mcp-mongo-server", + "${MONGODB_URI}" + ] + } + }, + "tools": [ + { + "name": "query", + "description": "Execute a MongoDB query with optional execution plan analysis", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Name of the collection to query" + }, + "filter": { + "type": "object", + "description": "MongoDB query filter" + }, + "projection": { + "type": "object", + "description": "Fields to include/exclude" + }, + "limit": { + "type": "number", + "description": "Maximum number of documents to return" + }, + "explain": { + "type": "string", + "description": "Optional: Get query execution information (queryPlanner, executionStats, or allPlansExecution)", + "enum": [ + "queryPlanner", + "executionStats", + "allPlansExecution" + ] + } + }, + "required": [ + "collection" + ] + } + }, + { + "name": "aggregate", + "description": "Execute a MongoDB aggregation pipeline with optional execution plan analysis", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Name of the collection to aggregate" + }, + "pipeline": { + "type": "array", + "description": "Aggregation pipeline stages" + }, + 
"explain": { + "type": "string", + "description": "Optional: Get aggregation execution information (queryPlanner, executionStats, or allPlansExecution)", + "enum": [ + "queryPlanner", + "executionStats", + "allPlansExecution" + ] + } + }, + "required": [ + "collection", + "pipeline" + ] + } + }, + { + "name": "update", + "description": "Update documents in a MongoDB collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Name of the collection to update" + }, + "filter": { + "type": "object", + "description": "Filter to select documents to update" + }, + "update": { + "type": "object", + "description": "Update operations to apply ($set, $unset, $inc, etc.)" + }, + "upsert": { + "type": "boolean", + "description": "Create a new document if no documents match the filter" + }, + "multi": { + "type": "boolean", + "description": "Update multiple documents that match the filter" + } + }, + "required": [ + "collection", + "filter", + "update" + ] + } + }, + { + "name": "serverInfo", + "description": "Get MongoDB server information including version, storage engine, and other details", + "inputSchema": { + "type": "object", + "properties": { + "includeDebugInfo": { + "type": "boolean", + "description": "Include additional debug information about the server" + } + } + } + }, + { + "name": "insert", + "description": "Insert one or more documents into a MongoDB collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Name of the collection to insert into" + }, + "documents": { + "type": "array", + "description": "Array of documents to insert", + "items": { + "type": "object" + } + }, + "ordered": { + "type": "boolean", + "description": "Optional: If true, perform an ordered insert of the documents. 
If false, perform an unordered insert" + }, + "writeConcern": { + "type": "object", + "description": "Optional: Write concern for the insert operation" + }, + "bypassDocumentValidation": { + "type": "boolean", + "description": "Optional: Allow insert to bypass schema validation" + } + }, + "required": [ + "collection", + "documents" + ] + } + }, + { + "name": "createIndex", + "description": "Create one or more indexes on a MongoDB collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Name of the collection to create indexes on" + }, + "indexes": { + "type": "array", + "description": "Array of index specifications", + "items": { + "type": "object", + "properties": { + "key": { + "type": "object", + "description": "Index key pattern, e.g. { field: 1 } for ascending, { field: -1 } for descending" + }, + "name": { + "type": "string", + "description": "Optional: Name of the index" + }, + "unique": { + "type": "boolean", + "description": "Optional: If true, creates a unique index" + }, + "sparse": { + "type": "boolean", + "description": "Optional: If true, creates a sparse index" + }, + "background": { + "type": "boolean", + "description": "Optional: If true, creates the index in the background" + }, + "expireAfterSeconds": { + "type": "number", + "description": "Optional: Specifies the TTL for documents (time to live)" + }, + "partialFilterExpression": { + "type": "object", + "description": "Optional: Filter expression for partial indexes" + } + }, + "required": [ + "key" + ] + } + }, + "writeConcern": { + "type": "object", + "description": "Optional: Write concern for the index creation" + }, + "commitQuorum": { + "type": [ + "string", + "number" + ], + "description": "Optional: Number of voting members required to create index" + } + }, + "required": [ + "collection", + "indexes" + ] + } + }, + { + "name": "count", + "description": "Count the number of documents in a collection that match a 
query", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Name of the collection to count documents in" + }, + "query": { + "type": "object", + "description": "Optional: Query filter to select documents to count" + }, + "limit": { + "type": "integer", + "description": "Optional: Maximum number of documents to count" + }, + "skip": { + "type": "integer", + "description": "Optional: Number of documents to skip before counting" + }, + "hint": { + "type": "object", + "description": "Optional: Index hint to force query plan" + }, + "readConcern": { + "type": "object", + "description": "Optional: Read concern for the count operation" + }, + "maxTimeMS": { + "type": "integer", + "description": "Optional: Maximum time to allow the count to run" + }, + "collation": { + "type": "object", + "description": "Optional: Collation rules for string comparison" + } + }, + "required": [ + "collection" + ] + } + }, + { + "name": "listCollections", + "description": "List all collections in the MongoDB database", + "inputSchema": { + "type": "object", + "properties": { + "nameOnly": { + "type": "boolean", + "description": "Optional: If true, returns only the collection names instead of full collection info" + }, + "filter": { + "type": "object", + "description": "Optional: Filter to apply to the collections" + } + } + } + } + ] + }, + "data-exploration": { + "name": "data-exploration", + "display_name": "Data Exploration", + "description": "MCP server for autonomous data exploration on .csv-based datasets, providing intelligent insights with minimal effort. 
NOTE: Will execute arbitrary Python code on your machine, please use with caution!", + "repository": { + "type": "git", + "url": "https://github.com/reading-plus-ai/mcp-server-data-exploration" + }, + "homepage": "https://github.com/reading-plus-ai/mcp-server-data-exploration", + "author": { + "name": "reading-plus-ai" + }, + "license": "MIT", + "categories": [ + "Analytics" + ], + "tags": [ + "data", + "exploration" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-ds" + ] + } + }, + "examples": [ + { + "title": "California Real Estate Listing Prices", + "description": "Exploring housing price trends in California using a dataset.", + "prompt": "csv_path: Local path to the CSV file, topic: Housing price trends in California." + }, + { + "title": "Weather in London", + "description": "Investigating daily weather history in London using a dataset.", + "prompt": "csv_path: Local path to the CSV file, topic: Weather in London." + } + ], + "tools": [ + { + "name": "load_csv", + "description": "\nLoad CSV File Tool\n\nPurpose:\nLoad a local CSV file into a DataFrame.\n\nUsage Notes:\n\t\u2022\tIf a df_name is not provided, the tool will automatically assign names sequentially as df_1, df_2, and so on.\n", + "inputSchema": { + "properties": { + "csv_path": { + "title": "Csv Path", + "type": "string" + }, + "df_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Df Name" + } + }, + "required": [ + "csv_path" + ], + "title": "LoadCsv", + "type": "object" + } + }, + { + "name": "run_script", + "description": "\nPython Script Execution Tool\n\nPurpose:\nExecute Python scripts for specific data analytics tasks.\n\nAllowed Actions\n\t1.\tPrint Results: Output will be displayed as the script\u2019s stdout.\n\t2.\t[Optional] Save DataFrames: Store DataFrames in memory for future use by specifying a save_to_memory name.\n\nProhibited Actions\n\t1.\tOverwriting Original 
DataFrames: Do not modify existing DataFrames to preserve their integrity for future tasks.\n\t2.\tCreating Charts: Chart generation is not permitted.\n", + "inputSchema": { + "properties": { + "script": { + "title": "Script", + "type": "string" + }, + "save_to_memory": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Save To Memory" + } + }, + "required": [ + "script" + ], + "title": "RunScript", + "type": "object" + } + } + ] + }, + "tmdb": { + "name": "tmdb", + "display_name": "TMDB", + "description": "This MCP server integrates with The Movie Database (TMDB) API to provide movie information, search capabilities, and recommendations.", + "repository": { + "type": "git", + "url": "https://github.com/Laksh-star/mcp-server-tmdb" + }, + "homepage": "https://github.com/Laksh-star/mcp-server-tmdb", + "author": { + "name": "Laksh-star" + }, + "license": "MIT", + "categories": [ + "Professional Apps" + ], + "tags": [ + "tmdb", + "movies", + "recommendations" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/Laksh-star/mcp-server-tmdb" + ], + "env": { + "TMDB_API_KEY": "${TMDB_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Search for Movies", + "description": "Search for movies by title or keywords", + "prompt": "\"Search for movies about artificial intelligence\"" + }, + { + "title": "Get Trending Movies", + "description": "Get today's or this week's trending movies", + "prompt": "\"What are the trending movies today?\"" + }, + { + "title": "Get Movie Recommendations", + "description": "Get movie recommendations based on a movie ID", + "prompt": "\"Get movie recommendations based on movie ID 550\"" + }, + { + "title": "Get Movie Details", + "description": "Get details of a specific movie by ID", + "prompt": "\"Tell me about the movie with ID 550\"" + } + ], + "arguments": { + "TMDB_API_KEY": { + 
"description": "API key used to authenticate requests to the TMDB API.", + "required": true, + "example": "your_api_key_here" + } + }, + "tools": [ + { + "name": "search_movies", + "description": "Search for movies by title or keywords", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query for movie titles" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "get_recommendations", + "description": "Get movie recommendations based on a movie ID", + "inputSchema": { + "type": "object", + "properties": { + "movieId": { + "type": "string", + "description": "TMDB movie ID to get recommendations for" + } + }, + "required": [ + "movieId" + ] + } + }, + { + "name": "get_trending", + "description": "Get trending movies for a time window", + "inputSchema": { + "type": "object", + "properties": { + "timeWindow": { + "type": "string", + "enum": [ + "day", + "week" + ], + "description": "Time window for trending movies" + } + }, + "required": [ + "timeWindow" + ] + } + } + ] + }, + "minima": { + "name": "minima", + "display_name": "Minima", + "description": "MCP server for RAG on local files", + "repository": { + "type": "git", + "url": "https://github.com/dmayboroda/minima" + }, + "homepage": "https://github.com/dmayboroda/minima", + "author": { + "name": "dmayboroda" + }, + "license": "MPLv2", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "ChatGPT", + "Integration", + "Local", + "Open Source" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/dmayboroda/minima.git@main#subdirectory=mcp-server", + "minima" + ] + } + }, + "arguments": { + "LOCAL_FILES_PATH": { + "description": "Specify the root folder for indexing (on your cloud or local pc). Indexing is a recursive process, meaning all documents within subfolders of this root folder will also be indexed. 
Supported file types: .pdf, .xls, .docx, .txt, .md, .csv.", + "required": true, + "example": "/Users/davidmayboroda/Downloads/PDFs/" + }, + "EMBEDDING_MODEL_ID": { + "description": "Specify the embedding model to use. Currently, only Sentence Transformer models are supported. Testing has been done with sentence-transformers/all-mpnet-base-v2, but other Sentence Transformer models can be used.", + "required": false, + "example": "sentence-transformers/all-mpnet-base-v2" + }, + "EMBEDDING_SIZE": { + "description": "Define the embedding dimension provided by the model, which is needed to configure Qdrant vector storage. Ensure this value matches the actual embedding size of the specified EMBEDDING_MODEL_ID.", + "required": false, + "example": "768" + }, + "OLLAMA_MODEL": { + "description": "Set up the Ollama model, use an ID available on the Ollama site. Please, use LLM model here, not an embedding.", + "required": false, + "example": "qwen2:0.5b" + }, + "RERANKER_MODEL": { + "description": "Specify the reranker model. Currently, we have tested with BAAI rerankers. 
You can explore all available rerankers using a specific link.", + "required": false, + "example": "BAAI/bge-reranker-base" + }, + "USER_ID": { + "description": "Just use your email here, this is needed to authenticate custom GPT to search in your data.", + "required": true, + "example": "user@gmail.com" + }, + "PASSWORD": { + "description": "Put any password here, this is used to create a firebase account for the email specified above.", + "required": true, + "example": "password" + } + }, + "tools": [ + { + "name": "query", + "description": "Find a context in local files (PDF, CSV, DOCX, MD, TXT)", + "inputSchema": { + "properties": { + "text": { + "description": "context to find", + "title": "Text", + "type": "string" + } + }, + "required": [ + "text" + ], + "title": "Query", + "type": "object" + } + } + ] + }, + "fastn-ai-unified-api-mcp-server": { + "name": "fastn-ai-unified-api-mcp-server", + "display_name": "Fastn AI Unified API", + "description": "A remote, dynamic MCP server with a unified API that connects to 1,000+ tools, actions, and workflows, featuring built-in authentication and monitoring.", + "repository": { + "type": "git", + "url": "https://github.com/fastnai/mcp-fastn" + }, + "homepage": "https://github.com/fastnai/mcp-fastn", + "author": { + "name": "fastnai" + }, + "license": "MIT", + "categories": [ + "MCP Tools" + ], + "tags": [ + "Fastn", + "Dynamic Tool Registration", + "API-Driven Operations" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/fastnai/mcp-fastn", + "fastn", + "--api_key", + "${YOUR_API_KEY}", + "--space_id", + "${YOUR_WORKSPACE_ID}" + ] + } + }, + "arguments": { + "YOUR_API_KEY": { + "description": "The API key is required to authenticate and access the Fastn server's features and services.", + "required": true, + "example": "your_actual_api_key_here" + }, + "YOUR_WORKSPACE_ID": { + "description": "The unique identifier for your workspace in Fastn, 
which directs the server to the correct environment and settings.", + "required": true, + "example": "your_actual_workspace_id_here" + } + } + }, + "sentry": { + "name": "sentry", + "display_name": "Sentry", + "description": "Retrieving and analyzing issues from Sentry.io", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/sentry", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "sentry", + "monitoring", + "errors", + "debugging" + ], + "examples": [ + { + "title": "Retrieve issue details from Sentry", + "description": "Use this command to get detailed information about a specific Sentry issue using its ID or URL.", + "prompt": "sentry-issue {issue_id_or_url}" + } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-sentry", + "--auth-token", + "${YOUR_SENTRY_TOKEN}" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "mcp/sentry", + "--auth-token", + "${YOUR_SENTRY_TOKEN}" + ] + }, + "python": { + "type": "python", + "command": "python", + "args": [ + "-m", + "mcp_server_sentry", + "--auth-token", + "${YOUR_SENTRY_TOKEN}" + ] + } + }, + "arguments": { + "YOUR_SENTRY_TOKEN": { + "description": "An authentication token required to access your Sentry account and retrieve issue details.", + "required": true, + "example": "abc123def456" + } + }, + "tools": [ + { + "name": "get_sentry_issue", + "description": "Retrieve and analyze a Sentry issue by ID or URL. 
Use this tool when you need to:\n - Investigate production errors and crashes\n - Access detailed stacktraces from Sentry\n - Analyze error patterns and frequencies\n - Get information about when issues first/last occurred\n - Review error counts and status", + "inputSchema": { + "type": "object", + "properties": { + "issue_id_or_url": { + "type": "string", + "description": "Sentry issue ID or URL to analyze" + } + }, + "required": [ + "issue_id_or_url" + ] + } + } + ], + "is_official": true + }, + "mcp-proxy": { + "name": "mcp-proxy", + "display_name": "MCP Proxy", + "description": "Connect to MCP servers that run on SSE transport, or expose stdio servers as an SSE server.", + "repository": { + "type": "git", + "url": "https://github.com/sparfenyuk/mcp-proxy" + }, + "homepage": "https://github.com/sparfenyuk/mcp-proxy", + "author": { + "name": "sparfenyuk" + }, + "license": "MIT", + "categories": [ + "MCP Tools" + ], + "tags": [ + "proxy", + "sse", + "stdio" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-proxy" + ] + } + } + }, + "dataset-viewer": { + "name": "dataset-viewer", + "display_name": "Dataset Viewer", + "description": "Browse and analyze Hugging Face datasets with features like search, filtering, statistics, and data export", + "repository": { + "type": "git", + "url": "https://github.com/privetin/dataset-viewer" + }, + "homepage": "https://github.com/privetin/dataset-viewer", + "author": { + "name": "privetin", + "url": "https://github.com/privetin" + }, + "license": "MIT", + "categories": [ + "Analytics" + ], + "tags": [ + "Hugging Face", + "datasets", + "data analysis" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/privetin/dataset-viewer", + "dataset-viewer" + ] + } + }, + "examples": [ + { + "title": "Validate a dataset", + "description": "Check if a dataset exists and is accessible.", + "prompt": "{\"dataset\": 
\"stanfordnlp/imdb\"}" + }, + { + "title": "Get dataset information", + "description": "Retrieve detailed information about a dataset.", + "prompt": "{\"dataset\": \"stanfordnlp/imdb\"}" + }, + { + "title": "Search dataset contents", + "description": "Search for text within a dataset.", + "prompt": "{\"dataset\": \"stanfordnlp/imdb\",\"config\": \"plain_text\",\"split\": \"train\",\"query\": \"great movie\"}" + }, + { + "title": "Filter and sort rows", + "description": "Filter rows using SQL-like conditions and sort them.", + "prompt": "{\"dataset\": \"stanfordnlp/imdb\",\"config\": \"plain_text\",\"split\": \"train\",\"where\": \"label = 'positive'\",\"orderby\": \"text DESC\",\"page\": 0}" + }, + { + "title": "Get dataset statistics", + "description": "Get statistics about a dataset split.", + "prompt": "{\"dataset\": \"stanfordnlp/imdb\",\"config\": \"plain_text\",\"split\": \"train\"}" + } + ], + "arguments": { + "HUGGINGFACE_TOKEN": { + "description": "Your Hugging Face API token for accessing private datasets", + "required": false, + "example": "" + } + }, + "tools": [ + { + "name": "get_info", + "description": "Get detailed information about a Hugging Face dataset including description, features, splits, and statistics. 
Run validate first to check if the dataset exists and is accessible.", + "inputSchema": { + "type": "object", + "properties": { + "dataset": { + "type": "string", + "description": "Hugging Face dataset identifier in the format owner/dataset", + "pattern": "^[^/]+/[^/]+$", + "examples": [ + "ylecun/mnist", + "stanfordnlp/imdb" + ] + }, + "auth_token": { + "type": "string", + "description": "Hugging Face auth token for private/gated datasets", + "optional": true + } + }, + "required": [ + "dataset" + ] + } + }, + { + "name": "get_rows", + "description": "Get paginated rows from a Hugging Face dataset", + "inputSchema": { + "type": "object", + "properties": { + "dataset": { + "type": "string", + "description": "Hugging Face dataset identifier in the format owner/dataset", + "pattern": "^[^/]+/[^/]+$", + "examples": [ + "ylecun/mnist", + "stanfordnlp/imdb" + ] + }, + "config": { + "type": "string", + "description": "Dataset configuration/subset name. Use get_info to list available configs", + "examples": [ + "default", + "en", + "es" + ] + }, + "split": { + "type": "string", + "description": "Dataset split name. 
Splits partition the data for training/evaluation", + "examples": [ + "train", + "validation", + "test" + ] + }, + "page": { + "type": "integer", + "description": "Page number (0-based), returns 100 rows per page", + "default": 0 + }, + "auth_token": { + "type": "string", + "description": "Hugging Face auth token for private/gated datasets", + "optional": true + } + }, + "required": [ + "dataset", + "config", + "split" + ] + } + }, + { + "name": "get_first_rows", + "description": "Get first rows from a Hugging Face dataset split", + "inputSchema": { + "type": "object", + "properties": { + "dataset": { + "type": "string", + "description": "Hugging Face dataset identifier in the format owner/dataset", + "pattern": "^[^/]+/[^/]+$", + "examples": [ + "ylecun/mnist", + "stanfordnlp/imdb" + ] + }, + "config": { + "type": "string", + "description": "Dataset configuration/subset name. Use get_info to list available configs", + "examples": [ + "default", + "en", + "es" + ] + }, + "split": { + "type": "string", + "description": "Dataset split name. Splits partition the data for training/evaluation", + "examples": [ + "train", + "validation", + "test" + ] + }, + "auth_token": { + "type": "string", + "description": "Hugging Face auth token for private/gated datasets", + "optional": true + } + }, + "required": [ + "dataset", + "config", + "split" + ] + } + }, + { + "name": "search_dataset", + "description": "Search for text within a Hugging Face dataset", + "inputSchema": { + "type": "object", + "properties": { + "dataset": { + "type": "string", + "description": "Hugging Face dataset identifier in the format owner/dataset", + "pattern": "^[^/]+/[^/]+$", + "examples": [ + "ylecun/mnist", + "stanfordnlp/imdb" + ] + }, + "config": { + "type": "string", + "description": "Dataset configuration/subset name. Use get_info to list available configs", + "examples": [ + "default", + "en", + "es" + ] + }, + "split": { + "type": "string", + "description": "Dataset split name. 
Splits partition the data for training/evaluation", + "examples": [ + "train", + "validation", + "test" + ] + }, + "query": { + "type": "string", + "description": "Text to search for in the dataset" + }, + "auth_token": { + "type": "string", + "description": "Hugging Face auth token for private/gated datasets", + "optional": true + } + }, + "required": [ + "dataset", + "config", + "split", + "query" + ] + } + }, + { + "name": "filter", + "description": "Filter rows in a Hugging Face dataset using SQL-like conditions", + "inputSchema": { + "type": "object", + "properties": { + "dataset": { + "type": "string", + "description": "Hugging Face dataset identifier in the format owner/dataset", + "pattern": "^[^/]+/[^/]+$", + "examples": [ + "ylecun/mnist", + "stanfordnlp/imdb" + ] + }, + "config": { + "type": "string", + "description": "Dataset configuration/subset name. Use get_info to list available configs", + "examples": [ + "default", + "en", + "es" + ] + }, + "split": { + "type": "string", + "description": "Dataset split name. 
Splits partition the data for training/evaluation", + "examples": [ + "train", + "validation", + "test" + ] + }, + "where": { + "type": "string", + "description": "SQL-like WHERE clause to filter rows", + "examples": [ + "column = \"value\"", + "score > 0.5", + "text LIKE \"%query%\"" + ] + }, + "orderby": { + "type": "string", + "description": "SQL-like ORDER BY clause to sort results", + "optional": true, + "examples": [ + "column ASC", + "score DESC", + "name ASC, id DESC" + ] + }, + "page": { + "type": "integer", + "description": "Page number for paginated results (100 rows per page)", + "default": 0, + "minimum": 0 + }, + "auth_token": { + "type": "string", + "description": "Hugging Face auth token for private/gated datasets", + "optional": true + } + }, + "required": [ + "dataset", + "config", + "split", + "where" + ] + } + }, + { + "name": "get_statistics", + "description": "Get statistics about a Hugging Face dataset", + "inputSchema": { + "type": "object", + "properties": { + "dataset": { + "type": "string", + "description": "Hugging Face dataset identifier in the format owner/dataset", + "pattern": "^[^/]+/[^/]+$", + "examples": [ + "ylecun/mnist", + "stanfordnlp/imdb" + ] + }, + "config": { + "type": "string", + "description": "Dataset configuration/subset name. Use get_info to list available configs", + "examples": [ + "default", + "en", + "es" + ] + }, + "split": { + "type": "string", + "description": "Dataset split name. 
Splits partition the data for training/evaluation", + "examples": [ + "train", + "validation", + "test" + ] + }, + "auth_token": { + "type": "string", + "description": "Hugging Face auth token for private/gated datasets", + "optional": true + } + }, + "required": [ + "dataset", + "config", + "split" + ] + } + }, + { + "name": "get_parquet", + "description": "Export Hugging Face dataset split as Parquet file", + "inputSchema": { + "type": "object", + "properties": { + "dataset": { + "type": "string", + "description": "Hugging Face dataset identifier in the format owner/dataset", + "pattern": "^[^/]+/[^/]+$", + "examples": [ + "ylecun/mnist", + "stanfordnlp/imdb" + ] + }, + "auth_token": { + "type": "string", + "description": "Hugging Face auth token for private/gated datasets", + "optional": true + } + }, + "required": [ + "dataset" + ] + } + }, + { + "name": "validate", + "description": "Check if a Hugging Face dataset exists and is accessible", + "inputSchema": { + "type": "object", + "properties": { + "dataset": { + "type": "string", + "description": "Hugging Face dataset identifier in the format owner/dataset", + "pattern": "^[^/]+/[^/]+$", + "examples": [ + "ylecun/mnist", + "stanfordnlp/imdb" + ] + }, + "auth_token": { + "type": "string", + "description": "Hugging Face auth token for private/gated datasets", + "optional": true + } + }, + "required": [ + "dataset" + ] + } + } + ] + }, + "intercom": { + "name": "intercom", + "display_name": "Intercom Support Server", + "description": "An MCP-compliant server for retrieving customer support tickets from Intercom. 
This tool enables AI assistants like Claude Desktop and Cline to access and analyze your Intercom support tickets.", + "repository": { + "type": "git", + "url": "https://github.com/raoulbia-ai/mcp-server-for-intercom" + }, + "homepage": "https://github.com/raoulbia-ai/mcp-server-for-intercom", + "author": { + "name": "raoulbia-ai" + }, + "license": "Apache-2.0", + "categories": [ + "Messaging" + ], + "tags": [ + "Intercom", + "support-tickets", + "API" + ], + "examples": [ + { + "title": "List Tickets Example", + "description": "Retrieve support tickets from Intercom between specific dates", + "prompt": "{\"startDate\":\"15/01/2025\",\"endDate\":\"21/01/2025\",\"keyword\":\"billing\"}" + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/raoulbia-ai/mcp-server-for-intercom" + ], + "env": { + "INTERCOM_ACCESS_TOKEN": "your-intercom-access-token" + } + } + }, + "arguments": { + "INTERCOM_ACCESS_TOKEN": { + "description": "Your Intercom API token used to authenticate requests to the Intercom API.", + "required": true, + "example": "your_intercom_api_token" + } + } + }, + "xero-mcp-server@john-zhang-dev": { + "name": "@john-zhang-dev/xero-mcp-server", + "display_name": "Xero", + "description": "Enabling clients to interact with Xero system for streamlined accounting, invoicing, and business operations.", + "repository": { + "type": "git", + "url": "https://github.com/john-zhang-dev/xero-mcp" + }, + "license": "MIT", + "examples": [ + { + "title": "Visualize my financial position over the last month", + "description": "", + "prompt": "Visualize my financial position over the last month" + }, + { + "title": "Track my spendings over last week", + "description": "", + "prompt": "Track my spendings over last week" + }, + { + "title": "Add all transactions from the monthly statement into my revenue account (account code 201) as receive money", + "description": "", + "prompt": "Add all transactions from the 
monthly statement into my revenue account (account code 201) as receive money" + } + ], + "author": { + "name": "john-zhang-dev" + }, + "homepage": "https://github.com/john-zhang-dev/xero-mcp", + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "xero-mcp@latest" + ], + "env": { + "XERO_CLIENT_ID": "${XERO_CLIENT_ID}", + "XERO_CLIENT_SECRET": "${XERO_CLIENT_SECRET}", + "XERO_REDIRECT_URI": "${XERO_REDIRECT_URI}" + } + } + }, + "arguments": { + "XERO_CLIENT_ID": { + "description": "The Client ID obtained from the Xero Developer center after creating an OAuth 2.0 app, required for authentication.", + "required": true, + "example": "YOUR_CLIENT_ID" + }, + "XERO_CLIENT_SECRET": { + "description": "The Client Secret generated in the Xero Developer center, necessary for authenticating requests.", + "required": true, + "example": "YOUR_CLIENT_SECRET" + }, + "XERO_REDIRECT_URI": { + "description": "The URI to redirect to after authentication, should typically match the redirect URI specified in the OAuth 2.0 app settings.", + "required": false, + "example": "http://localhost:5000/callback" + } + }, + "categories": [ + "Finance" + ], + "tools": [ + { + "name": "authenticate", + "description": "Authenticate with Xero using OAuth2", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "create_bank_transactions", + "description": "Creates one or more spent or received money transaction. 
Only use this tool when user has directly and explicitly ask you to create transactions.", + "inputSchema": { + "type": "object", + "description": "Transactions with an array of BankTransaction objects to create", + "properties": { + "pagination": { + "$ref": "#/components/schemas/Pagination" + }, + "Warnings": { + "description": "Displays array of warning messages from the API", + "type": "array", + "items": { + "$ref": "#/components/schemas/ValidationError" + } + }, + "BankTransactions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BankTransaction" + } + } + }, + "example": "{ bankTransactions: [{ type: \"SPEND\", date: \"2023-01-01\", reference: \"INV-001\", subTotal: \"100\", total: \"115\", totalTax: \"15\", lineItems: [{ accountCode: \"401\", description: \"taxi fare\", lineAmount: \"115\" }], contact: { contactId: \"00000000-0000-0000-0000-000000000000\", name: \"John Doe\" }, \"bankAccount\": { \"accountID\": \"6f7594f2-f059-4d56-9e67-47ac9733bfe9\", \"Code\": \"088\", \"Name\": \"Business Wells Fargo\" } }]}" + } + }, + { + "name": "create_contacts", + "description": "Creates one or multiple contacts in a Xero organisation. Only use this tool when user has directly and explicitly ask you to create contact.", + "inputSchema": { + "type": "object", + "description": "Contacts with an array of Contact objects to create", + "properties": { + "pagination": { + "$ref": "#/components/schemas/Pagination" + }, + "Warnings": { + "description": "Displays array of warning messages from the API", + "type": "array", + "items": { + "$ref": "#/components/schemas/ValidationError" + } + }, + "Contacts": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Contact" + } + } + }, + "example": "{ contacts: [{ name: \"John Doe\" }]}" + } + }, + { + "name": "get_balance_sheet", + "description": "Returns a balance sheet for the end of the month of the specified date. 
It also returns the value at the end of the same month for the previous year.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list_accounts", + "description": "Retrieves the full chart of accounts", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list_bank_transactions", + "description": "Retrieves any spent or received money transactions", + "inputSchema": { + "type": "object", + "properties": { + "where": { + "type": "string", + "description": "Filter bank transactions. See example", + "example": "Date >= DateTime(2015, 01, 01) && Date < DateTime(2015, 12, 31)" + } + } + } + }, + { + "name": "list_contacts", + "description": "Retrieves all contacts in a Xero organisation", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list_invoices", + "description": "Retrieves sales invoices or purchase bills", + "inputSchema": { + "type": "object", + "properties": { + "where": { + "type": "string", + "description": "Filter invoices. See example", + "example": "Date >= DateTime(2015, 01, 01) && Date < DateTime(2015, 12, 31), DueDate < DateTime(2015, 12, 31)" + } + } + } + }, + { + "name": "list_journals", + "description": "Retrieves journals", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list_organisations", + "description": "Retrieves Xero organisation details", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list_payments", + "description": "Retrieves payments for invoices and credit notes", + "inputSchema": { + "type": "object", + "properties": { + "where": { + "type": "string", + "description": "Filter payments. 
See example", + "example": "Date >= DateTime(2015, 01, 01) && Date < DateTime(2015, 12, 31)" + } + } + } + }, + { + "name": "list_quotes", + "description": "Retrieves sales quotes", + "inputSchema": { + "type": "object", + "properties": {} + } + } + ] + }, + "vega-lite": { + "name": "vega-lite", + "display_name": "Vega-Lite Data Visualization", + "description": "Generate visualizations from fetched data using the VegaLite format and renderer.", + "repository": { + "type": "git", + "url": "https://github.com/isaacwasserman/mcp-vegalite-server" + }, + "homepage": "https://github.com/isaacwasserman/mcp-vegalite-server", + "author": { + "name": "isaacwasserman" + }, + "license": "[NOT FOUND]", + "categories": [ + "Media Creation" + ], + "tags": [ + "visualization", + "data", + "vega-lite" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/isaacwasserman/mcp-vegalite-server", + "mcp_server_vegalite" + ] + } + }, + "examples": [ + { + "title": "Saving Data", + "description": "Use the save_data tool to save a table of data for visualization.", + "prompt": "save_data(name='my_table', data=[{'x': 1, 'y': 2}, {'x': 2, 'y': 3}])" + }, + { + "title": "Visualizing Data", + "description": "Use the visualize_data tool to visualize saved data using Vega-Lite syntax.", + "prompt": "visualize_data(data_name='my_table', vegalite_specification='{\"mark\": \"point\", \"encoding\": {\"x\":{\"field\":\"x\",\"type\":\"quantitative\"},\"y\":{\"field\":\"y\",\"type\":\"quantitative\"}}}')" + } + ], + "tools": [ + { + "name": "save_data", + "description": "A tool which allows you to save data to a named table for later use in visualizations.\nWhen to use this tool:\n- Use this tool when you have data that you want to visualize later.\nHow to use this tool:\n- Provide the name of the table to save the data to (for later reference) and the data itself.", + "inputSchema": { + "type": "object", + "properties": { + "name": { 
+ "type": "string", + "description": "The name of the table to save the data to" + }, + "data": { + "type": "array", + "items": { + "type": "object", + "description": "Row of the table as a dictionary/object" + }, + "description": "The data to save" + } + }, + "required": [ + "name", + "data" + ] + } + }, + { + "name": "visualize_data", + "description": "A tool which allows you to produce a data visualization using the Vega-Lite grammar.\nWhen to use this tool:\n- At times, it will be advantageous to provide the user with a visual representation of some data, rather than just a textual representation.\n- This tool is particularly useful when the data is complex or has many dimensions, making it difficult to understand in a tabular format. It is not useful for singular data points.\nHow to use this tool:\n- Prior to visualization, data must be saved to a named table using the save_data tool.\n- After saving the data, use this tool to visualize the data by providing the name of the table with the saved data and a Vega-Lite specification.", + "inputSchema": { + "type": "object", + "properties": { + "data_name": { + "type": "string", + "description": "The name of the data table to visualize" + }, + "vegalite_specification": { + "type": "string", + "description": "The vegalite v5 specification for the visualization. Do not include the data field, as this will be added automatically." 
+ } + }, + "required": [ + "data_name", + "vegalite_specification" + ] + } + } + ] + }, + "glean": { + "name": "glean", + "display_name": "Glean", + "description": "A server that uses Glean API to search and chat.", + "repository": { + "type": "git", + "url": "https://github.com/longyi1207/glean-mcp-server" + }, + "homepage": "https://github.com/longyi1207/glean-mcp-server", + "author": { + "name": "longyi1207", + "url": "https://github.com/longyi1207" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "glean", + "search", + "chat", + "docker" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/longyi1207/glean-mcp-server" + ], + "env": { + "GLEAN_API_KEY": "YOUR_API_KEY_HERE", + "GLEAN_DOMAIN": "YOUR_DOMAIN_HERE" + } + } + }, + "arguments": { + "GLEAN_API_KEY": { + "description": "The API key required to authenticate with the Glean API.", + "required": true, + "example": "YOUR_API_KEY_HERE" + }, + "GLEAN_DOMAIN": { + "description": "The domain used for the Glean API service operations.", + "required": true, + "example": "YOUR_DOMAIN_HERE" + } + } + }, + "google-drive": { + "name": "google-drive", + "display_name": "Google Drive", + "description": "File access and search capabilities for Google Drive", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/gdrive", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "google drive", + "files", + "API" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-gdrive" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-v", + "mcp-gdrive:/gdrive-server", + "-e", + 
"GDRIVE_CREDENTIALS_PATH=/gdrive-server/credentials.json", + "mcp/gdrive" + ] + } + }, + "tools": [ + { + "name": "search", + "description": "Search for files in Google Drive.", + "inputSchema": { + "query": { + "type": "string", + "description": "Search query" + } + }, + "required": [ + "query" + ] + } + ], + "is_official": true + }, + "excel": { + "name": "excel", + "display_name": "Excel", + "description": "Excel manipulation including data reading/writing, worksheet management, formatting, charts, and pivot table.", + "repository": { + "type": "git", + "url": "https://github.com/haris-musa/excel-mcp-server" + }, + "homepage": "https://github.com/haris-musa/excel-mcp-server", + "author": { + "name": "haris-musa" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "Excel Manipulation", + "Python" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/haris-musa/excel-mcp-server", + "excel-mcp-server" + ], + "env": { + "EXCEL_FILES_PATH": "${EXCEL_FILES_PATH}" + } + } + }, + "arguments": { + "EXCEL_FILES_PATH": { + "description": "Directory where Excel files will be stored.", + "required": false, + "example": "/path/to/excel/files" + } + }, + "tools": [ + { + "name": "create_workbook", + "description": "Creates a new Excel workbook.", + "inputSchema": { + "filepath": { + "type": "string" + } + }, + "required": [ + "filepath" + ] + }, + { + "name": "create_worksheet", + "description": "Creates a new worksheet in an existing workbook.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + } + }, + "required": [ + "filepath", + "sheet_name" + ] + }, + { + "name": "get_workbook_metadata", + "description": "Get metadata about workbook including sheets and ranges.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "include_ranges": { + "type": "boolean" + } + }, + "required": [ + "filepath" + ] + }, + { + "name": 
"write_data_to_excel", + "description": "Write data to Excel worksheet.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "data": { + "type": "array" + }, + "start_cell": { + "type": "string" + } + }, + "required": [ + "filepath", + "sheet_name", + "data" + ] + }, + { + "name": "read_data_from_excel", + "description": "Read data from Excel worksheet.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "start_cell": { + "type": "string" + }, + "end_cell": { + "type": "string" + }, + "preview_only": { + "type": "boolean" + } + }, + "required": [ + "filepath", + "sheet_name" + ] + }, + { + "name": "format_range", + "description": "Apply formatting to a range of cells.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "start_cell": { + "type": "string" + }, + "end_cell": { + "type": "string" + }, + "bold": { + "type": "boolean" + }, + "italic": { + "type": "boolean" + }, + "underline": { + "type": "boolean" + }, + "font_size": { + "type": "integer" + }, + "font_color": { + "type": "string" + }, + "bg_color": { + "type": "string" + }, + "border_style": { + "type": "string" + }, + "border_color": { + "type": "string" + }, + "number_format": { + "type": "string" + }, + "alignment": { + "type": "string" + }, + "wrap_text": { + "type": "boolean" + }, + "merge_cells": { + "type": "boolean" + }, + "protection": { + "type": "object" + }, + "conditional_format": { + "type": "object" + } + }, + "required": [ + "filepath", + "sheet_name", + "start_cell" + ] + }, + { + "name": "merge_cells", + "description": "Merge a range of cells.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "start_cell": { + "type": "string" + }, + "end_cell": { + "type": "string" + } + }, + "required": [ + "filepath", + "sheet_name", + "start_cell", + "end_cell" + ] + }, + { + "name": 
"unmerge_cells", + "description": "Unmerge a previously merged range of cells.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "start_cell": { + "type": "string" + }, + "end_cell": { + "type": "string" + } + }, + "required": [ + "filepath", + "sheet_name", + "start_cell", + "end_cell" + ] + }, + { + "name": "apply_formula", + "description": "Apply Excel formula to cell.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "cell": { + "type": "string" + }, + "formula": { + "type": "string" + } + }, + "required": [ + "filepath", + "sheet_name", + "cell", + "formula" + ] + }, + { + "name": "validate_formula_syntax", + "description": "Validate Excel formula syntax without applying it.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "cell": { + "type": "string" + }, + "formula": { + "type": "string" + } + }, + "required": [ + "filepath", + "sheet_name", + "cell", + "formula" + ] + }, + { + "name": "create_chart", + "description": "Create chart in worksheet.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "data_range": { + "type": "string" + }, + "chart_type": { + "type": "string" + }, + "target_cell": { + "type": "string" + }, + "title": { + "type": "string" + }, + "x_axis": { + "type": "string" + }, + "y_axis": { + "type": "string" + } + }, + "required": [ + "filepath", + "sheet_name", + "data_range", + "chart_type", + "target_cell" + ] + }, + { + "name": "create_pivot_table", + "description": "Create pivot table in worksheet.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "data_range": { + "type": "string" + }, + "target_cell": { + "type": "string" + }, + "rows": { + "type": "array" + }, + "values": { + "type": "array" + }, + "columns": { + "type": "array" + }, + "agg_func": { + "type": 
"string" + } + }, + "required": [ + "filepath", + "sheet_name", + "data_range", + "target_cell", + "rows", + "values" + ] + }, + { + "name": "copy_worksheet", + "description": "Copy worksheet within workbook.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "source_sheet": { + "type": "string" + }, + "target_sheet": { + "type": "string" + } + }, + "required": [ + "filepath", + "source_sheet", + "target_sheet" + ] + }, + { + "name": "delete_worksheet", + "description": "Delete worksheet from workbook.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + } + }, + "required": [ + "filepath", + "sheet_name" + ] + }, + { + "name": "rename_worksheet", + "description": "Rename worksheet in workbook.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "old_name": { + "type": "string" + }, + "new_name": { + "type": "string" + } + }, + "required": [ + "filepath", + "old_name", + "new_name" + ] + }, + { + "name": "copy_range", + "description": "Copy a range of cells to another location.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "source_start": { + "type": "string" + }, + "source_end": { + "type": "string" + }, + "target_start": { + "type": "string" + }, + "target_sheet": { + "type": "string" + } + }, + "required": [ + "filepath", + "sheet_name", + "source_start", + "source_end", + "target_start" + ] + }, + { + "name": "delete_range", + "description": "Delete a range of cells and shift remaining cells.", + "inputSchema": { + "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "start_cell": { + "type": "string" + }, + "end_cell": { + "type": "string" + }, + "shift_direction": { + "type": "string" + } + }, + "required": [ + "filepath", + "sheet_name", + "start_cell", + "end_cell" + ] + }, + { + "name": "validate_excel_range", + "description": "Validate if a range exists and is properly formatted.", + "inputSchema": { 
+ "filepath": { + "type": "string" + }, + "sheet_name": { + "type": "string" + }, + "start_cell": { + "type": "string" + }, + "end_cell": { + "type": "string" + } + }, + "required": [ + "filepath", + "sheet_name", + "start_cell" + ] + } + ] + }, + "edubase": { + "display_name": "EduBase MCP server", + "repository": { + "type": "git", + "url": "https://github.com/EduBase/MCP" + }, + "homepage": "https://www.edubase.net", + "author": { + "name": "EduBase" + }, + "license": "MIT", + "tags": [ + "education", + "learning", + "quiz", + "assessment", + "API" + ], + "arguments": { + "EDUBASE_API_URL": { + "description": "URL to the EduBase API", + "required": true, + "example": "https://domain.edubase.net/api" + }, + "EDUBASE_API_APP": { + "description": "Your integration app ID", + "required": true, + "example": "your_integration_app_id" + }, + "EDUBASE_API_KEY": { + "description": "Your integration secret key", + "required": true, + "example": "your_integration_secret_key" + } + }, + "installations": { + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "EDUBASE_API_URL", + "-e", + "EDUBASE_API_APP", + "-e", + "EDUBASE_API_KEY", + "edubase/mcp" + ], + "env": { + "EDUBASE_API_URL": "https://domain.edubase.net/api", + "EDUBASE_API_APP": "your_integration_app_id", + "EDUBASE_API_KEY": "your_integration_secret_key" + }, + "description": "Run using Docker", + "recommended": false + }, + "custom": { + "type": "custom", + "command": "node", + "args": [ + "/path/to/dist/index.js" + ], + "env": { + "EDUBASE_API_URL": "https://domain.edubase.net/api", + "EDUBASE_API_APP": "your_integration_app_id", + "EDUBASE_API_KEY": "your_integration_secret_key" + }, + "description": "Run using Node.js", + "recommended": true + } + }, + "examples": [ + { + "title": "Collaborative Education Management", + "description": "Collaboratively creating and uploading questions, scheduling exams and analyzing user results with Claude", + "prompt": 
"I'd like to create a new quiz in EduBase with 5 multiple choice questions about basic algebra." + } + ], + "name": "edubase", + "description": "\"EduBase", + "categories": [ + "MCP Tools" + ], + "tools": [ + { + "name": "edubase_get_question", + "description": "Check existing question. Questions are the lowest level in the EduBase hierarchy, serving as the building blocks for Quiz sets.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "external unique question identifier" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "edubase_post_question", + "description": "Publish or update a question. Questions are the atomic building blocks of the EduBase Quiz system and represent the lowest level in the hierarchy (Questions -> Quiz sets -> Exams).", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "External unique question identifier for question management.\nOn repeated uploads, the questions are updated (rather then added) based on this value, which can be an arbitrary text.\nIf the question already exists at upload time with the same external identifier (in the given folder or Quiz set), the existing question will be updated instead of being added as a new one.\n- Use cases:\n - Integration with external systems\n - Version control\n - Batch updates\n - Content synchronization\n- Best practices:\n - Use consistent naming conventions\n - Include version, source or date information\n - Consider hierarchical IDs for related content\nExample:\n- id=MATHEMATICS_ARITHMETIC_BASIC_ADDITION_STATIC_001\n- type=numerical\n- question=What is 2+2?\n- answer=4" + }, + "type": { + "type": "string", + "description": "Type of the question.\nEduBase supports various question types to accommodate different assessment needs:\n- Basic Types:\n - GENERIC: Strict matching including spaces and punctuation\n - TEXT: Basic text input with flexible matching (ignores spaces 
and punctuation)\n - FREE-TEXT: Extended text response with semi-automatic grading\n - READING: Non-assessed text display for complex question groups\n- Choice-Based Types:\n - CHOICE: Single correct answer selection\n - MULTIPLE-CHOICE: Multiple correct answers\n - ORDER: Sequence arrangement (arrange items in correct order)\n - TRUE/FALSE: Statement evaluation (true statements in ANSWER, false in OPTIONS)\n- Numerical Types:\n - NUMERIC: Numerical value validation with fractions, constants, intervals\n - DATE/TIME: Calendar date validation with adjustable precision\n - EXPRESSION: Mathematical expression evaluation\n- Advanced Types:\n - MATRIX/MATRIX:EXPRESSION: Matrix evaluation (format: [a;b|c;d] for 2x2)\n - SET/SET:TEXT: Unordered collection validation\n - FILE: File submission evaluation\nExample:\ntype=numerical" + }, + "question": { + "type": "string", + "description": "The main question text that will be displayed to the test taker.\nSupports rich formatting options:\n- LaTeX Support (requires QUESTION_FORMAT=LATEX):\n - Inline: $$...$$\n - Block: $$$$...$$$$\n - IMPORTANT: When using LaTeX in questions, you MUST use double dollar signs ($$...$$) for inline math or quadruple dollar signs ($$$$...$$$$) for block math.\n - Single dollar signs ($...$) are NOT supported and will not render correctly. 
The inline or block method must be used, as $...$ won't work!\n- Parameters: Use curly braces {parameter_name} (defined in PARAMETERS field)\n- Quick expressions: Use ~~~expression~~~ for simple parameter calculations, e.g., area of a circle is ~~~{r}*{r}*pi~~~\n- Style formatting with EduTags:\n - Bold: [[B]]...[[/B]]\n - Italic: [[I]]...[[/I]]\n - Underline: [[U]]...[[/U]]\n - Subscript: [[SUB]]...[[/SUB]], Superscript: [[SUP]]...[[/SUP]]\n - Code: [[CODE]]...[[/CODE]], [[CODEBLOCK]]...[[/CODEBLOCK]]\n - Colors: [[COLOR:{color}]]...[[/COLOR]], [[BACKGROUND:{color}]]...[[/BACKGROUND]]\n- Tables: Use [[..]] format with semicolons for columns, vertical bars for rows, e.g., [[Header 1; Header 2 | Data 1; Data 2]]\n- Answer placeholders: [[___]] (3 underscores), for fill-in-the-gaps\nExample:\nquestion=Calculate the area of a circle with radius {r} using $$A = \\pi r^2$$" + }, + "question_format": { + "type": "string", + "description": "Controls question text rendering.\n- NORMAL: Default text formatting with standard font size, recommended for most tasks\n- LATEX: Enables LaTeX for mathematical, scientific notations (using KaTeX)\n- LONG: Smaller font with automatic paragraph breaks (ideal for lengthy text)\nExample:\nquestion_format=LATEX" + }, + "answer": { + "type": "string", + "description": "The correct answer(s) for the question.\n- For multiple answers, separate with triple-and operator (\"&&&\")\n- Parameters can be used in curly braces {param_name}\n- LaTeX Support (requires QUESTION_FORMAT=LATEX):\n - Inline: $$...$$\n - Block: $$$$...$$$$\n - IMPORTANT: When using LaTeX in answer, you MUST use double dollar signs ($$...$$) for inline math or quadruple dollar signs ($$$$...$$$$) for block math.\n - Single dollar signs ($...$) are NOT supported and will not render correctly. 
The inline or block method must be used, as $...$ won't work!\n- Usage by question type:\n - CHOICE: The correct option\n - MULTIPLE-CHOICE: All correct options\n - TEXT/NUMERICAL/EXPRESSION: Expected response(s)\n - ORDER: Items in correct sequence\n - TRUE/FALSE: True statements (false statements go in OPTIONS)\n - MATRIX types: Use format [a;b|c;d] for matrices\n - SET types: Unordered collection of elements\nExample:\nanswer=Paris\nanswer=sin(x)^2+cos(x)^2 # with type = EXPRESSION\nanswer=$$sin^2(x)+cos^2(x)$$ # with type = CHOICE so it renders correctly" + }, + "language": { + "type": "string", + "description": "The language of the question.\n- Alpha-2 code according to ISO 639-1\nExample:\nlanguage=hu # Hungarian" + }, + "image": { + "type": "string", + "description": "Attach an image to the question.\nSupported formats: PNG, JPEG, WebP\nFormat: filename=data, where data is either a base64-encoded image or a URL" + }, + "answer_order": { + "type": "string", + "description": "Controls whether the sequence of multiple answers matters.\n- Plus sign (+) to indicate YES\n- Blank field or minus sign (-) indicates NO (default)\n- When using answer_label, this is automatically activated\n- Essential for questions where sequence is important (e.g., steps in a process)\nExample:\nanswer_order=+\nExample API call:\nid=europe_cities_population\ntype=text\nquestion=List the following European cities in descending order by population (largest first)\nanswer=London &&& Madrid &&& Paris\nanswer_order=+" + }, + "answer_label": { + "type": "string", + "description": "Text displayed in/above the input field during the test.\n- Separate multiple labels with triple-and operators (\"&&&\")\n- Automatically activates the answer_order function\n- Perfect for multi-part questions where each part needs clear labeling\n- Useful for creating pairing/matching questions\nExample:\nanswer_label=a) Distance (km) &&& b) Time (hours) &&& c) Speed (km/h)\nExample API 
call:\nid=basic_math\ntype=numerical\nquestion=Given the number 16:\\n\\na) What is double this number?\\n\\nb) What is half of this number?\\n\\nc) What is this number plus 10?\nanswer=32 &&& 8 &&& 26\nanswer_label=a) Double &&& b) Half &&& c) Plus 10\npoints=3" + }, + "answer_hide": { + "type": "string", + "description": "Controls whether correct answers are hidden on the results page.\n- Plus sign (+) to indicate YES\n- Blank field or minus sign (-) indicates NO (default)\n- Useful for test security and preventing answer sharing\n- Critical for reusable questions and practice tests\nExample:\nanswer_hide=+\nExample API call:\nid=uk_countries\ntype=text\nquestion=Name any of the countries within the United Kingdom!\nanswer=England &&& Northern Ireland &&& Scotland &&& Wales\nanswer_require=1\nanswer_hide=+" + }, + "answer_indefinite": { + "type": "string", + "description": "Allows users to add any number of input fields using + and - buttons.\n- Plus sign (+) to indicate YES\n- Blank field or minus sign (-) indicates NO (default)\n- Answer labels will not appear when this is enabled\n- Ideal for brainstorming exercises or questions with variable number of answers\nExample:\nanswer_indefinite=+\nExample API call:\nid=name_countries\ntype=text\nquestion=Name as many European countries as you can think of.\nanswer=France &&& Germany &&& Italy &&& Spain &&& United Kingdom &&& ...\nanswer_indefinite=+" + }, + "answer_format": { + "type": "string", + "description": "Defines how to display the answer on the results page.\n- Only applicable for FREE-TEXT questions\n- Format: type or type:value\n- Available types:\n - normal: standard text (default)\n - code: with syntax highlighting (specify language after colon)\nExample:\nanswer_format=code:python\nanswer_format=code:sql\nExample API call:\nid=sql_basics\ntype=free-text\nquestion=Write a SQL query to select all columns from the \"users\" table where the age is greater than 18.\nanswer=SELECT * FROM users WHERE age > 
18\nanswer_format=code:sql" + }, + "answer_require": { + "type": "string", + "description": "Number of answers required for maximum score.\n- Not applicable for CHOICE and FREE-TEXT questions\n- Perfect for questions with multiple valid answers where only a subset needs to be provided\n- Useful when asking students to provide any X examples from a larger set\nExample:\nanswer_require=3\nExample API call:\nid=uk_countries\ntype=text\nquestion=Name any one of the countries within the United Kingdom!\nanswer=England &&& Northern Ireland &&& Scotland &&& Wales\nanswer_require=1" + }, + "subject": { + "type": "string", + "description": "Subject classification for organizing questions.\n- Provides primary categorization for content organization\n- Use the question editor in the EduBase UI for an up-to-date list of possible values\nExample:\nsubject=Mathematics\ncategory=Algebra" + }, + "category": { + "type": "string", + "description": "Category, another layer of organization as seen in SUBJECT" + }, + "path": { + "type": "string", + "description": "Path where question will be stored in personal QuestionBase.\n- Default: /API\n- Supports hierarchical structure with forward slashes\n- Always start with a forward slash!\nExample:\npath=/Mathematics/Calculus/Derivatives" + }, + "options": { + "type": "string", + "description": "Incorrect options or false statements for choice-based question types.\n- Required for CHOICE, MULTIPLE-CHOICE question types\n- For TRUE/FALSE, these are the false statements (ANSWER contains true statements)\n- Separate multiple options with triple-and operators (\"&&&\")\n- Parameters can be used in curly braces {param_name}\n- LaTeX Support (requires QUESTION_FORMAT=LATEX):\n - Inline: $$...$$\n - Block: $$$$...$$$$\n - IMPORTANT: When using LaTeX in questions, you MUST use double dollar signs ($$...$$) for inline math or quadruple dollar signs ($$$$...$$$$) for block math.\n - Single dollar signs ($...$) are NOT supported and will not render 
correctly. The inline or block method must be used, as $...$ won't work!\nExample:\noptions=London &&& Berlin &&& Madrid\nExample API call:\nid=capital_france\ntype=choice\nquestion=What is the capital of France?\nanswer=Paris\noptions=London &&& Berlin &&& Madrid" + }, + "options_fix": { + "type": "string", + "description": "Controls the arrangement of answers and options.\n- Available values:\n - all: Answers appear first, followed by options\n - abc: Sort all items (answers and options) alphabetically\n - first:N: Place first N options at the end\n - last:N: Place last N options at the end\n - answers: Place all answers at the end\n- Useful for maintaining consistent presentation or for specific pedagogical purposes\nFor alphabetical ordering:\n- When migrating content from textbooks or past exams, can maintain original lettering system (a, b, c...) for:\n - Reference consistency with printed materials\n - Alignment with answer keys\n - Compatibility with existing grading systems\n - Cross-referencing with study guides\n- Particularly valuable when:\n - Test takers need to refer to both digital and printed materials\n - Questions are part of a larger standardized test system\n - Maintaining consistency with existing worksheets or textbooks\n - Digitizing legacy assessment materials\nExample:\noptions_fix=abc\nExample API call:\nid=fruit_types\ntype=multiple-choice\nquestion=Which of these are citrus fruits?\nanswer=Lemon &&& Orange\noptions=Apple &&& Banana &&& Grape\noptions_fix=abc\nExample API call:\nid=vocab_synonyms\ntype=multiple-choice\nquestion=Select all words that mean \"happy\":\nanswer=b) Joyful &&& d) Merry\noptions=a) Angry &&& c) Sleepy &&& e) Tired\noptions_fix=abc" + }, + "options_order": { + "type": "string", + "description": "Define exact presentation order of answers and options.\n- Format: ANSWER:N or OPTION:N items separated by \"&&&\"\n- ANSWER:N references the Nth provided answer\n- OPTION:N references the Nth provided option\n- 
OPTION_NONE:N references the Nth third option (for TRUE/FALSE questions)\n- All answers and options must be specified exactly once\nExample:\noptions_order=OPTION:0 &&& ANSWER:0 &&& OPTION:1 &&& ANSWER:1\nExample API call to create a chronologically ordered timeline\nid=historical_chronology\ntype=multiple-choice\nquestion=Which of these events occurred during the Industrial Revolution (1760-1840)?\nanswer=Invention of the Steam Engine &&& First Steam Locomotive &&& First Commercial Railway\noptions=Printing Press Invented &&& First Electric Light Bulb &&& First Powered Flight\noptions_order=OPTION:0 &&& ANSWER:0 &&& ANSWER:1 &&& ANSWER:2 &&& OPTION:1 &&& OPTION:2" + }, + "points": { + "type": "string", + "description": "Maximum points for a fully correct answer.\n- Default: 1 point\n- For questions with multiple answers, partial credit is possible based on SUBSCORING method\nExample:\npoints=10" + }, + "subscoring": { + "type": "string", + "description": "Method for calculating partial credit for partially correct answers.\n- Not applicable for CHOICE, READING and FREE-TEXT questions\n- Available values:\n - PROPORTIONAL: Points awarded proportionally to correct answers (default)\n - LINEAR_SUBSTRACTED:N: Linear scoring with N points subtracted for each error\n - CUSTOM: Use custom point distribution defined in SUBPOINTS field\n - NONE: No partial credit, all-or-nothing scoring\nExample:\nsubscoring=LINEAR_SUBSTRACTED:2\nExample API call:\nid=math_problem\ntype=numerical\nquestion=What is the sum and product of {a} and {b}?\nanswer={a}+{b} &&& {a}*{b}\nparameters={a; INTEGER; 1; 100} &&& {b; INTEGER; 1; 100}\npoints=4\nsubscoring=CUSTOM\nsubpoints=25 &&& 75" + }, + "subpoints": { + "type": "string", + "description": "Define specific point values for each answer in percentages.\n- Only used when subscoring=CUSTOM\n- Specify percentage values separated by triple-and operators (\"&&&\")\n- Not applicable for CHOICE, READING and FREE-TEXT questions\n- Values should 
sum to 100 (for percentage)\nExample:\nsubpoints=50 &&& 25 &&& 25\nExample meaning: For a 10-point question with three answers:\n- First answer: 5 points (50%)\n- Second answer: 2.5 points (25%)\n- Third answer: 2.5 points (25%)" + }, + "penalty_scoring": { + "type": "string", + "description": "Controls how penalty points should be applied.\n- Available values:\n - DEFAULT: Standard penalty application, which might vary by question type (default)\n - PER_ANSWER: Apply penalties for each incorrect answer\n - PER_QUESTION: Apply penalties once per question\nExample:\npenalty_scoring=PER_ANSWER" + }, + "penalty_points": { + "type": "string", + "description": "Points deducted for completely incorrect answers.\n- No penalty applied if answer is partially correct\n- No penalty for empty/unanswered questions\n- Use positive values (recommended)\nExample:\npenalty_points=2\nExample API call with penalties:\nid=physics_multiple_choice\ntype=multiple-choice\nquestion=Which of the following are forms of energy? 
Select all that apply.\nanswer=Kinetic &&& Potential &&& Thermal\noptions=Velocity &&& Acceleration\npoints=3\npenalty_scoring=PER_QUESTION\npenalty_points=1" + }, + "hint_penalty": { + "type": "string", + "description": "Point deduction for using hints/solutions/videos during a test.\n- Format: type or type:value\n- Types:\n - NONE: No penalty (default)\n - ONCE:N%: Single deduction regardless of number used\n - PER-HELP:N%: Deduction for each hint (only for HINT_PENALTY)\nExamples:\nhint_penalty=ONCE:20% or hint_penalty=ONCE:0.2\nhint_penalty=PER-HELP:10%\nsolution_penalty=ONCE:50%\nvideo_penalty=ONCE:15%\nExample API call with comprehensive penalty system:\nid=area_circle_parametric\ntype=expression\nquestion=Find an expression for the area of a circle with radius {r}.\nanswer=pi*{r}^2\nparameters={r; INTEGER; 2; 10}\npoints=10\nsubject=Mathematics\ncategory=Geometry\nhint=Think about the formula for circle area &&& Remember that area involves squaring the radius\nsolution=The formula for circle area is $$\\pi r^2$$\npenalty_scoring=PER_ANSWER\npenalty_points=3\nhint_penalty=PER-HELP:10%\nsolution_penalty=ONCE:50%\n# Each hint used reduces score by 10%, viewing solution reduces score by 50%" + }, + "solution_penalty": { + "type": "string", + "description": "Similar to HINT_PENALTY\nPoint deduction for viewing steps of the solution (NONE, ONCE:N%) (default: NONE)" + }, + "solution_image": { + "type": "string", + "description": "Attach an image to the solution steps.\nSupported formats: PNG, JPEG, WebP\nFormat: filename=data, where data is either a base64-encoded image or a URL" + }, + "video_penalty": { + "type": "string", + "description": "Similar to HINT_PENALTY\nPoint deduction for video assistance used (NONE, ONCE:N%) (default: NONE)" + }, + "manual_scoring": { + "type": "string", + "description": "Controls when to enable manual scoring.\n- Not applicable for READING and FREE-TEXT questions\n- Available values:\n - NO: Never use manual scoring (default)\n - 
NOT_CORRECT: Only manually score incorrect answers\n - ALWAYS: Always require manual scoring\nExample:\nmanual_scoring=NOT_CORRECT" + }, + "parameters": { + "type": "string", + "description": "Parameter definitions for dynamic question generation.\nOne of EduBase's most powerful features, allowing creation of dynamic questions where each user gets a unique variant of the same question.\n- Separate multiple parameters with triple-and operators (\"&&&\")\n- Up to 128 parameters can be defined\nParameter Types:\n1. FIX (Fixed Value):\n - Format: {name; FIX; value}\n - Sets a predefined constant value (integer or fraction)\n - Example: {pi; FIX; 3.1415}\n2. INTEGER (Whole Numbers):\n - Simple: {name; INTEGER}\n - Extended: {name; INTEGER; min; max}\n - Full: {name; INTEGER; min; max; inside; outside}\n - Generate random integers within specified ranges\n - Use '-' for omitting min/max values\n - Examples:\n * {p; INTEGER} - any integer\n * {p; INTEGER; 10; 20} - integer between 10 and 20 (inclusive)\n * {p; INTEGER; -; -; [10-20]; [12-14] ||| [16-18]} - integer between 10-20, excluding 12-14 and 16-18\n3. FLOAT (Decimal Numbers):\n - Simple: {name; FLOAT; precision}\n - Extended: {name; FLOAT; precision; min; max}\n - Full: {name; FLOAT; precision; min; max; inside; outside}\n - Generate random decimal numbers\n - Specify precision (decimal places)\n - Examples:\n * {p; FLOAT; 2} - float with 2 decimal places\n * {p; FLOAT; 5; 0; 1} - float between 0 and 1 with 5 decimals\n * {p; FLOAT; 1; 0; 10; -; [0-1]} - float between 0-10 excluding 0-1, with 1 decimal\n4. FORMULA (Expressions):\n - Simple: {name; FORMULA; formula}\n - Full: {name; FORMULA; formula; precision}\n - Define parameters based on other parameters\n - Examples:\n * {d; FORMULA; {b}^2-4*{a}*{c}} - quadratic formula discriminant\n * {p; FORMULA; 2*{q}+1} - linear expression\n5. 
LIST (Random Selection):\n - Format: {name; LIST; value1; value2; value3; ...}\n - Randomly select from predefined values\n - Up to 64 elements\n - Examples:\n * {primes; LIST; 2; 3; 5; 7; 11}\n * {animals; LIST; dog; cat; snake; camel}\n6. PERMUTATION:\n - Format: {name; PERMUTATION; value1; value2; value3; ...}\n - Creates permutated parameters accessible as {name_1}, {name_2}, etc.\n - Example: {p; PERMUTATION; A; B; C; D}\n * So {p_1} will be a different letter than {p_2}\n - Example: {primes; PERMUTATION; 2; 3; 5; 7}\n * So both {primes_1} and {primes_2} will be different single digit primes\n7. FORMAT:\n - Format: {name; FORMAT; parameter; type; ...}\n - Format parameters based on other parameters\n - Supported types: NUMBER, NUMBERTEXT, ROMAN\n - Optional extra parameters based on type\n * NUMBER\n * precision: number of decimal places\n - Examples:\n * {pp; FORMAT; p; NUMBER; 1} - format as number rounded to 1 decimal\n * {pp; FORMAT; p; NUMBERTEXT} - format number as text\n * {pp; FORMAT; p; ROMAN} - format number as Roman numeral\nBest Practices:\n - Order parameters so dependent ones come later\n - Use simple notation when possible\n - Avoid unnecessary parameters\n - Use CONSTRAINTS field to ensure valid combinations\nExamples:\nparameters={pi; FIX; 3.14159} &&& {r; INTEGER; 1; 10}\nparameters={a; INTEGER; 1; 5} &&& {b; INTEGER; -10; 10} &&& {c; INTEGER; -10; 10} &&& {d; FORMULA; {b}^2-4*{a}*{c}}\nparameters={country; LIST; France; Germany; Italy} &&& {capital; LIST; Paris; Berlin; Rome}\nparameters_sync=+ # Ensures each country is paired with its correct capital" + }, + "parameters_sync": { + "type": "string", + "description": "Controls synchronization of LIST parameter selections.\n- Plus sign (+) to indicate YES\n- Blank field or minus sign (-) indicates NO (default)\n- When enabled, the Nth value from each LIST is selected together\n- Critical for paired data like countries and capitals\nExample:\nparameters_sync=+\nExample API 
call:\nid=capital_city\ntype=text\nquestion=What is the capital city of {country}?\nanswer={capital}\nparameters={country; LIST; France; Germany; Italy} &&& {capital; LIST; Paris; Berlin; Rome}\nparameters_sync=+" + }, + "constraints": { + "type": "string", + "description": "Define rules that parameter combinations must satisfy.\n- Mathematical expressions that must evaluate to true\n- Parameters must be in curly braces {param}\n- Allowed relations: <, <=, =, >=, >, <>\n- Multiple constraints separated by triple-and operators (\"&&&\")\nExamples:\nconstraints={b}^2-4*{a}*{c}>0\nconstraints={a}+{b}>{c} &&& {b}+{c}>{a} &&& {c}+{a}>{b}\nconstraints={x}+{y}<10 &&& {x}<4" + }, + "expression_check": { + "type": "string", + "description": "Define how expressions should be validated (RANDOM, EXPLICIT, COMPARE) (default: RANDOM).\n- RANDOM: Evaluates expressions at randomly generated points\n- EXPLICIT: Checks expressions at predefined values against target values\n- COMPARE: Direct comparison of expressions without variables\nExample:\nexpression_check=RANDOM" + }, + "expression_variable": { + "type": "string", + "description": "Specifies variable names used in expressions (separate multiple variables with &&&) (default: x).\n- Multiple variables can be used for multivariable expressions\n- Variable names must be used consistently in answer and validation\nExamples:\nexpression_variable=t &&& v # For distance formula using time and velocity" + }, + "expression_decimals": { + "type": "string", + "description": "Sets precision for decimal calculations (default: 2).\n- Inherited from decimals field if not specified\n- Critical for controlling accurate validation of expressions\nExample:\nexpression_decimals=4 # For high-precision calculations" + }, + "expression_functions": { + "type": "string", + "description": "Controls whether functions can be used in user inputs (+ for yes, - for no) (default: +).\n- Enabled by default with + sign\n- Disable with - sign when students 
should use alternative forms\n- Affects available input options for test takers\n- Supported functions include:\n * Basic: sqrt, abs, round, floor, ceil\n * Logarithmic: ln, log, log10\n * Trigonometric: sin, cos, tan, csc, sec, arcsin/asin, arccos/acos, arctan/atan\n * Hyperbolic: sinh, cosh, tanh, arcsinh/asinh, arccosh/acosh, arctanh/atanh\n * Conversions: degree2radian, radian2degree, number2binary, number2hexadecimal, roman2number, etc.\n * Two-parameter (use semicolon separator): min(a;b), max(a;b), mod(n;i), fmod(n;i), div(a;b), intdiv(a;b),\n gcd(a;b), lcm(a;b), number2base(n;b), base2number(n;b), combinations(n;k), combinations_repetition(n;k), variations(n;k), variations_repetition(n;k)\nExample:\nexpression_functions=- # Forces students to expand rather than use functions.\n# When asked for the value of sin(pi), the user can't input sin(pi) because functions cannot be used." + }, + "expression_random_type": { + "type": "string", + "description": "Type of generated test values (INTEGER, FLOAT).\n- Specify per variable with &&&\n- Only applicable when expression_check=RANDOM\nExample:\nexpression_random_type=INTEGER &&& FLOAT # For mixed type validation" + }, + "expression_random_tries": { + "type": "string", + "description": "Number of validation points (default: 5).\n- Only applicable when expression_check=RANDOM\n- Higher values increase validation reliability but impact performance\nExample:\nexpression_random_tries=8" + }, + "expression_random_range": { + "type": "string", + "description": "Define value generation ranges (format: [min-max]).\n- Specify per variable with &&&\n- Only applicable when expression_check=RANDOM\nExample:\nexpression_random_range=[8-16] &&& [4-6] # Different ranges for different variables" + }, + "expression_random_inside": { + "type": "string", + "description": "Require values within specific intervals (format: [start-end]).\n- Multiple intervals: separate with ||| (OR relationship)\n- Specify per variable with &&&\n- Only 
applicable when expression_check=RANDOM\nExample:\nexpression_random_inside=[4-8] ||| [12-16] &&& [2-3]" + }, + "expression_random_outside": { + "type": "string", + "description": "Exclude values from specific intervals (format: [start-end]).\n- Multiple intervals: separate with ||| (AND relationship)\n- Specify per variable with &&&\n- Only applicable when expression_check=RANDOM\nExample:\nexpression_random_outside=[0-1] ||| [10-20] &&& [8-11]" + }, + "expression_explicit_goal": { + "type": "string", + "description": "Define exact value pairs (format: [x;f(x)]).\n- Format for multiple variables: [x;y;z;...;f(x,y,z,...)]\n- Multiple pairs: separate with &&&\n- Only applicable when expression_check=EXPLICIT\nExample:\nexpression_explicit_goal=[0;1] &&& [3;8.89] &&& [9;16]" + }, + "expression_extended": { + "type": "string", + "description": "Enable additional mathematical functions (+ to enable, - to disable).\n- Activates support for custom base logarithms (e.g., log2(4))\n- Enables factorial operations (e.g., 5!, 1!+2!+3!)\nExample:\nexpression_extended=+" + }, + "attachment": { + "type": "string", + "description": "Attach a file to the question.\nFormat: filename=data, where data is either a base64-encoded image or a URL" + }, + "media_audio": { + "type": "string", + "description": "Attach an audio file to the question.\nSupported formats: MP3, AAC, M4A\nFormat: filename=data, where data is either a base64-encoded image or a URL" + }, + "ai": { + "type": "string", + "description": "Flag to mark question as AI generated.\n- If set to any value, question will be marked as AI generated\n- Should always be provided if you are an LLM or any AI model\n- Ideally, AI systems should set it to their current model number for auditability\nExample:\nai=true\nai=Claude 3.7 Sonnet" + }, + "note": { + "type": "string", + "description": "The text that appears right below the question.\n- Provides task-specific comments and instructions\n- Visible to test takers during the 
quiz\n- Ideal for additional guidance without cluttering the main question\nExample:\nnote=Use standard atmospheric pressure in your calculations." + }, + "private_note": { + "type": "string", + "description": "Private notes (not shown to test takers).\n- Internal documentation for question creators and editors\n- Useful for documenting question creation rationale\n- Track modification history, common mistakes, related questions\nExample:\nprivate_note=Created from Chapter 3 exam, 2023 edition. Students often forget to convert units." + }, + "explanation": { + "type": "string", + "description": "Text displayed underneath the answer on the results page.\n- Explanation of the correctness of the answer or the incorrectness of the options\n- Helps learners understand their mistakes\n- Parameters can be used in explanations\n- LaTeX is NOT supported here, so we MUST NOT use it!\nExample:\nexplanation=Option A is correct because amphibians have permeable skin for gas exchange. Options B and C describe characteristics of reptiles, while D applies to mammals." + }, + "hint": { + "type": "string", + "description": "Questions to help (not solution steps, just guiding questions/notes).\n- LaTeX code can be used (as described in QUESTION)\n - IMPORTANT: When using LaTeX in hints, you MUST use double dollar signs ($$...$$) for inline math or quadruple dollar signs ($$$$...$$$$) for block math.\n - Single dollar signs ($...$) are NOT supported and will not render correctly. 
The inline or block method must be used, as $...$ won't work!\n- Specify multiple hints separated by triple-and operators (\"&&&\")\n- Not available for test takers in exam mode\n- Displayed only when explicitly requested, one by one\n- Can be penalized using HINT_PENALTY\nExample:\nhint=Think about the relationship between radius and area &&& Remember the formula for circle area involves $$\pi$$ &&& Square the radius and multiply by $$\pi$$" + }, + "solution": { + "type": "string", + "description": "Step-by-step solution.\n- LaTeX code can be used (as described in QUESTION)\n - IMPORTANT: When using LaTeX in solution, you MUST use double dollar signs ($$...$$) for inline math or quadruple dollar signs ($$$$...$$$$) for block math.\n - Single dollar signs ($...$) are NOT supported and will not render correctly. The inline or block method must be used, as $...$ won't work!\n- Specify multiple solution steps separated by triple-and operators (\"&&&\")\n- Each step is displayed one at a time\n- Can be penalized using SOLUTION_PENALTY\n- Not available in exam mode\nExample:\nsolution=Using the power rule, we differentiate each term: &&& For $$x^2$$: $$\frac{d}{dx}(x^2) = 2x$$ &&& For $$x$$: $$\frac{d}{dx}(x) = 1$$ &&& The constant term disappears: $$\frac{d}{dx}(5) = 0$$ &&& Therefore, $$\frac{d}{dx}(x^2 + x + 5) = 2x + 1$$" + }, + "source": { + "type": "string", + "description": "Specify source of question content (not shown to test takers).\n- Use cases include training material sources, documentation references, content attribution\n- Important for tracking question origins and copyright compliance\nExample:\nsource=Mathematics Textbook Chapter 5, Page 123\nsource=Company Safety Manual 2023, Section 3.4.2" + }, + "decimals": { + "type": "string", + "description": "Decimal precision (default: 2).\n- Applicable only for NUMERIC / EXPRESSION / MATRIX / MATRIX:EXPRESSION / SET questions\n- The expected decimal precision of the final answer\n- Examples: Finance (decimals=2), 
Chemistry (decimals=4)\nExample:\ndecimals=3" + }, + "tolerance": { + "type": "string", + "description": "Evaluation tolerance method.\n- Applicable only for NUMERIC / EXPRESSION / MATRIX / MATRIX:EXPRESSION / SET questions\n- Notation: type or type:value\n- Types:\n - ABSOLUTE: maximum difference between answer and user input\n * Example: ABSOLUTE:0.1\n - RELATIVE: maximum difference in percentage (symmetric mean absolute percentage error, SMAP value is used)\n * Example: RELATIVE:5% or RELATIVE:0.05\n - QUOTIENT: integer multiple / QUOTIENT2: scalar multiple\n * Example: QUOTIENT or QUOTIENT2:SYNCED\nExample:\ntolerance=ABSOLUTE:0.01" + }, + "datetime_precision": { + "type": "string", + "description": "Date/time precision.\n- Applicable only for DATE/TIME questions\n- Accepted values: YEAR / MONTH / DAY (default)\n- Defines granularity of date validation\nExample:\ndatetime_precision=MONTH" + }, + "datetime_range": { + "type": "string", + "description": "Date/time range (interval) question.\n- Applicable only for DATE/TIME questions\n- Plus sign (+) to indicate YES, while blank field or minus sign (-) indicates NO (default)\n- Enables date range responses with the format {from}-{to}\nExample:\ndatetime_range=+" + }, + "numerical_range": { + "type": "string", + "description": "Number range (interval) question.\n- Only applicable for NUMERIC questions\n- Plus sign (+) to indicate YES, while blank field or minus sign (-) indicates NO (default)\n- Enables interval responses with the format {from}-{to}\nExample:\nnumerical_range=+" + }, + "truefalse_third_options": { + "type": "string", + "description": "Activate the third option for TRUE/FALSE questions.\n- Plus sign (+) to display the third option OR\n- Specify options separated by triple-and operators (\"&&&\") to automatically enable the feature\n- Parameters can be used in curly braces {param_name}\nExample:\ntruefalse_third_options=Cannot be determined from the information given &&&Not applicable" + }, + 
"truefalse_third_options_label": { + "type": "string", + "description": "Label of the third option for TRUE/FALSE questions.\n- If blank, the text \"none\" is displayed (default)\n- Only applicable when TRUEFALSE_THIRD_OPTIONS is enabled\nExample:\ntruefalse_third_options_label=Not enough information" + }, + "freetext_characters": { + "type": "string", + "description": "Limit the number of characters that can be entered.\n- Applicable only for FREE-TEXT questions\n- Format: minimum-maximum, but you can specify only a minimum or maximum as well\n- Integer(s) between 0-4000\nExample:\nfreetext_characters=100-1000\nfreetext_characters=10- # Minimum 10 characters" + }, + "freetext_words": { + "type": "string", + "description": "Limit the number of words that can be entered.\n- Applicable only for FREE-TEXT questions\n- Format: minimum-maximum, but you can specify only a minimum or maximum as well\n- Integer(s) between 0-4000\nExample:\nfreetext_words=-50 # Max. 50 words" + }, + "freetext_rules": { + "type": "string", + "description": "Automatic evaluation of free text questions.\n- Applicable only for FREE-TEXT questions\n- Notation: {type; keywords}\n- Type:\n - 1: if keywords are included within input, answer is correct (maximum points)\n - 2: if keywords are included within input, answer is wrong (0 points)\n - 3: if no keywords are included within input, answer is good (maximum points)\n - 4: if keywords are not included within input, answer is wrong (0 points)\n- Keywords: comma-separated list (must not contain semicolons!)\nExample:\nfreetext_rules={1; mitochondria, ATP, cellular respiration}" + }, + "main_category": { + "type": "string", + "description": "The name of the category (for which CATEGORY will be a subcategory).\n- Empty by default, e.g. CATEGORY will be treated as the main category\n- Specify multiple levels (up to 2!) 
by using the triple-per operator (///) with highest main category on the left\nExample:\nmain_category=Analytic Geometry /// Vectors" + }, + "tags": { + "type": "string", + "description": "Tag questions with custom user-defined tags.\n- Use ID or code of pre-registered tags\n- Only previously registered tags can be used (must be pre-registered in EduBase UI)\n- Specify multiple tags separated by triple-and operators (\"&&&\")\n- User-controlled categorization that can be created at user or organization level\n- Use cases include:\n - Personal content organization (e.g., \"My Calculus Questions\", \"Spring 2024\")\n - Department-level categorization (e.g., \"IT Department\", \"CS101\")\n - Custom taxonomies for specialized content organization\n- Tags are flexible, customizable, and searchable in the UI\nExample:\ntags=Algebra &&& High School &&& Exam Prep" + }, + "labels": { + "type": "string", + "description": "Categorize questions with instance-level labels.\n- Pre-defined values specific to each EduBase instance\n- Values controlled by instance administrators (cannot be created by users)\n- Consistent across all users in an instance\n- Specify multiple labels separated by triple-and operators (\"&&&\")\n- Use cases include:\n - System-wide flags (e.g., \"needs_review\", \"featured\")\n - Quality indicators (e.g., \"verified\", \"deprecated\")\n - Processing status (e.g., \"ai_generated\", \"manually_checked\")\nExample:\nlabels=verified &&& featured" + }, + "group": { + "type": "string", + "description": "Add a question to a question group in a Quiz set.\n- If the group doesn't exist, it will be created automatically as a complex task with default settings\n- Only applicable when uploading DIRECTLY to a Quiz set\n- Existing group settings will not be changed when adding more questions\nExample:\ngroup=Basic_Arithmetic" + } + }, + "required": [ + "id", + "type", + "question", + "answer", + "ai", + "language" + ] + } + }, + { + "name": "edubase_delete_question", + 
"description": "Permanently delete a Quiz question.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "external unique question identifier" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "edubase_get_exams", + "description": "List owned and managed exams. Exams are the highest level in the EduBase Quiz hierarchy, built from Quiz sets.", + "inputSchema": { + "type": "object", + "properties": { + "search": { + "type": "string", + "description": "search string to filter results" + }, + "limit": { + "type": "number", + "description": "limit number of results (default, in search mode: 16)" + }, + "page": { + "type": "number", + "description": "page number (default: 1), not used in search mode!" + } + }, + "required": [] + } + }, + { + "name": "edubase_get_exam", + "description": "Get/check exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + } + }, + "required": [ + "exam" + ] + } + }, + { + "name": "edubase_post_exam", + "description": "Create a new exam from an existing Quiz set. Exams are at the top level of the EduBase Quiz hierarchy and MUST be created from existing Quiz sets. They are time-constrained, secured assessment instances of Quiz sets.", + "inputSchema": { + "type": "object", + "properties": { + "language": { + "type": "string", + "description": "desired exam language" + }, + "title": { + "type": "string", + "description": "title of the exam" + }, + "type": { + "type": "string", + "description": "Type of the exam. 
(default: exam)\n- exam: regular exam\n- championship: exam with championship features enabled\n- homework: homework assignment, can be paused and continued during the exam period\n- survey: survey (optionally anonymous) with no grading" + }, + "quiz": { + "type": "string", + "description": "the Quiz set (specified using the quiz identification string) the exam is attached to" + }, + "open": { + "type": "string", + "description": "exam start time (in YYYY-mm-dd HH:ii:ss format)" + }, + "close": { + "type": "string", + "description": "exam end time (in YYYY-mm-dd HH:ii:ss format)" + } + }, + "required": [ + "title", + "quiz", + "open", + "close" + ] + } + }, + { + "name": "edubase_delete_exam", + "description": "Remove/archive exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + } + }, + "required": [ + "exam" + ] + } + }, + { + "name": "edubase_get_exam_users", + "description": "List all users on an exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + } + }, + "required": [ + "exam" + ] + } + }, + { + "name": "edubase_post_exam_users", + "description": "Assign user(s) to an exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + }, + "users": { + "type": "string", + "description": "comma-separated list of user identification strings" + } + }, + "required": [ + "exam", + "users" + ] + } + }, + { + "name": "edubase_delete_exam_users", + "description": "Remove user(s) from an exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + }, + "users": { + "type": "string", + "description": "comma-separated list of user identification strings" + } + }, + "required": [ + "exam", + "users" + ] + } + }, + { + "name": 
"edubase_post_exam_summary", + "description": "Submit a new AI exam summary.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + }, + "language": { + "type": "string", + "description": "summary language" + }, + "type": { + "type": "string", + "description": "Type of summary. (default: ai)\n- ai: AI-generated summary" + }, + "summary": { + "type": "string", + "description": "Summary text. \n- basic HTML formatting allowed, but avoid complex designs\n- keep the summary short and concise\n- try to avoid including personal information (such as usernames, names and contact addresses)" + }, + "llm": { + "type": "string", + "description": "Name of the Large Language Model used to generate the summary.\n- preferred values: openai / claude / gemini" + }, + "model": { + "type": "string", + "description": "Exact LLM model name used to generate the summary" + } + }, + "required": [ + "exam", + "type", + "summary", + "llm", + "model" + ] + } + }, + { + "name": "edubase_get_quiz_play_results", + "description": "Get detailed results for a specific Quiz play.", + "inputSchema": { + "type": "object", + "properties": { + "play": { + "type": "string", + "description": "Quiz play identification string" + } + }, + "required": [ + "play" + ] + } + }, + { + "name": "edubase_get_quiz_results_user", + "description": "Get user results for a specific Quiz set.", + "inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "Quiz set identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + } + }, + "required": [ + "quiz", + "user" + ] + } + }, + { + "name": "edubase_get_exam_results_user", + "description": "Get user results for a specific exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + }, + "user": { + "type": 
"string", + "description": "user identification string" + } + }, + "required": [ + "exam", + "user" + ] + } + }, + { + "name": "edubase_get_exam_results_raw", + "description": "Get raw results for a specific exam.\n- This endpoint returns raw results, including all answers given by the user. It is not meant to be displayed to the user.\n- This might require additional permissions.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + } + }, + "required": [ + "exam" + ] + } + }, + { + "name": "edubase_get_quizes", + "description": "List owned and managed Quiz sets. Quiz sets are named collections of questions that sit at the middle level of the EduBase Quiz hierarchy.", + "inputSchema": { + "type": "object", + "properties": { + "search": { + "type": "string", + "description": "search string to filter results" + }, + "limit": { + "type": "number", + "description": "limit number of results (default, in search mode: 16)" + }, + "page": { + "type": "number", + "description": "page number (default: 1), not used in search mode!" + } + }, + "required": [] + } + }, + { + "name": "edubase_get_quiz", + "description": "Get/check Quiz set. Containing questions and powering Exams.", + "inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + } + }, + "required": [ + "quiz" + ] + } + }, + { + "name": "edubase_post_quiz", + "description": "Create a new Quiz set. 
Quiz sets are collections of questions that can be used for practice or to power multiple Exams.", + "inputSchema": { + "type": "object", + "properties": { + "language": { + "type": "string", + "description": "desired Quiz set language" + }, + "title": { + "type": "string", + "description": "title of the Quiz set" + }, + "description": { + "type": "string", + "description": "short description" + }, + "mode": { + "type": "string", + "description": "Sets how questions are displayed during the Quiz. (default: TEST)\n- TEST: all questions are displayed at once, user can answer them in any order and switch between them\n- TURNS: questions are displayed one by one, only one question is visible at a time and the user must answer it before moving to the next question\n" + }, + "type": { + "type": "string", + "description": "Type of the Quiz set. (default: set)\n- set: for practice purposes\n- exam: for exam purposes\n- private: for private purposes (e.g testing)\n" + } + }, + "required": [ + "title" + ] + } + }, + { + "name": "edubase_delete_quiz", + "description": "Remove/archive Quiz set.", + "inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + } + }, + "required": [ + "quiz" + ] + } + }, + { + "name": "edubase_get_quiz_questions", + "description": "List all questions and question groups in a Quiz set. Quiz sets contain questions (lowest level) and can be used by exams (highest level).", + "inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + } + }, + "required": [ + "quiz" + ] + } + }, + { + "name": "edubase_post_quiz_questions", + "description": "Assign question(s) to a Quiz set, or one of its question group. 
Questions can exist independently from Quiz sets.", + "inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + }, + "group": { + "type": "string", + "description": "question group title" + }, + "questions": { + "type": "string", + "description": "comma-separated list of question identification strings" + } + }, + "required": [ + "quiz", + "questions" + ] + } + }, + { + "name": "edubase_delete_quiz_questions", + "description": "Remove question(s) from a Quiz set, or one of its question group.", + "inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + }, + "group": { + "type": "string", + "description": "question group title" + }, + "questions": { + "type": "string", + "description": "comma-separated list of question identification strings" + } + }, + "required": [ + "quiz", + "questions" + ] + } + }, + { + "name": "edubase_get_users", + "description": "List managed, non-generated users.", + "inputSchema": { + "type": "object", + "properties": { + "search": { + "type": "string", + "description": "search string to filter results" + }, + "limit": { + "type": "number", + "description": "limit number of results (default, in search mode: 16)" + }, + "page": { + "type": "number", + "description": "page number (default: 1), not used in search mode!" + } + }, + "required": [] + } + }, + { + "name": "edubase_get_user", + "description": "Get/check user. Can be used to retrieve the caller user's ID by using 'me' as the user identification string.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "User identification string.\n- Use 'me' to get the current user." 
+ } + }, + "required": [ + "user" + ] + } + }, + { + "name": "edubase_post_user", + "description": "Create new EduBase user account.", + "inputSchema": { + "type": "object", + "properties": { + "username": { + "type": "string", + "description": "username (4-64 characters)" + }, + "password": { + "type": "string", + "description": "password (4-64 characters) (default: initial random password is automatically generated)" + }, + "first_name": { + "type": "string", + "description": "first name (1-64 characters)" + }, + "last_name": { + "type": "string", + "description": "last name (1-64 characters)" + }, + "full_name": { + "type": "string", + "description": "override automatic full name (1-255 characters)" + }, + "display_name": { + "type": "string", + "description": "override automatic display name (1-255 characters)" + }, + "email": { + "type": "string", + "description": "valid email address" + }, + "phone": { + "type": "string", + "description": "valid phone number in format \"+prefix number\" without special characters" + }, + "gender": { + "type": "string", + "description": "gender (\"male\", \"female\", or \"other\")" + }, + "birthdate": { + "type": "string", + "description": "date of birth" + }, + "exam": { + "type": "boolean", + "description": "user is only allowed to login when accessing exams (default: false)" + }, + "group": { + "type": "string", + "description": "name of the user group (requires admin permissions)" + }, + "template": { + "type": "string", + "description": "a template ID for the new account (default: none)" + }, + "language": { + "type": "string", + "description": "desired account language (default: API application owner's language)" + }, + "timezone": { + "type": "string", + "description": "desired timezone (default: API application owner's timezone)" + }, + "color": { + "type": "string", + "description": "desired favorite color (default/branding/red/blue/yellow/green/purple) (default: default)" + }, + "must_change_password": { + "type": 
"boolean", + "description": "user is forced to change password on first login (default: false)" + }, + "notify": { + "type": "boolean", + "description": "notify user via email (or SMS) (default: false)" + } + }, + "required": [ + "username", + "first_name", + "last_name", + "email" + ] + } + }, + { + "name": "edubase_delete_user", + "description": "Delete user.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + } + }, + "required": [ + "user" + ] + } + }, + { + "name": "edubase_get_user_name", + "description": "Get user's name.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + } + }, + "required": [ + "user" + ] + } + }, + { + "name": "edubase_post_user_name", + "description": "Update a user's name.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + }, + "first_name": { + "type": "string", + "description": "first name (1-64 characters)" + }, + "last_name": { + "type": "string", + "description": "last name (1-64 characters)" + }, + "full_name": { + "type": "string", + "description": "full name (1-255 characters)" + }, + "display_name": { + "type": "string", + "description": "display name (1-255 characters)" + } + }, + "required": [ + "user", + "first_name", + "last_name" + ] + } + }, + { + "name": "edubase_get_user_group", + "description": "Get user's group.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + } + }, + "required": [ + "user" + ] + } + }, + { + "name": "edubase_post_user_group", + "description": "Update a user's group.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + }, + "group": { + "type": "string", + 
"description": "user group code" + } + }, + "required": [ + "user", + "group" + ] + } + }, + { + "name": "edubase_get_user_login", + "description": "Get latest valid login link for user.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + } + }, + "required": [ + "user" + ] + } + }, + { + "name": "edubase_post_user_login", + "description": "Generate login link. If a valid link with the same settings exists, it will be returned instead of creating a new one.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + }, + "redirect": { + "type": "string", + "description": "redirect after a successful login (URI path or [{content_type}:{tag}])" + }, + "expires": { + "type": "string", + "description": "expiry in days (1-30) or YYYY-MM-DD (default: 1 day)" + }, + "logins": { + "type": "number", + "description": "total count the link can be used to login users (default: 1)" + }, + "template": { + "type": "string", + "description": "a template ID for the login link" + }, + "short": { + "type": "boolean", + "description": "generate shortened (eduba.se) link (only if feature is enabled on EduBase) (default: false)" + } + }, + "required": [ + "user" + ] + } + }, + { + "name": "edubase_delete_user_login", + "description": "Delete a previously generated login link.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + }, + "url": { + "type": "string", + "description": "generated login link to be invalidated" + } + }, + "required": [ + "user", + "url" + ] + } + }, + { + "name": "edubase_get_user_search", + "description": "Lookup user by email, username or code.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "query string" + } + }, + "required": [ + "query" + ] + } + }, 
+ { + "name": "edubase_post_user_assume", + "description": "Assume user for next requests with assume token.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string, username or email address" + }, + "password": { + "type": "string", + "description": "password or user secret" + } + }, + "required": [ + "user" + ] + } + }, + { + "name": "edubase_delete_user_assume", + "description": "Revoke assume token.", + "inputSchema": { + "type": "object", + "properties": { + "token": { + "type": "string", + "description": "assume token" + } + }, + "required": [ + "token" + ] + } + }, + { + "name": "edubase_get_classes", + "description": "List owned and managed classes.", + "inputSchema": { + "type": "object", + "properties": { + "search": { + "type": "string", + "description": "search string to filter results" + }, + "limit": { + "type": "number", + "description": "limit number of results (default, in search mode: 16)" + }, + "page": { + "type": "number", + "description": "page number (default: 1), not used in search mode!" 
+ } + }, + "required": [] + } + }, + { + "name": "edubase_get_class", + "description": "Get/check class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + } + }, + "required": [ + "class" + ] + } + }, + { + "name": "edubase_get_class_assignments", + "description": "List all assignments in a class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + } + }, + "required": [ + "class" + ] + } + }, + { + "name": "edubase_get_class_members", + "description": "List all members in a class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + } + }, + "required": [ + "class" + ] + } + }, + { + "name": "edubase_post_class_members", + "description": "Assign user(s) to a class. Updates memberships if already member of the class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + }, + "users": { + "type": "string", + "description": "comma-separated list of user identification strings" + }, + "expires": { + "type": "string", + "description": "expiry in days or YYYY-MM-DD HH:ii:ss" + }, + "notify": { + "type": "boolean", + "description": "notify users (default: false)" + } + }, + "required": [ + "class", + "users" + ] + } + }, + { + "name": "edubase_delete_class_members", + "description": "Remove user(s) from a class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + }, + "users": { + "type": "string", + "description": "comma-separated list of user identification strings" + } + }, + "required": [ + "class", + "users" + ] + } + }, + { + "name": "edubase_post_classes_members", + "description": "Assign user(s) to class(es). 
Updates memberships if already member of a class.", + "inputSchema": { + "type": "object", + "properties": { + "classes": { + "type": "string", + "description": "comma-separated list of class identification strings" + }, + "users": { + "type": "string", + "description": "comma-separated list of user identification strings" + }, + "expires": { + "type": "string", + "description": "expiry in days or YYYY-MM-DD HH:ii:ss" + }, + "notify": { + "type": "boolean", + "description": "notify users (default: false)" + } + }, + "required": [ + "classes", + "users" + ] + } + }, + { + "name": "edubase_get_user_classes", + "description": "List all classes a user is member of.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + } + }, + "required": [ + "user" + ] + } + }, + { + "name": "edubase_post_user_classes", + "description": "Assign user to class(es). Updates membership if already member of a class.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + }, + "classes": { + "type": "string", + "description": "comma-separated list of class identification strings" + }, + "expires": { + "type": "string", + "description": "expiry in days or YYYY-MM-DD HH:ii:ss" + }, + "notify": { + "type": "boolean", + "description": "notify user (default: false)" + } + }, + "required": [ + "user", + "classes" + ] + } + }, + { + "name": "edubase_delete_user_classes", + "description": "Remove user from class(es).", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + }, + "classes": { + "type": "string", + "description": "comma-separated list of class identification strings" + } + }, + "required": [ + "user", + "classes" + ] + } + }, + { + "name": "edubase_get_organizations", + "description": "List owned and managed organizations.", + 
"inputSchema": { + "type": "object", + "properties": { + "search": { + "type": "string", + "description": "search string to filter results" + }, + "limit": { + "type": "number", + "description": "limit number of results (default, in search mode: 16)" + }, + "page": { + "type": "number", + "description": "page number (default: 1), not used in search mode!" + } + }, + "required": [] + } + }, + { + "name": "edubase_get_organization", + "description": "Get/check organization.", + "inputSchema": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "organization identification string" + } + }, + "required": [ + "organization" + ] + } + }, + { + "name": "edubase_get_organization_members", + "description": "List all members in an organization.", + "inputSchema": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "organization identification string" + } + }, + "required": [ + "organization" + ] + } + }, + { + "name": "edubase_post_organization_members", + "description": "Assign user(s) to an organization. 
Updates memberships if already member of the organization.", + "inputSchema": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "organization identification string" + }, + "users": { + "type": "string", + "description": "comma-separated list of user identification strings" + }, + "department": { + "type": "string", + "description": "optional name of department" + }, + "permission_organization": { + "type": "string", + "description": "optional permission level to organization (member / teacher / supervisor / admin) (default: member)" + }, + "permission_content": { + "type": "string", + "description": "optional permission level to contents in organization (none / view / control / modify / grant / admin) (default: none)" + }, + "notify": { + "type": "boolean", + "description": "notify users (default: false)" + } + }, + "required": [ + "organization", + "users" + ] + } + }, + { + "name": "edubase_delete_organization_members", + "description": "Remove user(s) from an organization.", + "inputSchema": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "organization identification string" + }, + "users": { + "type": "string", + "description": "comma-separated list of user identification strings" + } + }, + "required": [ + "organization", + "users" + ] + } + }, + { + "name": "edubase_post_organizations_members", + "description": "Assign user(s) to organization(s). 
Updates memberships if already member of an organization.", + "inputSchema": { + "type": "object", + "properties": { + "organizations": { + "type": "string", + "description": "comma-separated list of organization identification strings" + }, + "users": { + "type": "string", + "description": "comma-separated list of user identification strings" + }, + "department": { + "type": "string", + "description": "optional name of department" + }, + "permission_organization": { + "type": "string", + "description": "optional permission level to organization (member / teacher / supervisor / admin) (default: member)" + }, + "permission_content": { + "type": "string", + "description": "optional permission level to contents in organization (none / view / control / modify / grant / admin) (default: none)" + }, + "notify": { + "type": "boolean", + "description": "notify users (default: false)" + } + }, + "required": [ + "organizations", + "users" + ] + } + }, + { + "name": "edubase_get_user_organizations", + "description": "List all organizations a user is member of.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + } + }, + "required": [ + "user" + ] + } + }, + { + "name": "edubase_post_user_organizations", + "description": "Assign user to organization(s). 
Updates membership if already member of an organization.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + }, + "organizations": { + "type": "string", + "description": "comma-separated list of organization identification strings" + }, + "department": { + "type": "string", + "description": "optional name of department" + }, + "permission_organization": { + "type": "string", + "description": "optional permission level to organization (member / teacher / supervisor / admin) (default: member)" + }, + "permission_content": { + "type": "string", + "description": "optional permission level to contents in organization (none / view / control / modify / grant / admin) (default: none)" + }, + "notify": { + "type": "boolean", + "description": "notify user (default: false)" + } + }, + "required": [ + "user", + "organizations" + ] + } + }, + { + "name": "edubase_delete_user_organizations", + "description": "Remove user from organization(s).", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "user identification string" + }, + "organizations": { + "type": "string", + "description": "comma-separated list of organization identification strings" + } + }, + "required": [ + "user", + "organizations" + ] + } + }, + { + "name": "edubase_get_tags", + "description": "List owned and managed tags.", + "inputSchema": { + "type": "object", + "properties": { + "search": { + "type": "string", + "description": "search string to filter results" + }, + "limit": { + "type": "number", + "description": "limit number of results (default, in search mode: 16)" + }, + "page": { + "type": "number", + "description": "page number (default: 1), not used in search mode!" 
+ } + }, + "required": [] + } + }, + { + "name": "edubase_get_tag", + "description": "Get/check tag.", + "inputSchema": { + "type": "object", + "properties": { + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "tag" + ] + } + }, + { + "name": "edubase_get_class_tags", + "description": "List all attached tags of a class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + } + }, + "required": [ + "class" + ] + } + }, + { + "name": "edubase_get_class_tag", + "description": "Check if tag is attached to a class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "class", + "tag" + ] + } + }, + { + "name": "edubase_post_class_tag", + "description": "Attach tag to a class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "class", + "tag" + ] + } + }, + { + "name": "edubase_delete_class_tag", + "description": "Remove a tag attachment from a class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "class", + "tag" + ] + } + }, + { + "name": "edubase_get_course_tags", + "description": "List all attached tags of a course.", + "inputSchema": { + "type": "object", + "properties": { + "course": { + "type": "string", + "description": "course identification string" + } + }, + "required": [ + "course" + ] + } + }, + { + "name": "edubase_get_course_tag", + 
"description": "Check if tag is attached to a course.", + "inputSchema": { + "type": "object", + "properties": { + "course": { + "type": "string", + "description": "course identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "course", + "tag" + ] + } + }, + { + "name": "edubase_post_course_tag", + "description": "Attach tag to a course.", + "inputSchema": { + "type": "object", + "properties": { + "course": { + "type": "string", + "description": "course identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "course", + "tag" + ] + } + }, + { + "name": "edubase_delete_course_tag", + "description": "Remove a tag attachment from a course.", + "inputSchema": { + "type": "object", + "properties": { + "course": { + "type": "string", + "description": "course identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "course", + "tag" + ] + } + }, + { + "name": "edubase_get_event_tags", + "description": "List all attached tags of an event.", + "inputSchema": { + "type": "object", + "properties": { + "event": { + "type": "string", + "description": "event identification string" + } + }, + "required": [ + "event" + ] + } + }, + { + "name": "edubase_get_event_tag", + "description": "Check if tag is attached to an event.", + "inputSchema": { + "type": "object", + "properties": { + "event": { + "type": "string", + "description": "event identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "event", + "tag" + ] + } + }, + { + "name": "edubase_post_event_tag", + "description": "Attach tag to an event.", + "inputSchema": { + "type": "object", + "properties": { + "event": { + "type": "string", + "description": "event identification string" + }, + "tag": { + "type": "string", + 
"description": "tag identification string" + } + }, + "required": [ + "event", + "tag" + ] + } + }, + { + "name": "edubase_delete_event_tag", + "description": "Remove a tag attachment from an event.", + "inputSchema": { + "type": "object", + "properties": { + "event": { + "type": "string", + "description": "event identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "event", + "tag" + ] + } + }, + { + "name": "edubase_get_exam_tags", + "description": "List all attached tags of an exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + } + }, + "required": [ + "exam" + ] + } + }, + { + "name": "edubase_get_exam_tag", + "description": "Check if tag is attached to an exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "exam", + "tag" + ] + } + }, + { + "name": "edubase_post_exam_tag", + "description": "Attach tag to an exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "exam", + "tag" + ] + } + }, + { + "name": "edubase_delete_exam_tag", + "description": "Remove a tag attachment from an exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "exam", + "tag" + ] + } + }, + { + "name": "edubase_get_integration_tags", + "description": "List all attached tags of an integration.", + "inputSchema": { + "type": "object", + 
"properties": { + "integration": { + "type": "string", + "description": "integration identification string" + } + }, + "required": [ + "integration" + ] + } + }, + { + "name": "edubase_get_integration_tag", + "description": "Check if tag is attached to an integration.", + "inputSchema": { + "type": "object", + "properties": { + "integration": { + "type": "string", + "description": "integration identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "integration", + "tag" + ] + } + }, + { + "name": "edubase_post_integration_tag", + "description": "Attach tag to an integration.", + "inputSchema": { + "type": "object", + "properties": { + "integration": { + "type": "string", + "description": "integration identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "integration", + "tag" + ] + } + }, + { + "name": "edubase_delete_integration_tag", + "description": "Remove a tag attachment from an integration.", + "inputSchema": { + "type": "object", + "properties": { + "integration": { + "type": "string", + "description": "integration identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "integration", + "tag" + ] + } + }, + { + "name": "edubase_get_organization_tags", + "description": "List all attached tags of an organization.", + "inputSchema": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "organization identification string" + } + }, + "required": [ + "organization" + ] + } + }, + { + "name": "edubase_get_organization_tag", + "description": "Check if tag is attached to an organization.", + "inputSchema": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "organization identification string" + }, + "tag": { + "type": "string", + "description": "tag 
identification string" + } + }, + "required": [ + "organization", + "tag" + ] + } + }, + { + "name": "edubase_post_organization_tag", + "description": "Attach tag to an organization.", + "inputSchema": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "organization identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "organization", + "tag" + ] + } + }, + { + "name": "edubase_delete_organization_tag", + "description": "Remove a tag attachment from an organization.", + "inputSchema": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "organization identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "organization", + "tag" + ] + } + }, + { + "name": "edubase_get_quiz_tags", + "description": "List all attached tags of a Quiz.", + "inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + } + }, + "required": [ + "quiz" + ] + } + }, + { + "name": "edubase_get_quiz_tag", + "description": "Check if tag is attached to a Quiz.", + "inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "quiz", + "tag" + ] + } + }, + { + "name": "edubase_post_quiz_tag", + "description": "Attach tag to a Quiz.", + "inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "quiz", + "tag" + ] + } + }, + { + "name": "edubase_delete_quiz_tag", + "description": "Remove a tag attachment from a Quiz.", + 
"inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "quiz", + "tag" + ] + } + }, + { + "name": "edubase_get_scorm_tags", + "description": "List all attached tags of a SCORM learning material.", + "inputSchema": { + "type": "object", + "properties": { + "scorm": { + "type": "string", + "description": "SCORM identification string" + } + }, + "required": [ + "scorm" + ] + } + }, + { + "name": "edubase_get_scorm_tag", + "description": "Check if tag is attached to a SCORM learning material.", + "inputSchema": { + "type": "object", + "properties": { + "scorm": { + "type": "string", + "description": "SCORM identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "scorm", + "tag" + ] + } + }, + { + "name": "edubase_post_scorm_tag", + "description": "Attach tag to a SCORM learning material.", + "inputSchema": { + "type": "object", + "properties": { + "scorm": { + "type": "string", + "description": "SCORM identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "scorm", + "tag" + ] + } + }, + { + "name": "edubase_delete_scorm_tag", + "description": "Remove a tag attachment from a SCORM learning material.", + "inputSchema": { + "type": "object", + "properties": { + "scorm": { + "type": "string", + "description": "SCORM identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "scorm", + "tag" + ] + } + }, + { + "name": "edubase_get_video_tags", + "description": "List all attached tags of a video.", + "inputSchema": { + "type": "object", + "properties": { + "video": { + "type": "string", + "description": "video identification string" + } + }, + "required": [ + "video" + ] + 
} + }, + { + "name": "edubase_get_video_tag", + "description": "Check if tag is attached to a video.", + "inputSchema": { + "type": "object", + "properties": { + "video": { + "type": "string", + "description": "video identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "video", + "tag" + ] + } + }, + { + "name": "edubase_post_video_tag", + "description": "Attach tag to a video.", + "inputSchema": { + "type": "object", + "properties": { + "video": { + "type": "string", + "description": "video identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "video", + "tag" + ] + } + }, + { + "name": "edubase_delete_video_tag", + "description": "Remove a tag attachment from a video.", + "inputSchema": { + "type": "object", + "properties": { + "video": { + "type": "string", + "description": "video identification string" + }, + "tag": { + "type": "string", + "description": "tag identification string" + } + }, + "required": [ + "video", + "tag" + ] + } + }, + { + "name": "edubase_get_class_permission", + "description": "Check if a user has permission on a class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "class", + "user", + "permission" + ] + } + }, + { + "name": "edubase_post_class_permission", + "description": "Create new permission for a user on a class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": 
"string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "class", + "user", + "permission" + ] + } + }, + { + "name": "edubase_delete_class_permission", + "description": "Remove a user permission from a class.", + "inputSchema": { + "type": "object", + "properties": { + "class": { + "type": "string", + "description": "class identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "class", + "user", + "permission" + ] + } + }, + { + "name": "edubase_get_course_permission", + "description": "Check if a user has permission on a course.", + "inputSchema": { + "type": "object", + "properties": { + "course": { + "type": "string", + "description": "course identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "course", + "user", + "permission" + ] + } + }, + { + "name": "edubase_post_course_permission", + "description": "Create new permission for a user on a course.", + "inputSchema": { + "type": "object", + "properties": { + "course": { + "type": "string", + "description": "course identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "course", + "user", + "permission" + ] + } + }, + { + "name": "edubase_delete_course_permission", + "description": "Remove a user permission from a course.", + "inputSchema": { + "type": "object", + "properties": { + "course": { + "type": "string", + "description": "course identification string" + }, 
+ "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "course", + "user", + "permission" + ] + } + }, + { + "name": "edubase_get_event_permission", + "description": "Check if a user has permission on an event.", + "inputSchema": { + "type": "object", + "properties": { + "event": { + "type": "string", + "description": "event identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / finances / grant / admin)" + } + }, + "required": [ + "event", + "user", + "permission" + ] + } + }, + { + "name": "edubase_post_event_permission", + "description": "Create new permission for a user on an event.", + "inputSchema": { + "type": "object", + "properties": { + "event": { + "type": "string", + "description": "event identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / finances / grant / admin)" + } + }, + "required": [ + "event", + "user", + "permission" + ] + } + }, + { + "name": "edubase_delete_event_permission", + "description": "Remove a user permission from an event.", + "inputSchema": { + "type": "object", + "properties": { + "event": { + "type": "string", + "description": "event identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / finances / grant / admin)" + } + }, + "required": [ + "event", + "user", + "permission" + ] + } + }, + { + "name": "edubase_get_exam_permission", + "description": "Check if a user has permission on an exam.", + 
"inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "exam", + "user", + "permission" + ] + } + }, + { + "name": "edubase_post_exam_permission", + "description": "Create new permission for a user on an exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "exam", + "user", + "permission" + ] + } + }, + { + "name": "edubase_delete_exam_permission", + "description": "Remove a user permission from an exam.", + "inputSchema": { + "type": "object", + "properties": { + "exam": { + "type": "string", + "description": "exam identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "exam", + "user", + "permission" + ] + } + }, + { + "name": "edubase_get_integration_permission", + "description": "Check if a user has permission on an integration.", + "inputSchema": { + "type": "object", + "properties": { + "integration": { + "type": "string", + "description": "integration identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "integration", + "user", + "permission" + ] + 
} + }, + { + "name": "edubase_post_integration_permission", + "description": "Create new permission for a user on an integration.", + "inputSchema": { + "type": "object", + "properties": { + "integration": { + "type": "string", + "description": "integration identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "integration", + "user", + "permission" + ] + } + }, + { + "name": "edubase_delete_integration_permission", + "description": "Remove a user permission from an integration.", + "inputSchema": { + "type": "object", + "properties": { + "integration": { + "type": "string", + "description": "integration identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "integration", + "user", + "permission" + ] + } + }, + { + "name": "edubase_get_organization_permission", + "description": "Check if a user has permission on an organization.", + "inputSchema": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "organization identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "organization", + "user", + "permission" + ] + } + }, + { + "name": "edubase_post_organization_permission", + "description": "Create new permission for a user on an organization.", + "inputSchema": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "organization identification string" + }, + "user": { + "type": "string", + 
"description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "organization", + "user", + "permission" + ] + } + }, + { + "name": "edubase_delete_organization_permission", + "description": "Remove a user permission from an organization.", + "inputSchema": { + "type": "object", + "properties": { + "organization": { + "type": "string", + "description": "organization identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "organization", + "user", + "permission" + ] + } + }, + { + "name": "edubase_get_quiz_permission", + "description": "Check if a user has permission on a quiz.", + "inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "quiz", + "user", + "permission" + ] + } + }, + { + "name": "edubase_post_quiz_permission", + "description": "Create new permission for a user on a quiz.", + "inputSchema": { + "type": "object", + "properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "quiz", + "user", + "permission" + ] + } + }, + { + "name": "edubase_delete_quiz_permission", + "description": "Remove a user permission from a quiz.", + "inputSchema": { + "type": "object", + 
"properties": { + "quiz": { + "type": "string", + "description": "quiz identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "quiz", + "user", + "permission" + ] + } + }, + { + "name": "edubase_get_scorm_permission", + "description": "Check if a user has permission on a SCORM learning material.", + "inputSchema": { + "type": "object", + "properties": { + "scorm": { + "type": "string", + "description": "SCORM identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "scorm", + "user", + "permission" + ] + } + }, + { + "name": "edubase_post_scorm_permission", + "description": "Create new permission for a user on a SCORM learning material.", + "inputSchema": { + "type": "object", + "properties": { + "scorm": { + "type": "string", + "description": "SCORM identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "scorm", + "user", + "permission" + ] + } + }, + { + "name": "edubase_delete_scorm_permission", + "description": "Remove a user permission from a SCORM learning material.", + "inputSchema": { + "type": "object", + "properties": { + "scorm": { + "type": "string", + "description": "SCORM identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "scorm", + "user", + "permission" + ] + } + }, 
+ { + "name": "edubase_get_tag_permission", + "description": "Check if a user has permission on a tag.", + "inputSchema": { + "type": "object", + "properties": { + "tag": { + "type": "string", + "description": "tag identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "tag", + "user", + "permission" + ] + } + }, + { + "name": "edubase_post_tag_permission", + "description": "Create new permission for a user on a tag.", + "inputSchema": { + "type": "object", + "properties": { + "tag": { + "type": "string", + "description": "tag identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "tag", + "user", + "permission" + ] + } + }, + { + "name": "edubase_delete_tag_permission", + "description": "Remove a user permission from a tag.", + "inputSchema": { + "type": "object", + "properties": { + "tag": { + "type": "string", + "description": "tag identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "tag", + "user", + "permission" + ] + } + }, + { + "name": "edubase_get_video_permission", + "description": "Check if a user has permission on a video.", + "inputSchema": { + "type": "object", + "properties": { + "video": { + "type": "string", + "description": "video identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + 
}, + "required": [ + "video", + "user", + "permission" + ] + } + }, + { + "name": "edubase_post_video_permission", + "description": "Create new permission for a user on a video.", + "inputSchema": { + "type": "object", + "properties": { + "video": { + "type": "string", + "description": "video identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "video", + "user", + "permission" + ] + } + }, + { + "name": "edubase_delete_video_permission", + "description": "Remove a user permission from a video.", + "inputSchema": { + "type": "object", + "properties": { + "video": { + "type": "string", + "description": "video identification string" + }, + "user": { + "type": "string", + "description": "user identification string" + }, + "permission": { + "type": "string", + "description": "permission level (view / control / modify / grant / admin)" + } + }, + "required": [ + "video", + "user", + "permission" + ] + } + }, + { + "name": "edubase_post_custom_metric", + "description": "Update a custom metric.", + "inputSchema": { + "type": "object", + "properties": { + "metric": { + "type": "string", + "description": "metric name" + }, + "value": { + "type": "number", + "description": "target value (also accepts increments with a + prefix)" + } + }, + "required": [ + "metric", + "value" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "ramp-mcp": { + "display_name": "Ramp MCP", + "repository": { + "type": "git", + "url": "https://github.com/ramp-public/ramp-mcp" + }, + "homepage": "https://ramp.com", + "author": { + "name": "ramp-public" + }, + "license": "MIT", + "tags": [ + "ramp", + "finance", + "api", + "database", + "etl" + ], + "arguments": { + "RAMP_CLIENT_ID": { + "description": "Ramp API client ID", + "required": true, + "example": "" + }, + 
"RAMP_CLIENT_SECRET": { + "description": "Ramp API client secret", + "required": true, + "example": "" + }, + "RAMP_ENV": { + "description": "Ramp environment (demo, qa, or prd)", + "required": true, + "example": "demo" + }, + "-s": { + "description": "Comma-separated list of API scopes to enable", + "required": true, + "example": "transactions:read,reimbursements:read" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/ramp-public/ramp-mcp.git", + "ramp-mcp", + "-s", + "${-s}" + ], + "env": { + "RAMP_CLIENT_ID": "${RAMP_CLIENT_ID}", + "RAMP_CLIENT_SECRET": "${RAMP_CLIENT_SECRET}", + "RAMP_ENV": "${RAMP_ENV}" + }, + "description": "Run using uv package manager", + "recommended": true + } + }, + "examples": [ + { + "title": "Query transactions", + "description": "Load and analyze transaction data from Ramp", + "prompt": "Load my recent transactions and show me the top 5 vendors by spend amount." + } + ], + "name": "ramp-mcp", + "description": "A Model Context Protocol server for retrieving and analyzing data or running tasks for [Ramp](https://ramp.com) using [Developer API](https://docs.ramp.com/developer-api/v1/overview/introduction). In order to get around token and input size limitations, this server implements a simple ETL pipeline + ephemeral sqlite database in memory for analysis by an LLM. All requests are made to demo by default, but can be changed by setting `RAMP_ENV=prd`. 
Large datasets may not be processable due to API and/or your MCP client limitations.", + "categories": [ + "Finance" + ], + "is_official": true + }, + "opendota": { + "name": "opendota", + "display_name": "OpenDota", + "description": "Interact with OpenDota API to retrieve Dota 2 match data, player statistics, and more.", + "repository": { + "type": "git", + "url": "https://github.com/asusevski/opendota-mcp-server" + }, + "homepage": "https://github.com/asusevski/opendota-mcp-server", + "author": { + "name": "asusevski" + }, + "license": "MIT", + "categories": [ + "Analytics" + ], + "tags": [ + "Dota 2", + "API", + "Gaming", + "Statistics" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/asusevski/opendota-mcp-server.git", + "src/opendota_server/server" + ] + } + } + }, + "apimatic-validator-mcp": { + "display_name": "APIMatic Validator MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/apimatic/apimatic-validator-mcp" + }, + "license": "[NOT GIVEN]", + "homepage": "https://www.apimatic.io/", + "author": { + "name": "apimatic" + }, + "tags": [ + "OpenAPI", + "validation", + "APIMatic" + ], + "arguments": { + "APIMATIC_API_KEY": { + "description": "API key for APIMatic service", + "required": true, + "example": "" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "node", + "args": [ + "build/index.js" + ], + "package": "[NOT GIVEN]", + "env": { + "APIMATIC_API_KEY": "" + }, + "description": "Run the APIMatic Validator MCP Server using Node.js", + "recommended": true + } + }, + "examples": [ + { + "title": "Validate OpenAPI Specification", + "description": "Validate an OpenAPI file using APIMatic", + "prompt": "Please validate this OpenAPI specification" + } + ], + "name": "apimatic-validator-mcp", + "description": "This repository provides a Model Context Protocol (MCP) Server for validating OpenAPI specifications using 
[APIMatic](https://www.apimatic.io/). The server processes OpenAPI files and returns validation summaries by leveraging APIMatic\u2019s API.", + "categories": [ + "Dev Tools" + ], + "is_official": true + }, + "stripe": { + "name": "stripe", + "display_name": "Stripe Model Context Protocol", + "description": "The Stripe Model Context Protocol server allows you to integrate with Stripe APIs through function calling. This protocol supports various tools to interact with different Stripe services.", + "repository": { + "type": "git", + "url": "https://github.com/stripe/agent-toolkit" + }, + "homepage": "https://github.com/stripe/agent-toolkit/tree/main/modelcontextprotocol", + "author": { + "name": "stripe" + }, + "license": "MIT", + "categories": [ + "Finance" + ], + "tags": [ + "stripe", + "payments", + "customers", + "refunds" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@stripe/mcp", + "--tools=all", + "--api-key=${STRIPE_SECRET_KEY}" + ] + } + }, + "examples": [ + { + "title": "Create a customer", + "description": "Creates a new customer in Stripe.", + "prompt": "{\"tool\": \"customer_create\", \"arguments\": {\"email\": \"customer@example.com\", \"name\": \"John Doe\"}}" + }, + { + "title": "Retrieve a customer", + "description": "Retrieves details of an existing customer.", + "prompt": "{\"tool\": \"customer_retrieve\", \"arguments\": {\"customer_id\": \"cus_123456\"}}" + }, + { + "title": "Create a payment intent", + "description": "Creates a payment intent for processing payments.", + "prompt": "{\"tool\": \"payment_intent_create\", \"arguments\": {\"amount\": 5000, \"currency\": \"usd\", \"customer\": \"cus_123456\"}}" + }, + { + "title": "Create a refund", + "description": "Creates a refund for a charge.", + "prompt": "{\"tool\": \"refund_create\", \"arguments\": {\"charge_id\": \"ch_abc123\"}}" + } + ], + "arguments": { + "STRIPE_SECRET_KEY": { + "description": "Your Stripe secret API key required for 
authenticating requests to the Stripe API.", + "required": true, + "example": "sk_test_4eC39HqLyjWDarjtT1zdp7dc" + } + }, + "tools": [ + { + "name": "create_customer", + "description": "\nThis tool will create a customer in Stripe.\n\nIt takes two arguments:\n- name (str): The name of the customer.\n- email (str, optional): The email of the customer.\n", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name of the customer" + }, + "email": { + "type": "string", + "format": "email", + "description": "The email of the customer" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "list_customers", + "description": "\nThis tool will fetch a list of Customers from Stripe.\n\nIt takes no input.\n", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "minimum": 1, + "maximum": 100, + "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100." + }, + "email": { + "type": "string", + "description": "A case-sensitive filter on the list based on the customer's email field. The value must be a string." + } + } + } + }, + { + "name": "create_product", + "description": "\nThis tool will create a product in Stripe.\n\nIt takes two arguments:\n- name (str): The name of the product.\n- description (str, optional): The description of the product.\n", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name of the product." + }, + "description": { + "type": "string", + "description": "The description of the product." 
+ } + }, + "required": [ + "name" + ] + } + }, + { + "name": "list_products", + "description": "\nThis tool will fetch a list of Products from Stripe.\n\nIt takes one optional argument:\n- limit (int, optional): The number of products to return.\n", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "minimum": 1, + "maximum": 100, + "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 10." + } + } + } + }, + { + "name": "create_price", + "description": "\nThis tool will create a price in Stripe. If a product has not already been specified, a product should be created first.\n\nIt takes three arguments:\n- product (str): The ID of the product to create the price for.\n- unit_amount (int): The unit amount of the price in cents.\n- currency (str): The currency of the price.\n", + "inputSchema": { + "type": "object", + "properties": { + "product": { + "type": "string", + "description": "The ID of the product to create the price for." + }, + "unit_amount": { + "type": "integer", + "description": "The unit amount of the price in cents." + }, + "currency": { + "type": "string", + "description": "The currency of the price." + } + }, + "required": [ + "product", + "unit_amount", + "currency" + ] + } + }, + { + "name": "list_prices", + "description": "\nThis tool will fetch a list of Prices from Stripe.\n\nIt takes two arguments.\n- product (str, optional): The ID of the product to list prices for.\n- limit (int, optional): The number of prices to return.\n", + "inputSchema": { + "type": "object", + "properties": { + "product": { + "type": "string", + "description": "The ID of the product to list prices for." + }, + "limit": { + "type": "integer", + "minimum": 1, + "maximum": 100, + "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 10." 
+ } + } + } + }, + { + "name": "create_payment_link", + "description": "\nThis tool will create a payment link in Stripe.\n\nIt takes two arguments:\n- price (str): The ID of the price to create the payment link for.\n- quantity (int): The quantity of the product to include in the payment link.\n", + "inputSchema": { + "type": "object", + "properties": { + "price": { + "type": "string", + "description": "The ID of the price to create the payment link for." + }, + "quantity": { + "type": "integer", + "description": "The quantity of the product to include." + } + }, + "required": [ + "price", + "quantity" + ] + } + }, + { + "name": "create_invoice", + "description": "\nThis tool will create an invoice in Stripe.\n\nIt takes two arguments:\n- customer (str): The ID of the customer to create the invoice for.\n\n- days_until_due (int, optional): The number of days until the invoice is due.\n", + "inputSchema": { + "type": "object", + "properties": { + "customer": { + "type": "string", + "description": "The ID of the customer to create the invoice for." + }, + "days_until_due": { + "type": "integer", + "description": "The number of days until the invoice is due." + } + }, + "required": [ + "customer" + ] + } + }, + { + "name": "create_invoice_item", + "description": "\nThis tool will create an invoice item in Stripe.\n\nIt takes two arguments:\n- customer (str): The ID of the customer to create the invoice item for.\n\n- price (str): The ID of the price to create the invoice item for.\n- invoice (str): The ID of the invoice to create the invoice item for.\n", + "inputSchema": { + "type": "object", + "properties": { + "customer": { + "type": "string", + "description": "The ID of the customer to create the invoice item for." + }, + "price": { + "type": "string", + "description": "The ID of the price for the item." + }, + "invoice": { + "type": "string", + "description": "The ID of the invoice to create the item for." 
+ } + }, + "required": [ + "customer", + "price", + "invoice" + ] + } + }, + { + "name": "finalize_invoice", + "description": "\nThis tool will finalize an invoice in Stripe.\n\nIt takes one argument:\n- invoice (str): The ID of the invoice to finalize.\n", + "inputSchema": { + "type": "object", + "properties": { + "invoice": { + "type": "string", + "description": "The ID of the invoice to finalize." + } + }, + "required": [ + "invoice" + ] + } + }, + { + "name": "retrieve_balance", + "description": "\nThis tool will retrieve the balance from Stripe. It takes no input.\n", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "create_refund", + "description": "\nThis tool will refund a payment intent in Stripe.\n\nIt takes three arguments:\n- payment_intent (str): The ID of the payment intent to refund.\n- amount (int, optional): The amount to refund in cents.\n- reason (str, optional): The reason for the refund.\n", + "inputSchema": { + "type": "object", + "properties": { + "payment_intent": { + "type": "string", + "description": "The ID of the PaymentIntent to refund." + }, + "amount": { + "type": "integer", + "description": "The amount to refund in cents." + } + }, + "required": [ + "payment_intent" + ] + } + }, + { + "name": "list_payment_intents", + "description": "\nThis tool will list payment intents in Stripe.\n\nIt takes two arguments:\n- customer (str, optional): The ID of the customer to list payment intents for.\n\n- limit (int, optional): The number of payment intents to return.\n", + "inputSchema": { + "type": "object", + "properties": { + "customer": { + "type": "string", + "description": "The ID of the customer to list payment intents for." + }, + "limit": { + "type": "integer", + "minimum": 1, + "maximum": 100, + "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100." 
+ } + } + } + }, + { + "name": "search_documentation", + "description": "\nThis tool will take in a user question about integrating with Stripe in their application, then search and retrieve relevant Stripe documentation to answer the question.\n\nIt takes two arguments:\n- question (str): The user question to search an answer for in the Stripe documentation.\n- language (str, optional): The programming language to search for in the the documentation.\n", + "inputSchema": { + "type": "object", + "properties": { + "question": { + "type": "string", + "description": "The user question about integrating with Stripe will be used to search the documentation." + }, + "language": { + "type": "string", + "enum": [ + "dotnet", + "go", + "java", + "node", + "php", + "ruby", + "python", + "curl" + ], + "description": "The programming language to search for in the the documentation." + } + }, + "required": [ + "question" + ] + } + } + ], + "is_official": true + }, + "unity3d-game-engine": { + "name": "unity3d-game-engine", + "display_name": "Unity3D Game Engine", + "description": "An MCP server that enables LLMs to interact with Unity3d Game Engine, supporting access to a variety of the Unit's Editor engine tools (e.g. 
Console Logs, Test Runner logs, Editor functions, hierarchy state, etc) and executing them as MCP tools or gather them as resources.", + "repository": { + "type": "git", + "url": "https://github.com/CoderGamester/mcp-unity" + }, + "homepage": "https://github.com/CoderGamester/mcp-unity", + "author": { + "name": "CoderGamester" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "Unity", + "Node.js", + "TypeScript", + "WebSocket", + "AI" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/CoderGamester/mcp-unity" + ], + "env": { + "UNITY_PORT": "8090" + } + } + }, + "examples": [ + { + "title": "Execute Menu Item", + "description": "Execute Unity menu items programmatically using MCP Unity.", + "prompt": "mcp-unity execute_menu_item" + } + ], + "arguments": { + "UNITY_PORT": { + "description": "Environment variable to set the port number for the Unity MCP Server. This should be set to the desired port for the server to run and connect with the Unity Editor.", + "required": false, + "example": "8090" + } + }, + "tools": [ + { + "name": "execute_menu_item", + "description": "Executes a Unity menu item by path", + "inputSchema": { + "type": "object", + "properties": { + "menuPath": { + "type": "string", + "description": "The path to the menu item to execute (e.g. \"GameObject/Create Empty\")" + } + }, + "required": [ + "menuPath" + ] + } + }, + { + "name": "select_object", + "description": "Sets the selected object in the Unity editor by path or ID", + "inputSchema": { + "type": "object", + "properties": { + "objectPath": { + "type": "string", + "description": "The path or ID of the object to select (e.g. 
\"Main Camera\" or a Unity object ID)" + } + }, + "required": [ + "objectPath" + ] + } + }, + { + "name": "package_manager", + "description": "Manages packages in the Unity Package Manager", + "inputSchema": { + "type": "object", + "properties": { + "methodSource": { + "type": "string", + "description": "The method source to use (registry, github, or disk) to add the package" + }, + "packageName": { + "type": "string", + "description": "The package name to add from Unity registry (e.g. com.unity.textmeshpro)" + }, + "version": { + "type": "string", + "description": "The version to use for registry packages (optional)" + }, + "repositoryUrl": { + "type": "string", + "description": "The GitHub repository URL (e.g. https://github.com/username/repo.git)" + }, + "branch": { + "type": "string", + "description": "The branch to use for GitHub packages (optional)" + }, + "path": { + "type": "string", + "description": "The path to use (folder path for disk method or subfolder for GitHub)" + } + }, + "required": [ + "methodSource" + ] + } + }, + { + "name": "run_tests", + "description": "Runs Unity's Test Runner tests", + "inputSchema": { + "type": "object", + "properties": { + "testMode": { + "type": "string", + "description": "The test mode to run (EditMode, PlayMode, or All)" + }, + "testFilter": { + "type": "string", + "description": "Optional test filter (e.g. 
specific test name or namespace)" + } + } + } + }, + { + "name": "notify_message", + "description": "Sends a message to the Unity console", + "inputSchema": { + "type": "object", + "properties": { + "message": { + "type": "string", + "description": "The message to display in the Unity console" + }, + "type": { + "type": "string", + "description": "The type of message (info, warning, error)" + } + }, + "required": [ + "message" + ] + } + } + ] + }, + "needle-mcp": { + "display_name": "Needle MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/needle-ai/needle-mcp" + }, + "homepage": "https://needle-ai.com", + "author": { + "name": "needle-ai" + }, + "license": "MIT", + "tags": [ + "document management", + "search", + "Needle" + ], + "arguments": { + "NEEDLE_API_KEY": { + "description": "API key for Needle service", + "required": true, + "example": "your_needle_api_key" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/needle-ai/needle-mcp", + "needle-mcp" + ], + "env": { + "NEEDLE_API_KEY": "your_needle_api_key" + }, + "description": "Run using UV package manager", + "recommended": true + } + }, + "examples": [ + { + "title": "Create Collection", + "description": "Create a new document collection", + "prompt": "Create a new collection called 'Technical Docs'" + }, + { + "title": "Add Document", + "description": "Add a document to an existing collection", + "prompt": "Add this document to the collection, which is https://needle-ai.com" + }, + { + "title": "Search Collection", + "description": "Search for information in a collection", + "prompt": "Search the collection for information about AI" + }, + { + "title": "List Collections", + "description": "List all available collections", + "prompt": "List all my collections" + } + ], + "name": "needle-mcp", + "description": "MCP (Model Context Protocol) server to manage documents and perform searches using 
[Needle](https://needle-ai.com) through Claude\u2019s Desktop Application.", + "categories": [ + "Knowledge Base" + ], + "is_official": true, + "tools": [ + { + "name": "needle_list_collections", + "description": "Retrieve a complete list of all Needle document collections accessible to your account. \n Returns detailed information including collection IDs, names, and creation dates. Use this tool when you need to:\n - Get an overview of available document collections\n - Find collection IDs for subsequent operations\n - Verify collection existence before performing operations\n The response includes metadata that's required for other Needle operations.", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "needle_create_collection", + "description": "Create a new document collection in Needle for organizing and searching documents. \n A collection acts as a container for related documents and enables semantic search across its contents.\n Use this tool when you need to:\n - Start a new document organization\n - Group related documents together\n - Set up a searchable document repository\n Returns a collection ID that's required for subsequent operations. Choose a descriptive name that \n reflects the collection's purpose for better organization.", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "A clear, descriptive name for the collection that reflects its purpose and contents" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "needle_get_collection_details", + "description": "Fetch comprehensive metadata about a specific Needle collection. 
\n Provides detailed information about the collection's configuration, creation date, and current status.\n Use this tool when you need to:\n - Verify a collection's existence and configuration\n - Check collection metadata before operations\n - Get creation date and other attributes\n Requires a valid collection ID and returns detailed collection metadata. Will error if collection doesn't exist.", + "inputSchema": { + "type": "object", + "properties": { + "collection_id": { + "type": "string", + "description": "The unique collection identifier returned from needle_create_collection or needle_list_collections" + } + }, + "required": [ + "collection_id" + ] + } + }, + { + "name": "needle_get_collection_stats", + "description": "Retrieve detailed statistical information about a Needle collection's contents and status.\n Provides metrics including:\n - Total number of documents\n - Processing status of documents\n - Storage usage and limits\n - Index status and health\n Use this tool to:\n - Monitor collection size and growth\n - Verify processing completion\n - Check collection health before operations\n Essential for ensuring collection readiness before performing searches.", + "inputSchema": { + "type": "object", + "properties": { + "collection_id": { + "type": "string", + "description": "The unique collection identifier to get statistics for" + } + }, + "required": [ + "collection_id" + ] + } + }, + { + "name": "needle_list_files", + "description": "List all documents stored within a specific Needle collection with their current status.\n Returns detailed information about each file including:\n - File ID and name\n - Processing status (pending, processing, complete, error)\n - Upload date and metadata\n Use this tool when you need to:\n - Inventory available documents\n - Check processing status of uploads\n - Get file IDs for reference\n - Verify document availability before searching\n Essential for monitoring document processing completion before performing 
searches.", + "inputSchema": { + "type": "object", + "properties": { + "collection_id": { + "type": "string", + "description": "The unique collection identifier to list files from" + } + }, + "required": [ + "collection_id" + ] + } + }, + { + "name": "needle_add_file", + "description": "Add a new document to a Needle collection by providing a URL for download.\n Supports multiple file formats including:\n - PDF documents\n - Microsoft Word files (DOC, DOCX)\n - Plain text files (TXT)\n - Web pages (HTML)\n \n The document will be:\n 1. Downloaded from the provided URL\n 2. Processed for text extraction\n 3. Indexed for semantic search\n \n Use this tool when you need to:\n - Add new documents to a collection\n - Make documents searchable\n - Expand your knowledge base\n \n Important: Documents require processing time before they're searchable.\n Check processing status using needle_list_files before searching new content.", + "inputSchema": { + "type": "object", + "properties": { + "collection_id": { + "type": "string", + "description": "The unique collection identifier where the file will be added" + }, + "name": { + "type": "string", + "description": "A descriptive filename that will help identify this document in results" + }, + "url": { + "type": "string", + "description": "Public URL where the document can be downloaded from" + } + }, + "required": [ + "collection_id", + "name", + "url" + ] + } + }, + { + "name": "needle_search", + "description": "Perform intelligent semantic search across documents in a Needle collection.\n This tool uses advanced embedding technology to find relevant content based on meaning,\n not just keywords. 
The search:\n - Understands natural language queries\n - Finds conceptually related content\n - Returns relevant text passages with source information\n - Ranks results by semantic relevance\n \n Use this tool when you need to:\n - Find specific information within documents\n - Answer questions from document content\n - Research topics across multiple documents\n - Locate relevant passages and their sources\n \n More effective than traditional keyword search for:\n - Natural language questions\n - Conceptual queries\n - Finding related content\n \n Returns matching text passages with their source file IDs.", + "inputSchema": { + "type": "object", + "properties": { + "collection_id": { + "type": "string", + "description": "The unique collection identifier to search within" + }, + "query": { + "type": "string", + "description": "Natural language query describing the information you're looking for" + } + }, + "required": [ + "collection_id", + "query" + ] + } + } + ] + }, + "cloudinary": { + "name": "cloudinary", + "display_name": "Cloudinary", + "description": "Cloudinary Model Context Protocol Server to upload media to Cloudinary and get back the media link and details.", + "repository": { + "type": "git", + "url": "https://github.com/felores/cloudinary-mcp-server" + }, + "homepage": "https://github.com/felores/cloudinary-mcp-server", + "author": { + "name": "felores" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "cloudinary", + "images", + "videos" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@felores/cloudinary-mcp-server@latest" + ], + "env": { + "CLOUDINARY_CLOUD_NAME": "${CLOUDINARY_CLOUD_NAME}", + "CLOUDINARY_API_KEY": "${CLOUDINARY_API_KEY}", + "CLOUDINARY_API_SECRET": "${CLOUDINARY_API_SECRET}" + } + } + }, + "examples": [ + { + "title": "Upload an Image", + "description": "This example demonstrates how to upload an image to Cloudinary.", + "prompt": "use_mcp_tool({ server_name: 
'cloudinary', tool_name: 'upload', arguments: { file: 'path/to/image.jpg', resource_type: 'image', public_id: 'my-custom-id' }});" + } + ], + "arguments": { + "CLOUDINARY_CLOUD_NAME": { + "description": "Your Cloudinary cloud name, used to identify your account and resources.", + "required": true, + "example": "my_cloud_name" + }, + "CLOUDINARY_API_KEY": { + "description": "Your Cloudinary API key, used to authenticate requests to the Cloudinary API.", + "required": true, + "example": "my_api_key" + }, + "CLOUDINARY_API_SECRET": { + "description": "Your Cloudinary API secret, used to authenticate requests and secure your Cloudinary account.", + "required": true, + "example": "my_api_secret" + } + }, + "tools": [ + { + "name": "upload", + "description": "Upload media (images/videos) to Cloudinary. For large files, the upload is processed in chunks and returns a streaming response. The uploaded asset will be available at:\n- HTTP: http://res.cloudinary.com/{cloud_name}/{resource_type}/upload/v1/{public_id}.{format}\n- HTTPS: https://res.cloudinary.com/{cloud_name}/{resource_type}/upload/v1/{public_id}.{format}\nwhere {cloud_name} is your Cloudinary cloud name, resource_type is 'image' or 'video', and format is determined by the file extension.", + "inputSchema": { + "type": "object", + "properties": { + "file": { + "type": "string", + "description": "Path to file, URL, or base64 data URI to upload" + }, + "resource_type": { + "type": "string", + "enum": [ + "image", + "video", + "raw" + ], + "description": "Type of resource to upload. For videos, the upload will return a streaming response as it processes in chunks." + }, + "public_id": { + "type": "string", + "description": "Public ID to assign to the uploaded asset. This will be used in the final URL. If not provided, Cloudinary will generate one." 
+ }, + "overwrite": { + "type": "boolean", + "description": "Whether to overwrite existing assets with the same public ID" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Tags to assign to the uploaded asset" + } + }, + "required": [ + "file" + ] + } + } + ] + }, + "notion": { + "name": "notion", + "display_name": "Notion", + "description": "Notion MCP integration. Search, Read, Update, and Create pages through Claude chat.", + "repository": { + "type": "git", + "url": "https://github.com/v-3/notion-server" + }, + "homepage": "https://github.com/v-3/notion-server", + "author": { + "name": "v-3" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "Notion" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/v-3/notion-server" + ], + "env": { + "NOTION_API_KEY": "${NOTION_API_KEY}" + } + } + }, + "arguments": { + "NOTION_API_KEY": { + "description": "Your Notion API key for authentication to access data within your Notion workspace.", + "required": true, + "example": "your_notion_api_key_here" + } + } + }, + "dicom": { + "name": "dicom", + "display_name": "DICOM Model Context Protocol", + "description": "An MCP server to query and retrieve medical images and for parsing and reading dicom-encapsulated documents (pdf etc.).", + "repository": { + "type": "git", + "url": "https://github.com/ChristianHinge/dicom-mcp" + }, + "homepage": "https://github.com/ChristianHinge/dicom-mcp", + "author": { + "name": "ChristianHinge", + "url": "https://github.com/ChristianHinge" + }, + "license": "MIT", + "categories": [ + "Professional Apps" + ], + "tags": [ + "DICOM", + "Medical Imaging", + "AI", + "PDF Extraction" + ], + "examples": [ + { + "title": "List available DICOM nodes", + "description": "Retrieve and display all configured DICOM nodes and calling AE titles.", + "prompt": "list_dicom_nodes()" + } + ], + "installations": { + "uvx": { + "type": 
"uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/ChristianHinge/dicom-mcp", + "dicom-mcp", + "${CONFIG_PATH}" + ] + } + }, + "arguments": { + "CONFIG_PATH": { + "description": "Path to the configuration file", + "required": true, + "example": "/path/to/config.yaml" + } + }, + "tools": [ + { + "name": "list_dicom_nodes", + "description": "Lists all configured DICOM nodes and calling AE titles.", + "inputSchema": {}, + "required": [] + }, + { + "name": "switch_dicom_node", + "description": "Switches to a different configured DICOM node.", + "inputSchema": { + "node_name": { + "type": "string", + "description": "Name of the node to switch to" + } + }, + "required": [ + "node_name" + ] + }, + { + "name": "switch_calling_aet", + "description": "Switches to a different configured calling AE title.", + "inputSchema": { + "aet_name": { + "type": "string", + "description": "Name of the calling AE title to switch to" + } + }, + "required": [ + "aet_name" + ] + }, + { + "name": "verify_connection", + "description": "Tests connectivity to the configured DICOM node using C-ECHO.", + "inputSchema": {}, + "required": [] + }, + { + "name": "query_patients", + "description": "Search for patients matching specified criteria.", + "inputSchema": { + "name_pattern": { + "type": "string", + "description": "Patient name pattern (can include wildcards)", + "optional": true + }, + "patient_id": { + "type": "string", + "description": "Patient ID", + "optional": true + }, + "birth_date": { + "type": "string", + "description": "Patient birth date (YYYYMMDD)", + "optional": true + }, + "attribute_preset": { + "type": "string", + "description": "Preset level of detail", + "optional": true + }, + "additional_attributes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Additional DICOM attributes to include", + "optional": true + }, + "exclude_attributes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "DICOM 
attributes to exclude", + "optional": true + } + }, + "required": [] + }, + { + "name": "query_studies", + "description": "Search for studies matching specified criteria.", + "inputSchema": { + "patient_id": { + "type": "string", + "description": "Patient ID", + "optional": true + }, + "study_date": { + "type": "string", + "description": "Study date or range (YYYYMMDD or YYYYMMDD-YYYYMMDD)", + "optional": true + }, + "modality_in_study": { + "type": "string", + "description": "Modalities in study", + "optional": true + }, + "study_description": { + "type": "string", + "description": "Study description (can include wildcards)", + "optional": true + }, + "accession_number": { + "type": "string", + "description": "Accession number", + "optional": true + }, + "study_instance_uid": { + "type": "string", + "description": "Study Instance UID", + "optional": true + }, + "attribute_preset": { + "type": "string", + "description": "Preset level of detail", + "optional": true + }, + "additional_attributes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Additional DICOM attributes to include", + "optional": true + }, + "exclude_attributes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "DICOM attributes to exclude", + "optional": true + } + }, + "required": [] + }, + { + "name": "query_series", + "description": "Search for series within a study.", + "inputSchema": { + "study_instance_uid": { + "type": "string", + "description": "Study Instance UID" + }, + "modality": { + "type": "string", + "description": "Modality (e.g., 'CT', 'MR')", + "optional": true + }, + "series_number": { + "type": "string", + "description": "Series number", + "optional": true + }, + "series_description": { + "type": "string", + "description": "Series description", + "optional": true + }, + "series_instance_uid": { + "type": "string", + "description": "Series Instance UID", + "optional": true + }, + "attribute_preset": { + "type": "string", + 
"description": "Preset level of detail", + "optional": true + }, + "additional_attributes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Additional DICOM attributes to include", + "optional": true + }, + "exclude_attributes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "DICOM attributes to exclude", + "optional": true + } + }, + "required": [ + "study_instance_uid" + ] + }, + { + "name": "query_instances", + "description": "Search for instances within a series.", + "inputSchema": { + "series_instance_uid": { + "type": "string", + "description": "Series Instance UID" + }, + "instance_number": { + "type": "string", + "description": "Instance number", + "optional": true + }, + "sop_instance_uid": { + "type": "string", + "description": "SOP Instance UID", + "optional": true + }, + "attribute_preset": { + "type": "string", + "description": "Preset level of detail", + "optional": true + }, + "additional_attributes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Additional DICOM attributes to include", + "optional": true + }, + "exclude_attributes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "DICOM attributes to exclude", + "optional": true + } + }, + "required": [ + "series_instance_uid" + ] + }, + { + "name": "get_attribute_presets", + "description": "Lists available attribute presets for queries.", + "inputSchema": {}, + "required": [] + }, + { + "name": "retrieve_instance", + "description": "Retrieves a specific DICOM instance and saves it to the local filesystem.", + "inputSchema": { + "study_instance_uid": { + "type": "string", + "description": "Study Instance UID" + }, + "series_instance_uid": { + "type": "string", + "description": "Series Instance UID" + }, + "sop_instance_uid": { + "type": "string", + "description": "SOP Instance UID" + }, + "output_directory": { + "type": "string", + "description": "Directory to save the retrieved instance 
to (default: './retrieved_files')", + "optional": true + } + }, + "required": [ + "study_instance_uid", + "series_instance_uid", + "sop_instance_uid" + ] + }, + { + "name": "extract_pdf_text_from_dicom", + "description": "Retrieves a DICOM instance containing an encapsulated PDF and extracts its text content.", + "inputSchema": { + "study_instance_uid": { + "type": "string", + "description": "Study Instance UID" + }, + "series_instance_uid": { + "type": "string", + "description": "Series Instance UID" + }, + "sop_instance_uid": { + "type": "string", + "description": "SOP Instance UID" + } + }, + "required": [ + "study_instance_uid", + "series_instance_uid", + "sop_instance_uid" + ] + } + ] + }, + "huggingface-spaces": { + "name": "huggingface-spaces", + "display_name": "HuggingFace Spaces \ud83e\udd17", + "description": "Server for using HuggingFace Spaces, supporting Open Source Image, Audio, Text Models and more. Claude Desktop mode for easy integration.", + "repository": { + "type": "git", + "url": "https://github.com/evalstate/mcp-hfspace" + }, + "author": { + "name": "evalstate" + }, + "license": "MIT", + "categories": [ + "AI Systems" + ], + "tags": [ + "Hugging Face", + "Claude Desktop" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@llmindset/mcp-hfspace" + ] + } + }, + "examples": [ + { + "title": "Image Generation Example", + "description": "Using mcp-hfspace to generate images.", + "prompt": "Use shuttleai/shuttle-3.1-aesthetic to create an image." + }, + { + "title": "Text-to-Speech Example", + "description": "Using mcp-hfspace to convert text to speech.", + "prompt": "Create an audio file from the text 'Hello, world!'." + }, + { + "title": "Speech-to-Text Example", + "description": "Using mcp-hfspace to transcribe audio to text.", + "prompt": "Transcribe the audio file 'sample_audio.wav'." 
+ }, + { + "title": "Vision Model Example", + "description": "Using mcp-hfspace to analyze images.", + "prompt": "Analyze the image file 'test_image.jpg'." + } + ], + "homepage": "https://github.com/evalstate/mcp-hfspace", + "arguments": { + "CLAUDE_DESKTOP_MODE": { + "description": "Enables or disables the Claude Desktop Mode for the server.", + "required": false, + "example": "false" + } + }, + "tools": [ + { + "name": "available-files", + "description": "A list of available file and resources. If the User requests things like 'most recent image' or 'the audio' use this tool to identify the intended resource.This tool returns 'resource uri', 'name', 'size', 'last modified' and 'mime type' in a markdown table", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "FLUX_1-schnell-infer", + "description": "Call the FLUX.1-schnell endpoint /infer", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Prompt", + "examples": [ + "Hello!!" 
+ ] + }, + "seed": { + "type": "number", + "description": "Seed", + "default": 0 + }, + "randomize_seed": { + "type": "boolean", + "description": "Randomize seed", + "default": true, + "examples": [ + true + ] + }, + "width": { + "type": "number", + "description": "Width", + "default": 1024, + "examples": [ + 256 + ] + }, + "height": { + "type": "number", + "description": "Height", + "default": 1024, + "examples": [ + 256 + ] + }, + "num_inference_steps": { + "type": "number", + "description": "Number of inference steps", + "default": 4, + "examples": [ + 1 + ] + } + }, + "required": [ + "prompt" + ] + } + } + ] + }, + "mcp-audiense-insights": { + "display_name": "Audiense Insights", + "repository": { + "type": "git", + "url": "https://github.com/AudienseCo/mcp-audiense-insights" + }, + "homepage": "https://github.com/AudienseCo/mcp-audiense-insights", + "author": { + "name": "AudienseCo" + }, + "license": "Apache 2.0", + "tags": [ + "marketing", + "audience analysis", + "insights", + "demographics", + "influencers" + ], + "arguments": { + "AUDIENSE_CLIENT_ID": { + "description": "Audiense API client ID", + "required": true, + "example": "your_client_id_here" + }, + "AUDIENSE_CLIENT_SECRET": { + "description": "Audiense API client secret", + "required": true, + "example": "your_client_secret_here" + }, + "TWITTER_BEARER_TOKEN": { + "description": "X/Twitter API Bearer Token for enriched influencer data", + "required": false, + "example": "your_token_here" + } + }, + "installations": { + "custom": { + "type": "npm", + "command": "node", + "args": [ + "/ABSOLUTE/PATH/TO/YOUR/build/index.js" + ], + "env": { + "AUDIENSE_CLIENT_ID": "your_client_id_here", + "AUDIENSE_CLIENT_SECRET": "your_client_secret_here", + "TWITTER_BEARER_TOKEN": "your_token_here" + }, + "description": "Manual installation by configuring Claude Desktop" + } + }, + "examples": [ + { + "title": "Audiense Demo", + "description": "Helps analyze Audiense reports interactively", + "prompt": 
"audiense-demo" + }, + { + "title": "Segment Matching", + "description": "Match and compare audience segments across Audiense reports, identifying similarities, unique traits, and key insights", + "prompt": "segment-matching" + } + ], + "name": "mcp-audiense-insights", + "description": "This server, based on the [Model Context Protocol (MCP)](https://github.com/modelcontextprotocol), allows **Claude** or any other MCP-compatible client to interact with your [Audiense Insights](https://www.audiense.com/) account. It extracts **marketing insights and audience analysis** from Audiense reports, covering **demographic, cultural, influencer, and content engagement analysis**.", + "categories": [ + "Analytics" + ], + "is_official": true + }, + "hubspot": { + "name": "hubspot", + "display_name": "HubSpot CRM Integration", + "description": "HubSpot CRM integration for managing contacts and companies. Create and retrieve CRM data directly through Claude chat.", + "repository": { + "type": "git", + "url": "https://github.com/buryhuang/mcp-hubspot" + }, + "homepage": "https://github.com/buryhuang/mcp-hubspot", + "author": { + "name": "buryhuang" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "HubSpot", + "API", + "AI", + "CRM", + "Integration" + ], + "installations": { + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "HUBSPOT_ACCESS_TOKEN=${HUBSPOT_ACCESS_TOKEN}", + "buryhuang/mcp-hubspot:latest" + ], + "env": { + "HUBSPOT_ACCESS_TOKEN": "${HUBSPOT_ACCESS_TOKEN}" + } + } + }, + "examples": [ + { + "title": "Create HubSpot contacts from LinkedIn", + "description": "This prompt allows you to create contacts in HubSpot by parsing information from a LinkedIn profile.", + "prompt": "Create HubSpot contacts and companies from following:\n\nJohn Doe\nSoftware Engineer at Tech Corp\nSan Francisco Bay Area \u2022 500+ connections\n\nExperience\nTech Corp\nSoftware Engineer\nJan 2020 - Present 
\u00b7 4 yrs\nSan Francisco, California\n\nPrevious Company Inc.\nSenior Developer\n2018 - 2020 \u00b7 2 yrs\n\nEducation\nUniversity of California, Berkeley\nComputer Science, BS\n2014 - 2018" + }, + { + "title": "Get latest company activities", + "description": "Use this prompt to get the latest activities related to your company in HubSpot.", + "prompt": "What's happening lately with my pipeline?" + } + ], + "arguments": { + "HUBSPOT_ACCESS_TOKEN": { + "description": "The HubSpot access token required for authenticating API requests to HubSpot.", + "required": true, + "example": "your_access_token_here" + } + }, + "tools": [ + { + "name": "hubspot_create_contact", + "description": "Create a new contact in HubSpot", + "inputSchema": { + "type": "object", + "properties": { + "firstname": { + "type": "string", + "description": "Contact's first name" + }, + "lastname": { + "type": "string", + "description": "Contact's last name" + }, + "email": { + "type": "string", + "description": "Contact's email address" + }, + "properties": { + "type": "object", + "description": "Additional contact properties" + } + }, + "required": [ + "firstname", + "lastname" + ] + } + }, + { + "name": "hubspot_create_company", + "description": "Create a new company in HubSpot", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Company name" + }, + "properties": { + "type": "object", + "description": "Additional company properties" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "hubspot_get_company_activity", + "description": "Get activity history for a specific company", + "inputSchema": { + "type": "object", + "properties": { + "company_id": { + "type": "string", + "description": "HubSpot company ID" + } + }, + "required": [ + "company_id" + ] + } + }, + { + "name": "hubspot_get_recent_engagements", + "description": "Get recent engagement activities across all contacts and companies", + "inputSchema": { + "type": 
"object", + "properties": { + "days": { + "type": "integer", + "description": "Number of days to look back (default: 7)" + }, + "limit": { + "type": "integer", + "description": "Maximum number of engagements to return (default: 50)" + } + } + } + }, + { + "name": "hubspot_get_active_companies", + "description": "Get most recently active companies from HubSpot", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "description": "Maximum number of companies to return (default: 10)" + } + } + } + }, + { + "name": "hubspot_get_active_contacts", + "description": "Get most recently active contacts from HubSpot", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "description": "Maximum number of contacts to return (default: 10)" + } + } + } + } + ] + }, + "ticketmaster": { + "name": "ticketmaster", + "display_name": "Ticketmaster", + "description": "Search for events, venues, and attractions through the Ticketmaster Discovery API", + "repository": { + "type": "git", + "url": "https://github.com/delorenj/mcp-server-ticketmaster" + }, + "homepage": "https://github.com/delorenj/mcp-server-ticketmaster", + "author": { + "name": "delorenj" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "ticketmaster", + "events", + "venues", + "attractions" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@delorenj/mcp-server-ticketmaster" + ], + "env": { + "TICKETMASTER_API_KEY": "${TICKETMASTER_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Structured JSON Output", + "description": "Example of structured JSON output for searching events.", + "prompt": "\nticketmaster\nsearch_ticketmaster\n\n{\n \"type\": \"event\",\n \"keyword\": \"concert\",\n \"startDate\": \"2025-02-01\",\n \"endDate\": \"2025-02-28\",\n \"city\": \"New York\",\n \"stateCode\": \"NY\"\n}\n\n" + }, + { + "title": "Human-Readable Text Output", + 
"description": "Example of human-readable text output for searching events.", + "prompt": "\nticketmaster\nsearch_ticketmaster\n\n{\n \"type\": \"event\",\n \"keyword\": \"concert\",\n \"startDate\": \"2025-02-01\",\n \"endDate\": \"2025-02-28\",\n \"city\": \"New York\",\n \"stateCode\": \"NY\",\n \"format\": \"text\"\n}\n\n" + } + ], + "arguments": { + "TICKETMASTER_API_KEY": { + "description": "API key required to access the Ticketmaster Discovery API.", + "required": true, + "example": "your-api-key-here" + } + }, + "tools": [ + { + "name": "search_ticketmaster", + "description": "Search for events, venues, or attractions on Ticketmaster", + "inputSchema": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "event", + "venue", + "attraction" + ], + "description": "Type of search to perform" + }, + "keyword": { + "type": "string", + "description": "Search keyword or term" + }, + "startDate": { + "type": "string", + "description": "Start date in YYYY-MM-DD format" + }, + "endDate": { + "type": "string", + "description": "End date in YYYY-MM-DD format" + }, + "city": { + "type": "string", + "description": "City name" + }, + "stateCode": { + "type": "string", + "description": "State code (e.g., NY, CA)" + }, + "countryCode": { + "type": "string", + "description": "Country code (e.g., US, CA)" + }, + "venueId": { + "type": "string", + "description": "Specific venue ID to search" + }, + "attractionId": { + "type": "string", + "description": "Specific attraction ID to search" + }, + "classificationName": { + "type": "string", + "description": "Event classification/category (e.g., \"Sports\", \"Music\")" + }, + "format": { + "type": "string", + "enum": [ + "json", + "text" + ], + "description": "Output format (defaults to json)", + "default": "json" + } + }, + "required": [ + "type" + ] + } + } + ] + }, + "figma": { + "name": "figma", + "display_name": "Figma", + "description": "Give your coding agent direct access to Figma file data, 
helping it one-shot design implementation.", + "repository": { + "type": "git", + "url": "https://github.com/GLips/Figma-Context-MCP" + }, + "homepage": "https://github.com/GLips/Figma-Context-MCP", + "author": { + "name": "GLips" + }, + "license": "MIT", + "categories": [ + "Professional Apps" + ], + "tags": [ + "Figma", + "AI" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "figma-developer-mcp", + "--figma-api-key=${FIGMA_API_KEY}", + "--stdio" + ] + } + }, + "arguments": { + "FIGMA_API_KEY": { + "description": "Your Figma API access token (required)", + "required": true, + "example": "" + } + }, + "tools": [ + { + "name": "get_figma_data", + "description": "When the nodeId cannot be obtained, obtain the layout information about the entire Figma file", + "inputSchema": { + "type": "object", + "properties": { + "fileKey": { + "type": "string", + "description": "The key of the Figma file to fetch, often found in a provided URL like figma.com/(file|design)//..." 
+ }, + "nodeId": { + "type": "string", + "description": "The ID of the node to fetch, often found as URL parameter node-id=, always use if provided" + }, + "depth": { + "type": "number", + "description": "How many levels deep to traverse the node tree, only use if explicitly requested by the user" + } + }, + "required": [ + "fileKey" + ] + } + }, + { + "name": "download_figma_images", + "description": "Download SVG and PNG images used in a Figma file based on the IDs of image or icon nodes", + "inputSchema": { + "type": "object", + "properties": { + "fileKey": { + "type": "string", + "description": "The key of the Figma file containing the node" + }, + "nodes": { + "type": "array", + "items": { + "type": "object", + "properties": { + "nodeId": { + "type": "string", + "description": "The ID of the Figma image node to fetch, formatted as 1234:5678" + }, + "imageRef": { + "type": "string", + "description": "If a node has an imageRef fill, you must include this variable. Leave blank when downloading Vector SVG images." + }, + "fileName": { + "type": "string", + "description": "The local name for saving the fetched file" + } + }, + "required": [ + "nodeId", + "fileName" + ], + "additionalProperties": false + }, + "description": "The nodes to fetch as images" + }, + "localPath": { + "type": "string", + "description": "The absolute path to the directory where images are stored in the project. Automatically creates directories if needed." 
+ } + }, + "required": [ + "fileKey", + "nodes", + "localPath" + ] + } + } + ] + }, + "riza-mcp": { + "display_name": "Riza MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/riza-io/riza-mcp" + }, + "homepage": "https://riza.io", + "author": { + "name": "riza-io" + }, + "license": "MIT", + "tags": [ + "code interpreter", + "LLM", + "tools" + ], + "arguments": { + "RIZA_API_KEY": { + "description": "API key for Riza service", + "required": true, + "example": "your-api-key" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@riza-io/riza-mcp" + ], + "env": { + "RIZA_API_KEY": "your-api-key" + }, + "recommended": true + } + }, + "examples": [ + { + "title": "Configure with Claude Desktop", + "description": "Configuration for Claude Desktop", + "prompt": "{\n \"mcpServers\": {\n \"riza-server\": {\n \"command\": \"npx\",\n \"args\": [\n \"@riza-io/riza-mcp\"\n ],\n \"env\": {\n \"RIZA_API_KEY\": \"your-api-key\"\n }\n }\n }\n}" + } + ], + "name": "riza-mcp", + "description": "[Riza](https://riza.io) offers an isolated code interpreter for your LLM-generated code.", + "categories": [ + "Dev Tools" + ], + "tools": [ + { + "name": "create_tool", + "description": "Create a new tool. This tool will be used to create new tools. You can use the tools you have created to perform tasks.", + "inputSchema": { + "type": "object", + "required": [ + "name", + "description", + "code", + "input_schema", + "language" + ], + "properties": { + "name": { + "type": "string", + "description": "The name of the tool you are writing. This is what you will use to call the tool." + }, + "description": { + "type": "string", + "description": "A description of the tool you are writing. This will help you or other agents or people pick the appropriate tool in the future." + }, + "code": { + "type": "string", + "description": "The Typescript code for the tool you are writing. 
The code should be a valid Typescript function named `execute` that takes one argument called `input`. When called, the `input` provided will match the schema of the `input_schema` of the tool." + }, + "input_schema": { + "type": "object", + "description": "The input schema for the tool. This must be provided as a valid JSON Schema object." + }, + "language": { + "type": "string", + "description": "The language of the tool you are writing. This must be either 'TYPESCRIPT' or 'PYTHON'." + } + } + } + }, + { + "name": "fetch_tool", + "description": "Fetch a tool, including its source code.", + "inputSchema": { + "type": "object", + "properties": { + "tool_id": { + "type": "string", + "description": "The ID of the tool to fetch." + } + } + } + }, + { + "name": "edit_tool", + "description": "Edit a tool, including its source code. Omit properties that you do not want to change.", + "inputSchema": { + "type": "object", + "required": [ + "tool_id", + "code", + "language", + "input_schema" + ], + "properties": { + "tool_id": { + "type": "string", + "description": "The ID of the tool you are editing." + }, + "name": { + "type": "string", + "description": "The name of the tool you are editing. This is what you will use to call the tool." + }, + "description": { + "type": "string", + "description": "A description of the tool you are editing. This will help you or other agents or people pick the appropriate tool in the future." + }, + "code": { + "type": "string", + "description": "The Typescript code for the tool you are editing. The code should be a valid Typescript function named `execute` that takes one argument called `input`. When called, the `input` provided will match the schema of the `input_schema` of the tool." + }, + "input_schema": { + "type": "object", + "description": "The input schema for the tool. This must be provided as a valid JSON Schema object." + }, + "language": { + "type": "string", + "description": "The language of the tool you are editing. 
This must be either 'TYPESCRIPT' or 'PYTHON'." + } + } + } + }, + { + "name": "execute_code", + "description": "Execute arbitrary Typescript or Python code.", + "inputSchema": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "The code you are writing. This will be executed as a script. Write any output to stdout or stderr." + }, + "language": { + "type": "string", + "description": "The language of the code you are writing. This must be either 'TYPESCRIPT' or 'PYTHON'." + } + } + } + }, + { + "name": "list_tools", + "description": "Lists the tool definitions of all self-written tools available for use. These tools can be used by calling `use_tool` with the name and input.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "execute_tool", + "description": "Executes a tool. This tool will be used to execute a self-written tool.", + "inputSchema": { + "type": "object", + "required": [ + "tool_id", + "input" + ], + "properties": { + "tool_id": { + "type": "string", + "description": "The ID of the tool you are executing." + }, + "input": { + "type": "object", + "description": "The input to the tool. This must match the input schema of the tool." 
+ } + } + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "uns-mcp": { + "display_name": "Unstructured API MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/Unstructured-IO/UNS-MCP" + }, + "homepage": "https://docs.unstructured.io/", + "author": { + "name": "Unstructured-IO" + }, + "license": "[NOT GIVEN]", + "tags": [ + "unstructured", + "api", + "document processing", + "workflow", + "connectors" + ], + "arguments": { + "UNSTRUCTURED_API_KEY": { + "description": "API key for the Unstructured platform", + "required": true, + "example": "YOUR_KEY" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "uns_mcp" + ], + "env": { + "UNSTRUCTURED_API_KEY": "YOUR_KEY" + }, + "description": "Run using Python with uv", + "recommended": true + } + }, + "name": "uns-mcp", + "description": "An MCP server implementation for interacting with the Unstructured API. This server provides tools to list sources and workflows.", + "categories": [ + "Knowledge Base" + ], + "is_official": true, + "tools": [ + { + "name": "create_s3_source", + "description": "Create an S3 source connector.\n\n Args:\n name: A unique name for this connector\n remote_url: The S3 URI to the bucket or folder (e.g., s3://my-bucket/)\n recursive: Whether to access subfolders within the bucket\n\n Returns:\n String containing the created source connector information\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "remote_url": { + "title": "Remote Url", + "type": "string" + }, + "recursive": { + "default": false, + "title": "Recursive", + "type": "boolean" + } + }, + "required": [ + "name", + "remote_url" + ], + "title": "create_s3_sourceArguments", + "type": "object" + } + }, + { + "name": "update_s3_source", + "description": "Update an S3 source connector.\n\n Args:\n source_id: ID of the source connector to update\n remote_url: The S3 URI to the bucket or folder\n 
recursive: Whether to access subfolders within the bucket\n\n Returns:\n String containing the updated source connector information\n ", + "inputSchema": { + "properties": { + "source_id": { + "title": "Source Id", + "type": "string" + }, + "remote_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Remote Url" + }, + "recursive": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Recursive" + } + }, + "required": [ + "source_id" + ], + "title": "update_s3_sourceArguments", + "type": "object" + } + }, + { + "name": "delete_s3_source", + "description": "Delete an S3 source connector.\n\n Args:\n source_id: ID of the source connector to delete\n\n Returns:\n String containing the result of the deletion\n ", + "inputSchema": { + "properties": { + "source_id": { + "title": "Source Id", + "type": "string" + } + }, + "required": [ + "source_id" + ], + "title": "delete_s3_sourceArguments", + "type": "object" + } + }, + { + "name": "create_azure_source", + "description": "Create an Azure source connector.\n\n Args:\n name: A unique name for this connector\n remote_url: The Azure Storage remote URL,\n with the format az:///\n recursive: Whether to access subfolders within the bucket\n\n Returns:\n String containing the created source connector information\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "remote_url": { + "title": "Remote Url", + "type": "string" + }, + "recursive": { + "default": false, + "title": "Recursive", + "type": "boolean" + } + }, + "required": [ + "name", + "remote_url" + ], + "title": "create_azure_sourceArguments", + "type": "object" + } + }, + { + "name": "update_azure_source", + "description": "Update an azure source connector.\n\n Args:\n source_id: ID of the source connector to update\n remote_url: The Azure Storage remote URL, with the format\n az:///\n recursive: Whether to access 
subfolders within the bucket\n\n Returns:\n String containing the updated source connector information\n ", + "inputSchema": { + "properties": { + "source_id": { + "title": "Source Id", + "type": "string" + }, + "remote_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Remote Url" + }, + "recursive": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Recursive" + } + }, + "required": [ + "source_id" + ], + "title": "update_azure_sourceArguments", + "type": "object" + } + }, + { + "name": "delete_azure_source", + "description": "Delete an azure source connector.\n\n Args:\n source_id: ID of the source connector to delete\n\n Returns:\n String containing the result of the deletion\n ", + "inputSchema": { + "properties": { + "source_id": { + "title": "Source Id", + "type": "string" + } + }, + "required": [ + "source_id" + ], + "title": "delete_azure_sourceArguments", + "type": "object" + } + }, + { + "name": "create_gdrive_source", + "description": "Create a gdrive source connector.\n\n Args:\n name: A unique name for this connector\n remote_url: The gdrive URI to the bucket or folder (e.g., gdrive://my-bucket/)\n recursive: Whether to access subfolders within the bucket\n\n Returns:\n String containing the created source connector information\n ", + "inputSchema": { + "$defs": { + "Nullable_List_str__": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ] + }, + "OptionalNullable_List_str__": { + "anyOf": [ + { + "$ref": "#/$defs/Nullable_List_str__" + }, + { + "$ref": "#/$defs/Unset" + }, + { + "type": "null" + } + ] + }, + "Unset": { + "properties": {}, + "title": "Unset", + "type": "object" + } + }, + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "drive_id": { + "title": "Drive Id", + "type": "string" + }, + "recursive": { + "default": false, + "title": "Recursive", + 
"type": "boolean" + }, + "extensions": { + "$ref": "#/$defs/OptionalNullable_List_str__", + "default": "~?~unset~?~sentinel~?~" + } + }, + "required": [ + "name", + "drive_id" + ], + "title": "create_gdrive_sourceArguments", + "type": "object" + } + }, + { + "name": "update_gdrive_source", + "description": "Update an gdrive source connector.\n\n Args:\n source_id: ID of the source connector to update\n remote_url: The gdrive URI to the bucket or folder\n recursive: Whether to access subfolders within the bucket\n\n Returns:\n String containing the updated source connector information\n ", + "inputSchema": { + "$defs": { + "Nullable_List_str__": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ] + }, + "OptionalNullable_List_str__": { + "anyOf": [ + { + "$ref": "#/$defs/Nullable_List_str__" + }, + { + "$ref": "#/$defs/Unset" + }, + { + "type": "null" + } + ] + }, + "Unset": { + "properties": {}, + "title": "Unset", + "type": "object" + } + }, + "properties": { + "source_id": { + "title": "Source Id", + "type": "string" + }, + "drive_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Drive Id" + }, + "recursive": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Recursive" + }, + "extensions": { + "$ref": "#/$defs/OptionalNullable_List_str__", + "default": "~?~unset~?~sentinel~?~" + } + }, + "required": [ + "source_id" + ], + "title": "update_gdrive_sourceArguments", + "type": "object" + } + }, + { + "name": "delete_gdrive_source", + "description": "Delete an gdrive source connector.\n\n Args:\n source_id: ID of the source connector to delete\n\n Returns:\n String containing the result of the deletion\n ", + "inputSchema": { + "properties": { + "source_id": { + "title": "Source Id", + "type": "string" + } + }, + "required": [ + "source_id" + ], + "title": "delete_gdrive_sourceArguments", + "type": 
"object" + } + }, + { + "name": "create_onedrive_source", + "description": "Create a OneDrive source connector.\n\n Args:\n name: A unique name for this connector\n path: The path to the target folder in the OneDrive account,\n starting with the account\u2019s root folder\n user_pname: The User Principal Name (UPN) for the OneDrive user account in Entra ID.\n This is typically the user\u2019s email address.\n recursive: Whether to access subfolders\n authority_url: The authentication token provider URL for the Entra ID app registration.\n The default is https://login.microsoftonline.com.\n\n Returns:\n String containing the created source connector information\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "path": { + "title": "Path", + "type": "string" + }, + "user_pname": { + "title": "User Pname", + "type": "string" + }, + "recursive": { + "default": false, + "title": "Recursive", + "type": "boolean" + }, + "authority_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "https://login.microsoftonline.com", + "title": "Authority Url" + } + }, + "required": [ + "name", + "path", + "user_pname" + ], + "title": "create_onedrive_sourceArguments", + "type": "object" + } + }, + { + "name": "update_onedrive_source", + "description": "Update a OneDrive source connector.\n\n Args:\n source_id: ID of the source connector to update\n path: The path to the target folder in the OneDrive account,\n starting with the account\u2019s root folder\n user_pname: The User Principal Name (UPN) for the OneDrive user account in Entra ID.\n This is typically the user\u2019s email address.\n recursive: Whether to access subfolders\n authority_url: The authentication token provider URL for the Entra ID app registration.\n The default is https://login.microsoftonline.com.\n tenant: The directory (tenant) ID of the Entra ID app registration.\n client_id: The application (client) ID of the Microsoft 
Entra ID app registration\n that has access to the OneDrive account.\n\n Returns:\n String containing the updated source connector information\n ", + "inputSchema": { + "properties": { + "source_id": { + "title": "Source Id", + "type": "string" + }, + "path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Path" + }, + "user_pname": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "User Pname" + }, + "recursive": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Recursive" + }, + "authority_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Authority Url" + }, + "tenant": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Tenant" + }, + "client_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Client Id" + } + }, + "required": [ + "source_id" + ], + "title": "update_onedrive_sourceArguments", + "type": "object" + } + }, + { + "name": "delete_onedrive_source", + "description": "Delete a OneDrive source connector.\n\n Args:\n source_id: ID of the source connector to delete\n\n Returns:\n String containing the result of the deletion\n ", + "inputSchema": { + "properties": { + "source_id": { + "title": "Source Id", + "type": "string" + } + }, + "required": [ + "source_id" + ], + "title": "delete_onedrive_sourceArguments", + "type": "object" + } + }, + { + "name": "create_s3_destination", + "description": "Create an S3 destination connector.\n\n Args:\n name: A unique name for this connector\n remote_url: The S3 URI to the bucket or folder\n key: The AWS access key ID\n secret: The AWS secret access key\n token: The AWS STS session token for temporary access (optional)\n endpoint_url: Custom URL if connecting to a non-AWS S3 bucket\n\n 
Returns:\n String containing the created destination connector information\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "remote_url": { + "title": "Remote Url", + "type": "string" + } + }, + "required": [ + "name", + "remote_url" + ], + "title": "create_s3_destinationArguments", + "type": "object" + } + }, + { + "name": "update_s3_destination", + "description": "Update an S3 destination connector.\n\n Args:\n destination_id: ID of the destination connector to update\n remote_url: The S3 URI to the bucket or folder\n\n Returns:\n String containing the updated destination connector information\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + }, + "remote_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Remote Url" + }, + "recursive": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Recursive" + } + }, + "required": [ + "destination_id" + ], + "title": "update_s3_destinationArguments", + "type": "object" + } + }, + { + "name": "delete_s3_destination", + "description": "Delete an S3 destination connector.\n\n Args:\n destination_id: ID of the destination connector to delete\n\n Returns:\n String containing the result of the deletion\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + } + }, + "required": [ + "destination_id" + ], + "title": "delete_s3_destinationArguments", + "type": "object" + } + }, + { + "name": "create_weaviate_destination", + "description": "Create an weaviate vector database destination connector.\n\n Args:\n cluster_url: URL of the weaviate cluster\n collection : Name of the collection to use in the weaviate cluster\n Note: The collection is a table in the weaviate cluster.\n In platform, there are dedicated code to generate collection for users\n here, 
due to the simplicity of the server, we are not generating it for users.\n\n Returns:\n String containing the created destination connector information\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "cluster_url": { + "title": "Cluster Url", + "type": "string" + }, + "collection": { + "title": "Collection", + "type": "string" + } + }, + "required": [ + "name", + "cluster_url", + "collection" + ], + "title": "create_weaviate_destinationArguments", + "type": "object" + } + }, + { + "name": "update_weaviate_destination", + "description": "Update an weaviate destination connector.\n\n Args:\n destination_id: ID of the destination connector to update\n cluster_url (optional): URL of the weaviate cluster\n collection (optional): Name of the collection(like a file) to use in the weaviate cluster\n\n Returns:\n String containing the updated destination connector information\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + }, + "cluster_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Cluster Url" + }, + "collection": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Collection" + } + }, + "required": [ + "destination_id" + ], + "title": "update_weaviate_destinationArguments", + "type": "object" + } + }, + { + "name": "delete_weaviate_destination", + "description": "Delete an weaviate destination connector.\n\n Args:\n destination_id: ID of the destination connector to delete\n\n Returns:\n String containing the result of the deletion\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + } + }, + "required": [ + "destination_id" + ], + "title": "delete_weaviate_destinationArguments", + "type": "object" + } + }, + { + "name": "create_astradb_destination", + "description": "Create an 
AstraDB destination connector.\n\n Args:\n name: A unique name for this connector\n collection_name: The name of the collection to use\n keyspace: The AstraDB keyspace\n batch_size: The batch size for inserting documents, must be positive (default: 20)\n\n Note: A collection in AstraDB is a schemaless document store optimized for NoSQL workloads,\n equivalent to a table in traditional databases.\n A keyspace is the top-level namespace in AstraDB that groups multiple collections.\n We require the users to create their own collection and keyspace before\n creating the connector.\n\n Returns:\n String containing the created destination connector information\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "collection_name": { + "title": "Collection Name", + "type": "string" + }, + "keyspace": { + "title": "Keyspace", + "type": "string" + }, + "batch_size": { + "default": 20, + "title": "Batch Size", + "type": "integer" + } + }, + "required": [ + "name", + "collection_name", + "keyspace" + ], + "title": "create_astradb_destinationArguments", + "type": "object" + } + }, + { + "name": "update_astradb_destination", + "description": "Update an AstraDB destination connector.\n\n Args:\n destination_id: ID of the destination connector to update\n collection_name: The name of the collection to use (optional)\n keyspace: The AstraDB keyspace (optional)\n batch_size: The batch size for inserting documents (optional)\n\n Note: We require the users to create their own collection and\n keyspace before creating the connector.\n\n Returns:\n String containing the updated destination connector information\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + }, + "collection_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Collection Name" + }, + "keyspace": { + "anyOf": [ + { + "type": "string" + }, + { + "type": 
"null" + } + ], + "default": null, + "title": "Keyspace" + }, + "batch_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Batch Size" + } + }, + "required": [ + "destination_id" + ], + "title": "update_astradb_destinationArguments", + "type": "object" + } + }, + { + "name": "delete_astradb_destination", + "description": "Delete an AstraDB destination connector.\n\n Args:\n destination_id: ID of the destination connector to delete\n\n Returns:\n String containing the result of the deletion\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + } + }, + "required": [ + "destination_id" + ], + "title": "delete_astradb_destinationArguments", + "type": "object" + } + }, + { + "name": "create_neo4j_destination", + "description": "Create an neo4j destination connector.\n\n Args:\n name: A unique name for this connector\n database: The neo4j database, e.g. \"neo4j\"\n uri: The neo4j URI, e.g. neo4j+s://.databases.neo4j.io\n username: The neo4j username\n\n\n Returns:\n String containing the created destination connector information\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "database": { + "title": "Database", + "type": "string" + }, + "uri": { + "title": "Uri", + "type": "string" + }, + "username": { + "title": "Username", + "type": "string" + }, + "batch_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 100, + "title": "Batch Size" + } + }, + "required": [ + "name", + "database", + "uri", + "username" + ], + "title": "create_neo4j_destinationArguments", + "type": "object" + } + }, + { + "name": "update_neo4j_destination", + "description": "Update an neo4j destination connector.\n\n Args:\n destination_id: ID of the destination connector to update\n database: The neo4j database, e.g. \"neo4j\"\n uri: The neo4j URI, e.g. 
neo4j+s://.databases.neo4j.io\n username: The neo4j username\n\n\n Returns:\n String containing the updated destination connector information\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + }, + "database": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Database" + }, + "uri": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Uri" + }, + "username": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Username" + }, + "batch_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Batch Size" + } + }, + "required": [ + "destination_id" + ], + "title": "update_neo4j_destinationArguments", + "type": "object" + } + }, + { + "name": "delete_neo4j_destination", + "description": "Delete an neo4j destination connector.\n\n Args:\n destination_id: ID of the destination connector to delete\n\n Returns:\n String containing the result of the deletion\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + } + }, + "required": [ + "destination_id" + ], + "title": "delete_neo4j_destinationArguments", + "type": "object" + } + }, + { + "name": "create_mongodb_destination", + "description": "Create an MongoDB destination connector.\n\n Args:\n name: A unique name for this connector\n database: The name of the database to connect to.\n collection: The name of the target MongoDB collection\n Returns:\n String containing the created destination connector information\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "database": { + "title": "Database", + "type": "string" + }, + "collection": { + "title": "Collection", + "type": "string" + } + }, + "required": [ + "name", + "database", + "collection" + 
], + "title": "create_mongodb_destinationArguments", + "type": "object" + } + }, + { + "name": "update_mongodb_destination", + "description": "Update an MongoDB destination connector.\n\n Args:\n destination_id: ID of the destination connector to update\n database: The name of the database to connect to.\n collection: The name of the target MongoDB collection\n\n Returns:\n String containing the updated destination connector information\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + }, + "database": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Database" + }, + "collection": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Collection" + } + }, + "required": [ + "destination_id" + ], + "title": "update_mongodb_destinationArguments", + "type": "object" + } + }, + { + "name": "delete_mongodb_destination", + "description": "Delete an MongoDB destination connector.\n\n Args:\n destination_id: ID of the destination connector to delete\n\n Returns:\n String containing the result of the deletion\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + } + }, + "required": [ + "destination_id" + ], + "title": "delete_mongodb_destinationArguments", + "type": "object" + } + }, + { + "name": "create_databricks_volumes_destination", + "description": "Create an databricks volume destination connector.\n\n Args:\n name: A unique name for this connector\n catalog: Name of the catalog in the Databricks Unity Catalog service for the workspace.\n host: The Databricks host URL for the Databricks workspace.\n volume: Name of the volume associated with the schema.\n schema: Name of the schema associated with the volume. 
The default value is \"default\".\n volume_path: Any target folder path within the volume, starting from the root of the volume.\n Returns:\n String containing the created destination connector information\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "catalog": { + "title": "Catalog", + "type": "string" + }, + "volume": { + "title": "Volume", + "type": "string" + }, + "host": { + "title": "Host", + "type": "string" + }, + "schema": { + "default": "default", + "title": "Schema", + "type": "string" + }, + "volume_path": { + "default": "/", + "title": "Volume Path", + "type": "string" + } + }, + "required": [ + "name", + "catalog", + "volume", + "host" + ], + "title": "create_databricks_volumes_destinationArguments", + "type": "object" + } + }, + { + "name": "update_databricks_volumes_destination", + "description": "Update an databricks volumes destination connector.\n\n Args:\n destination_id: ID of the destination connector to update\n catalog: Name of the catalog to update in the Databricks Unity Catalog\n service for the workspace.\n host: The Databricks host URL for the Databricks workspace to update.\n volume: Name of the volume associated with the schema to update.\n schema: Name of the schema associated with the volume to update.\n The default value is \"default\".\n volume_path: Any target folder path within the volume to update,\n starting from the root of the volume.\n\n\n\n Returns:\n String containing the updated destination connector information\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + }, + "catalog": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Catalog" + }, + "volume": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Volume" + }, + "host": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + 
"default": null, + "title": "Host" + }, + "schema": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Schema" + }, + "volume_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Volume Path" + } + }, + "required": [ + "destination_id" + ], + "title": "update_databricks_volumes_destinationArguments", + "type": "object" + } + }, + { + "name": "delete_databricks_volumes_destination", + "description": "Delete an databricks volumes destination connector.\n\n Args:\n destination_id: ID of the destination connector to delete\n\n Returns:\n String containing the result of the deletion\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + } + }, + "required": [ + "destination_id" + ], + "title": "delete_databricks_volumes_destinationArguments", + "type": "object" + } + }, + { + "name": "create_databricks_delta_table_destination", + "description": "Create an databricks volume destination connector.\n\n Args:\n name: A unique name for this connector\n catalog: Name of the catalog in the Databricks Unity Catalog service for the workspace.\n database: The name of the schema (formerly known as a database)\n in Unity Catalog for the target table\n http_path: The cluster\u2019s or SQL warehouse\u2019s HTTP Path value\n server_hostname: The Databricks cluster\u2019s or SQL warehouse\u2019s Server Hostname value\n table_name: The name of the table in the schema\n volume: Name of the volume associated with the schema.\n schema: Name of the schema associated with the volume. 
The default value is \"default\".\n volume_path: Any target folder path within the volume, starting from the root of the volume.\n Returns:\n String containing the created destination connector information\n ", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "catalog": { + "title": "Catalog", + "type": "string" + }, + "database": { + "title": "Database", + "type": "string" + }, + "http_path": { + "title": "Http Path", + "type": "string" + }, + "server_hostname": { + "title": "Server Hostname", + "type": "string" + }, + "table_name": { + "title": "Table Name", + "type": "string" + }, + "volume": { + "title": "Volume", + "type": "string" + }, + "schema": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "default", + "title": "Schema" + }, + "volume_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "/", + "title": "Volume Path" + } + }, + "required": [ + "name", + "catalog", + "database", + "http_path", + "server_hostname", + "table_name", + "volume" + ], + "title": "create_databricks_delta_table_destinationArguments", + "type": "object" + } + }, + { + "name": "update_databricks_delta_table_destination", + "description": "Update an databricks volumes destination connector.\n\n Args:\n destination_id: ID of the destination connector to update\n database: The name of the schema (formerly known as a database)\n in Unity Catalog for the target table\n http_path: The cluster\u2019s or SQL warehouse\u2019s HTTP Path value\n server_hostname: The Databricks cluster\u2019s or SQL warehouse\u2019s Server Hostname value\n volume_path: Any target folder path within the volume to update,\n starting from the root of the volume.\n\n\n\n Returns:\n String containing the updated destination connector information\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + }, + "catalog": { + "anyOf": [ + { + 
"type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Catalog" + }, + "database": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Database" + }, + "http_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Http Path" + }, + "server_hostname": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Server Hostname" + }, + "table_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Table Name" + }, + "schema": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Schema" + }, + "volume": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Volume" + }, + "volume_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Volume Path" + } + }, + "required": [ + "destination_id" + ], + "title": "update_databricks_delta_table_destinationArguments", + "type": "object" + } + }, + { + "name": "delete_databricks_delta_table_destination", + "description": "Delete an databricks volumes destination connector.\n\n Args:\n destination_id: ID of the destination connector to delete\n\n Returns:\n String containing the result of the deletion\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + } + }, + "required": [ + "destination_id" + ], + "title": "delete_databricks_delta_table_destinationArguments", + "type": "object" + } + }, + { + "name": "invoke_firecrawl_crawlhtml", + "description": "Start an asynchronous web crawl job using Firecrawl to retrieve HTML content.\n\n Args:\n url: URL to crawl\n s3_uri: S3 URI where results will be uploaded\n limit: Maximum number of pages to crawl (default: 100)\n\n Returns:\n Dictionary 
with crawl job information including the job ID\n ", + "inputSchema": { + "properties": { + "url": { + "title": "Url", + "type": "string" + }, + "s3_uri": { + "title": "S3 Uri", + "type": "string" + }, + "limit": { + "default": 100, + "title": "Limit", + "type": "integer" + } + }, + "required": [ + "url", + "s3_uri" + ], + "title": "invoke_firecrawl_crawlhtmlArguments", + "type": "object" + } + }, + { + "name": "check_crawlhtml_status", + "description": "Check the status of an existing Firecrawl HTML crawl job.\n\n Args:\n crawl_id: ID of the crawl job to check\n\n Returns:\n Dictionary containing the current status of the crawl job\n ", + "inputSchema": { + "properties": { + "crawl_id": { + "title": "Crawl Id", + "type": "string" + } + }, + "required": [ + "crawl_id" + ], + "title": "check_crawlhtml_statusArguments", + "type": "object" + } + }, + { + "name": "invoke_firecrawl_llmtxt", + "description": "Start an asynchronous llmfull.txt generation job using Firecrawl.\n This file is a standardized markdown file containing information to help LLMs\n use a website at inference time.\n The llmstxt endpoint leverages Firecrawl to crawl your website and extracts data\n using gpt-4o-mini\n Args:\n url: URL to crawl\n s3_uri: S3 URI where results will be uploaded\n max_urls: Maximum number of pages to crawl (1-100, default: 10)\n\n Returns:\n Dictionary with job information including the job ID\n ", + "inputSchema": { + "properties": { + "url": { + "title": "Url", + "type": "string" + }, + "s3_uri": { + "title": "S3 Uri", + "type": "string" + }, + "max_urls": { + "default": 10, + "title": "Max Urls", + "type": "integer" + } + }, + "required": [ + "url", + "s3_uri" + ], + "title": "invoke_firecrawl_llmtxtArguments", + "type": "object" + } + }, + { + "name": "check_llmtxt_status", + "description": "Check the status of an existing llmfull.txt generation job.\n\n Args:\n job_id: ID of the llmfull.txt generation job to check\n\n Returns:\n Dictionary containing the current 
status of the job and text content if completed\n ", + "inputSchema": { + "properties": { + "job_id": { + "title": "Job Id", + "type": "string" + } + }, + "required": [ + "job_id" + ], + "title": "check_llmtxt_statusArguments", + "type": "object" + } + }, + { + "name": "cancel_crawlhtml_job", + "description": "Cancel an in-progress Firecrawl HTML crawl job.\n\n Args:\n crawl_id: ID of the crawl job to cancel\n\n Returns:\n Dictionary containing the result of the cancellation\n ", + "inputSchema": { + "properties": { + "crawl_id": { + "title": "Crawl Id", + "type": "string" + } + }, + "required": [ + "crawl_id" + ], + "title": "cancel_crawlhtml_jobArguments", + "type": "object" + } + }, + { + "name": "list_sources", + "description": "\n List available sources from the Unstructured API.\n\n Args:\n source_type: Optional source connector type to filter by\n\n Returns:\n String containing the list of sources\n ", + "inputSchema": { + "properties": { + "source_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Source Type" + } + }, + "title": "list_sourcesArguments", + "type": "object" + } + }, + { + "name": "get_source_info", + "description": "Get detailed information about a specific source connector.\n\n Args:\n source_id: ID of the source connector to get information for, should be valid UUID\n\n Returns:\n String containing the source connector information\n ", + "inputSchema": { + "properties": { + "source_id": { + "title": "Source Id", + "type": "string" + } + }, + "required": [ + "source_id" + ], + "title": "get_source_infoArguments", + "type": "object" + } + }, + { + "name": "list_destinations", + "description": "List available destinations from the Unstructured API.\n\n Args:\n destination_type: Optional destination connector type to filter by\n\n Returns:\n String containing the list of destinations\n ", + "inputSchema": { + "properties": { + "destination_type": { + "anyOf": [ + { + "type": "string" 
+ }, + { + "type": "null" + } + ], + "default": null, + "title": "Destination Type" + } + }, + "title": "list_destinationsArguments", + "type": "object" + } + }, + { + "name": "get_destination_info", + "description": "Get detailed information about a specific destination connector.\n\n Args:\n destination_id: ID of the destination connector to get information for\n\n Returns:\n String containing the destination connector information\n ", + "inputSchema": { + "properties": { + "destination_id": { + "title": "Destination Id", + "type": "string" + } + }, + "required": [ + "destination_id" + ], + "title": "get_destination_infoArguments", + "type": "object" + } + }, + { + "name": "list_workflows", + "description": "\n List workflows from the Unstructured API.\n\n Args:\n destination_id: Optional destination connector ID to filter by\n source_id: Optional source connector ID to filter by\n status: Optional workflow status to filter by\n\n Returns:\n String containing the list of workflows\n ", + "inputSchema": { + "properties": { + "destination_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Destination Id" + }, + "source_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Source Id" + }, + "status": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Status" + } + }, + "title": "list_workflowsArguments", + "type": "object" + } + }, + { + "name": "get_workflow_info", + "description": "Get detailed information about a specific workflow.\n\n Args:\n workflow_id: ID of the workflow to get information for\n\n Returns:\n String containing the workflow information\n ", + "inputSchema": { + "properties": { + "workflow_id": { + "title": "Workflow Id", + "type": "string" + } + }, + "required": [ + "workflow_id" + ], + "title": "get_workflow_infoArguments", + "type": "object" + } + }, + { + "name": 
"create_workflow", + "description": "Create a new workflow.\n\n Args:\n workflow_config: A Typed Dictionary containing required fields (destination_id - should be a\n valid UUID, name, source_id - should be a valid UUID, workflow_type) and non-required fields\n (schedule, and workflow_nodes). Note workflow_nodes is only enabled when workflow_type\n is `custom` and is a list of WorkflowNodeTypedDict: partition, prompter,chunk, embed\n Below is an example of a partition workflow node:\n {\n \"name\": \"vlm-partition\",\n \"type\": \"partition\",\n \"sub_type\": \"vlm\",\n \"settings\": {\n \"provider\": \"your favorite provider\",\n \"model\": \"your favorite model\"\n }\n }\n\n\n Returns:\n String containing the created workflow information\n \n\nCustom workflow DAG nodes\n- If WorkflowType is set to custom, you must also specify the settings for the workflow\u2019s\ndirected acyclic graph (DAG) nodes. These nodes\u2019 settings are specified in the workflow_nodes array.\n- A Source node is automatically created when you specify the source_id value outside of the\nworkflow_nodes array.\n- A Destination node is automatically created when you specify the destination_id value outside\nof the workflow_nodes array.\n- You can specify Partitioner, Chunker, Prompter, and Embedder nodes.\n- The order of the nodes in the workflow_nodes array will be the same order that these nodes appear\nin the DAG, with the first node in the array added directly after the Source node.\nThe Destination node follows the last node in the array.\n- Be sure to specify nodes in the allowed order. 
The following DAG placements are all allowed:\n - Source -> Partitioner -> Destination,\n - Source -> Partitioner -> Chunker -> Destination,\n - Source -> Partitioner -> Chunker -> Embedder -> Destination,\n - Source -> Partitioner -> Prompter -> Chunker -> Destination,\n - Source -> Partitioner -> Prompter -> Chunker -> Embedder -> Destination\n\nPartitioner node\nA Partitioner node has a type of partition and a subtype of auto, vlm, hi_res, or fast.\n\nExamples:\n- auto strategy:\n{\n \"name\": \"Partitioner\",\n \"type\": \"partition\",\n \"subtype\": \"vlm\",\n \"settings\": {\n \"provider\": \"anthropic\", (required)\n \"model\": \"claude-3-5-sonnet-20241022\", (required)\n \"output_format\": \"text/html\",\n \"user_prompt\": null,\n \"format_html\": true,\n \"unique_element_ids\": true,\n \"is_dynamic\": true,\n \"allow_fast\": true\n }\n}\n\n- vlm strategy:\n Allowed values are provider and model. Below are examples:\n - \"provider\": \"anthropic\" \"model\": \"claude-3-5-sonnet-20241022\",\n - \"provider\": \"openai\" \"model\": \"gpt-4o\"\n\n\n- hi_res strategy:\n{\n \"name\": \"Partitioner\",\n \"type\": \"partition\",\n \"subtype\": \"unstructured_api\",\n \"settings\": {\n \"strategy\": \"hi_res\",\n \"include_page_breaks\": ,\n \"pdf_infer_table_structure\": ,\n \"exclude_elements\": [\n \"\",\n \"\"\n ],\n \"xml_keep_tags\": ,\n \"encoding\": \"\",\n \"ocr_languages\": [\n \"\",\n \"\"\n ],\n \"extract_image_block_types\": [\n \"image\",\n \"table\"\n ],\n \"infer_table_structure\": \n }\n}\n- fast strategy\n{\n \"name\": \"Partitioner\",\n \"type\": \"partition\",\n \"subtype\": \"unstructured_api\",\n \"settings\": {\n \"strategy\": \"fast\",\n \"include_page_breaks\": ,\n \"pdf_infer_table_structure\": ,\n \"exclude_elements\": [\n \"\",\n \"\"\n ],\n \"xml_keep_tags\": ,\n \"encoding\": \"\",\n \"ocr_languages\": [\n \"\",\n \"\"\n ],\n \"extract_image_block_types\": [\n \"image\",\n \"table\"\n ],\n \"infer_table_structure\": \n }\n}\n\n\nChunker 
node\nA Chunker node has a type of chunk and subtype of chunk_by_character or chunk_by_title.\n\n- chunk_by_character\n{\n \"name\": \"Chunker\",\n \"type\": \"chunk\",\n \"subtype\": \"chunk_by_character\",\n \"settings\": {\n \"include_orig_elements\": ,\n \"new_after_n_chars\": , (required, if not provided\nset same as max_characters)\n \"max_characters\": , (required)\n \"overlap\": , (required, if not provided set default to 0)\n \"overlap_all\": ,\n \"contextual_chunking_strategy\": \"v1\"\n }\n}\n\n- chunk_by_title\n{\n \"name\": \"Chunker\",\n \"type\": \"chunk\",\n \"subtype\": \"chunk_by_title\",\n \"settings\": {\n \"multipage_sections\": ,\n \"combine_text_under_n_chars\": ,\n \"include_orig_elements\": ,\n \"new_after_n_chars\": , (required, if not provided\nset same as max_characters)\n \"max_characters\": , (required)\n \"overlap\": , (required, if not provided set default to 0)\n \"overlap_all\": ,\n \"contextual_chunking_strategy\": \"v1\"\n }\n}\n\n\nPrompter node\nAn Prompter node has a type of prompter and subtype of:\n- openai_image_description,\n- anthropic_image_description,\n- bedrock_image_description,\n- vertexai_image_description,\n- openai_table_description,\n- anthropic_table_description,\n- bedrock_table_description,\n- vertexai_table_description,\n- openai_table2html,\n- openai_ner\n\nExample:\n{\n \"name\": \"Prompter\",\n \"type\": \"prompter\",\n \"subtype\": \"\",\n \"settings\": {}\n}\n\n\nEmbedder node\nAn Embedder node has a type of embed\n\nAllowed values for subtype and model_name include:\n\n- \"subtype\": \"azure_openai\"\n - \"model_name\": \"text-embedding-3-small\"\n - \"model_name\": \"text-embedding-3-large\"\n - \"model_name\": \"text-embedding-ada-002\"\n- \"subtype\": \"bedrock\"\n - \"model_name\": \"amazon.titan-embed-text-v2:0\"\n - \"model_name\": \"amazon.titan-embed-text-v1\"\n - \"model_name\": \"amazon.titan-embed-image-v1\"\n - \"model_name\": \"cohere.embed-english-v3\"\n - \"model_name\": 
\"cohere.embed-multilingual-v3\"\n- \"subtype\": \"togetherai\":\n - \"model_name\": \"togethercomputer/m2-bert-80M-2k-retrieval\"\n - \"model_name\": \"togethercomputer/m2-bert-80M-8k-retrieval\"\n - \"model_name\": \"togethercomputer/m2-bert-80M-32k-retrieval\"\n\nExample:\n{\n \"name\": \"Embedder\",\n \"type\": \"embed\",\n \"subtype\": \"\",\n \"settings\": {\n \"model_name\": \"\"\n }\n}\n", + "inputSchema": { + "$defs": { + "CreateWorkflowTypedDict": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "workflow_type": { + "$ref": "#/$defs/WorkflowType" + }, + "destination_id": { + "$ref": "#/$defs/Nullable_str_" + }, + "schedule": { + "$ref": "#/$defs/Nullable_Schedule_" + }, + "source_id": { + "$ref": "#/$defs/Nullable_str_" + }, + "workflow_nodes": { + "$ref": "#/$defs/Nullable_List_WorkflowNodeTypedDict__" + } + }, + "required": [ + "name", + "workflow_type" + ], + "title": "CreateWorkflowTypedDict", + "type": "object" + }, + "Nullable_Dict_str__Any__": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ] + }, + "Nullable_List_WorkflowNodeTypedDict__": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/WorkflowNodeTypedDict" + }, + "type": "array" + }, + { + "type": "null" + } + ] + }, + "Nullable_Schedule_": { + "anyOf": [ + { + "$ref": "#/$defs/Schedule" + }, + { + "type": "null" + } + ] + }, + "Nullable_str_": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "Schedule": { + "enum": [ + "every 15 minutes", + "every hour", + "every 2 hours", + "every 4 hours", + "every 6 hours", + "every 8 hours", + "every 10 hours", + "every 12 hours", + "daily", + "weekly", + "monthly" + ], + "title": "Schedule", + "type": "string" + }, + "WorkflowNodeType": { + "enum": [ + "partition", + "prompter", + "chunk", + "embed" + ], + "title": "WorkflowNodeType", + "type": "string" + }, + "WorkflowNodeTypedDict": { + "properties": { + "name": { + "title": "Name", + 
"type": "string" + }, + "subtype": { + "title": "Subtype", + "type": "string" + }, + "type": { + "$ref": "#/$defs/WorkflowNodeType" + }, + "id": { + "$ref": "#/$defs/Nullable_str_" + }, + "settings": { + "$ref": "#/$defs/Nullable_Dict_str__Any__" + } + }, + "required": [ + "name", + "subtype", + "type" + ], + "title": "WorkflowNodeTypedDict", + "type": "object" + }, + "WorkflowType": { + "enum": [ + "basic", + "advanced", + "platinum", + "custom" + ], + "title": "WorkflowType", + "type": "string" + } + }, + "properties": { + "workflow_config": { + "$ref": "#/$defs/CreateWorkflowTypedDict" + } + }, + "required": [ + "workflow_config" + ], + "title": "create_workflowArguments", + "type": "object" + } + }, + { + "name": "run_workflow", + "description": "Run a specific workflow.\n\n Args:\n workflow_id: ID of the workflow to run\n\n Returns:\n String containing the response from the workflow execution\n ", + "inputSchema": { + "properties": { + "workflow_id": { + "title": "Workflow Id", + "type": "string" + } + }, + "required": [ + "workflow_id" + ], + "title": "run_workflowArguments", + "type": "object" + } + }, + { + "name": "update_workflow", + "description": "Update an existing workflow.\n\n Args:\n workflow_id: ID of the workflow to update\n workflow_config: A Typed Dictionary containing required fields (destination_id,\n name, source_id, workflow_type) and non-required fields (schedule, and workflow_nodes)\n\n Returns:\n String containing the updated workflow information\n \n\nCustom workflow DAG nodes\n- If WorkflowType is set to custom, you must also specify the settings for the workflow\u2019s\ndirected acyclic graph (DAG) nodes. 
These nodes\u2019 settings are specified in the workflow_nodes array.\n- A Source node is automatically created when you specify the source_id value outside of the\nworkflow_nodes array.\n- A Destination node is automatically created when you specify the destination_id value outside\nof the workflow_nodes array.\n- You can specify Partitioner, Chunker, Prompter, and Embedder nodes.\n- The order of the nodes in the workflow_nodes array will be the same order that these nodes appear\nin the DAG, with the first node in the array added directly after the Source node.\nThe Destination node follows the last node in the array.\n- Be sure to specify nodes in the allowed order. The following DAG placements are all allowed:\n - Source -> Partitioner -> Destination,\n - Source -> Partitioner -> Chunker -> Destination,\n - Source -> Partitioner -> Chunker -> Embedder -> Destination,\n - Source -> Partitioner -> Prompter -> Chunker -> Destination,\n - Source -> Partitioner -> Prompter -> Chunker -> Embedder -> Destination\n\nPartitioner node\nA Partitioner node has a type of partition and a subtype of auto, vlm, hi_res, or fast.\n\nExamples:\n- auto strategy:\n{\n \"name\": \"Partitioner\",\n \"type\": \"partition\",\n \"subtype\": \"vlm\",\n \"settings\": {\n \"provider\": \"anthropic\", (required)\n \"model\": \"claude-3-5-sonnet-20241022\", (required)\n \"output_format\": \"text/html\",\n \"user_prompt\": null,\n \"format_html\": true,\n \"unique_element_ids\": true,\n \"is_dynamic\": true,\n \"allow_fast\": true\n }\n}\n\n- vlm strategy:\n Allowed values are provider and model. 
Below are examples:\n - \"provider\": \"anthropic\" \"model\": \"claude-3-5-sonnet-20241022\",\n - \"provider\": \"openai\" \"model\": \"gpt-4o\"\n\n\n- hi_res strategy:\n{\n \"name\": \"Partitioner\",\n \"type\": \"partition\",\n \"subtype\": \"unstructured_api\",\n \"settings\": {\n \"strategy\": \"hi_res\",\n \"include_page_breaks\": ,\n \"pdf_infer_table_structure\": ,\n \"exclude_elements\": [\n \"\",\n \"\"\n ],\n \"xml_keep_tags\": ,\n \"encoding\": \"\",\n \"ocr_languages\": [\n \"\",\n \"\"\n ],\n \"extract_image_block_types\": [\n \"image\",\n \"table\"\n ],\n \"infer_table_structure\": \n }\n}\n- fast strategy\n{\n \"name\": \"Partitioner\",\n \"type\": \"partition\",\n \"subtype\": \"unstructured_api\",\n \"settings\": {\n \"strategy\": \"fast\",\n \"include_page_breaks\": ,\n \"pdf_infer_table_structure\": ,\n \"exclude_elements\": [\n \"\",\n \"\"\n ],\n \"xml_keep_tags\": ,\n \"encoding\": \"\",\n \"ocr_languages\": [\n \"\",\n \"\"\n ],\n \"extract_image_block_types\": [\n \"image\",\n \"table\"\n ],\n \"infer_table_structure\": \n }\n}\n\n\nChunker node\nA Chunker node has a type of chunk and subtype of chunk_by_character or chunk_by_title.\n\n- chunk_by_character\n{\n \"name\": \"Chunker\",\n \"type\": \"chunk\",\n \"subtype\": \"chunk_by_character\",\n \"settings\": {\n \"include_orig_elements\": ,\n \"new_after_n_chars\": , (required, if not provided\nset same as max_characters)\n \"max_characters\": , (required)\n \"overlap\": , (required, if not provided set default to 0)\n \"overlap_all\": ,\n \"contextual_chunking_strategy\": \"v1\"\n }\n}\n\n- chunk_by_title\n{\n \"name\": \"Chunker\",\n \"type\": \"chunk\",\n \"subtype\": \"chunk_by_title\",\n \"settings\": {\n \"multipage_sections\": ,\n \"combine_text_under_n_chars\": ,\n \"include_orig_elements\": ,\n \"new_after_n_chars\": , (required, if not provided\nset same as max_characters)\n \"max_characters\": , (required)\n \"overlap\": , (required, if not provided set default to 0)\n 
\"overlap_all\": ,\n \"contextual_chunking_strategy\": \"v1\"\n }\n}\n\n\nPrompter node\nAn Prompter node has a type of prompter and subtype of:\n- openai_image_description,\n- anthropic_image_description,\n- bedrock_image_description,\n- vertexai_image_description,\n- openai_table_description,\n- anthropic_table_description,\n- bedrock_table_description,\n- vertexai_table_description,\n- openai_table2html,\n- openai_ner\n\nExample:\n{\n \"name\": \"Prompter\",\n \"type\": \"prompter\",\n \"subtype\": \"\",\n \"settings\": {}\n}\n\n\nEmbedder node\nAn Embedder node has a type of embed\n\nAllowed values for subtype and model_name include:\n\n- \"subtype\": \"azure_openai\"\n - \"model_name\": \"text-embedding-3-small\"\n - \"model_name\": \"text-embedding-3-large\"\n - \"model_name\": \"text-embedding-ada-002\"\n- \"subtype\": \"bedrock\"\n - \"model_name\": \"amazon.titan-embed-text-v2:0\"\n - \"model_name\": \"amazon.titan-embed-text-v1\"\n - \"model_name\": \"amazon.titan-embed-image-v1\"\n - \"model_name\": \"cohere.embed-english-v3\"\n - \"model_name\": \"cohere.embed-multilingual-v3\"\n- \"subtype\": \"togetherai\":\n - \"model_name\": \"togethercomputer/m2-bert-80M-2k-retrieval\"\n - \"model_name\": \"togethercomputer/m2-bert-80M-8k-retrieval\"\n - \"model_name\": \"togethercomputer/m2-bert-80M-32k-retrieval\"\n\nExample:\n{\n \"name\": \"Embedder\",\n \"type\": \"embed\",\n \"subtype\": \"\",\n \"settings\": {\n \"model_name\": \"\"\n }\n}\n", + "inputSchema": { + "$defs": { + "CreateWorkflowTypedDict": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "workflow_type": { + "$ref": "#/$defs/WorkflowType" + }, + "destination_id": { + "$ref": "#/$defs/Nullable_str_" + }, + "schedule": { + "$ref": "#/$defs/Nullable_Schedule_" + }, + "source_id": { + "$ref": "#/$defs/Nullable_str_" + }, + "workflow_nodes": { + "$ref": "#/$defs/Nullable_List_WorkflowNodeTypedDict__" + } + }, + "required": [ + "name", + "workflow_type" + ], + "title": 
"CreateWorkflowTypedDict", + "type": "object" + }, + "Nullable_Dict_str__Any__": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ] + }, + "Nullable_List_WorkflowNodeTypedDict__": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/WorkflowNodeTypedDict" + }, + "type": "array" + }, + { + "type": "null" + } + ] + }, + "Nullable_Schedule_": { + "anyOf": [ + { + "$ref": "#/$defs/Schedule" + }, + { + "type": "null" + } + ] + }, + "Nullable_str_": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "Schedule": { + "enum": [ + "every 15 minutes", + "every hour", + "every 2 hours", + "every 4 hours", + "every 6 hours", + "every 8 hours", + "every 10 hours", + "every 12 hours", + "daily", + "weekly", + "monthly" + ], + "title": "Schedule", + "type": "string" + }, + "WorkflowNodeType": { + "enum": [ + "partition", + "prompter", + "chunk", + "embed" + ], + "title": "WorkflowNodeType", + "type": "string" + }, + "WorkflowNodeTypedDict": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "subtype": { + "title": "Subtype", + "type": "string" + }, + "type": { + "$ref": "#/$defs/WorkflowNodeType" + }, + "id": { + "$ref": "#/$defs/Nullable_str_" + }, + "settings": { + "$ref": "#/$defs/Nullable_Dict_str__Any__" + } + }, + "required": [ + "name", + "subtype", + "type" + ], + "title": "WorkflowNodeTypedDict", + "type": "object" + }, + "WorkflowType": { + "enum": [ + "basic", + "advanced", + "platinum", + "custom" + ], + "title": "WorkflowType", + "type": "string" + } + }, + "properties": { + "workflow_id": { + "title": "Workflow Id", + "type": "string" + }, + "workflow_config": { + "$ref": "#/$defs/CreateWorkflowTypedDict" + } + }, + "required": [ + "workflow_id", + "workflow_config" + ], + "title": "update_workflowArguments", + "type": "object" + } + }, + { + "name": "delete_workflow", + "description": "Delete a specific workflow.\n\n Args:\n workflow_id: ID of the workflow to 
delete\n\n Returns:\n String containing the response from the workflow deletion\n ", + "inputSchema": { + "properties": { + "workflow_id": { + "title": "Workflow Id", + "type": "string" + } + }, + "required": [ + "workflow_id" + ], + "title": "delete_workflowArguments", + "type": "object" + } + }, + { + "name": "list_jobs", + "description": "\n List jobs via the Unstructured API.\n\n Args:\n workflow_id: Optional workflow ID to filter by\n status: Optional job status to filter by\n\n Returns:\n String containing the list of jobs\n ", + "inputSchema": { + "properties": { + "workflow_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Workflow Id" + }, + "status": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Status" + } + }, + "title": "list_jobsArguments", + "type": "object" + } + }, + { + "name": "get_job_info", + "description": "Get detailed information about a specific job.\n\n Args:\n job_id: ID of the job to get information for\n\n Returns:\n String containing the job information\n ", + "inputSchema": { + "properties": { + "job_id": { + "title": "Job Id", + "type": "string" + } + }, + "required": [ + "job_id" + ], + "title": "get_job_infoArguments", + "type": "object" + } + }, + { + "name": "cancel_job", + "description": "Delete a specific job.\n\n Args:\n job_id: ID of the job to cancel\n\n Returns:\n String containing the response from the job cancellation\n ", + "inputSchema": { + "properties": { + "job_id": { + "title": "Job Id", + "type": "string" + } + }, + "required": [ + "job_id" + ], + "title": "cancel_jobArguments", + "type": "object" + } + } + ] + }, + "starwind-ui": { + "name": "starwind-ui", + "display_name": "Starwind UI", + "description": "This MCP provides relevant commands, documentation, and other information to allow LLMs to take full advantage of Starwind UI's open source Astro components.", + "repository": { + "type": "git", + 
"url": "https://github.com/Boston343/starwind-ui-mcp" + }, + "homepage": "https://github.com/Boston343/starwind-ui-mcp/", + "author": { + "name": "Boston343" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "Starwind", + "Developer Tools", + "AI", + "Components" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/Boston343/starwind-ui-mcp/" + ] + } + } + }, + "mcp-server-adfin": { + "display_name": "Adfin MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/Adfin-Engineering/mcp-server-adfin" + }, + "license": "[NOT GIVEN]", + "installations": { + "python": { + "type": "python", + "command": "uv", + "args": [ + "--directory", + "", + "run", + "main_adfin_mcp.py" + ], + "env": { + "ADFIN_EMAIL": "", + "ADFIN_PASSWORD": "" + }, + "description": "Run Adfin MCP server using uv" + }, + "filesystem": { + "type": "python", + "command": "uv", + "args": [ + "--directory", + "", + "run", + "filesystem.py" + ], + "description": "Run filesystem MCP server using uv" + } + }, + "arguments": { + "ADFIN_EMAIL": { + "description": "Email for Adfin authentication", + "required": true + }, + "ADFIN_PASSWORD": { + "description": "Password for Adfin authentication", + "required": true + } + }, + "examples": [ + { + "title": "Request a credit control status", + "description": "Get credit control status check", + "prompt": "Give me a credit control status check." + }, + { + "title": "Create a new invoice", + "description": "Create an invoice with specific details", + "prompt": "Create a new invoice for 60 GBP for Abc Def that is due in a week. His email is abc.def@example.com." + }, + { + "title": "Upload multiple invoices", + "description": "Upload PDF invoices from a folder", + "prompt": "Upload all pdf invoices from the invoices folder from my Desktop." 
+ } + ], + "tags": [ + "adfin", + "finance", + "invoicing" + ], + "homepage": "[NOT GIVEN]", + "author": { + "name": "Adfin-Engineering" + }, + "name": "mcp-server-adfin", + "description": "MCP server for Adfin, enabling credit control status checks, invoice creation, and invoice uploads.", + "categories": [ + "Finance" + ], + "is_official": true + }, + "time": { + "name": "time", + "display_name": "Time", + "description": "A Model Context Protocol server that provides time and timezone conversion capabilities. It automatically detects the system's timezone and offers tools for getting current time and converting between timezones.", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/tree/main/src/time#readme", + "author": { + "name": "MCP Team" + }, + "license": "MIT", + "categories": [ + "System Tools" + ], + "tags": [ + "time", + "timezone", + "date", + "converter" + ], + "arguments": { + "TZ": { + "description": "Environment variable to override the system's default timezone", + "required": false, + "example": "America/New_York" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-time", + "--local-timezone=${TZ}" + ], + "description": "Install and run using uvx (recommended)", + "recommended": true + }, + "python": { + "type": "python", + "command": "python", + "args": [ + "-m", + "mcp_server_time", + "--local-timezone=${TZ}" + ], + "description": "Run with Python module (requires pip install)" + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "mcp/time", + "--local-timezone=${TZ}" + ], + "description": "Run with Docker" + } + }, + "tools": [ + { + "name": "get_current_time", + "description": "Get current time in a specific timezones", + "inputSchema": { + "type": "object", + "properties": { + "timezone": { + "type": "string", + "description": "IANA timezone name (e.g., 'America/New_York', 'Europe/London'). 
Use local timezone if no timezone provided by the user." + } + }, + "required": [ + "timezone" + ] + } + }, + { + "name": "convert_time", + "description": "Convert time between timezones", + "inputSchema": { + "type": "object", + "properties": { + "source_timezone": { + "type": "string", + "description": "Source IANA timezone name (e.g., 'America/New_York', 'Europe/London'). Use local timezone if no source timezone provided by the user." + }, + "time": { + "type": "string", + "description": "Time to convert in 24-hour format (HH:MM)" + }, + "target_timezone": { + "type": "string", + "description": "Target IANA timezone name (e.g., 'Asia/Tokyo', 'America/San_Francisco'). Use local timezone if no target timezone provided by the user." + } + }, + "required": [ + "source_timezone", + "time", + "target_timezone" + ] + } + } + ], + "examples": [ + { + "title": "Current time", + "description": "Get the current time in a specific timezone", + "prompt": "What time is it in Tokyo right now?" + }, + { + "title": "Time conversion", + "description": "Convert time between timezones", + "prompt": "Convert 3:30 PM EST to Paris time." 
+ } + ], + "is_official": true + }, + "ableton-live": { + "name": "ableton-live", + "display_name": "Ableton Live", + "description": "an MCP server to control Ableton Live.", + "repository": { + "type": "git", + "url": "https://github.com/Simon-Kansara/ableton-live-mcp-server" + }, + "homepage": "https://github.com/Simon-Kansara/ableton-live-mcp-server", + "author": { + "name": "Simon Kansara" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "Ableton Live", + "OSC", + "Music" + ], + "installations": { + "custom": { + "type": "python", + "command": "python", + "args": [ + "mcp_ableton_server.py" + ], + "description": "Run with Python module (requires git clone)" + } + }, + "examples": [ + { + "title": "Prepare a rock band set for recording", + "description": "In Claude desktop, ask Claude to prepare a set to record a rock band.", + "prompt": "_Prepare a set to record a rock band_" + }, + { + "title": "Set input routing for tracks", + "description": "Set the input routing channel of all tracks that have 'voice' in their name to Ext. In 2.", + "prompt": "_Set the input routing channel of all tracks that have 'voice' in their name to Ext. 
In 2_" + } + ] + }, + "pandoc": { + "name": "pandoc", + "display_name": "Pandoc Document Conversion", + "description": "MCP server for seamless document format conversion using Pandoc, supporting Markdown, HTML, PDF, DOCX (.docx), csv and more.", + "repository": { + "type": "git", + "url": "https://github.com/vivekVells/mcp-pandoc" + }, + "homepage": "https://github.com/vivekVells/mcp-pandoc", + "author": { + "name": "vivekVells" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "pandoc", + "document", + "conversion" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-pandoc" + ] + } + }, + "examples": [ + { + "title": "Convert Markdown to PDF", + "description": "Converts Markdown content to PDF format and saves it to the specified path.", + "prompt": "Convert /path/to/input.md to PDF and save as /path/to/output.pdf" + }, + { + "title": "Convert Content Directly", + "description": "Converts a string of content directly to a specific output format.", + "prompt": "Convert this text to PDF and save as /path/to/document.pdf" + } + ], + "tools": [ + { + "name": "convert-contents", + "description": "Converts content between different formats. Transforms input content from any supported format into the specified output format.\n\n\ud83d\udea8 CRITICAL REQUIREMENTS - PLEASE READ:\n1. PDF Conversion:\n * You MUST install TeX Live BEFORE attempting PDF conversion:\n * Ubuntu/Debian: `sudo apt-get install texlive-xetex`\n * macOS: `brew install texlive`\n * Windows: Install MiKTeX or TeX Live from https://miktex.org/ or https://tug.org/texlive/\n * PDF conversion will FAIL without this installation\n\n2. 
File Paths - EXPLICIT REQUIREMENTS:\n * When asked to save or convert to a file, you MUST provide:\n - Complete directory path\n - Filename\n - File extension\n * Example request: 'Write a story and save as PDF'\n * You MUST specify: '/path/to/story.pdf' or 'C:\\Documents\\story.pdf'\n * The tool will NOT automatically generate filenames or extensions\n\n3. File Location After Conversion:\n * After successful conversion, the tool will display the exact path where the file is saved\n * Look for message: 'Content successfully converted and saved to: [file_path]'\n * You can find your converted file at the specified location\n * If no path is specified, files may be saved in system temp directory (/tmp/ on Unix systems)\n * For better control, always provide explicit output file paths\n\nSupported formats:\n- Basic formats: txt, html, markdown\n- Advanced formats (REQUIRE complete file paths): pdf, docx, rst, latex, epub\n\n\u2705 CORRECT Usage Examples:\n1. 'Convert this text to HTML' (basic conversion)\n - Tool will show converted content\n\n2. 'Save this text as PDF at /documents/story.pdf'\n - Correct: specifies path + filename + extension\n - Tool will show: 'Content successfully converted and saved to: /documents/story.pdf'\n\n\u274c INCORRECT Usage Examples:\n1. 'Save this as PDF in /documents/'\n - Missing filename and extension\n2. 'Convert to PDF'\n - Missing complete file path\n\nWhen requesting conversion, ALWAYS specify:\n1. The content or input file\n2. The desired output format\n3. 
For advanced formats: complete output path + filename + extension\nExample: 'Convert this markdown to PDF and save as /path/to/output.pdf'\n\nNote: After conversion, always check the success message for the exact file location.", + "inputSchema": { + "type": "object", + "properties": { + "contents": { + "type": "string", + "description": "The content to be converted (required if input_file not provided)" + }, + "input_file": { + "type": "string", + "description": "Complete path to input file including filename and extension (e.g., '/path/to/input.md')" + }, + "input_format": { + "type": "string", + "description": "Source format of the content (defaults to markdown)", + "default": "markdown", + "enum": [ + "markdown", + "html", + "pdf", + "docx", + "rst", + "latex", + "epub", + "txt" + ] + }, + "output_format": { + "type": "string", + "description": "Desired output format (defaults to markdown)", + "default": "markdown", + "enum": [ + "markdown", + "html", + "pdf", + "docx", + "rst", + "latex", + "epub", + "txt" + ] + }, + "output_file": { + "type": "string", + "description": "Complete path where to save the output including filename and extension (required for pdf, docx, rst, latex, epub formats)" + } + }, + "oneOf": [ + { + "required": [ + "contents" + ] + }, + { + "required": [ + "input_file" + ] + } + ], + "allOf": [ + { + "if": { + "properties": { + "output_format": { + "enum": [ + "pdf", + "docx", + "rst", + "latex", + "epub" + ] + } + } + }, + "then": { + "required": [ + "output_file" + ] + } + } + ] + } + } + ] + }, + "mcp-server-cloudflare": { + "display_name": "Cloudflare MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/cloudflare/mcp-server-cloudflare" + }, + "homepage": "https://github.com/cloudflare/mcp-server-cloudflare", + "author": { + "name": "cloudflare" + }, + "license": "Apache 2.0", + "tags": [ + "cloudflare", + "mcp", + "model-context-protocol", + "llm", + "api" + ], + "installations": { + "npm": { + "type": "npm", + 
"command": "npx", + "args": [ + "@cloudflare/mcp-server-cloudflare", + "init" + ], + "package": "@cloudflare/mcp-server-cloudflare", + "env": {}, + "description": "Install and initialize the Cloudflare MCP server", + "recommended": true + } + }, + "examples": [ + { + "title": "Deploy a new Worker", + "description": "Create a new Cloudflare Worker with a Durable Object", + "prompt": "Please deploy me a new Worker with an example durable object." + }, + { + "title": "Query D1 Database", + "description": "Get information about data in a D1 database", + "prompt": "Can you tell me about the data in my D1 database named '...'?" + }, + { + "title": "Copy KV to R2", + "description": "Copy entries from a KV namespace to an R2 bucket", + "prompt": "Can you copy all the entries from my KV namespace '...' into my R2 bucket '...'?" + } + ], + "name": "mcp-server-cloudflare", + "description": "Model Context Protocol (MCP) is a [new, standardized protocol](https://modelcontextprotocol.io/introduction) for managing context between large language models (LLMs) and external systems. 
In this repository, we provide an installer as well as an MCP Server for [Cloudflare's API](https://api.cloudflare.com).", + "categories": [ + "Dev Tools" + ], + "is_official": true + }, + "aws-athena": { + "name": "aws-athena", + "display_name": "AWS Athena", + "description": "A MCP server for AWS Athena to run SQL queries on Glue Catalog.", + "repository": { + "type": "git", + "url": "https://github.com/lishenxydlgzs/aws-athena-mcp" + }, + "homepage": "https://github.com/lishenxydlgzs/aws-athena-mcp", + "author": { + "name": "lishenxydlgzs" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "athena", + "sql", + "aws" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@lishenxydlgzs/aws-athena-mcp" + ], + "env": { + "OUTPUT_S3_PATH": "${OUTPUT_S3_PATH}", + "AWS_REGION": "${AWS_REGION}", + "AWS_PROFILE": "${AWS_PROFILE}", + "AWS_ACCESS_KEY_ID": "${AWS_ACCESS_KEY_ID}", + "AWS_SECRET_ACCESS_KEY": "${AWS_SECRET_ACCESS_KEY}", + "AWS_SESSION_TOKEN": "${AWS_SESSION_TOKEN}", + "QUERY_TIMEOUT_MS": "${QUERY_TIMEOUT_MS}", + "MAX_RETRIES": "${MAX_RETRIES}", + "RETRY_DELAY_MS": "${RETRY_DELAY_MS}" + } + } + }, + "examples": [ + { + "title": "Show All Databases", + "description": "Lists all databases in Athena", + "prompt": "{\"database\": \"default\", \"query\": \"SHOW DATABASES\"}" + }, + { + "title": "List Tables in a Database", + "description": "Shows all tables in the default database", + "prompt": "{\"database\": \"default\", \"query\": \"SHOW TABLES\"}" + }, + { + "title": "Get Table Schema", + "description": "Fetches the schema of the asin_sitebestimg table", + "prompt": "{\"database\": \"default\", \"query\": \"DESCRIBE default.asin_sitebestimg\"}" + }, + { + "title": "Table Rows Preview", + "description": "Shows some rows from my_database.mytable", + "prompt": "{\"database\": \"my_database\", \"query\": \"SELECT * FROM my_table LIMIT 10\", \"maxRows\": 10}" + }, + { + "title": "Advanced Query with 
Filtering and Aggregation", + "description": "Finds the average price by category for in-stock products", + "prompt": "{\"database\": \"my_database\", \"query\": \"SELECT category, COUNT(*) as count, AVG(price) as avg_price FROM products WHERE in_stock = true GROUP BY category ORDER BY count DESC\", \"maxRows\": 100}" + } + ], + "arguments": { + "OUTPUT_S3_PATH": { + "description": "S3 bucket path for saving Athena query results.", + "required": true, + "example": "s3://your-bucket/athena-results/" + }, + "AWS_REGION": { + "description": "The AWS region to use for Athena queries, defaults to AWS CLI default region.", + "required": false, + "example": "us-east-1" + }, + "AWS_PROFILE": { + "description": "AWS CLI profile to use, defaults to 'default' profile.", + "required": false, + "example": "default" + }, + "AWS_ACCESS_KEY_ID": { + "description": "AWS access key for authentication, if not using IAM role or environment variables.", + "required": false, + "example": "" + }, + "AWS_SECRET_ACCESS_KEY": { + "description": "AWS secret key for authentication, if not using IAM role or environment variables.", + "required": false, + "example": "" + }, + "AWS_SESSION_TOKEN": { + "description": "Session token for temporary AWS credentials, if using temporary access.", + "required": false, + "example": "" + }, + "QUERY_TIMEOUT_MS": { + "description": "Timeout setting for queries in milliseconds (default: 300000 ms).", + "required": false, + "example": "300000" + }, + "MAX_RETRIES": { + "description": "Number of retry attempts for failed queries (default: 100).", + "required": false, + "example": "100" + }, + "RETRY_DELAY_MS": { + "description": "Delay between retry attempts in milliseconds (default: 500 ms).", + "required": false, + "example": "500" + } + }, + "tools": [ + { + "name": "run_query", + "description": "Execute a SQL query using AWS Athena. 
Returns full results if query completes before timeout, otherwise returns queryExecutionId.", + "inputSchema": { + "type": "object", + "properties": { + "database": { + "type": "string", + "description": "The Athena database to query" + }, + "query": { + "type": "string", + "description": "SQL query to execute" + }, + "maxRows": { + "type": "number", + "description": "Maximum number of rows to return (default: 1000)", + "minimum": 1, + "maximum": 10000 + }, + "timeoutMs": { + "type": "number", + "description": "Timeout in milliseconds (default: 60000)", + "minimum": 1000 + } + }, + "required": [ + "database", + "query" + ] + } + }, + { + "name": "get_result", + "description": "Get results for a completed query. Returns error if query is still running.", + "inputSchema": { + "type": "object", + "properties": { + "queryExecutionId": { + "type": "string", + "description": "The query execution ID" + }, + "maxRows": { + "type": "number", + "description": "Maximum number of rows to return (default: 1000)", + "minimum": 1, + "maximum": 10000 + } + }, + "required": [ + "queryExecutionId" + ] + } + }, + { + "name": "get_status", + "description": "Get the current status of a query execution", + "inputSchema": { + "type": "object", + "properties": { + "queryExecutionId": { + "type": "string", + "description": "The query execution ID" + } + }, + "required": [ + "queryExecutionId" + ] + } + } + ] + }, + "basic-memory": { + "name": "basic-memory", + "display_name": "Basic Memory", + "description": "Local-first knowledge management system that builds a semantic graph from Markdown files, enabling persistent memory across conversations with LLMs.", + "repository": { + "type": "git", + "url": "https://github.com/basicmachines-co/basic-memory" + }, + "homepage": "https://github.com/basicmachines-co/basic-memory", + "author": { + "name": "basicmachines-co" + }, + "license": "AGPL-3.0", + "categories": [ + "Knowledge Base" + ], + "installations": { + "uvx": { + "type": "uvx", + 
"command": "uvx", + "args": [ + "basic-memory", + "mcp" + ] + } + }, + "tags": [ + "LLM", + "Markdown", + "Knowledge Base" + ], + "tools": [ + { + "name": "delete_note", + "description": "Delete a note by title or permalink", + "inputSchema": { + "properties": { + "identifier": { + "title": "Identifier", + "type": "string" + } + }, + "required": [ + "identifier" + ], + "title": "delete_noteArguments", + "type": "object" + } + }, + { + "name": "read_content", + "description": "Read a file's raw content by path or permalink", + "inputSchema": { + "properties": { + "path": { + "title": "Path", + "type": "string" + } + }, + "required": [ + "path" + ], + "title": "read_contentArguments", + "type": "object" + } + }, + { + "name": "build_context", + "description": "Build context from a memory:// URI to continue conversations naturally.\n \n Use this to follow up on previous discussions or explore related topics.\n Timeframes support natural language like:\n - \"2 days ago\"\n - \"last week\" \n - \"today\"\n - \"3 months ago\"\n Or standard formats like \"7d\", \"24h\"\n ", + "inputSchema": { + "properties": { + "url": { + "maxLength": 2028, + "minLength": 1, + "title": "Url", + "type": "string" + }, + "depth": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "title": "Depth" + }, + "timeframe": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "7d", + "title": "Timeframe" + }, + "page": { + "default": 1, + "title": "Page", + "type": "integer" + }, + "page_size": { + "default": 10, + "title": "Page Size", + "type": "integer" + }, + "max_related": { + "default": 10, + "title": "Max Related", + "type": "integer" + } + }, + "required": [ + "url" + ], + "title": "build_contextArguments", + "type": "object" + } + }, + { + "name": "recent_activity", + "description": "Get recent activity from across the knowledge base.\n \n Timeframe supports natural language formats like:\n - \"2 days ago\" \n - 
\"last week\"\n - \"yesterday\" \n - \"today\"\n - \"3 weeks ago\"\n Or standard formats like \"7d\"\n ", + "inputSchema": { + "$defs": { + "SearchItemType": { + "description": "Types of searchable items.", + "enum": [ + "entity", + "observation", + "relation" + ], + "title": "SearchItemType", + "type": "string" + } + }, + "properties": { + "type": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/SearchItemType" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Type" + }, + "depth": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "title": "Depth" + }, + "timeframe": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "7d", + "title": "Timeframe" + }, + "page": { + "default": 1, + "title": "Page", + "type": "integer" + }, + "page_size": { + "default": 10, + "title": "Page Size", + "type": "integer" + }, + "max_related": { + "default": 10, + "title": "Max Related", + "type": "integer" + } + }, + "title": "recent_activityArguments", + "type": "object" + } + }, + { + "name": "search_notes", + "description": "Search across all content in the knowledge base.", + "inputSchema": { + "$defs": { + "SearchItemType": { + "description": "Types of searchable items.", + "enum": [ + "entity", + "observation", + "relation" + ], + "title": "SearchItemType", + "type": "string" + }, + "SearchQuery": { + "description": "Search query parameters.\n\nUse ONE of these primary search modes:\n- permalink: Exact permalink match\n- permalink_match: Path pattern with *\n- text: Full-text search of title/content (supports boolean operators: AND, OR, NOT)\n\nOptionally filter results by:\n- types: Limit to specific item types\n- entity_types: Limit to specific entity types\n- after_date: Only items after date\n\nBoolean search examples:\n- \"python AND flask\" - Find items with both terms\n- \"python OR django\" - Find items with either term\n- \"python NOT django\" - Find items 
with python but not django\n- \"(python OR flask) AND web\" - Use parentheses for grouping", + "properties": { + "permalink": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Permalink" + }, + "permalink_match": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Permalink Match" + }, + "text": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Text" + }, + "title": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Title" + }, + "types": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/SearchItemType" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Types" + }, + "entity_types": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Entity Types" + }, + "after_date": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "After Date" + } + }, + "title": "SearchQuery", + "type": "object" + } + }, + "properties": { + "query": { + "$ref": "#/$defs/SearchQuery" + }, + "page": { + "default": 1, + "title": "Page", + "type": "integer" + }, + "page_size": { + "default": 10, + "title": "Page Size", + "type": "integer" + } + }, + "required": [ + "query" + ], + "title": "search_notesArguments", + "type": "object" + } + }, + { + "name": "read_note", + "description": "Read a markdown note by title or permalink.", + "inputSchema": { + "properties": { + "identifier": { + "title": "Identifier", + "type": "string" + }, + "page": { + "default": 1, + "title": "Page", + "type": "integer" + }, + "page_size": { + "default": 10, + "title": "Page Size", + "type": "integer" + } + }, + "required": [ + "identifier" + ], + "title": "read_noteArguments", + 
"type": "object" + } + }, + { + "name": "write_note", + "description": "Create or update a markdown note. Returns a markdown formatted summary of the semantic content.", + "inputSchema": { + "properties": { + "title": { + "title": "Title", + "type": "string" + }, + "content": { + "title": "Content", + "type": "string" + }, + "folder": { + "title": "Folder", + "type": "string" + }, + "tags": { + "default": null, + "title": "tags", + "type": "string" + } + }, + "required": [ + "title", + "content", + "folder" + ], + "title": "write_noteArguments", + "type": "object" + } + }, + { + "name": "canvas", + "description": "Create an Obsidian canvas file to visualize concepts and connections.", + "inputSchema": { + "properties": { + "nodes": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "title": "Nodes", + "type": "array" + }, + "edges": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "title": "Edges", + "type": "array" + }, + "title": { + "title": "Title", + "type": "string" + }, + "folder": { + "title": "Folder", + "type": "string" + } + }, + "required": [ + "nodes", + "edges", + "title", + "folder" + ], + "title": "canvasArguments", + "type": "object" + } + }, + { + "name": "project_info", + "description": "Get information and statistics about the current Basic Memory project.", + "inputSchema": { + "properties": {}, + "title": "project_infoArguments", + "type": "object" + } + } + ] + }, + "deepseek-r1": { + "name": "deepseek-r1", + "display_name": "Deepseek R1", + "description": "A Model Context Protocol (MCP) server implementation connecting Claude Desktop with DeepSeek's language models (R1/V3)", + "repository": { + "type": "git", + "url": "https://github.com/66julienmartin/MCP-server-Deepseek_R1" + }, + "homepage": "https://github.com/66julienmartin/MCP-server-Deepseek_R1", + "author": { + "name": "66julienmartin", + "url": "https://github.com/66julienmartin" + }, + "license": "MIT", + "categories": [ + "AI Systems" 
+ ], + "tags": [ + "Deepseek", + "LLM" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/66julienmartin/MCP-server-Deepseek_R1" + ], + "env": { + "DEEPSEEK_API_KEY": "${DEEPSEEK_API_KEY}" + } + } + }, + "arguments": { + "DEEPSEEK_API_KEY": { + "description": "API key for authenticating with the Deepseek service.", + "required": true, + "example": "your-api-key" + } + }, + "tools": [ + { + "name": "deepseek_r1", + "description": "Generate text using DeepSeek R1 model", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "Input text for DeepSeek" + }, + "max_tokens": { + "type": "number", + "description": "Maximum tokens to generate (default: 8192)", + "minimum": 1, + "maximum": 8192 + }, + "temperature": { + "type": "number", + "description": "Sampling temperature (default: 0.2)", + "minimum": 0, + "maximum": 2 + } + }, + "required": [ + "prompt" + ] + } + } + ] + }, + "dart-mcp-server": { + "display_name": "Dart MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/its-dart/dart-mcp-server" + }, + "license": "MIT", + "homepage": "https://www.itsdart.com/?nr=1", + "author": { + "name": "its-dart" + }, + "tags": [ + "AI", + "MCP", + "Model Context Protocol", + "Project Management" + ], + "arguments": { + "DART_TOKEN": { + "description": "Authentication token from Dart profile", + "required": true, + "example": "dsa_..." + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "dart-mcp-server" + ], + "package": "dart-mcp-server", + "env": { + "DART_TOKEN": "dsa_..." + }, + "description": "Run using npx", + "recommended": true + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "DART_TOKEN", + "mcp/dart" + ], + "env": { + "DART_TOKEN": "dsa_..." 
+ }, + "description": "Run using Docker" + } + }, + "examples": [ + { + "title": "Create Task", + "description": "Create a new task in Dart with title, description, status, priority, and assignee", + "prompt": "create-task" + }, + { + "title": "Create Document", + "description": "Create a new document in Dart with title, text content, and folder", + "prompt": "create-doc" + }, + { + "title": "Summarize Tasks", + "description": "Get a summary of tasks with optional filtering by status and assignee", + "prompt": "summarize-tasks" + } + ], + "name": "dart-mcp-server", + "description": "Dart MCP Server", + "categories": [ + "Dev Tools" + ], + "tools": [ + { + "name": "get_config", + "description": "Get information about the user's space, including all of the possible values that can be provided to other endpoints. This includes available assignees, dartboards, folders, statuses, tags, priorities, and sizes.", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "list_tasks", + "description": "List tasks from Dart with optional filtering parameters. You can filter by assignee, status, dartboard, priority, due date, and more.", + "inputSchema": { + "type": "object", + "properties": { + "assignee": { + "type": "string", + "description": "Filter by assignee name or email" + }, + "assignee_duid": { + "type": "string", + "description": "Filter by assignee DUID" + }, + "dartboard": { + "type": "string", + "description": "Filter by dartboard title" + }, + "dartboard_duid": { + "type": "string", + "description": "Filter by dartboard DUID" + }, + "description": { + "type": "string", + "description": "Filter by description content" + }, + "due_at_before": { + "type": "string", + "description": "Filter by due date before (ISO format)" + }, + "due_at_after": { + "type": "string", + "description": "Filter by due date after (ISO format)" + }, + "duids": { + "type": "string", + "description": "Filter by DUIDs" + }, + "in_trash": { + "type": "boolean", + "description": "Filter by trash status" + }, + "is_draft": { + "type": "boolean", + "description": "Filter by draft status" + }, + "kind": { + "type": "string", + "description": "Filter by task kind" + }, + "limit": { + "type": "number", + "description": "Number of results per page" + }, + "offset": { + "type": "number", + "description": "Initial index for pagination" + }, + "priority": { + "type": "string", + "description": "Filter by priority" + }, + "size": { + "type": "number", + "description": "Filter by task size" + }, + "start_at_before": { + "type": "string", + 
"description": "Filter by start date before (ISO format)" + }, + "start_at_after": { + "type": "string", + "description": "Filter by start date after (ISO format)" + }, + "status": { + "type": "string", + "description": "Filter by status" + }, + "status_duid": { + "type": "string", + "description": "Filter by status DUID" + }, + "subscriber_duid": { + "type": "string", + "description": "Filter by subscriber DUID" + }, + "tag": { + "type": "string", + "description": "Filter by tag" + }, + "title": { + "type": "string", + "description": "Filter by title" + } + }, + "required": [] + } + }, + { + "name": "create_task", + "description": "Create a new task in Dart. You can specify title, description, status, priority, size, dates, dartboard, assignees, tags, and parent task.", + "inputSchema": { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "The title of the task (required)" + }, + "description": { + "type": "string", + "description": "A longer description of the task, which can include markdown formatting" + }, + "status": { + "type": "string", + "description": "The status from the list of available statuses" + }, + "priority": { + "type": "string", + "description": "The priority (Critical, High, Medium, or Low)" + }, + "size": { + "type": "number", + "description": "A number that represents the amount of work needed" + }, + "startAt": { + "type": "string", + "description": "The start date in ISO format (should be at 9:00am in user's timezone)" + }, + "dueAt": { + "type": "string", + "description": "The due date in ISO format (should be at 9:00am in user's timezone)" + }, + "dartboard": { + "type": "string", + "description": "The title of the dartboard (project or list of tasks)" + }, + "assignees": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Array of assignee names or emails (if workspace allows multiple assignees)" + }, + "assignee": { + "type": "string", + "description": "Single assignee name 
or email (if workspace doesn't allow multiple assignees)" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Array of tags to apply to the task" + }, + "parentId": { + "type": "string", + "description": "The ID of the parent task" + } + }, + "required": [ + "title" + ] + } + }, + { + "name": "get_task", + "description": "Retrieve an existing task by its ID. Returns the task's information including title, description, status, priority, dates, and more.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The 12-character alphanumeric ID of the task", + "pattern": "^[a-zA-Z0-9]{12}$" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "update_task", + "description": "Update an existing task. You can modify any of its properties including title, description, status, priority, dates, assignees, and more.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The 12-character alphanumeric ID of the task", + "pattern": "^[a-zA-Z0-9]{12}$" + }, + "title": { + "type": "string", + "description": "The title of the task" + }, + "description": { + "type": "string", + "description": "A longer description of the task, which can include markdown formatting" + }, + "status": { + "type": "string", + "description": "The status from the list of available statuses" + }, + "priority": { + "type": "string", + "description": "The priority (Critical, High, Medium, or Low)" + }, + "size": { + "type": "number", + "description": "A number that represents the amount of work needed" + }, + "startAt": { + "type": "string", + "description": "The start date in ISO format (should be at 9:00am in user's timezone)" + }, + "dueAt": { + "type": "string", + "description": "The due date in ISO format (should be at 9:00am in user's timezone)" + }, + "dartboard": { + "type": "string", + "description": "The title of the dartboard (project or list of 
tasks)" + }, + "assignees": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Array of assignee names or emails (if workspace allows multiple assignees)" + }, + "assignee": { + "type": "string", + "description": "Single assignee name or email (if workspace doesn't allow multiple assignees)" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Array of tags to apply to the task" + }, + "parentId": { + "type": "string", + "description": "The ID of the parent task" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "delete_task", + "description": "Move an existing task to the trash, where it can be recovered if needed. Nothing else about the task will be changed.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The 12-character alphanumeric ID of the task", + "pattern": "^[a-zA-Z0-9]{12}$" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "list_docs", + "description": "List docs from Dart with optional filtering parameters. 
You can filter by folder, title, text content, and more.", + "inputSchema": { + "type": "object", + "properties": { + "folder": { + "type": "string", + "description": "Filter by folder title" + }, + "folder_duid": { + "type": "string", + "description": "Filter by folder DUID" + }, + "duids": { + "type": "string", + "description": "Filter by DUIDs" + }, + "in_trash": { + "type": "boolean", + "description": "Filter by trash status" + }, + "is_draft": { + "type": "boolean", + "description": "Filter by draft status" + }, + "limit": { + "type": "number", + "description": "Number of results per page" + }, + "offset": { + "type": "number", + "description": "Initial index for pagination" + }, + "s": { + "type": "string", + "description": "Search by title, text, or folder title" + }, + "text": { + "type": "string", + "description": "Filter by text content" + }, + "title": { + "type": "string", + "description": "Filter by title" + } + }, + "required": [] + } + }, + { + "name": "create_doc", + "description": "Create a new doc in Dart. You can specify title, text content, and folder.", + "inputSchema": { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "The title of the doc (required)" + }, + "text": { + "type": "string", + "description": "The text content of the doc, which can include markdown formatting" + }, + "folder": { + "type": "string", + "description": "The title of the folder to place the doc in" + } + }, + "required": [ + "title" + ] + } + }, + { + "name": "get_doc", + "description": "Retrieve an existing doc by its ID. Returns the doc's information including title, text content, folder, and more.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The 12-character alphanumeric ID of the doc", + "pattern": "^[a-zA-Z0-9]{12}$" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "update_doc", + "description": "Update an existing doc. 
You can modify its title, text content, and folder.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The 12-character alphanumeric ID of the doc", + "pattern": "^[a-zA-Z0-9]{12}$" + }, + "title": { + "type": "string", + "description": "The title of the doc" + }, + "text": { + "type": "string", + "description": "The text content of the doc, which can include markdown formatting" + }, + "folder": { + "type": "string", + "description": "The title of the folder to place the doc in" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "delete_doc", + "description": "Move an existing doc to the trash, where it can be recovered if needed. Nothing else about the doc will be changed.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The 12-character alphanumeric ID of the doc", + "pattern": "^[a-zA-Z0-9]{12}$" + } + }, + "required": [ + "id" + ] + } + } + ], + "prompts": [ + { + "name": "create-task", + "description": "Create a new task in Dart", + "arguments": [ + { + "name": "title", + "description": "Title of the task", + "required": true + }, + { + "name": "description", + "description": "Description of the task", + "required": false + }, + { + "name": "status", + "description": "Status of the task", + "required": false + }, + { + "name": "priority", + "description": "Priority of the task", + "required": false + }, + { + "name": "assignee", + "description": "Email of the assignee", + "required": false + } + ] + }, + { + "name": "create-doc", + "description": "Create a new document in Dart", + "arguments": [ + { + "name": "title", + "description": "Title of the document", + "required": true + }, + { + "name": "text", + "description": "Content of the document", + "required": false + }, + { + "name": "folder", + "description": "Folder to place the document in", + "required": false + } + ] + }, + { + "name": "summarize-tasks", + "description": "Get a 
summary of tasks with optional filtering", + "arguments": [ + { + "name": "status", + "description": "Filter by status (e.g., 'In Progress', 'Done')", + "required": false + }, + { + "name": "assignee", + "description": "Filter by assignee email", + "required": false + } + ] + } + ], + "resources": [], + "is_official": true + }, + "oceanbase": { + "name": "oceanbase", + "display_name": "OceanBase", + "description": "(by yuanoOo) A Model Context Protocol (MCP) server that enables secure interaction with OceanBase databases.", + "repository": { + "type": "git", + "url": "https://github.com/yuanoOo/oceanbase_mcp_server" + }, + "homepage": "https://github.com/yuanoOo/oceanbase_mcp_server", + "author": { + "name": "yuanoOo" + }, + "license": "Apache-2.0", + "categories": [ + "Databases" + ], + "tags": [ + "OceanBase", + "SQL", + "Security" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/yuanoOo/oceanbase_mcp_server.git", + "oceanbase_mcp_server" + ], + "env": { + "OB_HOST": "${OB_HOST}", + "OB_PORT": "${OB_PORT}", + "OB_USER": "${OB_USER}", + "OB_PASSWORD": "${OB_PASSWORD}", + "OB_DATABASE": "${OB_DATABASE}" + } + } + }, + "arguments": { + "OB_HOST": { + "description": "Database host for connecting to the OceanBase server.", + "required": true, + "example": "localhost" + }, + "OB_PORT": { + "description": "Optional: Database port to connect to OceanBase, defaults to 2881 if not specified.", + "required": false, + "example": "2881" + }, + "OB_USER": { + "description": "Username for authenticating with the OceanBase database.", + "required": true, + "example": "your_username" + }, + "OB_PASSWORD": { + "description": "Password for the specified database user.", + "required": true, + "example": "your_password" + }, + "OB_DATABASE": { + "description": "Name of the OceanBase database to connect to.", + "required": true, + "example": "your_database" + } + }, + "tools": [ + { + "name": "execute_sql", + 
"description": "Execute an SQL query on the OceanBase server", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The SQL query to execute" + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "mcp-installer": { + "name": "mcp-installer", + "display_name": "Installer", + "description": "This server is a server that installs other MCP servers for you.", + "repository": { + "type": "git", + "url": "https://github.com/anaisbetts/mcp-installer" + }, + "homepage": "https://github.com/anaisbetts/mcp-installer", + "author": { + "name": "anaisbetts" + }, + "license": "MIT", + "categories": [ + "MCP Tools" + ], + "tags": [ + "installer", + "server" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@anaisbetts/mcp-installer" + ] + } + }, + "examples": [ + { + "title": "Install MCP server", + "description": "Install the MCP server named mcp-server-fetch", + "prompt": "Hey Claude, install the MCP server named mcp-server-fetch" + }, + { + "title": "Install server with arguments", + "description": "Install the @modelcontextprotocol/server-filesystem package as an MCP server with specific arguments", + "prompt": "Hey Claude, install the @modelcontextprotocol/server-filesystem package as an MCP server. Use ['/Users/anibetts/Desktop'] for the arguments" + }, + { + "title": "Install from directory", + "description": "Install the MCP server from a specific directory", + "prompt": "Hi Claude, please install the MCP server at /Users/anibetts/code/mcp-youtube, I'm too lazy to do it myself." + }, + { + "title": "Set environment variable", + "description": "Install the server @modelcontextprotocol/server-github with an environment variable", + "prompt": "Install the server @modelcontextprotocol/server-github. 
Set the environment variable GITHUB_PERSONAL_ACCESS_TOKEN to '1234567890'" + } + ], + "tools": [ + { + "name": "install_repo_mcp_server", + "description": "Install an MCP server via npx or uvx", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The package name of the MCP server" + }, + "args": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The arguments to pass along" + }, + "env": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The environment variables to set, delimited by =" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "install_local_mcp_server", + "description": "Install an MCP server whose code is cloned locally on your computer", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "The path to the MCP server code cloned on your computer" + }, + "args": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The arguments to pass along" + }, + "env": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The environment variables to set, delimited by =" + } + }, + "required": [ + "path" + ] + } + } + ] + }, + "agentrpc": { + "display_name": "AgentRPC", + "repository": { + "type": "git", + "url": "https://github.com/agentrpc/agentrpc" + }, + "homepage": "https://docs.agentrpc.com", + "author": { + "name": "agentrpc" + }, + "license": "Apache License 2.0", + "tags": [ + "RPC", + "AI agents", + "MCP", + "OpenAI", + "multi-language" + ], + "arguments": { + "AGENTRPC_API_SECRET": { + "description": "API secret for authentication", + "required": true, + "example": "" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "agentrpc", + "mcp" + ], + "package": "agentrpc", + "env": { + "AGENTRPC_API_SECRET": "" + }, + "description": "Run the MCP server using npm", + "recommended": true + } + }, + 
"examples": [ + { + "title": "Claude Desktop Integration", + "description": "Add to your claude_desktop_config.json", + "prompt": "{\n \"mcpServers\": {\n \"agentrpc\": {\n \"command\": \"npx\",\n \"args\": [\n \"-y\",\n \"agentrpc\",\n \"mcp\"\n ],\n \"env\": {\n \"AGENTRPC_API_SECRET\": \"\"\n }\n }\n }\n}" + }, + { + "title": "Cursor Integration", + "description": "Add to your ~/.cursor/mcp.json", + "prompt": "{\n \"mcpServers\": {\n \"agentrpc\": {\n \"command\": \"npx\",\n \"args\": [\"-y\", \"agentrpc\", \"mcp\"],\n \"env\": {\n \"AGENTRPC_API_SECRET\": \"\"\n }\n }\n }\n}" + } + ], + "name": "agentrpc", + "description": "> Universal RPC layer for AI agents across network boundaries and languages", + "categories": [ + "Dev Tools" + ], + "is_official": true + }, + "tavily-mcp": { + "display_name": "Tavily MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/tavily-ai/tavily-mcp" + }, + "homepage": "https://github.com/tavily-ai/tavily-mcp", + "author": { + "name": "tavily-ai" + }, + "license": "MIT", + "tags": [ + "search", + "web", + "extract", + "mcp", + "claude" + ], + "arguments": { + "TAVILY_API_KEY": { + "description": "API key for Tavily services", + "required": true, + "example": "your-api-key-here" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "tavily-mcp" + ], + "description": "Run with npx (requires npm install)", + "env": { + "TAVILY_API_KEY": "your-api-key-here" + } + } + }, + "name": "tavily-mcp", + "description": "Search engine for AI agents (search + extract) powered by Tavily", + "categories": [ + "Web Services" + ], + "is_official": true, + "tools": [ + { + "name": "tavily-search", + "description": "A powerful web search tool that provides comprehensive, real-time results using Tavily's AI search engine. Returns relevant web content with customizable parameters for result count, content type, and domain filtering. 
Ideal for gathering current information, news, and detailed web content analysis.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query" + }, + "search_depth": { + "type": "string", + "enum": [ + "basic", + "advanced" + ], + "description": "The depth of the search. It can be 'basic' or 'advanced'", + "default": "basic" + }, + "topic": { + "type": "string", + "enum": [ + "general", + "news" + ], + "description": "The category of the search. This will determine which of our agents will be used for the search", + "default": "general" + }, + "days": { + "type": "number", + "description": "The number of days back from the current date to include in the search results. This specifies the time frame of data to be retrieved. Please note that this feature is only available when using the 'news' search topic", + "default": 3 + }, + "time_range": { + "type": "string", + "description": "The time range back from the current date to include in the search results. 
This feature is available for both 'general' and 'news' search topics", + "enum": [ + "day", + "week", + "month", + "year", + "d", + "w", + "m", + "y" + ] + }, + "max_results": { + "type": "number", + "description": "The maximum number of search results to return", + "default": 10, + "minimum": 5, + "maximum": 20 + }, + "include_images": { + "type": "boolean", + "description": "Include a list of query-related images in the response", + "default": false + }, + "include_image_descriptions": { + "type": "boolean", + "description": "Include a list of query-related images and their descriptions in the response", + "default": false + }, + "include_raw_content": { + "type": "boolean", + "description": "Include the cleaned and parsed HTML content of each search result", + "default": false + }, + "include_domains": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of domains to specifically include in the search results, if the user asks to search on specific sites set this to the domain of the site", + "default": [] + }, + "exclude_domains": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of domains to specifically exclude, if the user asks to exclude a domain set this to the domain of the site", + "default": [] + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "tavily-extract", + "description": "A powerful web content extraction tool that retrieves and processes raw content from specified URLs, ideal for data collection, content analysis, and research tasks.", + "inputSchema": { + "type": "object", + "properties": { + "urls": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of URLs to extract content from" + }, + "extract_depth": { + "type": "string", + "enum": [ + "basic", + "advanced" + ], + "description": "Depth of extraction - 'basic' or 'advanced', if usrls are linkedin use 'advanced' or if explicitly told to use advanced", + "default": "basic" + }, + 
"include_images": { + "type": "boolean", + "description": "Include a list of images extracted from the urls in the response", + "default": false + } + }, + "required": [ + "urls" + ] + } + } + ] + }, + "gotohuman-mcp-server": { + "display_name": "gotoHuman MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/gotohuman/gotohuman-mcp-server" + }, + "homepage": "https://app.gotohuman.com", + "author": { + "name": "gotohuman" + }, + "license": "MIT", + "tags": [ + "human review", + "AI agents", + "webhook", + "automation" + ], + "arguments": { + "GOTOHUMAN_API_KEY": { + "description": "Your gotoHuman API key", + "required": true, + "example": "your-api-key" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "node", + "args": [ + "build/index.js" + ], + "env": { + "GOTOHUMAN_API_KEY": "${GOTOHUMAN_API_KEY}" + }, + "description": "Run the gotoHuman MCP server using Node.js", + "recommended": true + } + }, + "examples": [ + { + "title": "List forms", + "description": "List all available review forms in your account", + "prompt": "list-forms" + }, + { + "title": "Get form schema", + "description": "Get the schema for a specific form", + "prompt": "get-form-schema formId=" + }, + { + "title": "Request human review", + "description": "Request a human review using a specific form", + "prompt": "request-human-review-with-form formId= fieldData= metadata= assignToUsers=" + } + ], + "name": "gotohuman-mcp-server", + "description": "Let your **AI agents ask for human reviews** in gotoHuman via MCP.", + "categories": [ + "AI Systems" + ], + "is_official": true + }, + "google-calendar": { + "name": "google-calendar", + "display_name": "Google Calendar", + "description": "Google Calendar MCP Server for managing Google calendar events. 
Also supports searching for events by attributes like title and location.", + "repository": { + "type": "git", + "url": "https://github.com/nspady/google-calendar-mcp" + }, + "homepage": "https://github.com/nspady/google-calendar-mcp", + "author": { + "name": "nspady" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "Google Calendar", + "event management" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/nspady/google-calendar-mcp" + ] + } + }, + "examples": [ + { + "title": "Add Event from Screenshot", + "description": "Add events from screenshots and images", + "prompt": "Add this event to my calendar based on the attached screenshot." + }, + { + "title": "Check Upcoming Events", + "description": "Discover upcoming events outside usual routines", + "prompt": "What events do I have coming up this week that aren't part of my usual routine?" + }, + { + "title": "Check Attendance", + "description": "Identify events with unaccepted invitations", + "prompt": "Which events tomorrow have attendees who have not accepted the invitation?" + }, + { + "title": "Auto Coordinate Events", + "description": "Create events based on the available times provided", + "prompt": "Here's some available that was provided to me by someone. Take a look at the available times and create an event that is free on my work calendar." + }, + { + "title": "Check Availability", + "description": "Provide your availability checking both calendars", + "prompt": "Please provide availability looking at both my personal and work calendar for this upcoming week." 
+ } + ] + }, + "cryptopanic-mcp-server": { + "name": "cryptopanic-mcp-server", + "display_name": "CryptoPanic News", + "description": "Providing latest cryptocurrency news to AI agents, powered by CryptoPanic.", + "repository": { + "type": "git", + "url": "https://github.com/kukapay/cryptopanic-mcp-server" + }, + "homepage": "https://github.com/kukapay/cryptopanic-mcp-server", + "author": { + "name": "kukapay", + "url": "https://github.com/kukapay" + }, + "license": "MIT", + "examples": [ + { + "title": "Fetch Cryptocurrency News", + "description": "Get the latest news articles on cryptocurrencies.", + "prompt": "get_crypto_news(kind='news', num_pages=1)" + } + ], + "categories": [ + "Finance" + ], + "tags": [ + "cryptocurrency", + "news", + "CryptoPanic" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/kukapay/cryptopanic-mcp-server", + "main.py" + ], + "env": { + "CRYPTOPANIC_API_KEY": "${CRYPTOPANIC_API_KEY}" + } + } + }, + "arguments": { + "CRYPTOPANIC_API_KEY": { + "description": "API key to access CryptoPanic services. 
This key is necessary to authenticate requests made to the CryptoPanic API.", + "required": true, + "example": "your_api_key_here" + } + } + }, + "ghost": { + "name": "ghost", + "display_name": "Ghost", + "description": "A Model Context Protocol (MCP) server for interacting with Ghost CMS through LLM interfaces like Claude.", + "repository": { + "type": "git", + "url": "https://github.com/MFYDev/ghost-mcp" + }, + "homepage": "https://github.com/MFYDev/ghost-mcp", + "author": { + "name": "MFYDev" + }, + "license": "MIT", + "categories": [ + "Professional Apps" + ], + "tags": [ + "Ghost", + "CMS", + "Admin API" + ], + "examples": [ + { + "title": "List Posts", + "description": "List blog posts with pagination.", + "prompt": "ghost(action=\"list_posts\", params={\"format\": \"text\", \"page\": 1, \"limit\": 15})" + }, + { + "title": "Search Posts by Title", + "description": "Search for posts by title.", + "prompt": "ghost(action=\"search_posts_by_title\", params={\"query\": \"Welcome\", \"exact\": False})" + }, + { + "title": "Create a Post", + "description": "Create a new post.", + "prompt": "ghost(action=\"create_post\", params={\"post_data\": {\"title\": \"New Post via MCP\",\"status\": \"draft\",\"lexical\": \"{\\\"root\\\":{\\\"children\\\":[{\\\"children\\\":[{\\\"detail\\\":0,\\\"format\\\":0,\\\"mode\\\":\\\"normal\\\",\\\"style\\\":\\\"\\\",\\\"text\\\":\\\"Hello World\\\",\\\"type\\\":\\\"text\\\",\\\"version\\\":1}],\\\"direction\\\":\\\"ltr\\\",\\\"format\\\":\\\"\\\",\\\"indent\\\":0,\\\"type\\\":\\\"paragraph\\\",\\\"version\\\":1}],\\\"direction\\\":\\\"ltr\\\",\\\"format\\\":\\\"\\\",\\\"indent\\\":0,\\\"type\\\":\\\"root\\\",\\\"version\\\":1}}\"}}" + } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/MFYDev/ghost-mcp", + "src/main.py" + ], + "env": { + "GHOST_API_URL": "${GHOST_API_URL}", + "GHOST_STAFF_API_KEY": "${GHOST_STAFF_API_KEY}" + } + } + }, + "arguments": { + 
"GHOST_API_URL": { + "description": "Your Ghost Admin API URL", + "required": true, + "example": "https://yourblog.com" + }, + "GHOST_STAFF_API_KEY": { + "description": "Your Ghost Staff API key", + "required": true, + "example": "your_staff_api_key" + } + } + }, + "mcp-server-box": { + "display_name": "MCP Server Box", + "repository": { + "type": "git", + "url": "https://github.com/box-community/mcp-server-box" + }, + "homepage": "https://github.com/box-community/mcp-server-box", + "author": { + "name": "box-community" + }, + "license": "[NOT GIVEN]", + "tags": [ + "box", + "ai", + "file-management", + "search", + "text-extraction" + ], + "arguments": { + "BOX_CLIENT_ID": { + "description": "Box API Client ID", + "required": true, + "example": "your_client_id" + }, + "BOX_CLIENT_SECRET": { + "description": "Box API Client Secret", + "required": true, + "example": "your_client_secret" + } + }, + "installations": { + "python": { + "type": "python", + "command": "uv", + "args": [ + "--directory", + "/path/to/mcp-server-box", + "run", + "src/mcp_server_box.py" + ], + "package": "[NOT GIVEN]", + "env": { + "BOX_CLIENT_ID": "your_client_id", + "BOX_CLIENT_SECRET": "your_client_secret" + }, + "description": "Run using uv package manager", + "recommended": true + } + }, + "examples": [ + { + "title": "Search for files in Box", + "description": "Search for files with specific extensions in Box", + "prompt": "Search for PDF files containing 'quarterly report'" + }, + { + "title": "Extract data using Box AI", + "description": "Extract structured data from a document using Box AI", + "prompt": "Extract the following fields from file 123456: title, date, amount" + }, + { + "title": "Ask questions about a document", + "description": "Ask Box AI questions about a specific document", + "prompt": "What are the key findings in the document with ID 123456?" 
+ } + ], + "name": "mcp-server-box", + "description": "MCP Server Box is a Python project that integrates with the Box API to perform various operations such as file search, text extraction, AI-based querying, and data extraction. It leverages the `box-sdk-gen` library and provides a set of tools to interact with Box files and folders.", + "categories": [ + "Knowledge Base" + ], + "is_official": true + }, + "fewsats-mcp": { + "display_name": "Fewsats MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/Fewsats/fewsats-mcp" + }, + "license": "[NOT GIVEN]", + "homepage": "https://fewsats.com", + "author": { + "name": "Fewsats" + }, + "tags": [ + "payments", + "wallet", + "offers" + ], + "arguments": { + "FEWSATS_API_KEY": { + "description": "API key obtained from Fewsats.com", + "required": true, + "example": "YOUR_FEWSATS_API_KEY" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "fewsats-mcp" + ], + "description": "Run using uv (recommended)", + "recommended": true, + "env": { + "FEWSATS_API_KEY": "YOUR_FEWSATS_API_KEY" + } + }, + "pip": { + "type": "python", + "command": "fewsats-mcp", + "args": [], + "package": "fewsats-mcp", + "description": "Install via pip and run as a script", + "recommended": false + } + }, + "examples": [ + { + "title": "Check Balance", + "description": "Retrieve the balance of the user's wallet", + "prompt": "What's my current wallet balance?" + }, + { + "title": "View Payment Methods", + "description": "Retrieve the user's payment methods", + "prompt": "Show me my available payment methods." + }, + { + "title": "Pay an Offer", + "description": "Pay for an offer using the pay_offer tool", + "prompt": "Pay for the offer with ID 12345." + }, + { + "title": "Get Payment Information", + "description": "Retrieve details about a specific payment", + "prompt": "Show me the details of payment with ID abc123." 
+ } + ], + "name": "fewsats-mcp", + "description": "This MCP server integrates with [Fewsats](https://fewsats.com) and allows AI Agents to purchase anything in a secure way.", + "categories": [ + "Finance" + ], + "tools": [ + { + "name": "balance", + "description": "Retrieve the balance of the user's wallet.\n You will rarely need to call this unless instructed by the user, or to troubleshoot payment issues.\n Fewsats will automatically add balance when needed.", + "inputSchema": { + "properties": {}, + "title": "balanceArguments", + "type": "object" + } + }, + { + "name": "payment_methods", + "description": "Retrieve the user's payment methods.\n You will rarely need to call this unless instructed by the user, or to troubleshoot payment issues.\n Fewsats will automatically select the best payment method.", + "inputSchema": { + "properties": {}, + "title": "payment_methodsArguments", + "type": "object" + } + }, + { + "name": "pay_offer", + "description": "Pays an offer_id from the l402_offers.\n\n The l402_offer parameter must be a dict with this structure:\n {\n 'offers': [\n {\n 'offer_id': 'test_offer_2', # String identifier for the offer\n 'amount': 1, # Numeric cost value\n 'currency': 'usd', # Currency code\n 'description': 'Test offer', # Text description\n 'title': 'Test Package' # Title of the package\n }\n ],\n 'payment_context_token': '60a8e027-8b8b-4ccf-b2b9-380ed0930283', # Payment context token\n 'payment_request_url': 'https://api.fewsats.com/v0/l402/payment-request', # Payment URL\n 'version': '0.2.2' # API version\n }\n\n Returns payment status response. 
\n If payment status is `needs_review` inform the user he will have to approve it at app.fewsats.com", + "inputSchema": { + "properties": { + "offer_id": { + "title": "Offer Id", + "type": "string" + }, + "l402_offer": { + "additionalProperties": true, + "title": "L402 Offer", + "type": "object" + } + }, + "required": [ + "offer_id", + "l402_offer" + ], + "title": "pay_offerArguments", + "type": "object" + } + }, + { + "name": "payment_info", + "description": "Retrieve the details of a payment.\n If payment status is `needs_review` inform the user he will have to approve it at app.fewsats.com", + "inputSchema": { + "properties": { + "pid": { + "title": "Pid", + "type": "string" + } + }, + "required": [ + "pid" + ], + "title": "payment_infoArguments", + "type": "object" + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "snowflake": { + "name": "snowflake", + "display_name": "Snowflake", + "description": "This MCP server enables LLMs to interact with Snowflake databases, allowing for secure and controlled data operations.", + "repository": { + "type": "git", + "url": "https://github.com/isaacwasserman/mcp-snowflake-server" + }, + "homepage": "https://github.com/isaacwasserman/mcp-snowflake-server", + "author": { + "name": "isaacwasserman" + }, + "license": "NOT GIVEN", + "categories": [ + "Databases" + ], + "tags": [ + "snowflake", + "sql", + "database" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp_snowflake_server", + "--account", + "${ACCOUNT}", + "--warehouse", + "${WAREHOUSE}", + "--user", + "${USER}", + "--password", + "${PASSWORD}", + "--role", + "${ROLE}", + "--database", + "${DATABASE}", + "--schema", + "${SCHEMA}" + ] + } + }, + "arguments": { + "ACCOUNT": { + "description": "The Snowflake account name to connect to.", + "required": true, + "example": "your_account_name" + }, + "WAREHOUSE": { + "description": "The name of the virtual warehouse to be used for the session.", + 
"required": true, + "example": "your_warehouse_name" + }, + "USER": { + "description": "The username to authenticate with Snowflake.", + "required": true, + "example": "your_username" + }, + "PASSWORD": { + "description": "The password for the specified user.", + "required": true, + "example": "your_password" + }, + "ROLE": { + "description": "The role to be assumed during the session.", + "required": true, + "example": "your_role_name" + }, + "DATABASE": { + "description": "The name of the Snowflake database to connect to.", + "required": true, + "example": "your_database_name" + }, + "SCHEMA": { + "description": "The schema within the database where queries will be executed.", + "required": true, + "example": "your_schema_name" + } + }, + "tools": [ + { + "name": "read_query", + "description": "Execute a SELECT query.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "SELECT SQL query to execute" + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "append_insight", + "description": "Add a data insight to the memo", + "inputSchema": { + "type": "object", + "properties": { + "insight": { + "type": "string", + "description": "Data insight discovered from analysis" + } + }, + "required": [ + "insight" + ] + } + } + ] + }, + "rquest": { + "name": "rquest", + "display_name": "Rquest", + "description": "An MCP server providing realistic browser-like HTTP request capabilities with accurate TLS/JA3/JA4 fingerprints for bypassing anti-bot measures.", + "repository": { + "type": "git", + "url": "https://github.com/xxxbrian/mcp-rquest" + }, + "homepage": "https://github.com/xxxbrian/mcp-rquest", + "author": { + "name": "xxxbrian" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "http", + "request", + "llm", + "browser", + "emulation", + "pdf", + "markdown" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-rquest" + ] + }, + "python": { 
+ "type": "python", + "command": "python", + "args": [ + "-m", + "mcp-rquest" + ] + } + }, + "examples": [ + { + "title": "Convert HTML or PDF to Markdown", + "description": "Use the get_stored_response_with_markdown tool to convert HTML or PDF responses to Markdown for better processing by LLMs.", + "prompt": "get_stored_response_with_markdown('document.pdf')" + } + ], + "tools": [ + { + "name": "http_get", + "description": "Make an HTTP GET request to the specified URL", + "inputSchema": { + "type": "object", + "required": [ + "url" + ], + "properties": { + "url": { + "type": "string", + "description": "URL to send the request to" + }, + "proxy": { + "type": "string", + "description": "Proxy to use for the request" + }, + "headers": { + "type": "object", + "description": "Headers to include in the request" + }, + "cookies": { + "type": "object", + "description": "Cookies to include in the request" + }, + "allow_redirects": { + "type": "boolean", + "description": "Whether to follow redirects" + }, + "max_redirects": { + "type": "integer", + "description": "Maximum number of redirects to follow" + }, + "auth": { + "type": "string", + "description": "Authentication credentials" + }, + "bearer_auth": { + "type": "string", + "description": "Bearer token for authentication" + }, + "basic_auth": { + "type": "array", + "description": "Basic auth credentials as [username, password]" + }, + "query": { + "type": "array", + "description": "Query parameters as [[key, value], ...]" + }, + "force_store_response_content": { + "type": "boolean", + "description": "Force storing response content regardless of size" + } + } + } + }, + { + "name": "http_post", + "description": "Make an HTTP POST request to the specified URL", + "inputSchema": { + "type": "object", + "required": [ + "url" + ], + "properties": { + "url": { + "type": "string", + "description": "URL to send the request to" + }, + "proxy": { + "type": "string", + "description": "Proxy to use for the request" + }, + 
"headers": { + "type": "object", + "description": "Headers to include in the request" + }, + "cookies": { + "type": "object", + "description": "Cookies to include in the request" + }, + "allow_redirects": { + "type": "boolean", + "description": "Whether to follow redirects" + }, + "max_redirects": { + "type": "integer", + "description": "Maximum number of redirects to follow" + }, + "auth": { + "type": "string", + "description": "Authentication credentials" + }, + "bearer_auth": { + "type": "string", + "description": "Bearer token for authentication" + }, + "basic_auth": { + "type": "array", + "description": "Basic auth credentials as [username, password]" + }, + "query": { + "type": "array", + "description": "Query parameters as [[key, value], ...]" + }, + "form": { + "type": "array", + "description": "Form data as [[key, value], ...]" + }, + "json_payload": { + "type": "object", + "description": "JSON payload" + }, + "body": { + "type": "object", + "description": "Request body" + }, + "multipart": { + "type": "array", + "description": "Multipart data as [[key, value], ...]" + }, + "force_store_response_content": { + "type": "boolean", + "description": "Force storing response content regardless of size" + } + } + } + }, + { + "name": "http_put", + "description": "Make an HTTP PUT request to the specified URL", + "inputSchema": { + "type": "object", + "required": [ + "url" + ], + "properties": { + "url": { + "type": "string", + "description": "URL to send the request to" + }, + "proxy": { + "type": "string", + "description": "Proxy to use for the request" + }, + "headers": { + "type": "object", + "description": "Headers to include in the request" + }, + "cookies": { + "type": "object", + "description": "Cookies to include in the request" + }, + "allow_redirects": { + "type": "boolean", + "description": "Whether to follow redirects" + }, + "max_redirects": { + "type": "integer", + "description": "Maximum number of redirects to follow" + }, + "auth": { + "type": 
"string", + "description": "Authentication credentials" + }, + "bearer_auth": { + "type": "string", + "description": "Bearer token for authentication" + }, + "basic_auth": { + "type": "array", + "description": "Basic auth credentials as [username, password]" + }, + "query": { + "type": "array", + "description": "Query parameters as [[key, value], ...]" + }, + "form": { + "type": "array", + "description": "Form data as [[key, value], ...]" + }, + "json_payload": { + "type": "object", + "description": "JSON payload" + }, + "body": { + "type": "object", + "description": "Request body" + }, + "multipart": { + "type": "array", + "description": "Multipart data as [[key, value], ...]" + }, + "force_store_response_content": { + "type": "boolean", + "description": "Force storing response content regardless of size" + } + } + } + }, + { + "name": "http_delete", + "description": "Make an HTTP DELETE request to the specified URL", + "inputSchema": { + "type": "object", + "required": [ + "url" + ], + "properties": { + "url": { + "type": "string", + "description": "URL to send the request to" + }, + "proxy": { + "type": "string", + "description": "Proxy to use for the request" + }, + "headers": { + "type": "object", + "description": "Headers to include in the request" + }, + "cookies": { + "type": "object", + "description": "Cookies to include in the request" + }, + "allow_redirects": { + "type": "boolean", + "description": "Whether to follow redirects" + }, + "max_redirects": { + "type": "integer", + "description": "Maximum number of redirects to follow" + }, + "auth": { + "type": "string", + "description": "Authentication credentials" + }, + "bearer_auth": { + "type": "string", + "description": "Bearer token for authentication" + }, + "basic_auth": { + "type": "array", + "description": "Basic auth credentials as [username, password]" + }, + "query": { + "type": "array", + "description": "Query parameters as [[key, value], ...]" + }, + "force_store_response_content": { + 
"type": "boolean", + "description": "Force storing response content regardless of size" + } + } + } + }, + { + "name": "http_patch", + "description": "Make an HTTP PATCH request to the specified URL", + "inputSchema": { + "type": "object", + "required": [ + "url" + ], + "properties": { + "url": { + "type": "string", + "description": "URL to send the request to" + }, + "proxy": { + "type": "string", + "description": "Proxy to use for the request" + }, + "headers": { + "type": "object", + "description": "Headers to include in the request" + }, + "cookies": { + "type": "object", + "description": "Cookies to include in the request" + }, + "allow_redirects": { + "type": "boolean", + "description": "Whether to follow redirects" + }, + "max_redirects": { + "type": "integer", + "description": "Maximum number of redirects to follow" + }, + "auth": { + "type": "string", + "description": "Authentication credentials" + }, + "bearer_auth": { + "type": "string", + "description": "Bearer token for authentication" + }, + "basic_auth": { + "type": "array", + "description": "Basic auth credentials as [username, password]" + }, + "query": { + "type": "array", + "description": "Query parameters as [[key, value], ...]" + }, + "form": { + "type": "array", + "description": "Form data as [[key, value], ...]" + }, + "json_payload": { + "type": "object", + "description": "JSON payload" + }, + "body": { + "type": "object", + "description": "Request body" + }, + "multipart": { + "type": "array", + "description": "Multipart data as [[key, value], ...]" + }, + "force_store_response_content": { + "type": "boolean", + "description": "Force storing response content regardless of size" + } + } + } + }, + { + "name": "http_head", + "description": "Make an HTTP HEAD request to retrieve only headers from the specified URL", + "inputSchema": { + "type": "object", + "required": [ + "url" + ], + "properties": { + "url": { + "type": "string", + "description": "URL to send the request to" + }, + "proxy": { 
+ "type": "string", + "description": "Proxy to use for the request" + }, + "headers": { + "type": "object", + "description": "Headers to include in the request" + }, + "cookies": { + "type": "object", + "description": "Cookies to include in the request" + }, + "allow_redirects": { + "type": "boolean", + "description": "Whether to follow redirects" + }, + "max_redirects": { + "type": "integer", + "description": "Maximum number of redirects to follow" + }, + "auth": { + "type": "string", + "description": "Authentication credentials" + }, + "bearer_auth": { + "type": "string", + "description": "Bearer token for authentication" + }, + "basic_auth": { + "type": "array", + "description": "Basic auth credentials as [username, password]" + }, + "query": { + "type": "array", + "description": "Query parameters as [[key, value], ...]" + }, + "force_store_response_content": { + "type": "boolean", + "description": "Force storing response content regardless of size" + } + } + } + }, + { + "name": "http_options", + "description": "Make an HTTP OPTIONS request to retrieve options for the specified URL", + "inputSchema": { + "type": "object", + "required": [ + "url" + ], + "properties": { + "url": { + "type": "string", + "description": "URL to send the request to" + }, + "proxy": { + "type": "string", + "description": "Proxy to use for the request" + }, + "headers": { + "type": "object", + "description": "Headers to include in the request" + }, + "cookies": { + "type": "object", + "description": "Cookies to include in the request" + }, + "allow_redirects": { + "type": "boolean", + "description": "Whether to follow redirects" + }, + "max_redirects": { + "type": "integer", + "description": "Maximum number of redirects to follow" + }, + "auth": { + "type": "string", + "description": "Authentication credentials" + }, + "bearer_auth": { + "type": "string", + "description": "Bearer token for authentication" + }, + "basic_auth": { + "type": "array", + "description": "Basic auth 
credentials as [username, password]" + }, + "query": { + "type": "array", + "description": "Query parameters as [[key, value], ...]" + }, + "force_store_response_content": { + "type": "boolean", + "description": "Force storing response content regardless of size" + } + } + } + }, + { + "name": "http_trace", + "description": "Make an HTTP TRACE request for diagnostic tracing of the specified URL", + "inputSchema": { + "type": "object", + "required": [ + "url" + ], + "properties": { + "url": { + "type": "string", + "description": "URL to send the request to" + }, + "proxy": { + "type": "string", + "description": "Proxy to use for the request" + }, + "headers": { + "type": "object", + "description": "Headers to include in the request" + }, + "cookies": { + "type": "object", + "description": "Cookies to include in the request" + }, + "allow_redirects": { + "type": "boolean", + "description": "Whether to follow redirects" + }, + "max_redirects": { + "type": "integer", + "description": "Maximum number of redirects to follow" + }, + "auth": { + "type": "string", + "description": "Authentication credentials" + }, + "bearer_auth": { + "type": "string", + "description": "Bearer token for authentication" + }, + "basic_auth": { + "type": "array", + "description": "Basic auth credentials as [username, password]" + }, + "query": { + "type": "array", + "description": "Query parameters as [[key, value], ...]" + }, + "force_store_response_content": { + "type": "boolean", + "description": "Force storing response content regardless of size" + } + } + } + }, + { + "name": "get_stored_response", + "description": "Retrieve a stored HTTP response by its ID", + "inputSchema": { + "type": "object", + "required": [ + "response_id" + ], + "properties": { + "response_id": { + "type": "string", + "description": "ID of the stored response" + }, + "start_line": { + "type": "integer", + "description": "Starting line number (1-indexed)" + }, + "end_line": { + "type": "integer", + "description": 
"Ending line number (inclusive)" + } + } + } + }, + { + "name": "get_stored_response_with_markdown", + "description": "Retrieve a stored HTTP response by its ID and convert it to Markdown format. Supports HTML and PDF content types. (Converting large PDF to Markdown may cause timeout, just wait and try again.)", + "inputSchema": { + "type": "object", + "required": [ + "response_id" + ], + "properties": { + "response_id": { + "type": "string", + "description": "ID of the stored response" + } + } + } + }, + { + "name": "get_model_state", + "description": "Get the current state of the PDF models (used by `get_stored_response_with_markdown`) loading process", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "restart_model_loading", + "description": "Restart the PDF models (used by `get_stored_response_with_markdown`) loading process if it failed or got stuck", + "inputSchema": { + "type": "object", + "properties": {} + } + } + ] + }, + "neo4j": { + "name": "neo4j", + "display_name": "Neo4j Server", + "description": "A community-built server that interacts with Neo4j Graph Database.", + "repository": { + "type": "git", + "url": "https://github.com/da-okazaki/mcp-neo4j-server" + }, + "homepage": "https://github.com/da-okazaki/mcp-neo4j-server", + "author": { + "name": "da-okazaki" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "neo4j", + "database" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@alanse/mcp-neo4j-server" + ], + "env": { + "NEO4J_URI": "${NEO4J_URI}", + "NEO4J_USERNAME": "${NEO4J_USERNAME}", + "NEO4J_PASSWORD": "${NEO4J_PASSWORD}" + } + } + }, + "examples": [ + { + "title": "Querying Data", + "description": "Ask questions about the data, e.g., 'Show me all employees in the Sales department'.", + "prompt": "User: \"Show me all employees in the Sales department\"" + }, + { + "title": "Creating Data", + "description": "Instruct the bot to create new
entities, e.g., 'Add a new person named John Doe who is 30 years old'.", + "prompt": "User: \"Add a new person named John Doe who is 30 years old\"" + }, + { + "title": "Creating Relationships", + "description": "Request to establish relationships between entities, e.g., 'Make John Doe friends with Jane Smith'.", + "prompt": "User: \"Make John Doe friends with Jane Smith\"" + }, + { + "title": "Complex Operations", + "description": "Perform comprehensive queries like 'Find all products purchased by customers who live in New York'.", + "prompt": "User: \"Find all products purchased by customers who live in New York\"" + } + ], + "arguments": { + "NEO4J_URI": { + "description": "Neo4j database URI (default: bolt://localhost:7687)", + "required": false, + "example": "bolt://localhost:7687" + }, + "NEO4J_USERNAME": { + "description": "Neo4j username (default: neo4j)", + "required": false, + "example": "neo4j" + }, + "NEO4J_PASSWORD": { + "description": "Neo4j password", + "required": true + } + }, + "tools": [ + { + "name": "execute_query", + "description": "Execute a Cypher query on Neo4j database", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Cypher query to execute" + }, + "params": { + "type": "object", + "description": "Query parameters", + "additionalProperties": true + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "create_node", + "description": "Create a new node in Neo4j", + "inputSchema": { + "type": "object", + "properties": { + "label": { + "type": "string", + "description": "Node label" + }, + "properties": { + "type": "object", + "description": "Node properties", + "additionalProperties": true + } + }, + "required": [ + "label", + "properties" + ] + } + }, + { + "name": "create_relationship", + "description": "Create a relationship between two nodes", + "inputSchema": { + "type": "object", + "properties": { + "fromNodeId": { + "type": "number", + "description": "ID of the source 
node" + }, + "toNodeId": { + "type": "number", + "description": "ID of the target node" + }, + "type": { + "type": "string", + "description": "Relationship type" + }, + "properties": { + "type": "object", + "description": "Relationship properties", + "additionalProperties": true + } + }, + "required": [ + "fromNodeId", + "toNodeId", + "type" + ] + } + } + ] + }, + "discord": { + "name": "discord", + "display_name": "Discord", + "description": "A MCP server to connect to Discord guilds through a bot and read and write messages in channels", + "repository": { + "type": "git", + "url": "https://github.com/v-3/discordmcp" + }, + "homepage": "https://github.com/v-3/discordmcp", + "author": { + "name": "v-3", + "url": "https://github.com/v-3" + }, + "license": "MIT", + "categories": [ + "Messaging" + ], + "tags": [ + "Discord", + "LLM", + "Bot" + ], + "examples": [ + { + "title": "Read Messages", + "description": "Fetch the last 5 messages from a channel.", + "prompt": "{\"channel\": \"general\", \"limit\": 5}" + }, + { + "title": "Send Message", + "description": "Send a message to the specified channel.", + "prompt": "{\"channel\": \"announcements\", \"message\": \"Meeting starts in 10 minutes\"}" + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/v-3/discordmcp" + ], + "env": { + "DISCORD_TOKEN": "${DISCORD_TOKEN}" + } + } + }, + "arguments": { + "DISCORD_TOKEN": { + "description": "The Discord bot token required for authentication and to interact with Discord's API.", + "required": true, + "example": "your_discord_bot_token_here" + } + } + }, + "airflow": { + "name": "airflow", + "display_name": "Apache Airflow", + "description": "A MCP Server that connects to [Apache Airflow](https://airflow.apache.org/) using official python client.", + "repository": { + "type": "git", + "url": "https://github.com/yangkyeongmo/mcp-server-apache-airflow" + }, + "homepage": 
"https://github.com/yangkyeongmo/mcp-server-apache-airflow", + "author": { + "name": "yangkyeongmo" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "Apache Airflow", + "DAG", + "Workflow", + "Data Pipeline" + ], + "arguments": { + "AIRFLOW_HOST": { + "description": "URL of your Apache Airflow instance", + "required": true, + "example": "https://your-airflow-host:8080" + }, + "AIRFLOW_USERNAME": { + "description": "Username for authenticating with Airflow", + "required": true, + "example": "admin" + }, + "AIRFLOW_PASSWORD": { + "description": "Password for authenticating with Airflow", + "required": true, + "example": "your_secure_password" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-apache-airflow" + ], + "env": { + "AIRFLOW_HOST": "${AIRFLOW_HOST}", + "AIRFLOW_USERNAME": "${AIRFLOW_USERNAME}", + "AIRFLOW_PASSWORD": "${AIRFLOW_PASSWORD}" + } + } + }, + "tools": [ + { + "name": "get_config", + "description": "Get current configuration", + "inputSchema": { + "properties": { + "section": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Section" + } + }, + "title": "get_configArguments", + "type": "object" + } + }, + { + "name": "get_value", + "description": "Get a specific option from configuration", + "inputSchema": { + "properties": { + "section": { + "title": "Section", + "type": "string" + }, + "option": { + "title": "Option", + "type": "string" + } + }, + "required": [ + "section", + "option" + ], + "title": "get_valueArguments", + "type": "object" + } + }, + { + "name": "list_connections", + "description": "List all connections", + "inputSchema": { + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Offset" + }, + 
"order_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order By" + } + }, + "title": "list_connectionsArguments", + "type": "object" + } + }, + { + "name": "create_connection", + "description": "Create a connection", + "inputSchema": { + "properties": { + "conn_id": { + "title": "Conn Id", + "type": "string" + }, + "conn_type": { + "title": "Conn Type", + "type": "string" + }, + "host": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Host" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Port" + }, + "login": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Login" + }, + "password": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Password" + }, + "schema": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Schema" + }, + "extra": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Extra" + } + }, + "required": [ + "conn_id", + "conn_type" + ], + "title": "create_connectionArguments", + "type": "object" + } + }, + { + "name": "get_connection", + "description": "Get a connection by ID", + "inputSchema": { + "properties": { + "conn_id": { + "title": "Conn Id", + "type": "string" + } + }, + "required": [ + "conn_id" + ], + "title": "get_connectionArguments", + "type": "object" + } + }, + { + "name": "update_connection", + "description": "Update a connection by ID", + "inputSchema": { + "properties": { + "conn_id": { + "title": "Conn Id", + "type": "string" + }, + "conn_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Conn Type" + }, + "host": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + 
], + "default": null, + "title": "Host" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Port" + }, + "login": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Login" + }, + "password": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Password" + }, + "schema": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Schema" + }, + "extra": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Extra" + } + }, + "required": [ + "conn_id" + ], + "title": "update_connectionArguments", + "type": "object" + } + }, + { + "name": "delete_connection", + "description": "Delete a connection by ID", + "inputSchema": { + "properties": { + "conn_id": { + "title": "Conn Id", + "type": "string" + } + }, + "required": [ + "conn_id" + ], + "title": "delete_connectionArguments", + "type": "object" + } + }, + { + "name": "test_connection", + "description": "Test a connection", + "inputSchema": { + "properties": { + "conn_type": { + "title": "Conn Type", + "type": "string" + }, + "host": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Host" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Port" + }, + "login": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Login" + }, + "password": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Password" + }, + "schema": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Schema" + }, + "extra": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + 
"title": "Extra" + } + }, + "required": [ + "conn_type" + ], + "title": "test_connectionArguments", + "type": "object" + } + }, + { + "name": "fetch_dags", + "description": "Fetch all DAGs", + "inputSchema": { + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Offset" + }, + "order_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order By" + }, + "tags": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Tags" + }, + "only_active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Only Active" + }, + "paused": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Paused" + }, + "dag_id_pattern": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Dag Id Pattern" + } + }, + "title": "get_dagsArguments", + "type": "object" + } + }, + { + "name": "get_dag", + "description": "Get a DAG by ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + } + }, + "required": [ + "dag_id" + ], + "title": "get_dagArguments", + "type": "object" + } + }, + { + "name": "get_dag_details", + "description": "Get a simplified representation of DAG", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "fields": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Fields" + } + }, + "required": [ + "dag_id" + ], + "title": "get_dag_detailsArguments", + "type": "object" + } + }, + { + "name": "get_dag_source", 
+ "description": "Get a source code", + "inputSchema": { + "properties": { + "file_token": { + "title": "File Token", + "type": "string" + } + }, + "required": [ + "file_token" + ], + "title": "get_dag_sourceArguments", + "type": "object" + } + }, + { + "name": "pause_dag", + "description": "Pause a DAG by ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + } + }, + "required": [ + "dag_id" + ], + "title": "pause_dagArguments", + "type": "object" + } + }, + { + "name": "unpause_dag", + "description": "Unpause a DAG by ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + } + }, + "required": [ + "dag_id" + ], + "title": "unpause_dagArguments", + "type": "object" + } + }, + { + "name": "get_dag_tasks", + "description": "Get tasks for DAG", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + } + }, + "required": [ + "dag_id" + ], + "title": "get_dag_tasksArguments", + "type": "object" + } + }, + { + "name": "get_task", + "description": "Get a task by ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "task_id": { + "title": "Task Id", + "type": "string" + } + }, + "required": [ + "dag_id", + "task_id" + ], + "title": "get_taskArguments", + "type": "object" + } + }, + { + "name": "get_tasks", + "description": "Get tasks for DAG", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "order_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order By" + } + }, + "required": [ + "dag_id" + ], + "title": "get_tasksArguments", + "type": "object" + } + }, + { + "name": "patch_dag", + "description": "Update a DAG", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "is_paused": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + 
], + "default": null, + "title": "Is Paused" + }, + "tags": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Tags" + } + }, + "required": [ + "dag_id" + ], + "title": "patch_dagArguments", + "type": "object" + } + }, + { + "name": "patch_dags", + "description": "Update multiple DAGs", + "inputSchema": { + "properties": { + "dag_id_pattern": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Dag Id Pattern" + }, + "is_paused": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Is Paused" + }, + "tags": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Tags" + } + }, + "title": "patch_dagsArguments", + "type": "object" + } + }, + { + "name": "delete_dag", + "description": "Delete a DAG", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + } + }, + "required": [ + "dag_id" + ], + "title": "delete_dagArguments", + "type": "object" + } + }, + { + "name": "clear_task_instances", + "description": "Clear a set of task instances", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "task_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Task Ids" + }, + "start_date": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Start Date" + }, + "end_date": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "End Date" + }, + "include_subdags": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Subdags" + }, + "include_parentdag": { + "anyOf": [ + { + "type": 
"boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Parentdag" + }, + "include_upstream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Upstream" + }, + "include_downstream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Downstream" + }, + "include_future": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Future" + }, + "include_past": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Past" + }, + "dry_run": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Dry Run" + }, + "reset_dag_runs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Reset Dag Runs" + } + }, + "required": [ + "dag_id" + ], + "title": "clear_task_instancesArguments", + "type": "object" + } + }, + { + "name": "set_task_instances_state", + "description": "Set a state of task instances", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "state": { + "title": "State", + "type": "string" + }, + "task_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Task Ids" + }, + "execution_date": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Execution Date" + }, + "include_upstream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Upstream" + }, + "include_downstream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Downstream" + }, + "include_future": { + "anyOf": [ + { + "type": "boolean" 
+ }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Future" + }, + "include_past": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Past" + }, + "dry_run": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Dry Run" + } + }, + "required": [ + "dag_id", + "state" + ], + "title": "set_task_instances_stateArguments", + "type": "object" + } + }, + { + "name": "reparse_dag_file", + "description": "Request re-parsing of a DAG file", + "inputSchema": { + "properties": { + "file_token": { + "title": "File Token", + "type": "string" + } + }, + "required": [ + "file_token" + ], + "title": "reparse_dag_fileArguments", + "type": "object" + } + }, + { + "name": "post_dag_run", + "description": "Trigger a DAG by ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "dag_run_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Dag Run Id" + }, + "data_interval_end": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Data Interval End" + }, + "data_interval_start": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Data Interval Start" + }, + "end_date": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "End Date" + }, + "execution_date": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Execution Date" + }, + "external_trigger": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "External Trigger" + }, + "last_scheduling_decision": { + "anyOf": [ + { + "format": "date-time", + 
"type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Last Scheduling Decision" + }, + "logical_date": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Logical Date" + }, + "note": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Note" + }, + "run_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Run Type" + }, + "start_date": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Start Date" + } + }, + "required": [ + "dag_id" + ], + "title": "post_dag_runArguments", + "type": "object" + } + }, + { + "name": "get_dag_runs", + "description": "Get DAG runs by ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Offset" + }, + "execution_date_gte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Execution Date Gte" + }, + "execution_date_lte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Execution Date Lte" + }, + "start_date_gte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Start Date Gte" + }, + "start_date_lte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Start Date Lte" + }, + "end_date_gte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "End Date Gte" + }, + "end_date_lte": { + "anyOf": [ + { + 
"type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "End Date Lte" + }, + "updated_at_gte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Updated At Gte" + }, + "updated_at_lte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Updated At Lte" + }, + "state": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "State" + }, + "order_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order By" + } + }, + "required": [ + "dag_id" + ], + "title": "get_dag_runsArguments", + "type": "object" + } + }, + { + "name": "get_dag_runs_batch", + "description": "List DAG runs (batch)", + "inputSchema": { + "properties": { + "dag_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Dag Ids" + }, + "execution_date_gte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Execution Date Gte" + }, + "execution_date_lte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Execution Date Lte" + }, + "start_date_gte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Start Date Gte" + }, + "start_date_lte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Start Date Lte" + }, + "end_date_gte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "End Date Gte" + }, + "end_date_lte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "End Date Lte" + }, + "state": { + "anyOf": [ + { + "items": { + 
"type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "State" + }, + "order_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order By" + }, + "page_offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Page Offset" + }, + "page_limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Page Limit" + } + }, + "title": "get_dag_runs_batchArguments", + "type": "object" + } + }, + { + "name": "get_dag_run", + "description": "Get a DAG run by DAG ID and DAG run ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "dag_run_id": { + "title": "Dag Run Id", + "type": "string" + } + }, + "required": [ + "dag_id", + "dag_run_id" + ], + "title": "get_dag_runArguments", + "type": "object" + } + }, + { + "name": "update_dag_run_state", + "description": "Update a DAG run state by DAG ID and DAG run ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "dag_run_id": { + "title": "Dag Run Id", + "type": "string" + }, + "state": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "State" + } + }, + "required": [ + "dag_id", + "dag_run_id" + ], + "title": "update_dag_run_stateArguments", + "type": "object" + } + }, + { + "name": "delete_dag_run", + "description": "Delete a DAG run by DAG ID and DAG run ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "dag_run_id": { + "title": "Dag Run Id", + "type": "string" + } + }, + "required": [ + "dag_id", + "dag_run_id" + ], + "title": "delete_dag_runArguments", + "type": "object" + } + }, + { + "name": "clear_dag_run", + "description": "Clear a DAG run", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag 
Id", + "type": "string" + }, + "dag_run_id": { + "title": "Dag Run Id", + "type": "string" + }, + "dry_run": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Dry Run" + } + }, + "required": [ + "dag_id", + "dag_run_id" + ], + "title": "clear_dag_runArguments", + "type": "object" + } + }, + { + "name": "set_dag_run_note", + "description": "Update the DagRun note", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "dag_run_id": { + "title": "Dag Run Id", + "type": "string" + }, + "note": { + "title": "Note", + "type": "string" + } + }, + "required": [ + "dag_id", + "dag_run_id", + "note" + ], + "title": "set_dag_run_noteArguments", + "type": "object" + } + }, + { + "name": "get_upstream_dataset_events", + "description": "Get dataset events for a DAG run", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "dag_run_id": { + "title": "Dag Run Id", + "type": "string" + } + }, + "required": [ + "dag_id", + "dag_run_id" + ], + "title": "get_upstream_dataset_eventsArguments", + "type": "object" + } + }, + { + "name": "get_dag_stats", + "description": "Get DAG stats", + "inputSchema": { + "properties": { + "dag_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Dag Ids" + } + }, + "title": "get_dag_statsArguments", + "type": "object" + } + }, + { + "name": "get_datasets", + "description": "List datasets", + "inputSchema": { + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Offset" + }, + "order_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order By" + }, + 
"uri_pattern": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Uri Pattern" + }, + "dag_ids": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Dag Ids" + } + }, + "title": "get_datasetsArguments", + "type": "object" + } + }, + { + "name": "get_dataset", + "description": "Get a dataset by URI", + "inputSchema": { + "properties": { + "uri": { + "title": "Uri", + "type": "string" + } + }, + "required": [ + "uri" + ], + "title": "get_datasetArguments", + "type": "object" + } + }, + { + "name": "get_dataset_events", + "description": "Get dataset events", + "inputSchema": { + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Offset" + }, + "order_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order By" + }, + "dataset_id": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Dataset Id" + }, + "source_dag_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Source Dag Id" + }, + "source_task_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Source Task Id" + }, + "source_run_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Source Run Id" + }, + "source_map_index": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Source Map Index" + } + }, + "title": "get_dataset_eventsArguments", + "type": "object" + } + }, + { + "name": "create_dataset_event", + "description": "Create dataset event", + "inputSchema": { + 
"properties": { + "dataset_uri": { + "title": "Dataset Uri", + "type": "string" + }, + "extra": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Extra" + } + }, + "required": [ + "dataset_uri" + ], + "title": "create_dataset_eventArguments", + "type": "object" + } + }, + { + "name": "get_dag_dataset_queued_event", + "description": "Get a queued Dataset event for a DAG", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "uri": { + "title": "Uri", + "type": "string" + } + }, + "required": [ + "dag_id", + "uri" + ], + "title": "get_dag_dataset_queued_eventArguments", + "type": "object" + } + }, + { + "name": "get_dag_dataset_queued_events", + "description": "Get queued Dataset events for a DAG", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + } + }, + "required": [ + "dag_id" + ], + "title": "get_dag_dataset_queued_eventsArguments", + "type": "object" + } + }, + { + "name": "delete_dag_dataset_queued_event", + "description": "Delete a queued Dataset event for a DAG", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "uri": { + "title": "Uri", + "type": "string" + } + }, + "required": [ + "dag_id", + "uri" + ], + "title": "delete_dag_dataset_queued_eventArguments", + "type": "object" + } + }, + { + "name": "delete_dag_dataset_queued_events", + "description": "Delete queued Dataset events for a DAG", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "before": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Before" + } + }, + "required": [ + "dag_id" + ], + "title": "delete_dag_dataset_queued_eventsArguments", + "type": "object" + } + }, + { + "name": "get_dataset_queued_events", + "description": "Get queued Dataset events for a Dataset", + 
"inputSchema": { + "properties": { + "uri": { + "title": "Uri", + "type": "string" + } + }, + "required": [ + "uri" + ], + "title": "get_dataset_queued_eventsArguments", + "type": "object" + } + }, + { + "name": "delete_dataset_queued_events", + "description": "Delete queued Dataset events for a Dataset", + "inputSchema": { + "properties": { + "uri": { + "title": "Uri", + "type": "string" + }, + "before": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Before" + } + }, + "required": [ + "uri" + ], + "title": "delete_dataset_queued_eventsArguments", + "type": "object" + } + }, + { + "name": "get_event_logs", + "description": "List log entries from event log", + "inputSchema": { + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Offset" + }, + "order_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order By" + }, + "dag_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Dag Id" + }, + "task_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Task Id" + }, + "run_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Run Id" + }, + "map_index": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Map Index" + }, + "try_number": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Try Number" + }, + "event": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Event" + }, + "owner": { + "anyOf": [ + { + "type": "string" + }, + { 
+ "type": "null" + } + ], + "default": null, + "title": "Owner" + }, + "before": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Before" + }, + "after": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "After" + }, + "included_events": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Included Events" + }, + "excluded_events": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Excluded Events" + } + }, + "title": "get_event_logsArguments", + "type": "object" + } + }, + { + "name": "get_event_log", + "description": "Get a specific log entry by ID", + "inputSchema": { + "properties": { + "event_log_id": { + "title": "Event Log Id", + "type": "integer" + } + }, + "required": [ + "event_log_id" + ], + "title": "get_event_logArguments", + "type": "object" + } + }, + { + "name": "get_import_errors", + "description": "List import errors", + "inputSchema": { + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Offset" + }, + "order_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order By" + } + }, + "title": "get_import_errorsArguments", + "type": "object" + } + }, + { + "name": "get_import_error", + "description": "Get a specific import error by ID", + "inputSchema": { + "properties": { + "import_error_id": { + "title": "Import Error Id", + "type": "integer" + } + }, + "required": [ + "import_error_id" + ], + "title": "get_import_errorArguments", + "type": "object" + } + }, + { + "name": "get_health", + "description": "Get instance status", + 
"inputSchema": { + "properties": {}, + "title": "get_healthArguments", + "type": "object" + } + }, + { + "name": "get_version", + "description": "Get version information", + "inputSchema": { + "properties": {}, + "title": "get_versionArguments", + "type": "object" + } + }, + { + "name": "get_plugins", + "description": "Get a list of loaded plugins", + "inputSchema": { + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Offset" + } + }, + "title": "get_pluginsArguments", + "type": "object" + } + }, + { + "name": "get_pools", + "description": "List pools", + "inputSchema": { + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Offset" + }, + "order_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order By" + } + }, + "title": "get_poolsArguments", + "type": "object" + } + }, + { + "name": "get_pool", + "description": "Get a pool by name", + "inputSchema": { + "properties": { + "pool_name": { + "title": "Pool Name", + "type": "string" + } + }, + "required": [ + "pool_name" + ], + "title": "get_poolArguments", + "type": "object" + } + }, + { + "name": "delete_pool", + "description": "Delete a pool", + "inputSchema": { + "properties": { + "pool_name": { + "title": "Pool Name", + "type": "string" + } + }, + "required": [ + "pool_name" + ], + "title": "delete_poolArguments", + "type": "object" + } + }, + { + "name": "post_pool", + "description": "Create a pool", + "inputSchema": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "slots": { + "title": "Slots", + "type": 
"integer" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Description" + }, + "include_deferred": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Deferred" + } + }, + "required": [ + "name", + "slots" + ], + "title": "post_poolArguments", + "type": "object" + } + }, + { + "name": "patch_pool", + "description": "Update a pool", + "inputSchema": { + "properties": { + "pool_name": { + "title": "Pool Name", + "type": "string" + }, + "slots": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Slots" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Description" + }, + "include_deferred": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Include Deferred" + } + }, + "required": [ + "pool_name" + ], + "title": "patch_poolArguments", + "type": "object" + } + }, + { + "name": "get_task_instance", + "description": "Get a task instance by DAG ID, task ID, and DAG run ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "task_id": { + "title": "Task Id", + "type": "string" + }, + "dag_run_id": { + "title": "Dag Run Id", + "type": "string" + } + }, + "required": [ + "dag_id", + "task_id", + "dag_run_id" + ], + "title": "get_task_instanceArguments", + "type": "object" + } + }, + { + "name": "list_task_instances", + "description": "List task instances by DAG ID and DAG run ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "dag_run_id": { + "title": "Dag Run Id", + "type": "string" + }, + "execution_date_gte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Execution Date Gte" + }, + 
"execution_date_lte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Execution Date Lte" + }, + "start_date_gte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Start Date Gte" + }, + "start_date_lte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Start Date Lte" + }, + "end_date_gte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "End Date Gte" + }, + "end_date_lte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "End Date Lte" + }, + "updated_at_gte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Updated At Gte" + }, + "updated_at_lte": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Updated At Lte" + }, + "duration_gte": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Duration Gte" + }, + "duration_lte": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Duration Lte" + }, + "state": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "State" + }, + "pool": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Pool" + }, + "queue": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Queue" + }, + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + 
"default": null, + "title": "Offset" + } + }, + "required": [ + "dag_id", + "dag_run_id" + ], + "title": "list_task_instancesArguments", + "type": "object" + } + }, + { + "name": "update_task_instance", + "description": "Update a task instance by DAG ID, DAG run ID, and task ID", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "dag_run_id": { + "title": "Dag Run Id", + "type": "string" + }, + "task_id": { + "title": "Task Id", + "type": "string" + }, + "state": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "State" + } + }, + "required": [ + "dag_id", + "dag_run_id", + "task_id" + ], + "title": "update_task_instanceArguments", + "type": "object" + } + }, + { + "name": "list_variables", + "description": "List all variables", + "inputSchema": { + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Offset" + }, + "order_by": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Order By" + } + }, + "title": "list_variablesArguments", + "type": "object" + } + }, + { + "name": "create_variable", + "description": "Create a variable", + "inputSchema": { + "properties": { + "key": { + "title": "Key", + "type": "string" + }, + "value": { + "title": "Value", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Description" + } + }, + "required": [ + "key", + "value" + ], + "title": "create_variableArguments", + "type": "object" + } + }, + { + "name": "get_variable", + "description": "Get a variable by key", + "inputSchema": { + "properties": { + "key": { + "title": "Key", + "type": "string" + } + }, + "required": [ + "key" + 
], + "title": "get_variableArguments", + "type": "object" + } + }, + { + "name": "update_variable", + "description": "Update a variable by key", + "inputSchema": { + "properties": { + "key": { + "title": "Key", + "type": "string" + }, + "value": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Value" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Description" + } + }, + "required": [ + "key" + ], + "title": "update_variableArguments", + "type": "object" + } + }, + { + "name": "delete_variable", + "description": "Delete a variable by key", + "inputSchema": { + "properties": { + "key": { + "title": "Key", + "type": "string" + } + }, + "required": [ + "key" + ], + "title": "delete_variableArguments", + "type": "object" + } + }, + { + "name": "get_xcom_entries", + "description": "Get all XCom entries", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + "dag_run_id": { + "title": "Dag Run Id", + "type": "string" + }, + "task_id": { + "title": "Task Id", + "type": "string" + }, + "map_index": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Map Index" + }, + "xcom_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Xcom Key" + }, + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Offset" + } + }, + "required": [ + "dag_id", + "dag_run_id", + "task_id" + ], + "title": "get_xcom_entriesArguments", + "type": "object" + } + }, + { + "name": "get_xcom_entry", + "description": "Get an XCom entry", + "inputSchema": { + "properties": { + "dag_id": { + "title": "Dag Id", + "type": "string" + }, + 
"dag_run_id": { + "title": "Dag Run Id", + "type": "string" + }, + "task_id": { + "title": "Task Id", + "type": "string" + }, + "xcom_key": { + "title": "Xcom Key", + "type": "string" + }, + "map_index": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Map Index" + }, + "deserialize": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Deserialize" + }, + "stringify": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Stringify" + } + }, + "required": [ + "dag_id", + "dag_run_id", + "task_id", + "xcom_key" + ], + "title": "get_xcom_entryArguments", + "type": "object" + } + } + ] + }, + "volcengine-tos": { + "name": "volcengine-tos", + "display_name": "VolcEngine TOS", + "description": "A sample MCP server for VolcEngine TOS that flexibly get objects from TOS.", + "repository": { + "type": "git", + "url": "https://github.com/dinghuazhou/sample-mcp-server-tos" + }, + "author": { + "name": "dinghuazhou" + }, + "license": "MIT", + "categories": [ + "System Tools" + ], + "tags": [ + "TOS", + "Volcengine", + "Data" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/dinghuazhou/sample-mcp-server-tos", + "tos-mcp-server" + ] + } + }, + "examples": [ + { + "title": "List Buckets", + "description": "Returns a list of all buckets owned by the authenticated sender of the request", + "prompt": "ListBuckets" + }, + { + "title": "List Objects in a Bucket", + "description": "Returns some or all (up to 1,000) of the objects in a bucket with each request", + "prompt": "ListObjectsV2" + }, + { + "title": "Get an Object", + "description": "Retrieves an object from volcengine TOS.", + "prompt": "GetObject" + } + ], + "homepage": "https://github.com/dinghuazhou/sample-mcp-server-tos" + }, + "mcp-server-milvus": { + "display_name": "MCP Server for Milvus", 
+ "repository": { + "type": "git", + "url": "https://github.com/zilliztech/mcp-server-milvus" + }, + "homepage": "https://github.com/zilliztech/mcp-server-milvus", + "author": { + "name": "zilliztech" + }, + "license": "[NOT GIVEN]", + "tags": [ + "milvus", + "vector database", + "mcp", + "model context protocol" + ], + "arguments": { + "milvus-uri": { + "description": "Milvus server URI", + "required": true, + "example": "http://localhost:19530" + }, + "milvus-token": { + "description": "Optional authentication token", + "required": false, + "example": "[NOT GIVEN]" + }, + "milvus-db": { + "description": "Database name", + "required": false, + "example": "default" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/zilliztech/mcp-server-milvus", + "mcp-server-milvus" + ], + "description": "Run directly with uv without installation" + } + }, + "examples": [ + { + "title": "Listing Collections", + "description": "List all collections in the Milvus database", + "prompt": "What are the collections I have in my Milvus DB?" 
+ }, + { + "title": "Searching for Documents", + "description": "Search for documents using full text search", + "prompt": "Find documents in my text_collection that mention \"machine learning\"" + }, + { + "title": "Creating a Collection", + "description": "Create a new collection with specified schema", + "prompt": "Create a new collection called 'articles' in Milvus with fields for title (string), content (string), and a vector field (128 dimensions)" + } + ], + "name": "mcp-server-milvus", + "description": "This repository contains a MCP server that provides access to Milvus vector database functionality.", + "categories": [ + "Databases" + ], + "is_official": true + }, + "opencti": { + "name": "opencti", + "display_name": "OpenCTI", + "description": "Interact with OpenCTI platform to retrieve threat intelligence data including reports, indicators, malware and threat actors.", + "repository": { + "type": "git", + "url": "https://github.com/Spathodea-Network/opencti-mcp" + }, + "homepage": "https://github.com/Spathodea-Network/opencti-mcp", + "author": { + "name": "Spathodea-Network" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "OpenCTI", + "Threat Intelligence" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/Spathodea-Network/opencti-mcp" + ], + "env": { + "OPENCTI_URL": "${OPENCTI_URL}", + "OPENCTI_TOKEN": "${OPENCTI_TOKEN}" + } + } + }, + "examples": [ + { + "title": "Get Latest Reports", + "description": "Retrieves the most recent threat intelligence reports.", + "prompt": "{ \"name\": \"get_latest_reports\", \"arguments\": { \"first\": 10 } }" + }, + { + "title": "Search Malware", + "description": "Searches for malware information in the OpenCTI database.", + "prompt": "{ \"name\": \"search_malware\", \"arguments\": { \"query\": \"ransomware\" } }" + }, + { + "title": "User Management - List Users", + "description": "Lists all users in the system.", + 
"prompt": "{ \"name\": \"list_users\", \"arguments\": {} }" + } + ], + "arguments": { + "OPENCTI_URL": { + "description": "Your OpenCTI instance URL", + "required": true + }, + "OPENCTI_TOKEN": { + "description": "Your OpenCTI API token", + "required": true + } + }, + "tools": [ + { + "name": "get_latest_reports", + "description": "\u7372\u53d6\u6700\u65b0\u7684OpenCTI\u5831\u544a", + "inputSchema": { + "type": "object", + "properties": { + "first": { + "type": "number", + "description": "\u8fd4\u56de\u7d50\u679c\u6578\u91cf\u9650\u5236", + "default": 10 + } + } + } + }, + { + "name": "get_report_by_id", + "description": "\u6839\u64daID\u7372\u53d6OpenCTI\u5831\u544a", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "\u5831\u544aID" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "search_indicators", + "description": "\u641c\u5c0bOpenCTI\u4e2d\u7684\u6307\u6a19", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "\u641c\u5c0b\u95dc\u9375\u5b57" + }, + "first": { + "type": "number", + "description": "\u8fd4\u56de\u7d50\u679c\u6578\u91cf\u9650\u5236", + "default": 10 + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "search_malware", + "description": "\u641c\u5c0bOpenCTI\u4e2d\u7684\u60e1\u610f\u7a0b\u5f0f", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "\u641c\u5c0b\u95dc\u9375\u5b57" + }, + "first": { + "type": "number", + "description": "\u8fd4\u56de\u7d50\u679c\u6578\u91cf\u9650\u5236", + "default": 10 + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "search_threat_actors", + "description": "\u641c\u5c0bOpenCTI\u4e2d\u7684\u5a01\u8105\u884c\u70ba\u8005", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "\u641c\u5c0b\u95dc\u9375\u5b57" + }, + "first": { + "type": "number", + "description": 
"\u8fd4\u56de\u7d50\u679c\u6578\u91cf\u9650\u5236", + "default": 10 + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "get_user_by_id", + "description": "\u6839\u64daID\u7372\u53d6\u4f7f\u7528\u8005\u8cc7\u8a0a", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "\u4f7f\u7528\u8005ID" + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "list_users", + "description": "\u5217\u51fa\u6240\u6709\u4f7f\u7528\u8005", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list_groups", + "description": "\u5217\u51fa\u6240\u6709\u7fa4\u7d44", + "inputSchema": { + "type": "object", + "properties": { + "first": { + "type": "number", + "description": "\u8fd4\u56de\u7d50\u679c\u6578\u91cf\u9650\u5236", + "default": 10 + } + } + } + }, + { + "name": "list_attack_patterns", + "description": "\u5217\u51fa\u6240\u6709\u653b\u64ca\u6a21\u5f0f", + "inputSchema": { + "type": "object", + "properties": { + "first": { + "type": "number", + "description": "\u8fd4\u56de\u7d50\u679c\u6578\u91cf\u9650\u5236", + "default": 10 + } + } + } + }, + { + "name": "get_campaign_by_name", + "description": "\u6839\u64da\u540d\u7a31\u7372\u53d6\u884c\u52d5\u8cc7\u8a0a", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "\u884c\u52d5\u540d\u7a31" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "list_connectors", + "description": "\u5217\u51fa\u6240\u6709\u9023\u63a5\u5668", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list_status_templates", + "description": "\u5217\u51fa\u6240\u6709\u72c0\u614b\u6a21\u677f", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get_file_by_id", + "description": "\u6839\u64daID\u7372\u53d6\u6a94\u6848\u8cc7\u8a0a", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "\u6a94\u6848ID" + } + 
}, + "required": [ + "id" + ] + } + }, + { + "name": "list_files", + "description": "\u5217\u51fa\u6240\u6709\u6a94\u6848", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list_marking_definitions", + "description": "\u5217\u51fa\u6240\u6709\u6a19\u8a18\u5b9a\u7fa9", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list_labels", + "description": "\u5217\u51fa\u6240\u6709\u6a19\u7c64", + "inputSchema": { + "type": "object", + "properties": {} + } + } + ] + }, + "arangodb": { + "name": "arangodb", + "display_name": "ArangoDB", + "description": "MCP Server that provides database interaction capabilities through [ArangoDB](https://arangodb.com/).", + "repository": { + "type": "git", + "url": "https://github.com/ravenwits/mcp-server-arangodb" + }, + "homepage": "https://github.com/ravenwits/mcp-server-arangodb", + "author": { + "name": "ravenwits" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "ArangoDB", + "TypeScript" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/ravenwits/mcp-server-arangodb" + ], + "description": "Run with npx (requires npm install)", + "env": { + "ARANGO_URL": "${ARANGO_URL}", + "ARANGO_DATABASE": "${ARANGO_DATABASE}", + "ARANGO_USERNAME": "${ARANGO_USERNAME}", + "ARANGO_PASSWORD": "${ARANGO_PASSWORD}" + } + } + }, + "examples": [ + { + "title": "List all collections", + "description": "Query to list all collections in the database.", + "prompt": "{}" + }, + { + "title": "Insert a new document", + "description": "Insert a new document into the 'users' collection.", + "prompt": "{\"collection\": \"users\", \"document\": {\"name\": \"John Doe\", \"email\": \"john@example.com\"}}" + }, + { + "title": "Update a document", + "description": "Update a document in the 'users' collection by key.", + "prompt": "{\"collection\": \"users\", \"key\": \"123456\", \"update\": {\"name\": \"Jane 
Doe\"}}" + }, + { + "title": "Remove a document", + "description": "Remove a document from the 'users' collection by key.", + "prompt": "{\"collection\": \"users\", \"key\": \"123456\"}" + }, + { + "title": "Backup database collections", + "description": "Backup collections to a specified directory.", + "prompt": "{\"outputDir\": \"./backup\"}" + } + ], + "arguments": { + "ARANGO_URL": { + "description": "ArangoDB server URL (note: 8529 is the default port for ArangoDB for local development)", + "required": true + }, + "ARANGO_DATABASE": { + "description": "Database name", + "required": true + }, + "ARANGO_USERNAME": { + "description": "Database user", + "required": true + }, + "ARANGO_PASSWORD": { + "description": "Database password", + "required": true + } + }, + "tools": [ + { + "name": "arango_query", + "description": "Execute an AQL query", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "AQL query string" + }, + "bindVars": { + "type": "object", + "description": "Query bind variables", + "additionalProperties": true + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "arango_insert", + "description": "Insert a document into a collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Collection name" + }, + "document": { + "type": "object", + "description": "Document to insert", + "additionalProperties": true + } + }, + "required": [ + "collection", + "document" + ] + } + }, + { + "name": "arango_update", + "description": "Update a document in a collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Collection name" + }, + "key": { + "type": "string", + "description": "Document key" + }, + "update": { + "type": "object", + "description": "Update object", + "additionalProperties": true + } + }, + "required": [ + "collection", + "key", + "update" + ] + } + 
}, + { + "name": "arango_remove", + "description": "Remove a document from a collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Collection name" + }, + "key": { + "type": "string", + "description": "Document key" + } + }, + "required": [ + "collection", + "key" + ] + } + }, + { + "name": "arango_backup", + "description": "Backup collections to JSON files.", + "inputSchema": { + "type": "object", + "properties": { + "outputDir": { + "type": "string", + "description": "An absolute directory path to store backup files", + "default": "./backup", + "optional": true + }, + "collection": { + "type": "string", + "description": "Collection name to backup. If not provided, backs up all collections.", + "optional": true + }, + "docLimit": { + "type": "integer", + "description": "Limit the number of documents to backup. If not provided, backs up all documents.", + "optional": true + } + }, + "required": [ + "outputDir" + ] + } + }, + { + "name": "arango_list_collections", + "description": "List all collections in the database", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "arango_create_collection", + "description": "Create a new collection in the database", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the collection to create" + }, + "type": { + "type": "number", + "enum": [ + 2, + 3 + ], + "description": "Type of collection to create (2 = DOCUMENT_COLLECTION, 3 = EDGE_COLLECTION)", + "default": 2 + }, + "waitForSync": { + "type": "boolean", + "description": "If true, wait for data to be synchronized to disk before returning", + "default": false + } + }, + "required": [ + "name" + ] + } + } + ] + }, + "elasticsearch": { + "name": "elasticsearch", + "display_name": "Elasticsearch", + "description": "MCP server implementation that provides Elasticsearch interaction.", + 
"repository": { + "type": "git", + "url": "https://github.com/cr7258/elasticsearch-mcp-server" + }, + "homepage": "https://github.com/cr7258/elasticsearch-mcp-server", + "author": { + "name": "cr7258" + }, + "license": "Apache License Version 2.0", + "categories": [ + "Databases" + ], + "tags": [ + "elasticsearch", + "server" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "elasticsearch-mcp-server" + ], + "env": { + "ELASTIC_HOST": "${ELASTIC_HOST}", + "ELASTIC_USERNAME": "${ELASTIC_USERNAME}", + "ELASTIC_PASSWORD": "${ELASTIC_PASSWORD}" + } + } + }, + "arguments": { + "ELASTIC_HOST": { + "description": "The host URL of the Elasticsearch server.", + "required": true, + "example": "https://localhost:9200" + }, + "ELASTIC_USERNAME": { + "description": "The username for authenticating with the Elasticsearch server.", + "required": true, + "example": "elastic" + }, + "ELASTIC_PASSWORD": { + "description": "The password for authenticating with the Elasticsearch server.", + "required": true, + "example": "test123" + } + }, + "tools": [ + { + "name": "list_indices", + "description": "List all indices in the Elasticsearch cluster", + "inputSchema": { + "properties": {}, + "title": "list_indicesArguments", + "type": "object" + } + }, + { + "name": "get_mapping", + "description": "Get index mapping", + "inputSchema": { + "properties": { + "index": { + "title": "Index", + "type": "string" + } + }, + "required": [ + "index" + ], + "title": "get_mappingArguments", + "type": "object" + } + }, + { + "name": "get_settings", + "description": "Get index settings", + "inputSchema": { + "properties": { + "index": { + "title": "Index", + "type": "string" + } + }, + "required": [ + "index" + ], + "title": "get_settingsArguments", + "type": "object" + } + }, + { + "name": "search_documents", + "description": "Search documents in an index with a custom query", + "inputSchema": { + "properties": { + "index": { + "title": "Index", + "type": "string" 
+ }, + "body": { + "additionalProperties": true, + "title": "Body", + "type": "object" + } + }, + "required": [ + "index", + "body" + ], + "title": "search_documentsArguments", + "type": "object" + } + }, + { + "name": "get_cluster_health", + "description": "Get cluster health status", + "inputSchema": { + "properties": {}, + "title": "get_cluster_healthArguments", + "type": "object" + } + }, + { + "name": "get_cluster_stats", + "description": "Get cluster statistics", + "inputSchema": { + "properties": {}, + "title": "get_cluster_statsArguments", + "type": "object" + } + } + ] + }, + "logfire-mcp": { + "display_name": "Logfire MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/pydantic/logfire-mcp" + }, + "license": "MIT", + "homepage": "https://logfire.pydantic.dev", + "author": { + "name": "pydantic" + }, + "tags": [ + "OpenTelemetry", + "traces", + "metrics", + "logging", + "monitoring" + ], + "arguments": { + "read_token": { + "description": "Logfire read token for accessing the Logfire APIs", + "required": true, + "example": "YOUR_READ_TOKEN" + }, + "base_url": { + "description": "Base URL for the Logfire API (defaults to https://logfire-api.pydantic.dev)", + "required": false, + "example": "https://your-logfire-instance.com" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "logfire-mcp" + ], + "env": { + "LOGFIRE_READ_TOKEN": "YOUR_READ_TOKEN" + }, + "description": "Run using uvx (provided by uv)", + "recommended": true + } + }, + "examples": [ + { + "title": "Find exceptions", + "description": "Find all exceptions in traces from the last hour", + "prompt": "What exceptions occurred in traces from the last hour across all services?" 
+ }, + { + "title": "Analyze file errors", + "description": "Show recent errors in a specific file with trace context", + "prompt": "Show me the recent errors in the file 'app/api.py' with their trace context" + }, + { + "title": "Error count by service", + "description": "Count errors in the last 24 hours per service", + "prompt": "How many errors were there in the last 24 hours per service?" + } + ], + "name": "logfire-mcp", + "description": "This repository contains a Model Context Protocol (MCP) server with tools that can access the OpenTelemetry traces and metrics sent to Logfire.", + "categories": [ + "Dev Tools" + ], + "tools": [ + { + "name": "find_exceptions", + "description": "Get the exceptions on a file.\n\n Args:\n age: Number of minutes to look back, e.g. 30 for last 30 minutes. Maximum allowed value is 7 days.\n ", + "inputSchema": { + "properties": { + "age": { + "title": "Age", + "type": "integer" + } + }, + "required": [ + "age" + ], + "title": "find_exceptionsArguments", + "type": "object" + } + }, + { + "name": "find_exceptions_in_file", + "description": "Get the details about the 10 most recent exceptions on the file.\n\n Args:\n filepath: The path to the file to find exceptions in.\n age: Number of minutes to look back, e.g. 30 for last 30 minutes. Maximum allowed value is 7 days.\n ", + "inputSchema": { + "properties": { + "filepath": { + "title": "Filepath", + "type": "string" + }, + "age": { + "title": "Age", + "type": "integer" + } + }, + "required": [ + "filepath", + "age" + ], + "title": "find_exceptions_in_fileArguments", + "type": "object" + } + }, + { + "name": "arbitrary_query", + "description": "Run an arbitrary query on the Logfire database.\n\n The schema is available via the `get_logfire_records_schema` tool.\n\n Args:\n query: The query to run, as a SQL string.\n age: Number of minutes to look back, e.g. 30 for last 30 minutes. 
Maximum allowed value is 7 days.\n ", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + }, + "age": { + "title": "Age", + "type": "integer" + } + }, + "required": [ + "query", + "age" + ], + "title": "arbitrary_queryArguments", + "type": "object" + } + }, + { + "name": "get_logfire_records_schema", + "description": "Get the records schema from Logfire.\n\n To perform the `arbitrary_query` tool, you can use the `schema://records` to understand the schema.\n ", + "inputSchema": { + "properties": {}, + "title": "get_logfire_records_schemaArguments", + "type": "object" + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "goal-story": { + "name": "goal-story", + "display_name": "Goal Story", + "description": "a Goal Tracker and Visualization Tool for personal and professional development.", + "repository": { + "type": "git", + "url": "https://github.com/hichana/goalstory-mcp" + }, + "homepage": "https://github.com/hichana/goalstory-mcp", + "author": { + "name": "hichana" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "goal tracking", + "storytelling", + "AI" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "goalstory-mcp", + "https://prod-goalstory-rqc2.encr.app", + "${YOUR_API_KEY}" + ] + } + }, + "arguments": { + "YOUR_API_KEY": { + "description": "The API key required to authenticate your requests to the Goal Story service.", + "required": true, + "example": "abcdefgh12345678" + } + }, + "tools": [ + { + "name": "goalstory_about", + "description": "Retrieve information about Goal Story's philosophy and the power of story-driven goal achievement. 
Use this to help users understand the unique approach of Goal Storying.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "goalstory_read_self_user", + "description": "Get the user's profile data including their preferences, belief systems, and past goal history to enable personalized goal storying and context-aware discussions.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "goalstory_update_self_user", + "description": "Update the user's profile including their name, visibility preferences, and personal context. When updating 'about' data, guide the user through questions to understand their motivations, beliefs, and goal-achievement style.", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The user's preferred name for their Goal Story profile." + }, + "about": { + "type": "string", + "description": "Personal context including motivations, beliefs, and goal-achievement preferences gathered through guided questions." + }, + "visibility": { + "type": "number", + "description": "Profile visibility setting where 0 = public (viewable by others) and 1 = private (only visible to user)." + } + } + } + }, + { + "name": "goalstory_count_goals", + "description": "Get the total number of goals in the user's journey. Useful for tracking overall progress and goal management patterns.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "goalstory_create_goal", + "description": "Begin the goal clarification process by creating a new goal. Always discuss and refine the goal with the user before or after saving, ensuring it's well-defined and aligned with their aspirations. Confirm if any adjustments are needed after creation.", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Clear and specific title that captures the essence of the goal." 
+ }, + "description": { + "type": "string", + "description": "Detailed explanation of the goal, including context, motivation, and desired outcomes." + }, + "story_mode": { + "type": "string", + "description": "Narrative approach that shapes how future stories visualize goal achievement." + }, + "belief_mode": { + "type": "string", + "description": "Framework defining how the user's core beliefs and values influence this goal." + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "goalstory_update_goal", + "description": "Update goal details including name, status, description, outcomes, evidence of completion, and story/belief modes that influence how stories are generated.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier of the goal to be updated." + }, + "name": { + "type": "string", + "description": "Refined or clarified goal title." + }, + "status": { + "type": "number", + "description": "Goal progress status: 0 = active/in progress, 1 = successfully completed." + }, + "description": { + "type": "string", + "description": "Enhanced goal context, motivation, or outcome details." + }, + "outcome": { + "type": "string", + "description": "Actual results and impact achieved through goal completion or progress." + }, + "evidence": { + "type": "string", + "description": "Concrete proof, measurements, or observations of goal progress/completion." + }, + "story_mode": { + "type": "string", + "description": "Updated narrative style for future goal achievement stories." + }, + "belief_mode": { + "type": "string", + "description": "Refined understanding of how personal beliefs shape this goal." + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "goalstory_destroy_goal", + "description": "Remove a goal and all its associated steps and stories from the user's journey. 
Use with confirmation to prevent accidental deletion.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier of the goal to be permanently removed." + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "goalstory_read_one_goal", + "description": "Retrieve detailed information about a specific goal to support focused discussion and story creation.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier of the goal to retrieve." + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "goalstory_read_goals", + "description": "Get an overview of the user's goal journey, with optional pagination to manage larger sets of goals.", + "inputSchema": { + "type": "object", + "properties": { + "page": { + "type": "number", + "description": "Page number for viewing subsets of goals (starts at 1)." + }, + "limit": { + "type": "number", + "description": "Maximum number of goals to return per page." + } + } + } + }, + { + "name": "goalstory_read_current_focus", + "description": "Identify which goal and step the user is currently focused on to maintain context in discussions and story creation.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "goalstory_get_story_context", + "description": "Gather rich context about the user, their current goal/step, beliefs, and motivations to create deeply personalized and meaningful stories. Combines user profile data with conversation insights.", + "inputSchema": { + "type": "object", + "properties": { + "goalId": { + "type": "string", + "description": "Unique identifier of the goal for context gathering." + }, + "stepId": { + "type": "string", + "description": "Unique identifier of the specific step for context gathering." + }, + "feedback": { + "type": "string", + "description": "Additional user input to enhance context understanding." 
+ } + }, + "required": [ + "goalId", + "stepId" + ] + } + }, + { + "name": "goalstory_create_steps", + "description": "Formulate actionable steps for a goal through thoughtful discussion. Present the steps for user review either before or after saving, ensuring they're clear and achievable. Confirm if any refinements are needed.", + "inputSchema": { + "type": "object", + "properties": { + "goal_id": { + "type": "string", + "description": "Unique identifier of the goal these steps will help achieve." + }, + "steps": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of clear, actionable step descriptions in sequence." + } + }, + "required": [ + "goal_id", + "steps" + ] + } + }, + { + "name": "goalstory_read_steps", + "description": "Access the action plan for a specific goal, showing all steps in the journey toward achievement.", + "inputSchema": { + "type": "object", + "properties": { + "goal_id": { + "type": "string", + "description": "Unique identifier of the goal whose steps to retrieve." + }, + "page": { + "type": "number", + "description": "Page number for viewing subsets of steps (starts at 1)." + }, + "limit": { + "type": "number", + "description": "Maximum number of steps to return per page." + } + }, + "required": [ + "goal_id" + ] + } + }, + { + "name": "goalstory_read_one_step", + "description": "Get detailed information about a specific step to support focused discussion and story creation.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier of the step to retrieve." + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "goalstory_update_step", + "description": "Update step details including the name, completion status, evidence, and outcome. Use this to track progress and insights.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier of the step to update." 
+ }, + "name": { + "type": "string", + "description": "Refined or clarified step description." + }, + "status": { + "type": "number", + "description": "Step completion status: 0 = pending/in progress, 1 = completed." + }, + "outcome": { + "type": "string", + "description": "Results and impact achieved through completing this step." + }, + "evidence": { + "type": "string", + "description": "Concrete proof or observations of step completion." + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "goalstory_destroy_step", + "description": "Remove a specific step from a goal's action plan.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier of the step to be permanently removed." + } + }, + "required": [ + "id" + ] + } + }, + { + "name": "goalstory_update_step_notes", + "description": "Update step notes with additional context, insights, or reflections in markdown format. Use this to capture valuable information from discussions.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier of the step to update." + }, + "notes": { + "type": "string", + "description": "Additional context, insights, or reflections in markdown format." + } + }, + "required": [ + "id", + "notes" + ] + } + }, + { + "name": "goalstory_create_story", + "description": "Generate and save a highly personalized story that visualizes achievement of the current goal/step. Uses understanding of the user's beliefs, motivations, and context to create engaging mental imagery. If context is needed, gathers it through user discussion and profile data.", + "inputSchema": { + "type": "object", + "properties": { + "goal_id": { + "type": "string", + "description": "Unique identifier of the goal this story supports." + }, + "step_id": { + "type": "string", + "description": "Unique identifier of the specific step this story visualizes." 
+ }, + "title": { + "type": "string", + "description": "Engaging headline that captures the essence of the story." + }, + "story_text": { + "type": "string", + "description": "Detailed narrative that vividly illustrates goal/step achievement." + } + }, + "required": [ + "goal_id", + "step_id", + "title", + "story_text" + ] + } + }, + { + "name": "goalstory_read_stories", + "description": "Access the collection of personalized stories created for a specific goal/step pair, supporting reflection and motivation.", + "inputSchema": { + "type": "object", + "properties": { + "goal_id": { + "type": "string", + "description": "Unique identifier of the goal whose stories to retrieve." + }, + "step_id": { + "type": "string", + "description": "Unique identifier of the step whose stories to retrieve." + }, + "page": { + "type": "number", + "description": "Page number for viewing subsets of stories (starts at 1)." + }, + "limit": { + "type": "number", + "description": "Maximum number of stories to return per page." + } + }, + "required": [ + "goal_id", + "step_id" + ] + } + }, + { + "name": "goalstory_read_one_story", + "description": "Retrieve a specific story to revisit the visualization and mental imagery created for goal achievement.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier of the story to retrieve." 
+ } + }, + "required": [ + "id" + ] + } + } + ] + }, + "heurist-mesh-agent": { + "name": "heurist-mesh-agent", + "display_name": "Mesh Agent", + "description": "Access specialized web3 AI agents for blockchain analysis, smart contract security, token metrics, and blockchain interactions through the [Heurist Mesh network](https://github.com/heurist-network/heurist-agent-framework/tree/main/mesh).", + "repository": { + "type": "git", + "url": "https://github.com/heurist-network/heurist-mesh-mcp-server" + }, + "homepage": "https://github.com/heurist-network/heurist-mesh-mcp-server", + "author": { + "name": "Heurist Network" + }, + "license": "MIT", + "categories": [ + "Finance" + ], + "tags": [ + "Heurist", + "Agent Framework", + "Blockchain Tools" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/heurist-network/heurist-mesh-mcp-server", + "mesh-tool-server" + ], + "env": { + "HEURIST_API_KEY": "${HEURIST_API_KEY}" + } + } + }, + "arguments": { + "HEURIST_API_KEY": { + "description": "API key for accessing the Heurist services.", + "required": true, + "example": "your-api-key-here" + } + }, + "tools": [ + { + "name": "coingeckotokeninfoagent_get_coingecko_id", + "description": "Search for a token by name to get its CoinGecko ID. This tool helps you find the correct CoinGecko ID for any cryptocurrency when you only know its name or symbol. The CoinGecko ID is required for fetching detailed token information using other CoinGecko tools.", + "inputSchema": { + "type": "object", + "properties": { + "token_name": { + "type": "string", + "description": "The token name to search for" + } + }, + "required": [ + "token_name" + ] + } + }, + { + "name": "coingeckotokeninfoagent_get_token_info", + "description": "Get detailed token information and market data using CoinGecko ID. 
This tool provides comprehensive cryptocurrency data including current price, market cap, trading volume, price changes, and more.", + "inputSchema": { + "type": "object", + "properties": { + "coingecko_id": { + "type": "string", + "description": "The CoinGecko ID of the token" + } + }, + "required": [ + "coingecko_id" + ] + } + }, + { + "name": "coingeckotokeninfoagent_get_trending_coins", + "description": "Get the current top trending cryptocurrencies on CoinGecko. This tool retrieves a list of the most popular cryptocurrencies based on trading volume and social media mentions.", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "coingeckotokeninfoagent_get_token_price_multi", + "description": "Fetch price data for multiple tokens at once using CoinGecko IDs. Efficiently retrieves current prices and optional market data for multiple cryptocurrencies in a single API call.", + "inputSchema": { + "type": "object", + "properties": { + "ids": { + "type": "string", + "description": "Comma-separated CoinGecko IDs of the tokens to query" + }, + "vs_currencies": { + "type": "string", + "description": "Comma-separated target currencies (e.g., usd,eur,btc)", + "default": "usd" + }, + "include_market_cap": { + "type": "boolean", + "description": "Include market capitalization data", + "default": false + }, + "include_24hr_vol": { + "type": "boolean", + "description": "Include 24hr trading volume data", + "default": false + }, + "include_24hr_change": { + "type": "boolean", + "description": "Include 24hr price change percentage", + "default": false + }, + "include_last_updated_at": { + "type": "boolean", + "description": "Include timestamp of when the data was last updated", + "default": false + }, + "precision": { + "type": "string", + "description": "Decimal precision for currency values (e.g., 'full' for maximum precision)", + "default": false + } + }, + "required": [ + "ids", + "vs_currencies" + ] + } + }, + { + "name": 
"coingeckotokeninfoagent_get_categories_list", + "description": "Get a list of all available cryptocurrency categories from CoinGecko. This tool retrieves all the category IDs and names that can be used for further category-specific queries.", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "coingeckotokeninfoagent_get_category_data", + "description": "Get market data for all cryptocurrency categories from CoinGecko. This tool retrieves comprehensive information about all categories including market cap, volume, market cap change, top coins in each category, and more.", + "inputSchema": { + "type": "object", + "properties": { + "order": { + "type": "string", + "description": "Sort order for categories (default: market_cap_desc)", + "enum": [ + "market_cap_desc", + "market_cap_asc", + "name_desc", + "name_asc", + "market_cap_change_24h_desc", + "market_cap_change_24h_asc" + ] + } + }, + "required": [] + } + }, + { + "name": "coingeckotokeninfoagent_get_tokens_by_category", + "description": "Get a list of tokens within a specific category. 
This tool retrieves token data for all cryptocurrencies that belong to a particular category, including price, market cap, volume, and price changes.", + "inputSchema": { + "type": "object", + "properties": { + "category_id": { + "type": "string", + "description": "The CoinGecko category ID (e.g., 'layer-1')" + }, + "vs_currency": { + "type": "string", + "description": "The currency to show results in (default: usd)", + "default": "usd" + }, + "order": { + "type": "string", + "description": "Sort order for tokens (default: market_cap_desc)", + "enum": [ + "market_cap_desc", + "market_cap_asc", + "volume_desc", + "volume_asc", + "id_asc", + "id_desc" + ], + "default": "market_cap_desc" + }, + "per_page": { + "type": "integer", + "description": "Number of results per page (1-250, default: 100)", + "default": 100, + "minimum": 1, + "maximum": 250 + }, + "page": { + "type": "integer", + "description": "Page number (default: 1)", + "default": 1, + "minimum": 1 + } + }, + "required": [ + "category_id" + ] + } + }, + { + "name": "dexscreenertokeninfoagent_search_pairs", + "description": "Search for trading pairs on decentralized exchanges by token name, symbol, or address. This tool helps you find specific trading pairs across multiple DEXs and blockchains. It returns information about the pairs including price, volume, liquidity, and the exchanges where they're available. Data comes from DexScreener and covers major DEXs on most blockchains. The search results may be incomplete if the token is not traded on any of the supported chains.", + "inputSchema": { + "type": "object", + "properties": { + "search_term": { + "type": "string", + "description": "Search term (token name, symbol, or address)" + } + }, + "required": [ + "search_term" + ] + } + }, + { + "name": "dexscreenertokeninfoagent_get_specific_pair_info", + "description": "Get detailed information about a specific trading pair on a decentralized exchange by chain and pair address. 
This tool provides comprehensive data about a DEX trading pair including current price, 24h volume, liquidity, price changes, and trading history. Data comes from DexScreener and is updated in real-time. You must specify both the blockchain and the exact pair contract address. The pair address is the LP contract address, not the quote token address.", + "inputSchema": { + "type": "object", + "properties": { + "chain": { + "type": "string", + "description": "Chain identifier (e.g., solana, bsc, ethereum, base)" + }, + "pair_address": { + "type": "string", + "description": "The pair contract address to look up" + } + }, + "required": [ + "chain", + "pair_address" + ] + } + }, + { + "name": "dexscreenertokeninfoagent_get_token_pairs", + "description": "Get all trading pairs for a specific token across decentralized exchanges by chain and token address. This tool retrieves a comprehensive list of all DEX pairs where the specified token is traded on a particular blockchain. It provides data on each pair including the paired token, exchange, price, volume, and liquidity. Data comes from DexScreener and is updated in real-time. You must specify both the blockchain and the exact token contract address.", + "inputSchema": { + "type": "object", + "properties": { + "chain": { + "type": "string", + "description": "Chain identifier (e.g., solana, bsc, ethereum, base)" + }, + "token_address": { + "type": "string", + "description": "The token contract address to look up all pairs for" + } + }, + "required": [ + "chain", + "token_address" + ] + } + }, + { + "name": "elfatwitterintelligenceagent_search_mentions", + "description": "Search for mentions of specific tokens or topics on Twitter. This tool finds discussions about cryptocurrencies, blockchain projects, or other topics of interest. It provides the tweets and mentions of smart accounts (only influential ones) and does not contain all tweets. 
Use this when you want to understand what influential people are saying about a particular token or topic on Twitter. Each of the search keywords should be one word or phrase. A maximum of 5 keywords are allowed. One key word should be one concept. Never use long sentences or phrases as keywords.", + "inputSchema": { + "type": "object", + "properties": { + "keywords": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of keywords to search for" + }, + "days_ago": { + "type": "number", + "description": "Number of days to look back", + "default": 20 + }, + "limit": { + "type": "number", + "description": "Maximum number of results (minimum: 20)", + "default": 20 + } + }, + "required": [ + "keywords" + ] + } + }, + { + "name": "elfatwitterintelligenceagent_search_account", + "description": "Search for a Twitter account with both mention search and account statistics. This tool provides engagement metrics, follower growth, and mentions by smart users. It does not contain all tweets, but only those of influential users. It also identifies the topics and cryptocurrencies they frequently discuss. Data comes from ELFA API and can analyze several weeks of historical activity.", + "inputSchema": { + "type": "object", + "properties": { + "username": { + "type": "string", + "description": "Twitter username to analyze (without @)" + }, + "days_ago": { + "type": "number", + "description": "Number of days to look back for mentions", + "default": 30 + }, + "limit": { + "type": "number", + "description": "Maximum number of mention results", + "default": 20 + } + }, + "required": [ + "username" + ] + } + }, + { + "name": "elfatwitterintelligenceagent_get_trending_tokens", + "description": "Get current trending tokens on Twitter. This tool identifies which cryptocurrencies and tokens are generating the most buzz on Twitter right now. The results include token names, their relative popularity, and sentiment indicators. 
"Use this when you want to discover which cryptocurrencies are currently being discussed most actively on social media. Data comes from ELFA API and represents real-time trends.", + "inputSchema": { + "type": "object", + "properties": { + "time_window": { + "type": "string", + "description": "Time window to analyze", + "default": "24h" + } + } + } + }, + { + "name": "exasearchagent_exa_web_search", + "description": "Search for webpages related to a query using Exa search. This tool performs a web search and returns relevant results including titles, snippets, and URLs. It's useful for finding up-to-date information on any topic, but may fail to find information on niche topics such as small cap crypto projects. Use this when you need to gather information from across the web.", + "inputSchema": { + "type": "object", + "properties": { + "search_term": { + "type": "string", + "description": "The search term" + }, + "limit": { + "type": "number", + "description": "Maximum number of results to return (default: 10)" + } + }, + "required": [ + "search_term" + ] + } + }, + { + "name": "exasearchagent_exa_answer_question", + "description": "Get a direct answer to a question using Exa's answer API. This tool provides concise, factual answers to specific questions by searching and analyzing content from across the web. Use this when you need a direct answer to a specific question rather than a list of search results. It may fail to find information on niche topics such as small cap crypto projects.", + "inputSchema": { + "type": "object", + "properties": { + "question": { + "type": "string", + "description": "The question to answer" + } + }, + "required": [ + "question" + ] + } + }, + { + "name": "firecrawlsearchagent_firecrawl_web_search", + "description": "Execute a web search query by reading the web pages using Firecrawl. It provides more comprehensive information than standard web search by extracting the full contents from the pages. 
"Use this when you need in-depth information on a topic. Data comes from Firecrawl search API. It may fail to find information on niche topics such as small cap crypto projects.", + "inputSchema": { + "type": "object", + "properties": { + "search_term": { + "type": "string", + "description": "The search term to execute" + } + }, + "required": [ + "search_term" + ] + } + }, + { + "name": "firecrawlsearchagent_firecrawl_extract_web_data", + "description": "Extract structured data from one or multiple web pages using natural language instructions using Firecrawl. This tool can process single URLs or entire domains (using wildcards like example.com/*). Use this when you need specific information from websites rather than general search results. You must specify what data to extract from the pages using the 'extraction_prompt' parameter.", + "inputSchema": { + "type": "object", + "properties": { + "urls": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of URLs to extract data from. Can include wildcards (e.g., 'example.com/*') to crawl entire domains." + }, + "extraction_prompt": { + "type": "string", + "description": "Natural language description of what data to extract from the pages." + } + }, + "required": [ + "urls", + "extraction_prompt" + ] + } + }, + { + "name": "goplusanalysisagent_fetch_security_details", + "description": "Fetch security details of a blockchain token contract", + "inputSchema": { + "type": "object", + "properties": { + "contract_address": { + "type": "string", + "description": "The token contract address" + }, + "chain_id": { + "type": "string", + "description": "The blockchain chain ID or 'solana' for Solana tokens. 
Supported chains: Ethereum (1), Optimism (10), Cronos (25), BSC (56), Gnosis (100), HECO (128), Polygon (137), Fantom (250), KCC (321), zkSync Era (324), ETHW (10001), FON (201022), Arbitrum (42161), Avalanche (43114), Linea Mainnet (59144), Base (8453), Tron (tron), Scroll (534352), opBNB (204), Mantle (5000), ZKFair (42766), Blast (81457), Manta Pacific (169), Berachain Artio Testnet (80085), Merlin (4200), Bitlayer Mainnet (200901), zkLink Nova (810180), X Layer Mainnet (196), Solana (solana)", + "default": "1" + } + }, + "required": [ + "contract_address" + ] + } + } + ] + }, + "json": { + "name": "json", + "display_name": "JSON Model Context Protocol", + "description": "JSON handling and processing server with advanced query capabilities using JSONPath syntax and support for array, string, numeric, and date operations.", + "repository": { + "type": "git", + "url": "https://github.com/GongRzhe/JSON-MCP-Server" + }, + "homepage": "https://github.com/GongRzhe/JSON-MCP-Server", + "author": { + "name": "GongRzhe" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "json", + "data querying", + "standardized tools" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@gongrzhe/server-json-mcp@1.0.3" + ] + } + }, + "tools": [ + { + "name": "query", + "description": "Query JSON data using JSONPath syntax", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL of the JSON data source" + }, + "jsonPath": { + "type": "string", + "description": "JSONPath expression (e.g. 
$.store.book[*].author)" + } + }, + "required": [ + "url", + "jsonPath" + ] + } + }, + { + "name": "filter", + "description": "Filter JSON data using conditions", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL of the JSON data source" + }, + "jsonPath": { + "type": "string", + "description": "Base JSONPath expression" + }, + "condition": { + "type": "string", + "description": "Filter condition (e.g. @.price < 10)" + } + }, + "required": [ + "url", + "jsonPath", + "condition" + ] + } + } + ] + }, + "algorand": { + "name": "algorand", + "display_name": "Algorand Implementation", + "description": "A comprehensive MCP server for tooling interactions (40+) and resource accessibility (60+) plus many useful prompts for interacting with the Algorand blockchain.", + "repository": { + "type": "git", + "url": "https://github.com/GoPlausible/algorand-mcp" + }, + "homepage": "https://github.com/GoPlausible/algorand-mcp", + "author": { + "name": "GoPlausible", + "url": "https://goplausible.com" + }, + "license": "MIT", + "categories": [ + "Finance" + ], + "tags": [ + "Algorand", + "Blockchain" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "algorand-mcp" + ], + "env": { + "NFD_API_KEY": "${NFD_API_KEY}", + "NFD_API_URL": "${NFD_API_URL}", + "ALGORAND_ALGOD": "${ALGORAND_ALGOD}", + "ALGORAND_TOKEN": "${ALGORAND_TOKEN}", + "ALGORAND_INDEXER": "${ALGORAND_INDEXER}", + "ALGORAND_INDEXER_API": "${ALGORAND_INDEXER_API}", + "ALGORAND_INDEXER_PORT": "${ALGORAND_INDEXER_PORT}", + "ALGORAND_NETWORK": "${ALGORAND_NETWORK}" + } + } + }, + "arguments": { + "NFD_API_KEY": { + "description": "API key for the NFD service, required for accessing domain functionalities.", + "required": true, + "example": "your_nfd_api_key_here" + }, + "NFD_API_URL": { + "description": "The URL endpoint for the NFD API service.", + "required": false, + "example": "https://api.nf.domains" + }, + 
"ALGORAND_ALGOD": { + "description": "The URL endpoint for the Algorand Algod node.", + "required": true, + "example": "https://testnet-api.algonode.cloud" + }, + "ALGORAND_TOKEN": { + "description": "The token required to interact with the Algorand Algod node, usually a blank string for testnets.", + "required": false, + "example": "" + }, + "ALGORAND_INDEXER": { + "description": "The URL endpoint for the Algorand Indexer service.", + "required": true, + "example": "https://testnet-idx.algonode.cloud" + }, + "ALGORAND_INDEXER_API": { + "description": "The API endpoint for accessing Algorand indexer functionalities.", + "required": false, + "example": "https://testnet-idx.algonode.cloud/v2" + }, + "ALGORAND_INDEXER_PORT": { + "description": "The port for the Algorand indexer service, usually left blank for default settings.", + "required": false, + "example": "" + }, + "ALGORAND_NETWORK": { + "description": "The network type being used (e.g., testnet or mainnet).", + "required": true, + "example": "testnet" + } + } + }, + "mcp-aiven": { + "display_name": "Aiven MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/Aiven-Open/mcp-aiven" + }, + "homepage": "[NOT GIVEN]", + "author": { + "name": "Aiven-Open" + }, + "license": "[NOT GIVEN]", + "tags": [ + "PostgreSQL", + "Kafka", + "ClickHouse", + "Valkey", + "OpenSearch" + ], + "arguments": { + "AIVEN_BASE_URL": { + "description": "The Aiven API url", + "required": true, + "example": "https://api.aiven.io" + }, + "AIVEN_TOKEN": { + "description": "The authentication token", + "required": true, + "example": "$AIVEN_TOKEN" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/Aiven-Open/mcp-aiven.git", + "mcp-aiven" + ], + "env": { + "AIVEN_BASE_URL": "https://api.aiven.io", + "AIVEN_TOKEN": "$AIVEN_TOKEN" + }, + "description": "Run using uv package manager", + "recommended": true + } + }, + "examples": [ + { + "title": 
"List Projects", + "description": "List all projects on your Aiven account", + "prompt": "List all my Aiven projects" + }, + { + "title": "List Services", + "description": "List all services in a specific Aiven project", + "prompt": "Show me all services in my Aiven project" + }, + { + "title": "Get Service Details", + "description": "Get the detail of your service in a specific Aiven project", + "prompt": "Get details about my PostgreSQL service in Aiven" + } + ], + "name": "mcp-aiven", + "description": "A [Model Context Protocol](https://modelcontextprotocol.io/) (MCP) server for Aiven.", + "categories": [ + "Databases" + ], + "is_official": true + }, + "keycloak-mcp": { + "name": "keycloak-mcp", + "display_name": "Keycloak Model Context Protocol", + "description": "This MCP server enables natural language interaction with Keycloak for user and realm management including creating, deleting, and listing users and realms.", + "repository": { + "type": "git", + "url": "https://github.com/ChristophEnglisch/keycloak-model-context-protocol" + }, + "homepage": "https://github.com/ChristophEnglisch/keycloak-model-context-protocol", + "author": { + "name": "ChristophEnglisch" + }, + "license": "MIT", + "categories": [ + "System Tools" + ], + "tags": [ + "Keycloak", + "User Management", + "Realm Management" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "keycloak-model-context-protocol" + ], + "env": { + "KEYCLOAK_URL": "${KEYCLOAK_URL}", + "KEYCLOAK_ADMIN": "${KEYCLOAK_ADMIN}", + "KEYCLOAK_ADMIN_PASSWORD": "${KEYCLOAK_ADMIN_PASSWORD}" + } + } + }, + "arguments": { + "KEYCLOAK_URL": { + "description": "The URL of the Keycloak server instance that the MCP will connect to.", + "required": true, + "example": "http://localhost:8080" + }, + "KEYCLOAK_ADMIN": { + "description": "The admin username for accessing the Keycloak server.", + "required": true, + "example": "admin" + }, + "KEYCLOAK_ADMIN_PASSWORD": { + "description": "The 
password for the admin user to access the Keycloak server.", + "required": true, + "example": "admin" + } + } + }, + "coin-api-mcp": { + "name": "coin-api-mcp", + "display_name": "Coin API", + "description": "Provides access to [coinmarketcap](https://coinmarketcap.com/) cryptocurrency data.", + "repository": { + "type": "git", + "url": "https://github.com/longmans/coin_api_mcp" + }, + "homepage": "https://github.com/longmans/coin_api_mcp", + "author": { + "name": "longmans" + }, + "license": "MIT", + "categories": [ + "Finance" + ], + "tags": [ + "CoinMarketCap", + "Cryptocurrency", + "Data" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/longmans/coin_api_mcp", + "coin-api" + ], + "env": { + "COINMARKETCAP_API_KEY": "${COINMARKETCAP_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Fetch List of Coins", + "description": "Retrieve a paginated list of all active cryptocurrencies with market data.", + "prompt": "Call `listing-coins` to get the latest cryptocurrency listings." + }, + { + "title": "Get Coin Information", + "description": "Retrieve detailed information about a specific cryptocurrency by its ID or symbol.", + "prompt": "Call `get-coin-info` using the cryptocurrency ID." 
+ } + ], + "arguments": { + "COINMARKETCAP_API_KEY": { + "description": "The API key required to access CoinMarketCap data.", + "required": true, + "example": "your_api_key_here" + } + }, + "tools": [ + { + "name": "listing-coins", + "description": "Returns a paginated list of all active cryptocurrencies with latest market data", + "inputSchema": { + "type": "object", + "properties": { + "start": { + "type": "integer", + "description": "Optionally offset the start (1-based index) of the paginated list of items to return.", + "minimum": 1 + }, + "limit": { + "type": "integer", + "description": "Optionally specify the number of results to return.", + "minimum": 1, + "maximum": 5000 + }, + "price_min": { + "type": "number", + "description": "Optionally specify a threshold of minimum USD price to filter results by.", + "minimum": 0 + }, + "price_max": { + "type": "number", + "description": "Optionally specify a threshold of maximum USD price to filter results by.", + "minimum": 0 + }, + "market_cap_min": { + "type": "number", + "description": "Optionally specify a threshold of minimum market cap to filter results by.", + "minimum": 0 + }, + "market_cap_max": { + "type": "number", + "description": "Optionally specify a threshold of maximum market cap to filter results by.", + "minimum": 0 + }, + "volume_24h_min": { + "type": "number", + "description": "Optionally specify a threshold of minimum 24 hour USD volume to filter results by.", + "minimum": 0 + }, + "volume_24h_max": { + "type": "number", + "description": "Optionally specify a threshold of maximum 24 hour USD volume to filter results by.", + "minimum": 0 + }, + "circulating_supply_min": { + "type": "number", + "description": "Optionally specify a threshold of minimum circulating supply to filter results by.", + "minimum": 0 + }, + "circulating_supply_max": { + "type": "number", + "description": "Optionally specify a threshold of maximum circulating supply to filter results by.", + "minimum": 0 + }, + 
"percent_change_24h_min": { + "type": "number", + "description": "Optionally specify a threshold of minimum 24 hour percent change to filter results by.", + "minimum": -100 + }, + "percent_change_24h_max": { + "type": "number", + "description": "Optionally specify a threshold of maximum 24 hour percent change to filter results by.", + "minimum": -100 + }, + "convert": { + "type": "string", + "description": "Optionally calculate market quotes in up to 120 currencies at once by passing a comma-separated list of cryptocurrency or fiat currency symbols." + }, + "convert_id": { + "type": "string", + "description": "Optionally calculate market quotes by CoinMarketCap ID instead of symbol." + }, + "sort": { + "type": "string", + "description": "What field to sort the list of cryptocurrencies by.", + "enum": [ + "market_cap", + "name", + "symbol", + "date_added", + "market_cap_strict", + "price", + "circulating_supply", + "total_supply", + "max_supply", + "num_market_pairs", + "volume_24h", + "percent_change_1h", + "percent_change_24h", + "percent_change_7d", + "market_cap_by_total_supply_strict", + "volume_7d", + "volume_30d" + ] + }, + "sort_dir": { + "type": "string", + "description": "The direction in which to order cryptocurrencies against the specified sort.", + "enum": [ + "asc", + "desc" + ] + }, + "cryptocurrency_type": { + "type": "string", + "description": "The type of cryptocurrency to include.", + "enum": [ + "all", + "coins", + "tokens" + ] + }, + "tag": { + "type": "string", + "description": "The tag of cryptocurrency to include.", + "enum": [ + "all", + "defi", + "filesharing" + ] + }, + "aux": { + "type": "string", + "description": "Optionally specify a comma-separated list of supplemental data fields to return." 
+ } + }, + "required": [] + } + }, + { + "name": "get-coin-info", + "description": "Get coins' information includes details like logo, description, official website URL, social links, and links to a cryptocurrency's technical documentation.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "One or more comma-separated CoinMarketCap cryptocurrency IDs. Example: \"1,2\"" + }, + "slug": { + "type": "string", + "description": "Alternatively pass a comma-separated list of cryptocurrency slugs. Example: \"bitcoin,ethereum\"" + }, + "symbol": { + "type": "string", + "description": "Alternatively pass one or more comma-separated cryptocurrency symbols. Example: \"BTC,ETH\"" + }, + "address": { + "type": "string", + "description": "Alternatively pass in a contract address. Example: \"0xc40af1e4fecfa05ce6bab79dcd8b373d2e436c4e\"" + }, + "skip_invalid": { + "type": "boolean", + "description": "Pass true to relax request validation rules. When requesting records on multiple cryptocurrencies an error is returned if any invalid cryptocurrencies are requested or a cryptocurrency does not have matching records in the requested timeframe. If set to true, invalid lookups will be skipped allowing valid cryptocurrencies to still be returned.", + "default": false + }, + "aux": { + "type": "string", + "description": "Optionally specify a comma-separated list of supplemental data fields to return. Pass urls,logo,description,tags,platform,date_added,notice,status to include all auxiliary fields." + } + }, + "required": [] + } + }, + { + "name": "get-coin-quotes", + "description": "the latest market quote for 1 or more cryptocurrencies. Use the \"convert\" option to return market values in multiple fiat and cryptocurrency conversions in the same call.", + "inputSchema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "One or more comma-separated cryptocurrency CoinMarketCap IDs. 
Example: 1,2" + }, + "slug": { + "type": "string", + "description": "Alternatively pass a comma-separated list of cryptocurrency slugs. Example: \"bitcoin,ethereum\"" + }, + "symbol": { + "type": "string", + "description": "Alternatively pass one or more comma-separated cryptocurrency symbols. Example: \"BTC,ETH\"" + }, + "convert": { + "type": "string", + "description": "Optionally calculate market quotes in up to 120 currencies at once by passing a comma-separated list of cryptocurrency or fiat currency symbols." + }, + "convert_id": { + "type": "string", + "description": "Optionally calculate market quotes by CoinMarketCap ID instead of symbol. This option is identical to convert outside of ID format." + }, + "aux": { + "type": "string", + "description": "\"num_market_pairs,cmc_rank,date_added,tags,platform,max_supply,circulating_supply,total_supply,is_active,is_fiat\". Optionally specify a comma-separated list of supplemental data fields to return." + }, + "skip_invalid": { + "type": "boolean", + "description": "Pass true to relax request validation rules.", + "default": false + } + }, + "required": [] + } + } + ] + }, + "pif": { + "name": "pif", + "display_name": "PIF Framework", + "description": "A Personal Intelligence Framework (PIF), providing tools for file operations, structured reasoning, and journal-based documentation to support continuity and evolving human-AI collaboration across sessions.", + "repository": { + "type": "git", + "url": "https://github.com/hungryrobot1/MCP-PIF" + }, + "homepage": "https://github.com/hungryrobot1/MCP-PIF", + "author": { + "name": "hungryrobot1" + }, + "license": "MIT", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "PIF", + "TypeScript", + "Node.js" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/hungryrobot1/MCP-PIF" + ] + } + }, + "examples": [ + { + "title": "Reasoning Example", + "description": "Create a structured thought 
pattern.", + "prompt": "reason: { thoughts: [{ content: 'Initial observation' }, { content: 'Building on previous thought', relationType: 'sequence', relationTo: 0 }] }" + }, + { + "title": "Journal Creation Example", + "description": "Document development for future reference.", + "prompt": "journal_create: { title: 'Implementation Pattern', content: 'Insights about development...', tags: ['development', 'patterns'] }" + } + ], + "arguments": { + "MCP_WORKSPACE_ROOT": { + "description": "Environment variable to specify a workspace location for the server.", + "required": false, + "example": "/path/to/workspace" + }, + "MCP_CONFIG": { + "description": "Environment variable containing a JSON string of configuration options for the server.", + "required": false, + "example": "{\"key\": \"value\"}" + } + } + }, + "graphql-schema": { + "name": "graphql-schema", + "display_name": "GraphQL Schema Model Context Protocol", + "description": "Allow LLMs to explore large GraphQL schemas without bloating the context.", + "repository": { + "type": "git", + "url": "https://github.com/hannesj/mcp-graphql-schema" + }, + "homepage": "https://github.com/hannesj/mcp-graphql-schema", + "author": { + "name": "hannesj" + }, + "license": "[NOT FOUND]", + "categories": [ + "Dev Tools" + ], + "tags": [ + "GraphQL", + "LLMs", + "Schema", + "API" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "mcp-graphql-schema", + "/ABSOLUTE/PATH/TO/schema.graphqls" + ] + } + }, + "examples": [ + { + "title": "List all query fields", + "description": "Retrieve a list of all available root-level fields for GraphQL queries.", + "prompt": "What query fields are available in this GraphQL schema?" + }, + { + "title": "User query field details", + "description": "Get detailed definition for the \"user\" query field.", + "prompt": "Show me the details of the \"user\" query field." 
+ }, + { + "title": "Mutation operations", + "description": "List all mutation operations that can be performed in the schema.", + "prompt": "What mutation operations can I perform in this schema?" + }, + { + "title": "List all types", + "description": "Retrieve a list of all types defined in the schema.", + "prompt": "List all types defined in this schema." + }, + { + "title": "Type definition", + "description": "Show the definition of the \"Product\" type.", + "prompt": "Show me the definition of the \"Product\" type." + }, + { + "title": "Order type fields", + "description": "List all fields of the \"Order\" type.", + "prompt": "List all fields of the \"Order\" type." + }, + { + "title": "Search for types and fields", + "description": "Search the schema for types and fields related to \"customer.\"", + "prompt": "Search for types and fields related to \"customer\"." + } + ] + }, + "hyperbrowser": { + "display_name": "Hyperbrowser MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/hyperbrowserai/mcp" + }, + "homepage": "https://docs.hyperbrowser.ai/", + "author": { + "name": "hyperbrowserai" + }, + "license": "MIT", + "tags": [ + "browser", + "web", + "scraping", + "crawling", + "automation" + ], + "arguments": { + "HYPERBROWSER_API_KEY": { + "description": "Your Hyperbrowser API key", + "required": true, + "example": "YOUR-API-KEY" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "hyperbrowser-mcp", + "${HYPERBROWSER_API_KEY}" + ], + "package": "hyperbrowser-mcp", + "env": {}, + "description": "Install via npm", + "recommended": true + }, + "custom": { + "type": "custom", + "command": "node", + "args": [ + "dist/server.js" + ], + "env": {}, + "description": "Run from source code after building", + "recommended": false + } + }, + "examples": [ + { + "title": "Scrape webpage", + "description": "Extract formatted content from any webpage", + "prompt": "Use the scrape_webpage tool to get the 
content from https://example.com" + }, + { + "title": "Extract structured data", + "description": "Convert HTML into structured JSON", + "prompt": "Use the extract_structured_data tool to get product information from an e-commerce page" + }, + { + "title": "Web search", + "description": "Search the web using Bing", + "prompt": "Use the search_with_bing tool to find information about climate change" + } + ], + "name": "hyperbrowser", + "description": "This is Hyperbrowser's Model Context Protocol (MCP) Server. It provides various tools to scrape, extract structured data, and crawl webpages. It also provides easy access to general purpose browser agents like OpenAI's CUA, Anthropic's Claude Computer Use, and Browser Use.", + "categories": [ + "Web Services" + ], + "tools": [ + { + "name": "scrape_webpage", + "description": "Scrape a webpage and extract its content in various formats. This tool allows fetching content from a single URL with configurable browser behavior options. Use this for extracting text content, HTML structure, collecting links, or capturing screenshots of webpages.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "format": "uri", + "description": "The URL of the webpage to scrape" + }, + "sessionOptions": { + "type": "object", + "properties": { + "useProxy": { + "type": "boolean", + "default": false, + "description": "Whether to use a proxy. Recommended false." + }, + "useStealth": { + "type": "boolean", + "default": false, + "description": "Whether to use stealth mode. Recommended false." + }, + "solveCaptchas": { + "type": "boolean", + "default": false, + "description": "Whether to solve captchas. Recommended false." + }, + "acceptCookies": { + "type": "boolean", + "default": false, + "description": "Whether to automatically close the accept cookies popup. Recommended false." + } + }, + "additionalProperties": false, + "description": "Options for the browser session. 
Avoid setting these if not mentioned explicitly" + }, + "outputFormat": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "markdown", + "html", + "links", + "screenshot" + ] + }, + "minItems": 1, + "description": "The format of the output" + } + }, + "required": [ + "url", + "outputFormat" + ] + } + }, + { + "name": "crawl_webpages", + "description": "Crawl a website starting from a URL and explore linked pages. This tool allows systematic collection of content from multiple pages within a domain. Use this for larger data collection tasks, content indexing, or site mapping.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "format": "uri", + "description": "The URL of the webpage to crawl." + }, + "sessionOptions": { + "type": "object", + "properties": { + "useProxy": { + "type": "boolean", + "default": false, + "description": "Whether to use a proxy. Recommended false." + }, + "useStealth": { + "type": "boolean", + "default": false, + "description": "Whether to use stealth mode. Recommended false." + }, + "solveCaptchas": { + "type": "boolean", + "default": false, + "description": "Whether to solve captchas. Recommended false." + }, + "acceptCookies": { + "type": "boolean", + "default": false, + "description": "Whether to automatically close the accept cookies popup. Recommended false." + } + }, + "additionalProperties": false, + "description": "Options for the browser session. 
Avoid setting these if not mentioned explicitly" + }, + "outputFormat": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "markdown", + "html", + "links", + "screenshot" + ] + }, + "minItems": 1, + "description": "The format of the output" + }, + "followLinks": { + "type": "boolean", + "description": "Whether to follow links on the crawled webpages" + }, + "maxPages": { + "type": "integer", + "exclusiveMinimum": 0, + "minimum": 1, + "maximum": 100, + "default": 10 + }, + "ignoreSitemap": { + "type": "boolean", + "default": false + } + }, + "required": [ + "url", + "outputFormat", + "followLinks" + ] + } + }, + { + "name": "extract_structured_data", + "description": "Extract structured data from a webpage. This tool allows you to extract structured data from a webpage using a schema.", + "inputSchema": { + "type": "object", + "properties": { + "urls": { + "type": "array", + "items": { + "type": "string", + "format": "uri" + }, + "description": "The list of URLs of the webpages to extract structured information from. Can include wildcards (e.g. https://example.com/*)" + }, + "prompt": { + "type": "string", + "description": "The prompt to use for the extraction" + }, + "schema": { + "description": "The json schema to use for the extraction. Must provide an object describing a spec compliant json schema, any other types are invalid." + }, + "sessionOptions": { + "type": "object", + "properties": { + "useProxy": { + "type": "boolean", + "default": false, + "description": "Whether to use a proxy. Recommended false." + }, + "useStealth": { + "type": "boolean", + "default": false, + "description": "Whether to use stealth mode. Recommended false." + }, + "solveCaptchas": { + "type": "boolean", + "default": false, + "description": "Whether to solve captchas. Recommended false." + }, + "acceptCookies": { + "type": "boolean", + "default": false, + "description": "Whether to automatically close the accept cookies popup. Recommended false." 
+ } + }, + "additionalProperties": false, + "description": "Options for the browser session. Avoid setting these if not mentioned explicitly" + } + }, + "required": [ + "urls", + "prompt" + ] + } + }, + { + "name": "browser_use_agent", + "description": "This tool employs an open-source browser automation agent optimized specifically for fast, efficient, and cost-effective browser tasks using a cloud browser. It requires explicit, detailed instructions to perform highly specific interactions quickly.\n\nOptimal for tasks requiring:\n- Precise, explicitly defined interactions and actions\n- Speed and efficiency with clear, unambiguous instructions\n- Cost-effective automation at scale with straightforward workflows\n\nBest suited use cases include:\n- Explicitly defined registration and login processes\n- Clearly guided navigation through web apps\n- Structured, step-by-step web scraping with detailed guidance\n- Extracting data via explicitly specified browser interactions\n\nYou must provide extremely detailed step-by-step instructions, including exact elements, actions, and explicit context. Clearly define the desired outcome for optimal results. Returns the completed result or an error message if issues arise.\n\nNote: This agent trades off flexibility for significantly faster performance and lower costs compared to Claude and OpenAI agents.", + "inputSchema": { + "type": "object", + "properties": { + "task": { + "type": "string", + "description": "The task to perform inside the browser" + }, + "sessionOptions": { + "type": "object", + "properties": { + "useProxy": { + "type": "boolean", + "default": false, + "description": "Whether to use a proxy. Recommended false." + }, + "useStealth": { + "type": "boolean", + "default": false, + "description": "Whether to use stealth mode. Recommended false." + }, + "solveCaptchas": { + "type": "boolean", + "default": false, + "description": "Whether to solve captchas. Recommended false." 
+ }, + "acceptCookies": { + "type": "boolean", + "default": false, + "description": "Whether to automatically close the accept cookies popup. Recommended false." + } + }, + "additionalProperties": false, + "description": "Options for the browser session. Avoid setting these if not mentioned explicitly" + }, + "returnStepInfo": { + "type": "boolean", + "default": false, + "description": "Whether to return step-by-step information about the task.Should be false by default. May contain excessive information, so we strongly recommend setting this to false." + }, + "maxSteps": { + "type": "integer", + "exclusiveMinimum": 0, + "minimum": 1, + "maximum": 100, + "default": 25 + } + }, + "required": [ + "task" + ] + } + }, + { + "name": "openai_computer_use_agent", + "description": "This tool utilizes OpenAI's model to autonomously execute general-purpose browser-based tasks with balanced performance and reliability using a cloud browser. It handles complex interactions effectively with practical reasoning and clear execution.\n\nOptimal for tasks requiring:\n- Reliable, general-purpose browser automation\n- Clear, structured interactions with moderate complexity\n- Efficient handling of common web tasks and workflows\n\nBest suited use cases include:\n- Standard multi-step registration or form submissions\n- Navigating typical web applications requiring multiple interactions\n- Conducting structured web research tasks\n- Extracting data through interactive web processes\n\nProvide a clear step-by-step description, necessary context, and expected outcomes. Returns the completed result or an error message if issues arise.", + "inputSchema": { + "type": "object", + "properties": { + "task": { + "type": "string", + "description": "The task to perform inside the browser" + }, + "sessionOptions": { + "type": "object", + "properties": { + "useProxy": { + "type": "boolean", + "default": false, + "description": "Whether to use a proxy. Recommended false." 
+ }, + "useStealth": { + "type": "boolean", + "default": false, + "description": "Whether to use stealth mode. Recommended false." + }, + "solveCaptchas": { + "type": "boolean", + "default": false, + "description": "Whether to solve captchas. Recommended false." + }, + "acceptCookies": { + "type": "boolean", + "default": false, + "description": "Whether to automatically close the accept cookies popup. Recommended false." + } + }, + "additionalProperties": false, + "description": "Options for the browser session. Avoid setting these if not mentioned explicitly" + }, + "returnStepInfo": { + "type": "boolean", + "default": false, + "description": "Whether to return step-by-step information about the task.Should be false by default. May contain excessive information, so we strongly recommend setting this to false." + }, + "maxSteps": { + "type": "integer", + "exclusiveMinimum": 0, + "minimum": 1, + "maximum": 100, + "default": 25 + } + }, + "required": [ + "task" + ] + } + }, + { + "name": "claude_computer_use_agent", + "description": "This tool leverages Anthropic's Claude model to autonomously execute complex browser tasks with sophisticated reasoning capabilities using a cloud browser. It specializes in handling intricate, nuanced, or highly context-sensitive web interactions.\n\nOptimal for tasks requiring:\n- Complex reasoning over multiple web pages\n- Nuanced interpretation and flexible decision-making\n- Human-like interaction with detailed context awareness\n\nBest suited use cases include:\n- Multi-step processes requiring reasoning (e.g., detailed registrations or onboarding)\n- Interacting intelligently with advanced web apps\n- Conducting in-depth research with complex conditions\n- Extracting information from dynamic or interactive websites\n\nProvide detailed task instructions, relevant context, and clearly specify the desired outcome for best results. 
Returns the completed result or an error message if issues arise.", + "inputSchema": { + "type": "object", + "properties": { + "task": { + "type": "string", + "description": "The task to perform inside the browser" + }, + "sessionOptions": { + "type": "object", + "properties": { + "useProxy": { + "type": "boolean", + "default": false, + "description": "Whether to use a proxy. Recommended false." + }, + "useStealth": { + "type": "boolean", + "default": false, + "description": "Whether to use stealth mode. Recommended false." + }, + "solveCaptchas": { + "type": "boolean", + "default": false, + "description": "Whether to solve captchas. Recommended false." + }, + "acceptCookies": { + "type": "boolean", + "default": false, + "description": "Whether to automatically close the accept cookies popup. Recommended false." + } + }, + "additionalProperties": false, + "description": "Options for the browser session. Avoid setting these if not mentioned explicitly" + }, + "returnStepInfo": { + "type": "boolean", + "default": false, + "description": "Whether to return step-by-step information about the task.Should be false by default. May contain excessive information, so we strongly recommend setting this to false." + }, + "maxSteps": { + "type": "integer", + "exclusiveMinimum": 0, + "minimum": 1, + "maximum": 100, + "default": 25 + } + }, + "required": [ + "task" + ] + } + }, + { + "name": "search_with_bing", + "description": "Search the web using Bing. This tool allows you to search the web using bing.com", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The search query to submit to Bing" + }, + "sessionOptions": { + "type": "object", + "properties": { + "useProxy": { + "type": "boolean", + "default": false, + "description": "Whether to use a proxy. Recommended false." + }, + "useStealth": { + "type": "boolean", + "default": false, + "description": "Whether to use stealth mode. Recommended false." 
+ }, + "solveCaptchas": { + "type": "boolean", + "default": false, + "description": "Whether to solve captchas. Recommended false." + }, + "acceptCookies": { + "type": "boolean", + "default": false, + "description": "Whether to automatically close the accept cookies popup. Recommended false." + } + }, + "additionalProperties": false, + "description": "Options for the browser session. Avoid setting these if not mentioned explicitly" + }, + "numResults": { + "type": "integer", + "exclusiveMinimum": 0, + "minimum": 1, + "maximum": 50, + "default": 10, + "description": "Number of search results to return" + } + }, + "required": [ + "query" + ] + } + } + ], + "prompts": [], + "resources": [ + { + "uri": "hyperbrowser:///", + "name": "Welcome to Hyperbrowser | Hyperbrowser", + "description": "Hyperbrowser documentation provides an introduction to web scraping and automation using the Hyperbrowser tool.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///what-are-headless-browsers", + "name": "What are Headless browsers ? 
| Hyperbrowser", + "description": "The page explains headless browsers and their role in Hyperbrowser for web scraping and automation tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///get-started/quickstart/scraping", + "name": "Scraping | Hyperbrowser", + "description": "The \"Scraping\" page in Hyperbrowser details how to extract data from websites using the tool's functionalities.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///get-started/quickstart/crawling", + "name": "Crawling | Hyperbrowser", + "description": "The \"Crawling\" page of Hyperbrowser covers the tool's web scraping capabilities and how to implement them.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///get-started/quickstart", + "name": "Quickstart | Hyperbrowser", + "description": "Quickstart guide for Hyperbrowser provides initial setup and functionality instructions for effective web scraping and automation.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///get-started/quickstart/puppeteer", + "name": "Puppeteer | Hyperbrowser", + "description": "Puppeteer integration with Hyperbrowser enables web scraping and automation through headless browser control.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///get-started/quickstart/playwright", + "name": "Playwright | Hyperbrowser", + "description": "The page discusses using Playwright with Hyperbrowser for web scraping and automation tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///get-started/quickstart/selenium", + "name": "Selenium | Hyperbrowser", + "description": "Selenium integration with Hyperbrowser allows for enhanced web scraping and automation capabilities.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///sessions/overview", + "name": 
"Overview | Hyperbrowser", + "description": "Overview of Hyperbrowser, a tool for web scraping and automation, detailing its features and functionalities.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///sessions/overview/session-parameters", + "name": "Session Parameters | Hyperbrowser", + "description": "This page details session parameters for configuring Hyperbrowser's web scraping and automation features.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///sessions/advanced-privacy-and-anti-detection", + "name": "Advanced Privacy & Anti-Detection | Hyperbrowser", + "description": "This page discusses Hyperbrowser's advanced privacy features and anti-detection capabilities for web scraping and automation.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///sessions/profiles", + "name": "Profiles | Hyperbrowser", + "description": "The \"Profiles\" page in Hyperbrowser outlines how to manage user profiles for data scraping and automation tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///sessions/recordings", + "name": "Recordings | Hyperbrowser", + "description": "The page covers Hyperbrowser's recording feature for efficient web scraping and automation processes.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///sessions/live-view", + "name": "Live View | Hyperbrowser", + "description": "The Live View feature in Hyperbrowser allows real-time monitoring and interaction with web scraping tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///sessions/extensions", + "name": "Extensions | Hyperbrowser", + "description": "The page discusses extensions for Hyperbrowser, enhancing its web scraping and automation capabilities.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": 
"hyperbrowser:///web-scraping/scrape", + "name": "Scrape | Hyperbrowser", + "description": "\"Scrape\" page in Hyperbrowser documentation focuses on scraping data from web pages using the tool's features.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///web-scraping/crawl", + "name": "Crawl | Hyperbrowser", + "description": "The page discusses how to utilize Hyperbrowser for effective web crawling.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///web-scraping/extract", + "name": "Extract | Hyperbrowser", + "description": "The Extract page of Hyperbrowser provides guidelines for web scraping and data extraction techniques using the tool.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///agents/browser-use", + "name": "Browser Use | Hyperbrowser", + "description": "The page discusses using Hyperbrowser for web scraping and automation tasks via browser interactions.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///agents/claude-computer-use", + "name": "Claude Computer Use | Hyperbrowser", + "description": "The page provides guidelines on using Claude with Hyperbrowser for effective web scraping and automation.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///agents/openai-cua", + "name": "OpenAI CUA | Hyperbrowser", + "description": "The page discusses the integration of OpenAI's CUA with Hyperbrowser for enhanced web scraping and automation capabilities.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///guides/ai-function-calling", + "name": "AI Function Calling | Hyperbrowser", + "description": "The page discusses AI function calling features within Hyperbrowser for enhanced web scraping and automation.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///guides/scraping", + "name": 
"Scraping | Hyperbrowser", + "description": "The page covers web scraping techniques and documentation for using Hyperbrowser effectively.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///guides/extract-information-with-an-llm", + "name": "Extract Information with an LLM | Hyperbrowser", + "description": "Learn how to extract information using a Large Language Model with Hyperbrowser.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///guides/using-hyperbrowser-session", + "name": "Using Hyperbrowser Session | Hyperbrowser", + "description": "The page describes how to use sessions in Hyperbrowser for efficient web scraping and automation.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///guides/captcha-solving", + "name": "CAPTCHA Solving | Hyperbrowser", + "description": "Hyperbrowser provides tools and guidance for CAPTCHA solving in web scraping and automation processes.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///guides/model-context-protocol", + "name": "Model Context Protocol | Hyperbrowser", + "description": "The page covers the Model Context Protocol used in Hyperbrowser for web scraping and automation.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks", + "name": "SDKs | Hyperbrowser", + "description": "The page discusses Hyperbrowser SDKs for web scraping and automation, including features and usage details.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/node", + "name": "Node | Hyperbrowser", + "description": "Overview of the Node module in Hyperbrowser for web scraping and automation.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/node/sessions", + "name": "Sessions | Hyperbrowser", + "description": "The page discusses 
sessions in Hyperbrowser, detailing how to manage and utilize them effectively for web scraping tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/node/profiles", + "name": "Profiles | Hyperbrowser", + "description": "The \"Profiles\" page of Hyperbrowser covers user profiles and their management within the tool.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/node/scrape", + "name": "Scrape | Hyperbrowser", + "description": "The \"Scrape\" page of Hyperbrowser outlines techniques and tools for web scraping and automation.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/node/crawl", + "name": "Crawl | Hyperbrowser", + "description": "The \"Crawl\" page of Hyperbrowser details how to use the tool for web scraping and automated data extraction.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/node/extensions", + "name": "Extensions | Hyperbrowser", + "description": "The Extensions page for Hyperbrowser details available extensions that enhance web scraping and automation functionalities.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/python", + "name": "Python | Hyperbrowser", + "description": "The page provides documentation on using Hyperbrowser with Python for web scraping and automation tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/python/sessions", + "name": "Sessions | Hyperbrowser", + "description": "The page discusses managing sessions in Hyperbrowser for effective web scraping and automation tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/python/profiles", + "name": "Profiles | Hyperbrowser", + "description": "The \"Profiles\" page in Hyperbrowser 
documentation explains how to manage and use user profiles for web scraping tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/python/scrape", + "name": "Scrape | Hyperbrowser", + "description": "The page explains how to use Hyperbrowser for web scraping tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/python/crawl", + "name": "Crawl | Hyperbrowser", + "description": "The Crawl section of Hyperbrowser\u2019s documentation explains web scraping techniques and automation processes.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/sdks/python/extensions", + "name": "Extensions | Hyperbrowser", + "description": "Explore Hyperbrowser extensions for enhanced web scraping and automation capabilities in your projects.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/api-reference", + "name": "API Reference | Hyperbrowser", + "description": "API Reference for Hyperbrowser provides detailed information on using its web scraping and automation features.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/api-reference/sessions", + "name": "Sessions | Hyperbrowser", + "description": "The \"Sessions\" page in Hyperbrowser covers managing and utilizing sessions for web scraping and automation tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/api-reference/crawl", + "name": "Crawl | Hyperbrowser", + "description": "The \"Crawl\" section of Hyperbrowser documentation explains how to use the tool for web scraping and automation tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/api-reference/scrape", + "name": "Scrape | Hyperbrowser", + "description": "The \"Scrape\" section of Hyperbrowser 
documentation explains web scraping techniques and automation features of the tool.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/api-reference/extract", + "name": "Extract | Hyperbrowser", + "description": "The Extract page of Hyperbrowser provides guidelines on data extraction methods and tools for web scraping.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/api-reference/agents", + "name": "Agents | Hyperbrowser", + "description": "The \"Agents\" page in Hyperbrowser documentation discusses automated entities for web scraping and task execution.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/api-reference/agents/browser-use", + "name": "Browser Use | Hyperbrowser", + "description": "The page explains how to effectively utilize browser features in Hyperbrowser for web scraping and automation tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/api-reference/agents/claude-computer-use", + "name": "Claude Computer Use | Hyperbrowser", + "description": "The page discusses using Claude for web scraping and automation with Hyperbrowser tools.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/api-reference/agents/openai-cua", + "name": "OpenAI CUA | Hyperbrowser", + "description": "OpenAI CUA for Hyperbrowser details integration and automation features for effective web scraping.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/api-reference/profiles", + "name": "Profiles | Hyperbrowser", + "description": "The \"Profiles\" page in Hyperbrowser documentation explains how to manage and configure user profiles for scraping tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///reference/api-reference/extensions", + 
"name": "Extensions | Hyperbrowser", + "description": "The page discusses extensions for Hyperbrowser that enhance its web scraping and automation capabilities.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///integrations/langchain", + "name": "LangChain | Hyperbrowser", + "description": "LangChain integrates with Hyperbrowser for enhanced web scraping and automation capabilities.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///integrations/llamaindex", + "name": "LlamaIndex | Hyperbrowser", + "description": "LlamaIndex documentation for integrating with Hyperbrowser for web scraping and automation tasks.", + "mimeType": "text/markdown", + "annotations": null + }, + { + "uri": "hyperbrowser:///~gitbook/pdf", + "name": "Hyperbrowser", + "description": "Hyperbrowser is a web scraping and automation tool, offering extensive documentation for users.", + "mimeType": "text/markdown", + "annotations": null + } + ], + "is_official": true + }, + "magic-mcp": { + "display_name": "21st.dev Magic AI Agent", + "repository": { + "type": "git", + "url": "https://github.com/21st-dev/magic-mcp" + }, + "homepage": "https://21st.dev/magic", + "author": { + "name": "21st-dev" + }, + "license": "MIT", + "tags": [ + "ui", + "components", + "ai", + "generator", + "react" + ], + "arguments": { + "API_KEY": { + "description": "API key for authentication with Magic AI Agent", + "required": true, + "example": "your-api-key" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@21st-dev/magic@latest", + "API_KEY=\"your-api-key\"" + ], + "package": "@21st-dev/magic", + "env": { + "API_KEY": "your-api-key" + }, + "description": "Install via npm package", + "recommended": true + }, + "cli": { + "type": "cli", + "command": "npx", + "args": [ + "@21st-dev/cli@latest", + "install", + "", + "--api-key", + "" + ], + "description": "Install using the CLI tool", + 
"recommended": true + } + }, + "examples": [ + { + "title": "Create a navigation bar", + "description": "Generate a modern responsive navigation bar component", + "prompt": "/ui create a modern navigation bar with responsive design" + } + ], + "name": "magic-mcp", + "description": "Magic Component Platform (MCP) is a powerful AI-driven tool that helps developers create beautiful, modern UI components instantly through natural language descriptions. It integrates seamlessly with popular IDEs and provides a streamlined workflow for UI development.", + "categories": [ + "Dev Tools" + ], + "tools": [ + { + "name": "21st_magic_component_builder", + "description": "\n\"Use this tool when the user requests a new UI component\u2014e.g., mentions /ui, /21 /21st, or asks for a button, input, dialog, table, form, banner, card, or other React component.\nThis tool ONLY returns the text snippet for that UI component. \nAfter calling this tool, you must edit or add files to integrate the snippet into the codebase.\"\n", + "inputSchema": { + "type": "object", + "properties": { + "message": { + "type": "string", + "description": "Full users message" + }, + "searchQuery": { + "type": "string", + "description": "Generate a search query for 21st.dev (library for searching UI components) to find a UI component that matches the user's message. 
Must be a two-four words max or phrase" + }, + "absolutePathToCurrentFile": { + "type": "string", + "description": "Absolute path to the current file to which we want to apply changes" + }, + "absolutePathToProjectDirectory": { + "type": "string", + "description": "Absolute path to the project root directory" + } + }, + "required": [ + "message", + "searchQuery", + "absolutePathToCurrentFile", + "absolutePathToProjectDirectory" + ] + } + }, + { + "name": "logo_search", + "description": "\nSearch and return logos in specified format (JSX, TSX, SVG).\nSupports single and multiple logo searches with category filtering.\nCan return logos in different themes (light/dark) if available.\n\nWhen to use this tool:\n1. When user types \"/logo\" command (e.g., \"/logo GitHub\")\n2. When user asks to add a company logo that's not in the local project\n\nExample queries:\n- Single company: [\"discord\"]\n- Multiple companies: [\"discord\", \"github\", \"slack\"]\n- Specific brand: [\"microsoft office\"]\n- Command style: \"/logo GitHub\" -> [\"github\"]\n- Request style: \"Add Discord logo to the project\" -> [\"discord\"]\n\nFormat options:\n- TSX: Returns TypeScript React component\n- JSX: Returns JavaScript React component\n- SVG: Returns raw SVG markup\n\nEach result includes:\n- Component name (e.g., DiscordIcon)\n- Component code\n- Import instructions\n", + "inputSchema": { + "type": "object", + "properties": { + "queries": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of company names to search for logos" + }, + "format": { + "type": "string", + "enum": [ + "JSX", + "TSX", + "SVG" + ], + "description": "Output format" + } + }, + "required": [ + "queries", + "format" + ] + } + }, + { + "name": "21st_magic_component_inspiration", + "description": "\n\"Use this tool when the user wants to see component, get inspiration, or /21st fetch data and previews from 21st.dev. 
This tool returns the JSON data of matching components without generating new code. This tool ONLY returns the text snippet for that UI component. \nAfter calling this tool, you must edit or add files to integrate the snippet into the codebase.\"\n", + "inputSchema": { + "type": "object", + "properties": { + "message": { + "type": "string", + "description": "Full users message" + }, + "searchQuery": { + "type": "string", + "description": "Search query for 21st.dev (library for searching UI components) to find a UI component that matches the user's message. Must be a two-four words max or phrase" + } + }, + "required": [ + "message", + "searchQuery" + ] + } + }, + { + "name": "21st_magic_component_refiner", + "description": "\n\"Use this tool when the user requests to refine/improve current UI component with /ui or /21 commands, \nor when context is about improving, or refining UI for a React component or molecule (NOT for big pages).\nThis tool improves UI of components and returns improved version of the component and instructions on how to implement it.\"\n", + "inputSchema": { + "type": "object", + "properties": { + "userMessage": { + "type": "string", + "description": "Full user's message about UI refinement" + }, + "absolutePathToRefiningFile": { + "type": "string", + "description": "Absolute path to the file that needs to be refined" + }, + "context": { + "type": "string", + "description": "Extract the specific UI elements and aspects that need improvement based on user messages, code, and conversation history. Identify exactly which components (buttons, forms, modals, etc.) the user is referring to and what aspects (styling, layout, responsiveness, etc.) they want to enhance. Do not include generic improvements - focus only on what the user explicitly mentions or what can be reasonably inferred from the available context. If nothing specific is mentioned or you cannot determine what needs improvement, return an empty string." 
+ } + }, + "required": [ + "userMessage", + "absolutePathToRefiningFile", + "context" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "onchain-mcp": { + "display_name": "Bankless Onchain MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/bankless/onchain-mcp" + }, + "homepage": "https://docs.bankless.com/bankless-api/other-services/onchain-mcp", + "author": { + "name": "bankless" + }, + "license": "MIT", + "tags": [ + "blockchain", + "MCP", + "smart contracts", + "ethereum", + "onchain" + ], + "arguments": { + "BANKLESS_API_TOKEN": { + "description": "API token for Bankless API authentication", + "required": true, + "example": "your_api_token_here" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@bankless/onchain-mcp" + ], + "package": "@bankless/onchain-mcp", + "env": { + "BANKLESS_API_TOKEN": "your_api_token_here" + }, + "description": "Run directly using npx", + "recommended": true + } + }, + "examples": [ + { + "title": "Read Contract State", + "description": "Read the balance of an address from a token contract", + "prompt": "What's the balance of address 0xabcd... in the token contract at 0x1234...?" + }, + { + "title": "Get Proxy Implementation", + "description": "Find the implementation address for a proxy contract", + "prompt": "What's the implementation contract for the proxy at 0x1234...?" + }, + { + "title": "Fetch Event Logs", + "description": "Get Transfer events for a specific token contract", + "prompt": "Show me the recent Transfer events for the contract at 0x1234..." + } + ], + "name": "onchain-mcp", + "description": "MCP (Model Context Protocol) server for blockchain data interaction through the Bankless API.", + "categories": [ + "Finance" + ], + "tools": [ + { + "name": "read_contract", + "description": "Read contract state from a blockchain. 
important: \n \n In case of a tuple, don't use type tuple, but specify the inner types (found in the source) in order. For nested structs, include the substructs types.\n \n Example: \n struct DataTypeA {\n DataTypeB b;\n //the liquidity index. Expressed in ray\n uint128 liquidityIndex;\n }\n \n struct DataTypeB {\n address token;\n }\n \n results in outputs for function with return type DataTypeA (tuple in abi): outputs: [{\"type\": \"address\"}, {\"type\": \"uint128\"}]", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "The blockchain network (e.g., \"ethereum\", \"base\")" + }, + "contract": { + "type": "string", + "description": "The contract address" + }, + "method": { + "type": "string", + "description": "The contract method to call" + }, + "inputs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "The type of the input parameter" + }, + "value": { + "description": "The value of the input parameter" + } + }, + "required": [ + "type" + ], + "additionalProperties": false + }, + "description": "Input parameters for the method call" + }, + "outputs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "Expected output types for the method call. \n In case of a tuple, don't use type tuple, but specify the inner types (found in the source) in order. For nested structs, include the substructs types.\n \n Example: \n struct DataTypeA {\n DataTypeB b;\n //the liquidity index. 
Expressed in ray\n uint128 liquidityIndex;\n }\n \n struct DataTypeB {\n address token;\n }\n \n results in outputs for function with return type DataTypeA (tuple in abi): outputs: [{\"type\": \"address\"}, {\"type\": \"uint128\"}]\n " + }, + "components": { + "type": "array", + "items": { + "$ref": "#/properties/outputs/items" + }, + "description": "optional components for tuple types" + } + }, + "required": [ + "type" + ], + "additionalProperties": false + }, + "description": "Expected output types for the method call. \n In case of a tuple, don't use type tuple, but specify the inner types (found in the source) in order. For nested structs, include the substructs types.\n \n Example: \n struct DataTypeA {\n DataTypeB b;\n //the liquidity index. Expressed in ray\n uint128 liquidityIndex;\n }\n \n struct DataTypeB {\n address token;\n }\n \n results in outputs for function with return type DataTypeA (tuple in abi): outputs: [{\"type\": \"address\"}, {\"type\": \"uint128\"}]\n " + } + }, + "required": [ + "network", + "contract", + "method", + "inputs", + "outputs" + ] + } + }, + { + "name": "get_proxy", + "description": "Gets the proxy address for a given network and contract", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "The blockchain network (e.g., \"ethereum\", \"base\")" + }, + "contract": { + "type": "string", + "description": "The contract address to request the proxy implementation contract for" + } + }, + "required": [ + "network", + "contract" + ] + } + }, + { + "name": "get_abi", + "description": "Gets the ABI for a given contract on a specific network", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "The blockchain network (e.g., \"ethereum\", \"base\")" + }, + "contract": { + "type": "string", + "description": "The contract address" + } + }, + "required": [ + "network", + "contract" + ] + } + }, + { + "name": "get_source", + 
"description": "Gets the source code for a given contract on a specific network", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "The blockchain network (e.g., \"ethereum\", \"base\")" + }, + "contract": { + "type": "string", + "description": "The contract address" + } + }, + "required": [ + "network", + "contract" + ] + } + }, + { + "name": "get_events", + "description": "Fetches event logs for a given network and filter criteria", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "The blockchain network (e.g., \"ethereum\", \"base\")" + }, + "addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of contract addresses to filter events" + }, + "topic": { + "type": "string", + "description": "Primary topic to filter events" + }, + "optionalTopics": { + "type": "array", + "items": { + "type": [ + "string", + "null" + ] + }, + "description": "Optional additional topics" + }, + "fromBlock": { + "type": "number", + "description": "Block number to start fetching logs from" + }, + "toBlock": { + "type": "number", + "description": "Block number to stop fetching logs at" + } + }, + "required": [ + "network", + "addresses", + "topic" + ] + } + }, + { + "name": "build_event_topic", + "description": "Builds an event topic signature based on event name and arguments", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "The blockchain network (e.g., \"ethereum\", \"base\")" + }, + "name": { + "type": "string", + "description": "Event name (e.g., \"Transfer(address,address,uint256)\")" + }, + "arguments": { + "type": "array", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "Expected output types for the method call. 
\n In case of a tuple, don't use type tuple, but specify the inner types (found in the source) in order. For nested structs, include the substructs types.\n \n Example: \n struct DataTypeA {\n DataTypeB b;\n //the liquidity index. Expressed in ray\n uint128 liquidityIndex;\n }\n \n struct DataTypeB {\n address token;\n }\n \n results in outputs for function with return type DataTypeA (tuple in abi): outputs: [{\"type\": \"address\"}, {\"type\": \"uint128\"}]\n " + }, + "components": { + "type": "array", + "items": { + "$ref": "#/properties/arguments/items" + }, + "description": "optional components for tuple types" + } + }, + "required": [ + "type" + ], + "additionalProperties": false + }, + "description": "Event arguments types" + } + }, + "required": [ + "network", + "name", + "arguments" + ] + } + }, + { + "name": "get_transaction_history_for_user", + "description": "Gets transaction history for a user and optional contract", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "The blockchain network (e.g., \"ethereum\", \"base\")" + }, + "user": { + "type": "string", + "description": "The user address" + }, + "contract": { + "type": [ + "string", + "null" + ], + "description": "The contract address (optional)" + }, + "methodId": { + "type": [ + "string", + "null" + ], + "description": "The method ID to filter by (optional)" + }, + "startBlock": { + "type": [ + "string", + "null" + ], + "description": "The starting block number (optional)" + }, + "includeData": { + "type": "boolean", + "default": true, + "description": "Whether to include transaction data" + } + }, + "required": [ + "network", + "user" + ] + } + }, + { + "name": "get_transaction_info", + "description": "Gets detailed information about a specific transaction", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "The blockchain network (e.g., \"ethereum\", \"polygon\")" + }, + 
"txHash": { + "type": "string", + "description": "The transaction hash to fetch details for" + } + }, + "required": [ + "network", + "txHash" + ] + } + }, + { + "name": "get_token_balances_on_network", + "description": "Gets all token balances for a given address on a specific network", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "The blockchain network (e.g., \"ethereum\", \"base\")" + }, + "address": { + "type": "string", + "description": "The address to check token balances for" + } + }, + "required": [ + "network", + "address" + ] + } + }, + { + "name": "get_block_info", + "description": "Gets detailed information about a specific block by number or hash", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "The blockchain network (e.g., \"ethereum\", \"base\")" + }, + "blockId": { + "type": "string", + "description": "The block number or block hash to fetch information for" + } + }, + "required": [ + "network", + "blockId" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "lightdash": { + "name": "lightdash", + "display_name": "Lightdash", + "description": "Interact with [Lightdash](https://www.lightdash.com/), a BI tool.", + "repository": { + "type": "git", + "url": "https://github.com/syucream/lightdash-mcp-server" + }, + "homepage": "https://github.com/syucream/lightdash-mcp-server", + "author": { + "name": "syucream" + }, + "license": "MIT", + "categories": [ + "Analytics" + ], + "tags": [ + "Lightdash", + "AI" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "lightdash-mcp-server" + ], + "env": { + "LIGHTDASH_API_KEY": "${LIGHTDASH_API_KEY}", + "LIGHTDASH_API_URL": "${LIGHTDASH_API_URL}" + } + } + }, + "arguments": { + "LIGHTDASH_API_KEY": { + "description": "Your Lightdash PAT (Personal Access Token) required for authenticating API requests.", + "required": 
true, + "example": "your_personal_access_token_here" + }, + "LIGHTDASH_API_URL": { + "description": "The base URL for the Lightdash API that you are connecting to.", + "required": true, + "example": "https://your.base.url" + } + }, + "tools": [ + { + "name": "lightdash_list_projects", + "description": "List all projects in the Lightdash organization", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "lightdash_get_project", + "description": "Get details of a specific project", + "inputSchema": { + "type": "object", + "properties": { + "projectUuid": { + "type": "string", + "format": "uuid", + "description": "The UUID of the project. You can obtain it from the project list." + } + }, + "required": [ + "projectUuid" + ] + } + }, + { + "name": "lightdash_list_spaces", + "description": "List all spaces in a project", + "inputSchema": { + "type": "object", + "properties": { + "projectUuid": { + "type": "string", + "format": "uuid", + "description": "The UUID of the project. You can obtain it from the project list." + } + }, + "required": [ + "projectUuid" + ] + } + }, + { + "name": "lightdash_list_charts", + "description": "List all charts in a project", + "inputSchema": { + "type": "object", + "properties": { + "projectUuid": { + "type": "string", + "format": "uuid", + "description": "The UUID of the project. You can obtain it from the project list." + } + }, + "required": [ + "projectUuid" + ] + } + }, + { + "name": "lightdash_list_dashboards", + "description": "List all dashboards in a project", + "inputSchema": { + "type": "object", + "properties": { + "projectUuid": { + "type": "string", + "format": "uuid", + "description": "The UUID of the project. You can obtain it from the project list." 
+ } + }, + "required": [ + "projectUuid" + ] + } + }, + { + "name": "lightdash_get_custom_metrics", + "description": "Get custom metrics for a project", + "inputSchema": { + "type": "object", + "properties": { + "projectUuid": { + "type": "string", + "format": "uuid", + "description": "The UUID of the project. You can obtain it from the project list." + } + }, + "required": [ + "projectUuid" + ] + } + }, + { + "name": "lightdash_get_catalog", + "description": "Get catalog for a project", + "inputSchema": { + "type": "object", + "properties": { + "projectUuid": { + "type": "string", + "format": "uuid", + "description": "The UUID of the project. You can obtain it from the project list." + } + }, + "required": [ + "projectUuid" + ] + } + }, + { + "name": "lightdash_get_metrics_catalog", + "description": "Get metrics catalog for a project", + "inputSchema": { + "type": "object", + "properties": { + "projectUuid": { + "type": "string", + "format": "uuid", + "description": "The UUID of the project. You can obtain it from the project list." + } + }, + "required": [ + "projectUuid" + ] + } + }, + { + "name": "lightdash_get_charts_as_code", + "description": "Get charts as code for a project", + "inputSchema": { + "type": "object", + "properties": { + "projectUuid": { + "type": "string", + "format": "uuid", + "description": "The UUID of the project. You can obtain it from the project list." + } + }, + "required": [ + "projectUuid" + ] + } + }, + { + "name": "lightdash_get_dashboards_as_code", + "description": "Get dashboards as code for a project", + "inputSchema": { + "type": "object", + "properties": { + "projectUuid": { + "type": "string", + "format": "uuid", + "description": "The UUID of the project. You can obtain it from the project list." 
+ } + }, + "required": [ + "projectUuid" + ] + } + }, + { + "name": "lightdash_get_metadata", + "description": "Get metadata for a specific table in the data catalog", + "inputSchema": { + "type": "object", + "properties": { + "projectUuid": { + "type": "string", + "format": "uuid", + "description": "The UUID of the project. You can obtain it from the project list." + }, + "table": { + "type": "string", + "minLength": 1 + } + }, + "required": [ + "projectUuid", + "table" + ] + } + }, + { + "name": "lightdash_get_analytics", + "description": "Get analytics for a specific table in the data catalog", + "inputSchema": { + "type": "object", + "properties": { + "projectUuid": { + "type": "string", + "format": "uuid", + "description": "The UUID of the project. You can obtain it from the project list." + }, + "table": { + "type": "string" + } + }, + "required": [ + "projectUuid", + "table" + ] + } + }, + { + "name": "lightdash_get_user_attributes", + "description": "Get organization user attributes", + "inputSchema": { + "type": "object", + "properties": {} + } + } + ] + }, + "goodnews": { + "name": "goodnews", + "display_name": "Goodnews", + "description": "A simple MCP server that delivers curated positive and uplifting news stories.", + "repository": { + "type": "git", + "url": "https://github.com/VectorInstitute/mcp-goodnews" + }, + "homepage": "https://github.com/VectorInstitute/mcp-goodnews", + "author": { + "name": "VectorInstitute" + }, + "license": "Apache 2.0", + "categories": [ + "Web Services" + ], + "tags": [ + "positive news", + "uplifting", + "Cohere", + "NewsAPI" + ], + "examples": [ + { + "title": "Fetch list of good news", + "description": "Retrieve uplifting news articles using MCP Goodnews.", + "prompt": "Show me some good news from today." 
+ } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/VectorInstitute/mcp-goodnews", + "server.py" + ], + "env": { + "NEWS_API_KEY": "", + "COHERE_API_KEY": "" + } + } + }, + "arguments": { + "NEWS_API_KEY": { + "description": "API key for NewsAPI to fetch news articles", + "required": true, + "example": "your_newsapi_key_here" + }, + "COHERE_API_KEY": { + "description": "API key for Cohere to analyze sentiment of news articles", + "required": true, + "example": "your_cohere_api_key_here" + } + } + }, + "oxylabs-mcp": { + "display_name": "Oxylabs Scraper", + "repository": { + "type": "git", + "url": "https://github.com/oxylabs/oxylabs-mcp" + }, + "homepage": "https://github.com/oxylabs/oxylabs-mcp", + "author": { + "name": "oxylabs" + }, + "license": "MIT", + "tags": [ + "web scraping", + "data extraction", + "web unblocker" + ], + "arguments": { + "url": { + "description": "The URL to scrape", + "required": true, + "example": "https://www.google.com/search?q=ai" + }, + "parse": { + "description": "Enable structured data extraction", + "required": false, + "example": "True" + }, + "render": { + "description": "Use headless browser rendering", + "required": false, + "example": "html" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "oxylabs-mcp" + ], + "env": { + "OXYLABS_USERNAME": "YOUR_USERNAME_HERE", + "OXYLABS_PASSWORD": "YOUR_PASSWORD_HERE" + }, + "description": "Install using uv in Claude Desktop" + } + }, + "examples": [ + { + "title": "Basic Google Search", + "description": "Scrape a Google search results page", + "prompt": "Could you scrape https://www.google.com/search?q=ai page?" 
+ }, + { + "title": "Amazon Product with Parse", + "description": "Scrape an Amazon product page with parsing enabled", + "prompt": "Scrape https://www.amazon.de/-/en/Smartphone-Contract-Function-Manufacturer-Exclusive/dp/B0CNKD651V with parse enabled" + }, + { + "title": "Amazon Bestsellers with Parse and Render", + "description": "Scrape an Amazon bestsellers page with parsing and rendering enabled", + "prompt": "Scrape https://www.amazon.de/-/en/gp/bestsellers/beauty/ref=zg_bs_nav_beauty_0 with parse and render enabled" + }, + { + "title": "Best Buy with Web Unblocker", + "description": "Use web unblocker with rendering to scrape a Best Buy page", + "prompt": "Use web unblocker with render to scrape https://www.bestbuy.com/site/top-deals/all-electronics-on-sale/pcmcat1674241939957.c" + } + ], + "name": "oxylabs-mcp", + "description": "A Model Context Protocol (MCP) server that enables AI assistants like Claude to seamlessly access web data through Oxylabs' powerful web scraping technology.", + "categories": [ + "Web Services" + ], + "is_official": true, + "tools": [ + { + "name": "oxylabs_scraper", + "description": "Scrape url using Oxylabs Web Api", + "inputSchema": { + "properties": { + "url": { + "description": "Url to scrape", + "title": "Url", + "type": "string" + }, + "parse": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Should result be parsed. If result should not be parsed then html will be stripped and converted to markdown file", + "title": "Parse" + }, + "render": { + "anyOf": [ + { + "enum": [ + "html", + "None" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether a headless browser should be used to render the page. 
See: https://developers.oxylabs.io/scraper-apis/web-scraper-api/features/javascript-rendering `html` will return rendered html page `None` will not use render for scraping.", + "title": "Render" + } + }, + "required": [ + "url" + ], + "title": "scrape_urlArguments", + "type": "object" + } + }, + { + "name": "oxylabs_web_unblocker", + "description": "Scrape url using Oxylabs Web Unblocker", + "inputSchema": { + "properties": { + "url": { + "description": "Url to scrape with web unblocker", + "title": "Url", + "type": "string" + }, + "render": { + "anyOf": [ + { + "enum": [ + "html", + "None" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether a headless browser should be used to render the page. See: https://developers.oxylabs.io/advanced-proxy-solutions/web-unblocker/headless-browser/javascript-rendering `html` will return rendered html page `None` will not use render for scraping.", + "title": "Render" + } + }, + "required": [ + "url" + ], + "title": "scrape_with_web_unblockerArguments", + "type": "object" + } + } + ] + }, + "postman": { + "name": "postman", + "display_name": "Postman", + "description": "MCP server for running Postman Collections locally via Newman. 
Allows for simple execution of Postman Server and returns the results of whether the collection passed all the tests.", + "repository": { + "type": "git", + "url": "https://github.com/shannonlal/mcp-postman" + }, + "homepage": "https://github.com/shannonlal/mcp-postman", + "author": { + "name": "shannonlal" + }, + "license": "ISC", + "categories": [ + "Dev Tools" + ], + "tags": [ + "Postman", + "Newman", + "API" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/shannonlal/mcp-postman" + ] + } + }, + "tools": [ + { + "name": "run-collection", + "description": "Run a Postman Collection using Newman", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Path or URL to the Postman collection" + }, + "environment": { + "type": "string", + "description": "Optional path or URL to environment file" + }, + "globals": { + "type": "string", + "description": "Optional path or URL to globals file" + }, + "iterationCount": { + "type": "number", + "description": "Optional number of iterations to run" + } + }, + "required": [ + "collection" + ] + } + } + ] + }, + "reaper": { + "name": "reaper", + "display_name": "Reaper", + "description": "Interact with your [Reaper](https://www.reaper.fm/) (Digital Audio Workstation) projects.", + "repository": { + "type": "git", + "url": "https://github.com/dschuler36/reaper-mcp-server" + }, + "homepage": "https://github.com/dschuler36/reaper-mcp-server", + "author": { + "name": "dschuler36" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "Reaper", + "Claude" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/dschuler36/reaper-mcp-server", + "reaper-mcp-server", + "--reaper-projects-dir", + "${REAPER_PROJECTS_DIR}" + ] + } + }, + "examples": [ + { + "title": "Ask about a Reaper project", + "description": "Request 
information about a specific Reaper project you have.", + "prompt": "What are the tracks in my 'Project A' Reaper file?" + }, + { + "title": "Find Reaper projects", + "description": "Use the tool to locate all Reaper projects in the configured directory.", + "prompt": "Find all my Reaper projects." + } + ], + "arguments": { + "REAPER_PROJECTS_DIR": { + "description": "The directory where Reaper projects are stored, allowing the MCP server to find and interact with them.", + "required": true, + "example": "/path/to/reaper/projects" + } + } + }, + "hyperliquid": { + "name": "hyperliquid", + "display_name": "Hyperliquid", + "description": "An MCP server implementation that integrates the Hyperliquid SDK for exchange data.", + "repository": { + "type": "git", + "url": "https://github.com/mektigboy/server-hyperliquid" + }, + "license": "MIT", + "author": { + "name": "mektigboy" + }, + "homepage": "https://github.com/mektigboy/server-hyperliquid", + "categories": [ + "Finance" + ], + "tags": [ + "Hyperliquid", + "Exchange" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@mektigboy/server-hyperliquid" + ] + } + }, + "tools": [ + { + "name": "get_all_mids", + "description": "Get mid prices for all coins on Hyperliquid", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "get_candle_snapshot", + "description": "Get candlestick data for a token on Hyperliquid", + "inputSchema": { + "type": "object", + "properties": { + "coin": { + "type": "string", + "description": "The symbol of the token to get candlestick data for" + }, + "interval": { + "type": "string", + "description": "Time interval (e.g., '15m', '1h')" + }, + "startTime": { + "type": "number", + "description": "Start time in milliseconds since epoch" + }, + "endTime": { + "type": "number", + "description": "End time in milliseconds since epoch (optional)" + } + }, + "required": [ + "coin", + "interval", + "startTime" + ] 
+ } + }, + { + "name": "get_l2_book", + "description": "Get the L2 book of a token on Hyperliquid", + "inputSchema": { + "type": "object", + "properties": { + "symbol": { + "type": "string", + "description": "The symbol of the token to get the L2 book of" + } + }, + "required": [ + "symbol" + ] + } + } + ] + }, + "evm-mcp-server": { + "name": "evm-mcp-server", + "display_name": "EVM Server", + "description": "Comprehensive blockchain services for 30+ EVM networks, supporting native tokens, ERC20, NFTs, smart contracts, transactions, and ENS resolution.", + "repository": { + "type": "git", + "url": "https://github.com/mcpdotdirect/evm-mcp-server" + }, + "license": "MIT", + "categories": [ + "Finance" + ], + "tags": [ + "Ethereum", + "Smart Contracts", + "AI", + "Token Transfers", + "NFTs" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@mcpdotdirect/evm-mcp-server" + ] + } + }, + "author": { + "name": "mcpdotdirect" + }, + "homepage": "https://github.com/mcpdotdirect/evm-mcp-server", + "tools": [ + { + "name": "get_chain_info", + "description": "Get information about an EVM network", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', etc.) or chain ID. Supports all EVM-compatible networks. Defaults to Ethereum mainnet." + } + } + } + }, + { + "name": "resolve_ens", + "description": "Resolve an ENS name to an Ethereum address", + "inputSchema": { + "type": "object", + "properties": { + "ensName": { + "type": "string", + "description": "ENS name to resolve (e.g., 'vitalik.eth')" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', etc.) or chain ID. ENS resolution works best on Ethereum mainnet. Defaults to Ethereum mainnet."
+ } + }, + "required": [ + "ensName" + ] + } + }, + { + "name": "get_supported_networks", + "description": "Get a list of supported EVM networks", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get_block_by_number", + "description": "Get a block by its block number", + "inputSchema": { + "type": "object", + "properties": { + "blockNumber": { + "type": "number", + "description": "The block number to fetch" + }, + "network": { + "type": "string", + "description": "Network name or chain ID. Defaults to Ethereum mainnet." + } + }, + "required": [ + "blockNumber" + ] + } + }, + { + "name": "get_latest_block", + "description": "Get the latest block from the EVM", + "inputSchema": { + "type": "object", + "properties": { + "network": { + "type": "string", + "description": "Network name or chain ID. Defaults to Ethereum mainnet." + } + } + } + }, + { + "name": "get_balance", + "description": "Get the native token balance (ETH, MATIC, etc.) for an address", + "inputSchema": { + "type": "object", + "properties": { + "address": { + "type": "string", + "description": "The wallet address or ENS name (e.g., '0x1234...' or 'vitalik.eth') to check the balance for" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', etc.) or chain ID. Supports all EVM-compatible networks. Defaults to Ethereum mainnet." + } + }, + "required": [ + "address" + ] + } + }, + { + "name": "get_erc20_balance", + "description": "Get the ERC20 token balance of an Ethereum address", + "inputSchema": { + "type": "object", + "properties": { + "address": { + "type": "string", + "description": "The Ethereum address to check" + }, + "tokenAddress": { + "type": "string", + "description": "The ERC20 token contract address" + }, + "network": { + "type": "string", + "description": "Network name or chain ID. Defaults to Ethereum mainnet." 
+ } + }, + "required": [ + "address", + "tokenAddress" + ] + } + }, + { + "name": "get_token_balance", + "description": "Get the balance of an ERC20 token for an address", + "inputSchema": { + "type": "object", + "properties": { + "tokenAddress": { + "type": "string", + "description": "The contract address or ENS name of the ERC20 token (e.g., '0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48' for USDC or 'uniswap.eth')" + }, + "ownerAddress": { + "type": "string", + "description": "The wallet address or ENS name to check the balance for (e.g., '0x1234...' or 'vitalik.eth')" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', etc.) or chain ID. Supports all EVM-compatible networks. Defaults to Ethereum mainnet." + } + }, + "required": [ + "tokenAddress", + "ownerAddress" + ] + } + }, + { + "name": "get_transaction", + "description": "Get detailed information about a specific transaction by its hash. Includes sender, recipient, value, data, and more.", + "inputSchema": { + "type": "object", + "properties": { + "txHash": { + "type": "string", + "description": "The transaction hash to look up (e.g., '0x1234...')" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', 'polygon') or chain ID. Defaults to Ethereum mainnet." + } + }, + "required": [ + "txHash" + ] + } + }, + { + "name": "get_transaction_receipt", + "description": "Get a transaction receipt by its hash", + "inputSchema": { + "type": "object", + "properties": { + "txHash": { + "type": "string", + "description": "The transaction hash to look up" + }, + "network": { + "type": "string", + "description": "Network name or chain ID. Defaults to Ethereum mainnet." 
+ } + }, + "required": [ + "txHash" + ] + } + }, + { + "name": "estimate_gas", + "description": "Estimate the gas cost for a transaction", + "inputSchema": { + "type": "object", + "properties": { + "to": { + "type": "string", + "description": "The recipient address" + }, + "value": { + "type": "string", + "description": "The amount of ETH to send in ether (e.g., '0.1')" + }, + "data": { + "type": "string", + "description": "The transaction data as a hex string" + }, + "network": { + "type": "string", + "description": "Network name or chain ID. Defaults to Ethereum mainnet." + } + }, + "required": [ + "to" + ] + } + }, + { + "name": "transfer_eth", + "description": "Transfer native tokens (ETH, MATIC, etc.) to an address", + "inputSchema": { + "type": "object", + "properties": { + "privateKey": { + "type": "string", + "description": "Private key of the sender account in hex format (with or without 0x prefix). SECURITY: This is used only for transaction signing and is not stored." + }, + "to": { + "type": "string", + "description": "The recipient address or ENS name (e.g., '0x1234...' or 'vitalik.eth')" + }, + "amount": { + "type": "string", + "description": "Amount to send in ETH (or the native token of the network), as a string (e.g., '0.1')" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', etc.) or chain ID. Supports all EVM-compatible networks. Defaults to Ethereum mainnet." 
+ } + }, + "required": [ + "privateKey", + "to", + "amount" + ] + } + }, + { + "name": "transfer_erc20", + "description": "Transfer ERC20 tokens to another address", + "inputSchema": { + "type": "object", + "properties": { + "privateKey": { + "type": "string", + "description": "Private key of the sending account (this is used for signing and is never stored)" + }, + "tokenAddress": { + "type": "string", + "description": "The address of the ERC20 token contract" + }, + "toAddress": { + "type": "string", + "description": "The recipient address" + }, + "amount": { + "type": "string", + "description": "The amount of tokens to send (in token units, e.g., '10' for 10 tokens)" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', etc.) or chain ID. Supports all EVM-compatible networks. Defaults to Ethereum mainnet." + } + }, + "required": [ + "privateKey", + "tokenAddress", + "toAddress", + "amount" + ] + } + }, + { + "name": "approve_token_spending", + "description": "Approve another address (like a DeFi protocol or exchange) to spend your ERC20 tokens. This is often required before interacting with DeFi protocols.", + "inputSchema": { + "type": "object", + "properties": { + "privateKey": { + "type": "string", + "description": "Private key of the token owner account in hex format (with or without 0x prefix). SECURITY: This is used only for transaction signing and is not stored." + }, + "tokenAddress": { + "type": "string", + "description": "The contract address of the ERC20 token to approve for spending (e.g., '0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48' for USDC on Ethereum)" + }, + "spenderAddress": { + "type": "string", + "description": "The contract address being approved to spend your tokens (e.g., a DEX or lending protocol)" + }, + "amount": { + "type": "string", + "description": "The amount of tokens to approve in token units, not wei (e.g., '1000' to approve spending 1000 tokens). 
Use a very large number for unlimited approval." + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', 'polygon') or chain ID. Defaults to Ethereum mainnet." + } + }, + "required": [ + "privateKey", + "tokenAddress", + "spenderAddress", + "amount" + ] + } + }, + { + "name": "transfer_nft", + "description": "Transfer an NFT (ERC721 token) from one address to another. Requires the private key of the current owner for signing the transaction.", + "inputSchema": { + "type": "object", + "properties": { + "privateKey": { + "type": "string", + "description": "Private key of the NFT owner account in hex format (with or without 0x prefix). SECURITY: This is used only for transaction signing and is not stored." + }, + "tokenAddress": { + "type": "string", + "description": "The contract address of the NFT collection (e.g., '0xBC4CA0EdA7647A8aB7C2061c2E118A18a936f13D' for Bored Ape Yacht Club)" + }, + "tokenId": { + "type": "string", + "description": "The ID of the specific NFT to transfer (e.g., '1234')" + }, + "toAddress": { + "type": "string", + "description": "The recipient wallet address that will receive the NFT" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', 'polygon') or chain ID. Most NFTs are on Ethereum mainnet, which is the default." + } + }, + "required": [ + "privateKey", + "tokenAddress", + "tokenId", + "toAddress" + ] + } + }, + { + "name": "transfer_erc1155", + "description": "Transfer ERC1155 tokens to another address. ERC1155 is a multi-token standard that can represent both fungible and non-fungible tokens in a single contract.", + "inputSchema": { + "type": "object", + "properties": { + "privateKey": { + "type": "string", + "description": "Private key of the token owner account in hex format (with or without 0x prefix). SECURITY: This is used only for transaction signing and is not stored." 
+ }, + "tokenAddress": { + "type": "string", + "description": "The contract address of the ERC1155 token collection (e.g., '0x76BE3b62873462d2142405439777e971754E8E77')" + }, + "tokenId": { + "type": "string", + "description": "The ID of the specific token to transfer (e.g., '1234')" + }, + "amount": { + "type": "string", + "description": "The quantity of tokens to send (e.g., '1' for a single NFT or '10' for 10 fungible tokens)" + }, + "toAddress": { + "type": "string", + "description": "The recipient wallet address that will receive the tokens" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', 'polygon') or chain ID. ERC1155 tokens exist across many networks. Defaults to Ethereum mainnet." + } + }, + "required": [ + "privateKey", + "tokenAddress", + "tokenId", + "amount", + "toAddress" + ] + } + }, + { + "name": "transfer_token", + "description": "Transfer ERC20 tokens to an address", + "inputSchema": { + "type": "object", + "properties": { + "privateKey": { + "type": "string", + "description": "Private key of the sender account in hex format (with or without 0x prefix). SECURITY: This is used only for transaction signing and is not stored." + }, + "tokenAddress": { + "type": "string", + "description": "The contract address or ENS name of the ERC20 token to transfer (e.g., '0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48' for USDC or 'uniswap.eth')" + }, + "toAddress": { + "type": "string", + "description": "The recipient address or ENS name that will receive the tokens (e.g., '0x1234...' or 'vitalik.eth')" + }, + "amount": { + "type": "string", + "description": "Amount of tokens to send as a string (e.g., '100' for 100 tokens). This will be adjusted for the token's decimals." + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', etc.) or chain ID. Supports all EVM-compatible networks. Defaults to Ethereum mainnet." 
+ } + }, + "required": [ + "privateKey", + "tokenAddress", + "toAddress", + "amount" + ] + } + }, + { + "name": "read_contract", + "description": "Read data from a smart contract by calling a view/pure function. This doesn't modify blockchain state and doesn't require gas or signing.", + "inputSchema": { + "type": "object", + "properties": { + "contractAddress": { + "type": "string", + "description": "The address of the smart contract to interact with" + }, + "abi": { + "type": "array", + "description": "The ABI (Application Binary Interface) of the smart contract function, as a JSON array" + }, + "functionName": { + "type": "string", + "description": "The name of the function to call on the contract (e.g., 'balanceOf')" + }, + "args": { + "type": "array", + "description": "The arguments to pass to the function, as an array (e.g., ['0x1234...'])" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', 'polygon') or chain ID. Defaults to Ethereum mainnet." + } + }, + "required": [ + "contractAddress", + "abi", + "functionName" + ] + } + }, + { + "name": "write_contract", + "description": "Write data to a smart contract by calling a state-changing function. 
This modifies blockchain state and requires gas payment and transaction signing.", + "inputSchema": { + "type": "object", + "properties": { + "contractAddress": { + "type": "string", + "description": "The address of the smart contract to interact with" + }, + "abi": { + "type": "array", + "description": "The ABI (Application Binary Interface) of the smart contract function, as a JSON array" + }, + "functionName": { + "type": "string", + "description": "The name of the function to call on the contract (e.g., 'transfer')" + }, + "args": { + "type": "array", + "description": "The arguments to pass to the function, as an array (e.g., ['0x1234...', '1000000000000000000'])" + }, + "privateKey": { + "type": "string", + "description": "Private key of the sending account in hex format (with or without 0x prefix). SECURITY: This is used only for transaction signing and is not stored." + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', 'polygon') or chain ID. Defaults to Ethereum mainnet." + } + }, + "required": [ + "contractAddress", + "abi", + "functionName", + "args", + "privateKey" + ] + } + }, + { + "name": "is_contract", + "description": "Check if an address is a smart contract or an externally owned account (EOA)", + "inputSchema": { + "type": "object", + "properties": { + "address": { + "type": "string", + "description": "The wallet or contract address or ENS name to check (e.g., '0x1234...' or 'uniswap.eth')" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', etc.) or chain ID. Supports all EVM-compatible networks. Defaults to Ethereum mainnet." + } + }, + "required": [ + "address" + ] + } + }, + { + "name": "get_token_info", + "description": "Get comprehensive information about an ERC20 token including name, symbol, decimals, total supply, and other metadata. 
Use this to analyze any token on EVM chains.", + "inputSchema": { + "type": "object", + "properties": { + "tokenAddress": { + "type": "string", + "description": "The contract address of the ERC20 token (e.g., '0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48' for USDC on Ethereum)" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', 'polygon') or chain ID. Defaults to Ethereum mainnet." + } + }, + "required": [ + "tokenAddress" + ] + } + }, + { + "name": "get_token_balance_erc20", + "description": "Get ERC20 token balance for an address", + "inputSchema": { + "type": "object", + "properties": { + "address": { + "type": "string", + "description": "The address to check balance for" + }, + "tokenAddress": { + "type": "string", + "description": "The ERC20 token contract address" + }, + "network": { + "type": "string", + "description": "Network name or chain ID. Defaults to Ethereum mainnet." + } + }, + "required": [ + "address", + "tokenAddress" + ] + } + }, + { + "name": "get_nft_info", + "description": "Get detailed information about a specific NFT (ERC721 token), including collection name, symbol, token URI, and current owner if available.", + "inputSchema": { + "type": "object", + "properties": { + "tokenAddress": { + "type": "string", + "description": "The contract address of the NFT collection (e.g., '0xBC4CA0EdA7647A8aB7C2061c2E118A18a936f13D' for Bored Ape Yacht Club)" + }, + "tokenId": { + "type": "string", + "description": "The ID of the specific NFT token to query (e.g., '1234')" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', 'polygon') or chain ID. Most NFTs are on Ethereum mainnet, which is the default." 
+ } + }, + "required": [ + "tokenAddress", + "tokenId" + ] + } + }, + { + "name": "check_nft_ownership", + "description": "Check if an address owns a specific NFT", + "inputSchema": { + "type": "object", + "properties": { + "tokenAddress": { + "type": "string", + "description": "The contract address or ENS name of the NFT collection (e.g., '0xBC4CA0EdA7647A8aB7C2061c2E118A18a936f13D' for BAYC or 'boredapeyachtclub.eth')" + }, + "tokenId": { + "type": "string", + "description": "The ID of the NFT to check (e.g., '1234')" + }, + "ownerAddress": { + "type": "string", + "description": "The wallet address or ENS name to check ownership against (e.g., '0x1234...' or 'vitalik.eth')" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', etc.) or chain ID. Supports all EVM-compatible networks. Defaults to Ethereum mainnet." + } + }, + "required": [ + "tokenAddress", + "tokenId", + "ownerAddress" + ] + } + }, + { + "name": "get_erc1155_token_uri", + "description": "Get the metadata URI for an ERC1155 token (multi-token standard used for both fungible and non-fungible tokens). The URI typically points to JSON metadata about the token.", + "inputSchema": { + "type": "object", + "properties": { + "tokenAddress": { + "type": "string", + "description": "The contract address of the ERC1155 token collection (e.g., '0x76BE3b62873462d2142405439777e971754E8E77')" + }, + "tokenId": { + "type": "string", + "description": "The ID of the specific token to query metadata for (e.g., '1234')" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', 'polygon') or chain ID. ERC1155 tokens exist across many networks. Defaults to Ethereum mainnet." + } + }, + "required": [ + "tokenAddress", + "tokenId" + ] + } + }, + { + "name": "get_nft_balance", + "description": "Get the total number of NFTs owned by an address from a specific collection. 
This returns the count of NFTs, not individual token IDs.", + "inputSchema": { + "type": "object", + "properties": { + "tokenAddress": { + "type": "string", + "description": "The contract address of the NFT collection (e.g., '0xBC4CA0EdA7647A8aB7C2061c2E118A18a936f13D' for Bored Ape Yacht Club)" + }, + "ownerAddress": { + "type": "string", + "description": "The wallet address to check the NFT balance for (e.g., '0x1234...')" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', 'polygon') or chain ID. Most NFTs are on Ethereum mainnet, which is the default." + } + }, + "required": [ + "tokenAddress", + "ownerAddress" + ] + } + }, + { + "name": "get_erc1155_balance", + "description": "Get the balance of a specific ERC1155 token ID owned by an address. ERC1155 allows multiple tokens of the same ID, so the balance can be greater than 1.", + "inputSchema": { + "type": "object", + "properties": { + "tokenAddress": { + "type": "string", + "description": "The contract address of the ERC1155 token collection (e.g., '0x76BE3b62873462d2142405439777e971754E8E77')" + }, + "tokenId": { + "type": "string", + "description": "The ID of the specific token to check the balance for (e.g., '1234')" + }, + "ownerAddress": { + "type": "string", + "description": "The wallet address to check the token balance for (e.g., '0x1234...')" + }, + "network": { + "type": "string", + "description": "Network name (e.g., 'ethereum', 'optimism', 'arbitrum', 'base', 'polygon') or chain ID. ERC1155 tokens exist across many networks. Defaults to Ethereum mainnet." + } + }, + "required": [ + "tokenAddress", + "tokenId", + "ownerAddress" + ] + } + }, + { + "name": "get_address_from_private_key", + "description": "Get the EVM address derived from a private key", + "inputSchema": { + "type": "object", + "properties": { + "privateKey": { + "type": "string", + "description": "Private key in hex format (with or without 0x prefix). 
SECURITY: This is used only for address derivation and is not stored." + } + }, + "required": [ + "privateKey" + ] + } + } + ] + }, + "neovim": { + "name": "neovim", + "display_name": "Neovim Server", + "description": "An MCP Server for your Neovim session.", + "repository": { + "type": "git", + "url": "https://github.com/bigcodegen/mcp-neovim-server" + }, + "homepage": "https://github.com/bigcodegen/mcp-neovim-server", + "author": { + "name": "bigcodegen" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "Neovim", + "MCP", + "Claude Desktop" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "mcp-neovim-server" + ], + "env": { + "ALLOW_SHELL_COMMANDS": "${ALLOW_SHELL_COMMANDS}", + "NVIM_SOCKET_PATH": "${NVIM_SOCKET_PATH}" + } + } + }, + "arguments": { + "ALLOW_SHELL_COMMANDS": { + "description": "Set to 'true' to enable shell command execution (e.g. `!ls`).", + "required": false, + "example": "true" + }, + "NVIM_SOCKET_PATH": { + "description": "Set to the path of your Neovim socket.", + "required": false, + "example": "/tmp/nvim" + } + }, + "tools": [ + { + "name": "vim_buffer", + "inputSchema": { + "type": "object", + "properties": { + "filename": { + "type": "string", + "description": "Optional file name to view a specific buffer" + } + } + } + }, + { + "name": "vim_command", + "inputSchema": { + "type": "object", + "properties": { + "command": { + "type": "string", + "description": "Vim command to execute (use ! 
prefix for shell commands if enabled)" + } + }, + "required": [ + "command" + ] + } + }, + { + "name": "vim_status", + "inputSchema": { + "type": "object", + "properties": { + "filename": { + "type": "string", + "description": "Optional file name to get status for a specific buffer" + } + } + } + }, + { + "name": "vim_edit", + "inputSchema": { + "type": "object", + "properties": { + "startLine": { + "type": "number", + "description": "The line number where editing should begin (1-indexed)" + }, + "mode": { + "type": "string", + "enum": [ + "insert", + "replace", + "replaceAll" + ], + "description": "Whether to insert new content, replace existing content, or replace entire buffer" + }, + "lines": { + "type": "string", + "description": "The text content to insert or use as replacement" + } + }, + "required": [ + "startLine", + "mode", + "lines" + ] + } + }, + { + "name": "vim_window", + "inputSchema": { + "type": "object", + "properties": { + "command": { + "type": "string", + "enum": [ + "split", + "vsplit", + "only", + "close", + "wincmd h", + "wincmd j", + "wincmd k", + "wincmd l" + ], + "description": "Window manipulation command: split or vsplit to create new window, only to keep just current window, close to close current window, or wincmd with h/j/k/l to navigate between windows" + } + }, + "required": [ + "command" + ] + } + }, + { + "name": "vim_mark", + "inputSchema": { + "type": "object", + "properties": { + "mark": { + "type": "string", + "pattern": "^[a-z]$", + "description": "Single lowercase letter [a-z] to use as the mark name" + }, + "line": { + "type": "number", + "description": "The line number where the mark should be placed (1-indexed)" + }, + "column": { + "type": "number", + "description": "The column number where the mark should be placed (0-indexed)" + } + }, + "required": [ + "mark", + "line", + "column" + ] + } + }, + { + "name": "vim_register", + "inputSchema": { + "type": "object", + "properties": { + "register": { + "type": "string", + 
"pattern": "^[a-z\\\"]$", + "description": "Register name - a lowercase letter [a-z] or double-quote [\"] for the unnamed register" + }, + "content": { + "type": "string", + "description": "The text content to store in the specified register" + } + }, + "required": [ + "register", + "content" + ] + } + }, + { + "name": "vim_visual", + "inputSchema": { + "type": "object", + "properties": { + "startLine": { + "type": "number", + "description": "The starting line number for visual selection (1-indexed)" + }, + "startColumn": { + "type": "number", + "description": "The starting column number for visual selection (0-indexed)" + }, + "endLine": { + "type": "number", + "description": "The ending line number for visual selection (1-indexed)" + }, + "endColumn": { + "type": "number", + "description": "The ending column number for visual selection (0-indexed)" + } + }, + "required": [ + "startLine", + "startColumn", + "endLine", + "endColumn" + ] + } + } + ] + }, + "aws-resources-operations": { + "name": "aws-resources-operations", + "display_name": "AWS Resources", + "description": "Run generated python code to securely query or modify any AWS resources supported by boto3.", + "repository": { + "type": "git", + "url": "https://github.com/baryhuang/mcp-server-aws-resources-python" + }, + "homepage": "https://github.com/baryhuang/mcp-server-aws-resources-python", + "author": { + "name": "baryhuang" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "AWS", + "Docker", + "boto3" + ], + "installations": { + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}", + "-e", + "AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}", + "-e", + "AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}", + "buryhuang/mcp-server-aws-resources:latest" + ], + "env": { + "AWS_ACCESS_KEY_ID": "${AWS_ACCESS_KEY_ID}", + "AWS_SECRET_ACCESS_KEY": "${AWS_SECRET_ACCESS_KEY}", + "AWS_DEFAULT_REGION": 
"${AWS_DEFAULT_REGION}" + } + } + }, + "arguments": { + "AWS_ACCESS_KEY_ID": { + "description": "Your AWS access key.", + "required": true, + "example": "your_access_key_id_here" + }, + "AWS_SECRET_ACCESS_KEY": { + "description": "Your AWS secret key.", + "required": true, + "example": "your_secret_access_key_here" + }, + "AWS_DEFAULT_REGION": { + "description": "AWS region to operate in. Defaults to 'us-east-1' if not set.", + "required": false, + "example": "us-east-1" + } + }, + "tools": [ + { + "name": "query_aws_resources", + "description": "Execute a boto3 code snippet to query AWS resources", + "inputSchema": { + "type": "object", + "properties": { + "code_snippet": { + "type": "string", + "description": "Python code using boto3 to query AWS resources. The code should have default execution setting variable named 'result'. Example code: 'result = boto3.client('s3').list_buckets()'" + } + }, + "required": [ + "code_snippet" + ] + } + } + ] + }, + "filesystem": { + "name": "filesystem", + "display_name": "Filesystem", + "description": "Secure file operations with configurable access controls", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/filesystem", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "System Tools" + ], + "tags": [ + "Node.js", + "server", + "filesystem", + "operations" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-filesystem", + "${USER_FILESYSTEM_DIRECTORY}", + "${USER_FILESYSTEM_ALLOWED_DIR}" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "--mount", + "type=bind,src=${USER_FILESYSTEM_DIRECTORY},dst=/projects/Desktop", + "--mount", + "type=bind,src=${USER_FILESYSTEM_ALLOWED_DIR},dst=/projects/other/allowed/dir,ro", + "--mount", + 
"type=bind,src=${USER_FILESYSTEM_ALLOWED_FILE},dst=/projects/path/to/file.txt", + "mcp/filesystem", + "/projects" + ] + } + }, + "arguments": { + "USER_FILESYSTEM_DIRECTORY": { + "description": "The directory to be mounted in the container", + "required": true, + "example": "/Users/username/Desktop" + }, + "USER_FILESYSTEM_ALLOWED_DIR": { + "description": "The directory to be mounted in the container", + "required": true, + "example": "/Users/username/Desktop" + } + }, + "tools": [ + { + "name": "read_file", + "description": "Read the complete contents of a file from the file system. Handles various text encodings and provides detailed error messages if the file cannot be read. Use this tool when you need to examine the contents of a single file. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + }, + "required": [ + "path" + ] + } + }, + { + "name": "read_multiple_files", + "description": "Read the contents of multiple files simultaneously. This is more efficient than reading files one by one when you need to analyze or compare multiple files. Each file's content is returned with its path as a reference. Failed reads for individual files won't stop the entire operation. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "paths": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "paths" + ] + } + }, + { + "name": "write_file", + "description": "Create a new file or completely overwrite an existing file with new content. Use with caution as it will overwrite existing files without warning. Handles text content with proper encoding. 
Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "content": { + "type": "string" + } + }, + "required": [ + "path", + "content" + ] + } + }, + { + "name": "edit_file", + "description": "Make line-based edits to a text file. Each edit replaces exact line sequences with new content. Returns a git-style diff showing the changes made. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "edits": { + "type": "array", + "items": { + "type": "object", + "properties": { + "oldText": { + "type": "string", + "description": "Text to search for - must match exactly" + }, + "newText": { + "type": "string", + "description": "Text to replace with" + } + }, + "required": [ + "oldText", + "newText" + ], + "additionalProperties": false + } + }, + "dryRun": { + "type": "boolean", + "default": false, + "description": "Preview changes using git-style diff format" + } + }, + "required": [ + "path", + "edits" + ] + } + }, + { + "name": "create_directory", + "description": "Create a new directory or ensure a directory exists. Can create multiple nested directories in one operation. If the directory already exists, this operation will succeed silently. Perfect for setting up directory structures for projects or ensuring required paths exist. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + }, + "required": [ + "path" + ] + } + }, + { + "name": "list_directory", + "description": "Get a detailed listing of all files and directories in a specified path. Results clearly distinguish between files and directories with [FILE] and [DIR] prefixes. This tool is essential for understanding directory structure and finding specific files within a directory. 
Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + }, + "required": [ + "path" + ] + } + }, + { + "name": "directory_tree", + "description": "Get a recursive tree view of files and directories as a JSON structure. Each entry includes 'name', 'type' (file/directory), and 'children' for directories. Files have no children array, while directories always have a children array (which may be empty). The output is formatted with 2-space indentation for readability. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + }, + "required": [ + "path" + ] + } + }, + { + "name": "move_file", + "description": "Move or rename files and directories. Can move files between directories and rename them in a single operation. If the destination exists, the operation will fail. Works across different directories and can be used for simple renaming within the same directory. Both source and destination must be within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "source": { + "type": "string" + }, + "destination": { + "type": "string" + } + }, + "required": [ + "source", + "destination" + ] + } + }, + { + "name": "search_files", + "description": "Recursively search for files and directories matching a pattern. Searches through all subdirectories from the starting path. The search is case-insensitive and matches partial names. Returns full paths to all matching items. Great for finding files when you don't know their exact location. 
Only searches within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "pattern": { + "type": "string" + }, + "excludePatterns": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "required": [ + "path", + "pattern" + ] + } + }, + { + "name": "get_file_info", + "description": "Retrieve detailed metadata about a file or directory. Returns comprehensive information including size, creation time, last modified time, permissions, and type. This tool is perfect for understanding file characteristics without reading the actual content. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + }, + "required": [ + "path" + ] + } + }, + { + "name": "list_allowed_directories", + "description": "Returns the list of directories that this server is allowed to access. Use this to understand which directories are available before trying to access files.", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + } + ], + "is_official": true + }, + "ergo-blockchain-mcp": { + "name": "ergo-blockchain-mcp", + "display_name": "Ergo Blockchain Explorer", + "description": "An MCP server to integrate Ergo Blockchain Node and Explorer APIs for checking address balances, analyzing transactions, viewing transaction history, performing forensic analysis of addresses, searching for tokens, and monitoring network status.", + "repository": { + "type": "git", + "url": "https://github.com/marctheshark3/ergo-mcp" + }, + "homepage": "https://github.com/marctheshark3/ergo-mcp", + "author": { + "name": "marctheshark3" + }, + "license": "MIT", + "categories": [ + "Finance" + ], + "tags": [ + "Ergo", + "Blockchain", + "Python", + "API" + ], + "examples": [ + { + "title": "Running the MCP Server as a Module", + "description": "Run the server using Python module command.", + "prompt": "```bash\n# Make 
sure your virtual environment is activated:\n# Using the full path (recommended):\n/path/to/your/project/.venv/bin/python -m ergo_explorer\n\n# Or with activated virtual environment:\npython -m ergo_explorer\n```" + }, + { + "title": "Running Tests", + "description": "Execute tests using pytest framework.", + "prompt": "```bash\n# Run all tests\npython -m pytest\n\n# Run specific test files\npython -m pytest tests/unit/test_address_tools.py\n```" + } + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "ergo-mcp" + ], + "env": { + "SERVER_HOST": "", + "SERVER_PORT": "", + "SERVER_WORKERS": "", + "ERGO_NODE_API": "", + "ERGO_NODE_API_KEY": "" + } + } + }, + "arguments": { + "SERVER_HOST": { + "description": "Host to bind the server to (default: 0.0.0.0)", + "required": false, + "example": "localhost" + }, + "SERVER_PORT": { + "description": "Port to run the server on (default: 3001)", + "required": false, + "example": "3001" + }, + "SERVER_WORKERS": { + "description": "Number of worker processes (default: 4)", + "required": false, + "example": "4" + }, + "ERGO_NODE_API": { + "description": "URL of the Ergo node API (for node-specific features)", + "required": false, + "example": "http://localhost:8080" + }, + "ERGO_NODE_API_KEY": { + "description": "API key for the Ergo node (if required)", + "required": false, + "example": "your_api_key" + } + } + }, + "nasa": { + "name": "nasa", + "display_name": "NASA", + "description": "Access to a unified gateway of NASA's data sources including but not limited to APOD, NEO, EPIC, GIBS.", + "repository": { + "type": "git", + "url": "https://github.com/ProgramComputer/NASA-MCP-server" + }, + "homepage": "https://github.com/ProgramComputer/NASA-MCP-server", + "author": { + "name": "ProgramComputer" + }, + "license": "ISC", + "categories": [ + "Knowledge Base" + ], + "tags": [ + "NASA", + "API", + "Data", + "Space", + "Science" + ], + "installations": { + "npm": { + "type": "npm", + "command": 
"npx", + "args": [ + "-y", + "@programcomputer/nasa-mcp-server" + ], + "env": { + "NASA_API_KEY": "${NASA_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Get Today's Astronomy Picture of the Day", + "description": "Fetch the APOD from NASA's API.", + "prompt": "GET /nasa/apod" + }, + { + "title": "Get Mars Rover Photos", + "description": "Retrieve photos taken by the Curiosity rover on a specific sol.", + "prompt": "GET /nasa/mars-rover?rover=curiosity&sol=1000" + }, + { + "title": "Search for Near Earth Objects", + "description": "Find any near earth objects recorded in a specified date range.", + "prompt": "GET /nasa/neo?start_date=2023-01-01&end_date=2023-01-07" + } + ], + "arguments": { + "NASA_API_KEY": { + "description": "Your NASA API key (get at api.nasa.gov)", + "required": false, + "example": "DEMO_KEY" + } + }, + "tools": [ + { + "name": "nasa/apod", + "description": "Fetch NASA's Astronomy Picture of the Day", + "inputSchema": { + "type": "object", + "properties": { + "date": { + "type": "string", + "description": "The date of the APOD image to retrieve (YYYY-MM-DD)" + }, + "count": { + "type": "number", + "description": "Count of random APODs to retrieve" + }, + "start_date": { + "type": "string", + "description": "Start date for date range search (YYYY-MM-DD)" + }, + "end_date": { + "type": "string", + "description": "End date for date range search (YYYY-MM-DD)" + }, + "thumbs": { + "type": "boolean", + "description": "Return URL of thumbnail for video content" + } + }, + "required": [ + "date" + ] + } + }, + { + "name": "nasa/neo", + "description": "Near Earth Object Web Service - information about asteroids", + "inputSchema": { + "type": "object", + "properties": { + "start_date": { + "type": "string", + "description": "Start date for asteroid search (YYYY-MM-DD)" + }, + "end_date": { + "type": "string", + "description": "End date for asteroid search (YYYY-MM-DD)" + }, + "asteroid_id": { + "type": "string", + "description": "ID of a specific 
asteroid" + } + }, + "required": [ + "start_date", + "end_date" + ] + } + }, + { + "name": "nasa/epic", + "description": "Earth Polychromatic Imaging Camera - views of Earth", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "Image collection (natural or enhanced)" + }, + "date": { + "type": "string", + "description": "Date of the image (YYYY-MM-DD)" + } + } + } + }, + { + "name": "nasa/gibs", + "description": "Global Imagery Browse Services - satellite imagery", + "inputSchema": { + "type": "object", + "properties": { + "layer": { + "type": "string", + "description": "Layer name (e.g., MODIS_Terra_CorrectedReflectance_TrueColor)" + }, + "date": { + "type": "string", + "description": "Date of imagery (YYYY-MM-DD)" + }, + "format": { + "type": "string", + "description": "Image format (png, jpg, jpeg)" + }, + "resolution": { + "type": "number", + "description": "Resolution in pixels per degree" + } + }, + "required": [ + "layer", + "date" + ] + } + }, + { + "name": "nasa/cmr", + "description": "NASA Common Metadata Repository - search for NASA data collections", + "inputSchema": { + "type": "object", + "properties": { + "keyword": { + "type": "string", + "description": "Search keyword" + }, + "limit": { + "type": "number", + "description": "Maximum number of results to return" + }, + "page": { + "type": "number", + "description": "Page number for pagination" + }, + "sort_key": { + "type": "string", + "description": "Field to sort results by" + } + }, + "required": [ + "keyword" + ] + } + }, + { + "name": "nasa/firms", + "description": "NASA Fire Information for Resource Management System - fire data", + "inputSchema": { + "type": "object", + "properties": { + "latitude": { + "type": "number", + "description": "Latitude coordinate" + }, + "longitude": { + "type": "number", + "description": "Longitude coordinate" + }, + "days": { + "type": "number", + "description": "Number of days of data to retrieve" + } 
+ }, + "required": [ + "latitude", + "longitude" + ] + } + }, + { + "name": "nasa/images", + "description": "NASA Image and Video Library - search NASA's media archive", + "inputSchema": { + "type": "object", + "properties": { + "q": { + "type": "string", + "description": "Search query" + }, + "media_type": { + "type": "string", + "description": "Media type (image, video, audio)" + }, + "year_start": { + "type": "string", + "description": "Start year for results" + }, + "year_end": { + "type": "string", + "description": "End year for results" + }, + "page": { + "type": "number", + "description": "Page number for pagination" + } + }, + "required": [ + "q" + ] + } + }, + { + "name": "nasa/exoplanet", + "description": "NASA Exoplanet Archive - data about planets beyond our solar system", + "inputSchema": { + "type": "object", + "properties": { + "table": { + "type": "string", + "description": "Database table to query" + }, + "select": { + "type": "string", + "description": "Columns to return" + }, + "where": { + "type": "string", + "description": "Filter conditions" + }, + "order": { + "type": "string", + "description": "Ordering of results" + }, + "limit": { + "type": "number", + "description": "Maximum number of results" + } + }, + "required": [ + "table" + ] + } + }, + { + "name": "nasa/donki", + "description": "Space Weather Database Of Notifications, Knowledge, Information", + "inputSchema": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "Type of space weather event" + }, + "startDate": { + "type": "string", + "description": "Start date (YYYY-MM-DD)" + }, + "endDate": { + "type": "string", + "description": "End date (YYYY-MM-DD)" + } + }, + "required": [ + "type" + ] + } + }, + { + "name": "nasa/mars-rover", + "description": "NASA Mars Rover Photos - images from Mars rovers", + "inputSchema": { + "type": "object", + "properties": { + "rover": { + "type": "string", + "description": "Name of the rover (curiosity, 
opportunity, spirit, perseverance)" + }, + "sol": { + "type": "number", + "description": "Martian sol (day) of the photos" + }, + "earth_date": { + "type": "string", + "description": "Earth date of the photos (YYYY-MM-DD)" + }, + "camera": { + "type": "string", + "description": "Camera name" + }, + "page": { + "type": "number", + "description": "Page number for pagination" + } + }, + "required": [ + "rover" + ] + } + }, + { + "name": "nasa/eonet", + "description": "Earth Observatory Natural Event Tracker - natural events data", + "inputSchema": { + "type": "object", + "properties": { + "category": { + "type": "string", + "description": "Event category (wildfires, volcanoes, etc.)" + }, + "days": { + "type": "number", + "description": "Number of days to look back" + }, + "source": { + "type": "string", + "description": "Data source" + }, + "status": { + "type": "string", + "description": "Event status (open, closed)" + }, + "limit": { + "type": "number", + "description": "Maximum number of events to return" + } + } + } + }, + { + "name": "nasa/power", + "description": "Prediction of Worldwide Energy Resources - meteorological data", + "inputSchema": { + "type": "object", + "properties": { + "parameters": { + "type": "string", + "description": "Comma-separated data parameters" + }, + "community": { + "type": "string", + "description": "User community (RE, SB, AG, etc.)" + }, + "longitude": { + "type": "number", + "description": "Longitude coordinate" + }, + "latitude": { + "type": "number", + "description": "Latitude coordinate" + }, + "start": { + "type": "string", + "description": "Start date (YYYYMMDD)" + }, + "end": { + "type": "string", + "description": "End date (YYYYMMDD)" + }, + "format": { + "type": "string", + "description": "Response format (json, csv, etc.)" + } + }, + "required": [ + "parameters", + "community", + "longitude", + "latitude", + "start", + "end" + ] + } + }, + { + "name": "jpl/sbdb", + "description": "Small-Body Database (SBDB) - asteroid 
and comet data", + "inputSchema": { + "type": "object", + "properties": { + "sstr": { + "type": "string", + "description": "Search string (e.g., asteroid name, number, or designation)" + }, + "cad": { + "type": "boolean", + "description": "Include close approach data" + } + }, + "required": [ + "sstr" + ] + } + }, + { + "name": "jpl/fireball", + "description": "Fireball data - atmospheric impact events", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "number", + "description": "Maximum number of results to return" + }, + "date-min": { + "type": "string", + "description": "Start date (YYYY-MM-DD)" + }, + "date-max": { + "type": "string", + "description": "End date (YYYY-MM-DD)" + } + } + } + }, + { + "name": "jpl/jd_cal", + "description": "Julian Day number to/from calendar date/time converter", + "inputSchema": { + "type": "object", + "properties": { + "jd": { + "type": "string", + "description": "Julian date to convert to calendar date" + }, + "cd": { + "type": "string", + "description": "Calendar date to convert to Julian date (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss format)" + } + } + } + }, + { + "name": "jpl/nhats", + "description": "Human-accessible NEOs (Near-Earth Objects) data", + "inputSchema": { + "type": "object", + "properties": { + "dv": { + "type": "number", + "description": "Minimum total delta-V (km/s). Values: 4-12, default: 12" + }, + "dur": { + "type": "number", + "description": "Minimum total mission duration (days). Values: 60-450, default: 450" + }, + "stay": { + "type": "number", + "description": "Minimum stay time (days). Values: 8, 16, 24, 32, default: 8" + }, + "launch": { + "type": "string", + "description": "Launch window (year range). Values: 2020-2025, 2025-2030, 2030-2035, 2035-2040, 2040-2045, 2020-2045, default: 2020-2045" + }, + "h": { + "type": "number", + "description": "Object's maximum absolute magnitude (mag). 
Values: 16-30" + }, + "occ": { + "type": "number", + "description": "Object's maximum orbit condition code. Values: 0-8" + }, + "des": { + "type": "string", + "description": "Object designation (e.g., '2000 SG344' or '433')" + }, + "spk": { + "type": "string", + "description": "Object SPK-ID (e.g., '2000433')" + }, + "plot": { + "type": "boolean", + "description": "Include base-64 encoded plot image" + } + } + } + }, + { + "name": "jpl/cad", + "description": "Asteroid and comet close approaches to the planets in the past and future", + "inputSchema": { + "type": "object", + "properties": { + "dist-max": { + "type": "string", + "description": "Maximum approach distance (e.g., 0.05, 10LD). Default: 0.05 au" + }, + "dist-min": { + "type": "string", + "description": "Minimum approach distance. Default: none" + }, + "date-min": { + "type": "string", + "description": "Start date for search (YYYY-MM-DD). Default: now" + }, + "date-max": { + "type": "string", + "description": "End date for search (YYYY-MM-DD). Default: +60 days" + }, + "body": { + "type": "string", + "description": "Body to find close approaches to (e.g., Earth, Mars, ALL). Default: Earth" + }, + "sort": { + "type": "string", + "description": "Sort field: date, dist, dist-min, v-inf, v-rel, h, object. Default: date" + }, + "des": { + "type": "string", + "description": "Object designation (e.g., '2000 SG344' or '433')" + }, + "spk": { + "type": "string", + "description": "Object SPK-ID (e.g., '2000433')" + }, + "neo": { + "type": "boolean", + "description": "Limit to NEOs. Default: true" + }, + "fullname": { + "type": "boolean", + "description": "Include full object name in result. 
Default: false" + } + } + } + }, + { + "name": "jpl/sentry", + "description": "JPL Sentry - NEO Earth impact risk assessment data", + "inputSchema": { + "type": "object", + "properties": { + "limit": { + "type": "number", + "description": "Maximum number of results to return" + }, + "date-min": { + "type": "string", + "description": "Start date (YYYY-MM-DD)" + }, + "date-max": { + "type": "string", + "description": "End date (YYYY-MM-DD)" + }, + "des": { + "type": "string", + "description": "Object designation (e.g., '2011 AG5' or '29075')" + }, + "spk": { + "type": "string", + "description": "Object SPK-ID" + }, + "h-max": { + "type": "number", + "description": "Maximum absolute magnitude (size filter)" + }, + "ps-min": { + "type": "string", + "description": "Minimum Palermo Scale value" + }, + "ip-min": { + "type": "string", + "description": "Minimum impact probability" + }, + "removed": { + "type": "boolean", + "description": "Get objects removed from Sentry monitoring" + }, + "all": { + "type": "boolean", + "description": "Get all virtual impactors data" + } + } + } + }, + { + "name": "jpl/horizons", + "description": "JPL Horizons - Solar system objects ephemeris data", + "inputSchema": { + "type": "object", + "properties": { + "format": { + "type": "string", + "description": "Response format (json, text)", + "enum": [ + "json", + "text" + ] + }, + "COMMAND": { + "type": "string", + "description": "Target object identifier (e.g., '499' for Mars, '1' for Ceres, 'C/2020 F3' for Comet NEOWISE)" + }, + "OBJ_DATA": { + "type": "string", + "description": "Include object data", + "enum": [ + "YES", + "NO" + ] + }, + "MAKE_EPHEM": { + "type": "string", + "description": "Generate ephemeris", + "enum": [ + "YES", + "NO" + ] + }, + "EPHEM_TYPE": { + "type": "string", + "description": "Type of ephemeris (OBSERVER, VECTORS, ELEMENTS)", + "enum": [ + "OBSERVER", + "VECTORS", + "ELEMENTS" + ] + }, + "CENTER": { + "type": "string", + "description": "Coordinate center (e.g., 
'500@399' for Earth)" + }, + "START_TIME": { + "type": "string", + "description": "Start time for ephemeris (e.g., '2023-01-01')" + }, + "STOP_TIME": { + "type": "string", + "description": "Stop time for ephemeris (e.g., '2023-01-02')" + }, + "STEP_SIZE": { + "type": "string", + "description": "Step size for ephemeris points (e.g., '1d' for daily, '1h' for hourly)" + }, + "QUANTITIES": { + "type": "string", + "description": "Observable quantities to include (e.g., 'A' for all, or '1,2,20,23' for specific ones)" + }, + "OUT_UNITS": { + "type": "string", + "description": "Output units for vector tables", + "enum": [ + "KM-S", + "AU-D", + "KM-D" + ] + } + }, + "required": [ + "COMMAND" + ] + } + } + ] + }, + "perplexity": { + "display_name": "Perplexity Ask MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/ppl-ai/modelcontextprotocol" + }, + "homepage": "https://github.com/ppl-ai/modelcontextprotocol", + "author": { + "name": "ppl-ai" + }, + "license": "MIT", + "tags": [ + "perplexity", + "search", + "sonar-api", + "web-search" + ], + "arguments": { + "PERPLEXITY_API_KEY": { + "description": "API key for the Perplexity Sonar API", + "required": true, + "example": "YOUR_API_KEY_HERE" + } + }, + "installations": { + "npx": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "server-perplexity-ask" + ], + "package": "server-perplexity-ask", + "env": { + "PERPLEXITY_API_KEY": "YOUR_API_KEY_HERE" + }, + "description": "Run using NPX" + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "PERPLEXITY_API_KEY", + "mcp/perplexity-ask" + ], + "env": { + "PERPLEXITY_API_KEY": "YOUR_API_KEY_HERE" + }, + "description": "Run using Docker" + } + }, + "examples": [ + { + "title": "Web Search", + "description": "Use Perplexity to search the web for information", + "prompt": "Search the web for the latest information about climate change policies." 
+ } + ], + "name": "perplexity", + "description": "An MCP server implementation that integrates the Sonar API to provide Claude with unparalleled real-time, web-wide research.", + "categories": [ + "Web Services" + ], + "tools": [ + { + "name": "perplexity_ask", + "description": "Engages in a conversation using the Sonar API. Accepts an array of messages (each with a role and content) and returns a ask completion response from the Perplexity model.", + "inputSchema": { + "type": "object", + "properties": { + "messages": { + "type": "array", + "items": { + "type": "object", + "properties": { + "role": { + "type": "string", + "description": "Role of the message (e.g., system, user, assistant)" + }, + "content": { + "type": "string", + "description": "The content of the message" + } + }, + "required": [ + "role", + "content" + ] + }, + "description": "Array of conversation messages" + } + }, + "required": [ + "messages" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "discourse": { + "name": "discourse", + "display_name": "Discourse", + "description": "A MCP server to search Discourse posts on a Discourse forum.", + "repository": { + "type": "git", + "url": "https://github.com/AshDevFr/discourse-mcp-server" + }, + "license": "MIT", + "tags": [ + "discourse", + "search" + ], + "author": { + "name": "AshDevFr" + }, + "homepage": "https://github.com/AshDevFr/discourse-mcp-server", + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@ashdev/discourse-mcp-server" + ], + "env": { + "DISCOURSE_API_URL": "${DISCOURSE_API_URL}", + "DISCOURSE_API_KEY": "${DISCOURSE_API_KEY}", + "DISCOURSE_API_USERNAME": "${DISCOURSE_API_USERNAME}" + } + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "DISCOURSE_API_URL=${DISCOURSE_API_URL}", + "-e", + "DISCOURSE_API_KEY=${DISCOURSE_API_KEY}", + "-e", + "DISCOURSE_API_USERNAME=${DISCOURSE_API_USERNAME}", + 
"ashdev/discourse-mcp-server" + ] + } + }, + "arguments": { + "DISCOURSE_API_URL": { + "description": "API URL for the Discourse forum that the server will connect to.", + "required": true, + "example": "https://try.discourse.org" + }, + "DISCOURSE_API_KEY": { + "description": "API key for authenticating to the Discourse forum.", + "required": true, + "example": "1234" + }, + "DISCOURSE_API_USERNAME": { + "description": "Username for authenticating to the Discourse forum.", + "required": true, + "example": "ash" + } + }, + "categories": [ + "Web Services" + ], + "tools": [ + { + "name": "search_posts", + "description": "Search Discourse posts", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "minLength": 5, + "description": "Query" + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "webflow": { + "name": "webflow", + "display_name": "Webflow", + "description": "Interfact with the Webflow APIs", + "repository": { + "type": "git", + "url": "https://github.com/kapilduraphe/webflow-mcp-server" + }, + "homepage": "https://github.com/kapilduraphe/webflow-mcp-server", + "author": { + "name": "kapilduraphe" + }, + "license": "MIT", + "categories": [ + "Professional Apps" + ], + "tags": [ + "webflow", + "api" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/kapilduraphe/webflow-mcp-server" + ], + "env": { + "WEBFLOW_API_TOKEN": "${WEBFLOW_API_TOKEN}" + } + } + }, + "examples": [ + { + "title": "Get Sites", + "description": "Retrieve a list of all Webflow sites accessible to the authenticated user.", + "prompt": "get_sites" + }, + { + "title": "Get Site", + "description": "Retrieve detailed information about a specific Webflow site by ID.", + "prompt": "get_site siteId" + } + ], + "arguments": { + "WEBFLOW_API_TOKEN": { + "description": "Your Webflow API token to authenticate requests to the Webflow API. 
This token is required for the server to function and should be kept secure.", + "required": true, + "example": "your-api-token" + } + } + }, + "opik": { + "display_name": "Opik", + "repository": { + "type": "git", + "url": "https://github.com/comet-ml/opik" + }, + "homepage": "https://www.comet.com/site/products/opik/", + "author": { + "name": "comet-ml" + }, + "license": "MIT", + "tags": [ + "llm", + "evaluation", + "tracing", + "monitoring" + ], + "arguments": { + "use_local": { + "description": "Configure SDK to run on local installation", + "required": false, + "example": "True" + } + }, + "installations": { + "docker": { + "type": "docker", + "command": "./opik.sh", + "args": [], + "description": "Start the Opik platform using Docker Compose", + "recommended": false + }, + "pip": { + "type": "python", + "command": "pip", + "args": [ + "install", + "opik" + ], + "package": "opik", + "description": "Install the Python SDK", + "recommended": true + } + }, + "examples": [ + { + "title": "Basic Trace Logging", + "description": "Track LLM function calls using the decorator", + "prompt": "import opik\n\nopik.configure(use_local=True) # Run locally\n\n@opik.track\ndef my_llm_function(user_question: str) -> str:\n # Your LLM code here\n\n return \"Hello\"" + }, + { + "title": "Using LLM as a Judge Metrics", + "description": "Evaluate LLM outputs for hallucination", + "prompt": "from opik.evaluation.metrics import Hallucination\n\nmetric = Hallucination()\nscore = metric.score(\n input=\"What is the capital of France?\",\n output=\"Paris\",\n context=[\"France is a country in Europe.\"]\n)\nprint(score)" + } + ], + "name": "opik", + "description": "", + "categories": [ + "MCP Tools" + ], + "is_official": true + }, + "airtable": { + "name": "airtable", + "display_name": "Airtable", + "description": "Airtable Model Context Protocol Server.", + "repository": { + "type": "git", + "url": "https://github.com/felores/airtable-mcp" + }, + "author": { + "name": "felores" + }, + 
"license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "Airtable", + "Database", + "API" + ], + "arguments": { + "AIRTABLE_API_KEY": { + "description": "Airtable API key for authenticating with the Airtable API", + "required": true, + "example": "pat.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@felores/airtable-mcp-server" + ], + "env": { + "AIRTABLE_API_KEY": "${AIRTABLE_API_KEY}" + }, + "description": "Run with npx (requires npm install)" + } + }, + "homepage": "https://github.com/felores/airtable-mcp", + "tools": [ + { + "name": "list_bases", + "description": "List all accessible Airtable bases", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "list_tables", + "description": "List all tables in a base", + "inputSchema": { + "type": "object", + "properties": { + "base_id": { + "type": "string", + "description": "ID of the base" + } + }, + "required": [ + "base_id" + ] + } + }, + { + "name": "create_table", + "description": "Create a new table in a base", + "inputSchema": { + "type": "object", + "properties": { + "base_id": { + "type": "string", + "description": "ID of the base" + }, + "table_name": { + "type": "string", + "description": "Name of the new table" + }, + "description": { + "type": "string", + "description": "Description of the table" + }, + "fields": { + "type": "array", + "description": "Initial fields for the table", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the field" + }, + "type": { + "type": "string", + "description": "Type of the field (e.g., singleLineText, multilineText, number, etc.)" + }, + "description": { + "type": "string", + "description": "Description of the field" + }, + "options": { + "type": "object", + "description": "Field-specific options" + } + }, + "required": [ + "name", + "type" + ] + } + } + }, + 
"required": [ + "base_id", + "table_name" + ] + } + }, + { + "name": "update_table", + "description": "Update a table's schema", + "inputSchema": { + "type": "object", + "properties": { + "base_id": { + "type": "string", + "description": "ID of the base" + }, + "table_id": { + "type": "string", + "description": "ID of the table to update" + }, + "name": { + "type": "string", + "description": "New name for the table" + }, + "description": { + "type": "string", + "description": "New description for the table" + } + }, + "required": [ + "base_id", + "table_id" + ] + } + }, + { + "name": "create_field", + "description": "Create a new field in a table", + "inputSchema": { + "type": "object", + "properties": { + "base_id": { + "type": "string", + "description": "ID of the base" + }, + "table_id": { + "type": "string", + "description": "ID of the table" + }, + "field": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the field" + }, + "type": { + "type": "string", + "description": "Type of the field" + }, + "description": { + "type": "string", + "description": "Description of the field" + }, + "options": { + "type": "object", + "description": "Field-specific options" + } + }, + "required": [ + "name", + "type" + ] + } + }, + "required": [ + "base_id", + "table_id", + "field" + ] + } + }, + { + "name": "update_field", + "description": "Update a field in a table", + "inputSchema": { + "type": "object", + "properties": { + "base_id": { + "type": "string", + "description": "ID of the base" + }, + "table_id": { + "type": "string", + "description": "ID of the table" + }, + "field_id": { + "type": "string", + "description": "ID of the field to update" + }, + "updates": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "New name for the field" + }, + "description": { + "type": "string", + "description": "New description for the field" + }, + "options": { + "type": "object", + "description": 
"New field-specific options" + } + } + } + }, + "required": [ + "base_id", + "table_id", + "field_id", + "updates" + ] + } + }, + { + "name": "list_records", + "description": "List records in a table", + "inputSchema": { + "type": "object", + "properties": { + "base_id": { + "type": "string", + "description": "ID of the base" + }, + "table_name": { + "type": "string", + "description": "Name of the table" + }, + "max_records": { + "type": "number", + "description": "Maximum number of records to return" + } + }, + "required": [ + "base_id", + "table_name" + ] + } + }, + { + "name": "create_record", + "description": "Create a new record in a table", + "inputSchema": { + "type": "object", + "properties": { + "base_id": { + "type": "string", + "description": "ID of the base" + }, + "table_name": { + "type": "string", + "description": "Name of the table" + }, + "fields": { + "type": "object", + "description": "Record fields as key-value pairs" + } + }, + "required": [ + "base_id", + "table_name", + "fields" + ] + } + }, + { + "name": "update_record", + "description": "Update an existing record in a table", + "inputSchema": { + "type": "object", + "properties": { + "base_id": { + "type": "string", + "description": "ID of the base" + }, + "table_name": { + "type": "string", + "description": "Name of the table" + }, + "record_id": { + "type": "string", + "description": "ID of the record to update" + }, + "fields": { + "type": "object", + "description": "Record fields to update as key-value pairs" + } + }, + "required": [ + "base_id", + "table_name", + "record_id", + "fields" + ] + } + }, + { + "name": "delete_record", + "description": "Delete a record from a table", + "inputSchema": { + "type": "object", + "properties": { + "base_id": { + "type": "string", + "description": "ID of the base" + }, + "table_name": { + "type": "string", + "description": "Name of the table" + }, + "record_id": { + "type": "string", + "description": "ID of the record to delete" + } + }, + 
"required": [ + "base_id", + "table_name", + "record_id" + ] + } + }, + { + "name": "search_records", + "description": "Search for records in a table", + "inputSchema": { + "type": "object", + "properties": { + "base_id": { + "type": "string", + "description": "ID of the base" + }, + "table_name": { + "type": "string", + "description": "Name of the table" + }, + "field_name": { + "type": "string", + "description": "Name of the field to search in" + }, + "value": { + "type": "string", + "description": "Value to search for" + } + }, + "required": [ + "base_id", + "table_name", + "field_name", + "value" + ] + } + }, + { + "name": "get_record", + "description": "Get a single record by its ID", + "inputSchema": { + "type": "object", + "properties": { + "base_id": { + "type": "string", + "description": "ID of the base" + }, + "table_name": { + "type": "string", + "description": "Name of the table" + }, + "record_id": { + "type": "string", + "description": "ID of the record to retrieve" + } + }, + "required": [ + "base_id", + "table_name", + "record_id" + ] + } + } + ] + }, + "sequential-thinking": { + "name": "sequential-thinking", + "display_name": "Sequential Thinking", + "description": "Dynamic and reflective problem-solving through thought sequences", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/blob/main/src/sequentialthinking", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "AI Systems" + ], + "tags": [ + "dynamic thinking", + "reflective process", + "structured thinking" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-sequential-thinking" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "mcp/sequentialthinking" + ] + } + }, + "examples": [ + { + "title": "Example Usage", 
+ "description": "Using the Sequential Thinking tool for a complex problem", + "prompt": "Break down the complex problem of organizing an event into manageable steps." + } + ], + "tools": [ + { + "name": "sequentialthinking", + "description": "A detailed tool for dynamic and reflective problem-solving through thoughts.\nThis tool helps analyze problems through a flexible thinking process that can adapt and evolve.\nEach thought can build on, question, or revise previous insights as understanding deepens.\n\nWhen to use this tool:\n- Breaking down complex problems into steps\n- Planning and design with room for revision\n- Analysis that might need course correction\n- Problems where the full scope might not be clear initially\n- Problems that require a multi-step solution\n- Tasks that need to maintain context over multiple steps\n- Situations where irrelevant information needs to be filtered out\n\nKey features:\n- You can adjust total_thoughts up or down as you progress\n- You can question or revise previous thoughts\n- You can add more thoughts even after reaching what seemed like the end\n- You can express uncertainty and explore alternative approaches\n- Not every thought needs to build linearly - you can branch or backtrack\n- Generates a solution hypothesis\n- Verifies the hypothesis based on the Chain of Thought steps\n- Repeats the process until satisfied\n- Provides a correct answer\n\nParameters explained:\n- thought: Your current thinking step, which can include:\n* Regular analytical steps\n* Revisions of previous thoughts\n* Questions about previous decisions\n* Realizations about needing more analysis\n* Changes in approach\n* Hypothesis generation\n* Hypothesis verification\n- next_thought_needed: True if you need more thinking, even if at what seemed like the end\n- thought_number: Current number in sequence (can go beyond initial total if needed)\n- total_thoughts: Current estimate of thoughts needed (can be adjusted up/down)\n- is_revision: A 
boolean indicating if this thought revises previous thinking\n- revises_thought: If is_revision is true, which thought number is being reconsidered\n- branch_from_thought: If branching, which thought number is the branching point\n- branch_id: Identifier for the current branch (if any)\n- needs_more_thoughts: If reaching end but realizing more thoughts needed\n\nYou should:\n1. Start with an initial estimate of needed thoughts, but be ready to adjust\n2. Feel free to question or revise previous thoughts\n3. Don't hesitate to add more thoughts if needed, even at the \"end\"\n4. Express uncertainty when present\n5. Mark thoughts that revise previous thinking or branch into new paths\n6. Ignore information that is irrelevant to the current step\n7. Generate a solution hypothesis when appropriate\n8. Verify the hypothesis based on the Chain of Thought steps\n9. Repeat the process until satisfied with the solution\n10. Provide a single, ideally correct answer as the final output\n11. Only set next_thought_needed to false when truly done and a satisfactory answer is reached", + "inputSchema": { + "type": "object", + "properties": { + "thought": { + "type": "string", + "description": "Your current thinking step" + }, + "nextThoughtNeeded": { + "type": "boolean", + "description": "Whether another thought step is needed" + }, + "thoughtNumber": { + "type": "integer", + "description": "Current thought number", + "minimum": 1 + }, + "totalThoughts": { + "type": "integer", + "description": "Estimated total thoughts needed", + "minimum": 1 + }, + "isRevision": { + "type": "boolean", + "description": "Whether this revises previous thinking" + }, + "revisesThought": { + "type": "integer", + "description": "Which thought is being reconsidered", + "minimum": 1 + }, + "branchFromThought": { + "type": "integer", + "description": "Branching point thought number", + "minimum": 1 + }, + "branchId": { + "type": "string", + "description": "Branch identifier" + }, + "needsMoreThoughts": { 
+ "type": "boolean", + "description": "If more thoughts are needed" + } + }, + "required": [ + "thought", + "nextThoughtNeeded", + "thoughtNumber", + "totalThoughts" + ] + } + } + ], + "is_official": true + }, + "agentql-mcp": { + "display_name": "AgentQL MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/tinyfish-io/agentql-mcp" + }, + "license": "[NOT GIVEN]", + "homepage": "https://agentql.com", + "author": { + "name": "tinyfish-io" + }, + "tags": [ + "data extraction", + "web scraping" + ], + "arguments": { + "AGENTQL_API_KEY": { + "description": "API key from AgentQL Dev Portal", + "required": true, + "example": "YOUR_API_KEY" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "agentql-mcp" + ], + "package": "agentql-mcp", + "env": { + "AGENTQL_API_KEY": "YOUR_API_KEY" + }, + "description": "Install via npm and run with npx", + "recommended": true + }, + "development": { + "type": "custom", + "command": "/path/to/agentql-mcp/dist/index.js", + "args": [], + "env": { + "AGENTQL_API_KEY": "YOUR_API_KEY" + }, + "description": "Run development version from local build", + "recommended": false + } + }, + "examples": [ + { + "title": "Extract YouTube search results", + "description": "Extract structured data from YouTube search results", + "prompt": "Extract the list of videos from the page https://www.youtube.com/results?search_query=agentql, every video should have a title, an author name, a number of views and a url to the video. Make sure to exclude ads items. Format this as a markdown table." 
+ } + ], + "name": "agentql-mcp", + "description": "This is a Model Context Protocol (MCP) server that integrates [AgentQL](https://agentql.com)'s data extraction capabilities.", + "categories": [ + "Web Services" + ], + "tools": [ + { + "name": "extract-web-data", + "description": "Extracts structured data as JSON from a web page given a URL using a Natural Language description of the data.", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "The URL of the public webpage to extract data from" + }, + "prompt": { + "type": "string", + "description": "Natural Language description of the data to extract from the page" + } + }, + "required": [ + "url", + "prompt" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "hdw-linkedin": { + "name": "hdw-linkedin", + "display_name": "HDW", + "description": "Access to profile data and management of user account with [HorizonDataWave.ai](https://horizondatawave.ai/).", + "repository": { + "type": "git", + "url": "https://github.com/horizondatawave/hdw-mcp-server" + }, + "homepage": "https://github.com/horizondatawave/hdw-mcp-server", + "author": { + "name": "horizondatawave" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "LinkedIn", + "API access", + "Data retrieval", + "User management" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@horizondatawave/mcp" + ], + "env": { + "HDW_ACCESS_TOKEN": "${HDW_ACCESS_TOKEN}", + "HDW_ACCOUNT_ID": "${HDW_ACCOUNT_ID}" + } + } + }, + "arguments": { + "HDW_ACCESS_TOKEN": { + "description": "Access token for HorizonDataWave API, used for authentication and authorization to access user data.", + "required": true, + "example": "YOUR_HD_W_ACCESS_TOKEN" + }, + "HDW_ACCOUNT_ID": { + "description": "Account ID for HorizonDataWave API, used to identify the user's account.", + "required": true, + "example": "YOUR_HD_W_ACCOUNT_ID" + } + }, + 
"tools": [ + { + "name": "search_linkedin_users", + "description": "Search for LinkedIn users with various filters like keywords, name, title, company, location etc.", + "inputSchema": { + "type": "object", + "properties": { + "keywords": { + "type": "string", + "description": "Any keyword for searching in the user page." + }, + "first_name": { + "type": "string", + "description": "Exact first name" + }, + "last_name": { + "type": "string", + "description": "Exact last name" + }, + "title": { + "type": "string", + "description": "Exact word in the title" + }, + "company_keywords": { + "type": "string", + "description": "Exact word in the company name" + }, + "school_keywords": { + "type": "string", + "description": "Exact word in the school name" + }, + "current_company": { + "type": "string", + "description": "Company URN or name" + }, + "past_company": { + "type": "string", + "description": "Past company URN or name" + }, + "location": { + "type": "string", + "description": "Location name or URN" + }, + "industry": { + "type": "string", + "description": "Industry URN or name" + }, + "education": { + "type": "string", + "description": "Education URN or name" + }, + "count": { + "type": "number", + "description": "Maximum number of results (max 1000)", + "default": 10 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds (20-1500)", + "default": 300 + } + }, + "required": [ + "count" + ] + } + }, + { + "name": "get_linkedin_profile", + "description": "Get detailed information about a LinkedIn user profile", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "User alias, URL, or URN" + }, + "with_experience": { + "type": "boolean", + "description": "Include experience info", + "default": true + }, + "with_education": { + "type": "boolean", + "description": "Include education info", + "default": true + }, + "with_skills": { + "type": "boolean", + "description": "Include skills info", + 
"default": true + } + }, + "required": [ + "user" + ] + } + }, + { + "name": "get_linkedin_email_user", + "description": "Get LinkedIn user details by email", + "inputSchema": { + "type": "object", + "properties": { + "email": { + "type": "string", + "description": "Email address" + }, + "count": { + "type": "number", + "description": "Max results", + "default": 5 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "email" + ] + } + }, + { + "name": "get_linkedin_user_posts", + "description": "Get LinkedIn posts for a user by URN (must include prefix, example: fsd_profile:ACoAAEWn01QBWENVMWqyM3BHfa1A-xsvxjdaXsY)", + "inputSchema": { + "type": "object", + "properties": { + "urn": { + "type": "string", + "description": "User URN (must include prefix, example: fsd_profile:ACoAA...)" + }, + "count": { + "type": "number", + "description": "Max posts", + "default": 10 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "urn" + ] + } + }, + { + "name": "get_linkedin_user_reactions", + "description": "Get LinkedIn reactions for a user by URN (must include prefix, example: fsd_profile:ACoAA...)", + "inputSchema": { + "type": "object", + "properties": { + "urn": { + "type": "string", + "description": "User URN (must include prefix, example: fsd_profile:ACoAA...)" + }, + "count": { + "type": "number", + "description": "Max reactions", + "default": 10 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "urn" + ] + } + }, + { + "name": "get_linkedin_chat_messages", + "description": "Get top chat messages from LinkedIn management API. Account ID is taken from environment.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "User URN for filtering messages (must include prefix, e.g. 
fsd_profile:ACoAA...)" + }, + "count": { + "type": "number", + "description": "Max messages to return", + "default": 20 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "user" + ] + } + }, + { + "name": "send_linkedin_chat_message", + "description": "Send a chat message via LinkedIn management API. Account ID is taken from environment.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "Recipient user URN (must include prefix, e.g. fsd_profile:ACoAA...)" + }, + "text": { + "type": "string", + "description": "Message text" + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "user", + "text" + ] + } + }, + { + "name": "send_linkedin_connection", + "description": "Send a connection invitation to LinkedIn user. Account ID is taken from environment.", + "inputSchema": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "Recipient user URN (must include prefix, e.g. fsd_profile:ACoAA...)" + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "user" + ] + } + }, + { + "name": "send_linkedin_post_comment", + "description": "Create a comment on a LinkedIn post or on another comment. Account ID is taken from environment.", + "inputSchema": { + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "Comment text" + }, + "urn": { + "type": "string", + "description": "URN of the activity or comment to comment on (e.g., 'activity:123' or 'comment:(activity:123,456)')" + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "text", + "urn" + ] + } + }, + { + "name": "get_linkedin_user_connections", + "description": "Get list of LinkedIn user connections. 
Account ID is taken from environment.", + "inputSchema": { + "type": "object", + "properties": { + "connected_after": { + "type": "number", + "description": "Filter users that added after the specified date (timestamp)" + }, + "count": { + "type": "number", + "description": "Max connections to return", + "default": 20 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [] + } + }, + { + "name": "get_linkedin_post_reposts", + "description": "Get LinkedIn reposts for a post by URN", + "inputSchema": { + "type": "object", + "properties": { + "urn": { + "type": "string", + "description": "Post URN, only activity urn type is allowed (example: activity:7234173400267538433)" + }, + "count": { + "type": "number", + "description": "Max reposts to return", + "default": 50 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "urn", + "count" + ] + } + }, + { + "name": "get_linkedin_post_comments", + "description": "Get LinkedIn comments for a post by URN", + "inputSchema": { + "type": "object", + "properties": { + "urn": { + "type": "string", + "description": "Post URN, only activity urn type is allowed (example: activity:7234173400267538433)" + }, + "sort": { + "type": "string", + "description": "Sort type (relevance or recent)", + "enum": [ + "relevance", + "recent" + ], + "default": "relevance" + }, + "count": { + "type": "number", + "description": "Max comments to return", + "default": 10 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "urn", + "count" + ] + } + }, + { + "name": "get_linkedin_google_company", + "description": "Search for LinkedIn companies using Google search. 
First result is usually the best match.", + "inputSchema": { + "type": "object", + "properties": { + "keywords": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Company keywords for search. For example, company name or company website", + "examples": [ + [ + "Software as a Service (SaaS)" + ], + [ + "google.com" + ] + ] + }, + "with_urn": { + "type": "boolean", + "description": "Include URNs in response (increases execution time)", + "default": false + }, + "count_per_keyword": { + "type": "number", + "description": "Max results per keyword", + "default": 1, + "minimum": 1, + "maximum": 10 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "keywords" + ] + } + }, + { + "name": "get_linkedin_company", + "description": "Get detailed information about a LinkedIn company", + "inputSchema": { + "type": "object", + "properties": { + "company": { + "type": "string", + "description": "Company Alias or URL or URN (example: 'openai' or 'company:1441')" + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "company" + ] + } + }, + { + "name": "get_linkedin_company_employees", + "description": "Get employees of a LinkedIn company", + "inputSchema": { + "type": "object", + "properties": { + "companies": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Company URNs (example: ['company:14064608'])" + }, + "keywords": { + "type": "string", + "description": "Any keyword for searching employees", + "examples": [ + "Alex" + ] + }, + "first_name": { + "type": "string", + "description": "Search for exact first name", + "examples": [ + "Bill" + ] + }, + "last_name": { + "type": "string", + "description": "Search for exact last name", + "examples": [ + "Gates" + ] + }, + "count": { + "type": "number", + "description": "Maximum number of results", + "default": 10 + }, + "timeout": { + "type": 
"number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "companies", + "count" + ] + } + }, + { + "name": "send_linkedin_post", + "description": "Create a post on LinkedIn. Account ID is taken from environment.", + "inputSchema": { + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "Post text content" + }, + "visibility": { + "type": "string", + "description": "Post visibility", + "enum": [ + "ANYONE", + "CONNECTIONS_ONLY" + ], + "default": "ANYONE" + }, + "comment_scope": { + "type": "string", + "description": "Who can comment on the post", + "enum": [ + "ALL", + "CONNECTIONS_ONLY", + "NONE" + ], + "default": "ALL" + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [ + "text" + ] + } + }, + { + "name": "linkedin_sn_search_users", + "description": "Advanced search for LinkedIn users using Sales Navigator filters", + "inputSchema": { + "type": "object", + "properties": { + "keywords": { + "type": "string", + "description": "Any keyword for searching in the user profile. Using this may reduce result count." 
+ }, + "first_names": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Exact first names to search for" + }, + "last_names": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Exact last names to search for" + }, + "current_titles": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Exact words to search in current titles" + }, + "location": { + "type": [ + "string", + "array" + ], + "items": { + "type": "string" + }, + "description": "Location URN (geo:*) or name, or array of them" + }, + "education": { + "type": [ + "string", + "array" + ], + "items": { + "type": "string" + }, + "description": "Education URN (company:*) or name, or array of them" + }, + "languages": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "Arabic", + "English", + "Spanish", + "Portuguese", + "Chinese", + "French", + "Italian", + "Russian", + "German", + "Dutch", + "Turkish", + "Tagalog", + "Polish", + "Korean", + "Japanese", + "Malay", + "Norwegian", + "Danish", + "Romanian", + "Swedish", + "Bahasa Indonesia", + "Czech" + ] + }, + "description": "Profile languages" + }, + "past_titles": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Exact words to search in past titles" + }, + "functions": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "Accounting", + "Administrative", + "Arts and Design", + "Business", + "Development", + "Community and Social Services", + "Consulting", + "Education", + "Engineering", + "Entrepreneurship", + "Finance", + "Healthcare Services", + "Human Resources", + "Information Technology", + "Legal", + "Marketing", + "Media and Communication", + "Military and Protective Services", + "Operations", + "Product Management", + "Program and Project Management", + "Purchasing", + "Quality Assurance", + "Research", + "Real Estate", + "Sales", + "Customer Success and Support" + ] + }, + "description": "Job functions" + }, + "levels": 
{ + "type": "array", + "items": { + "type": "string", + "enum": [ + "Entry", + "Director", + "Owner", + "CXO", + "Vice President", + "Experienced Manager", + "Entry Manager", + "Strategic", + "Senior", + "Trainy" + ] + }, + "description": "Job seniority levels" + }, + "years_in_the_current_company": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "0-1", + "1-2", + "3-5", + "6-10", + "10+" + ] + }, + "description": "Years in current company ranges" + }, + "years_in_the_current_position": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "0-1", + "1-2", + "3-5", + "6-10", + "10+" + ] + }, + "description": "Years in current position ranges" + }, + "company_sizes": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "Self-employed", + "1-10", + "11-50", + "51-200", + "201-500", + "501-1,000", + "1,001-5,000", + "5,001-10,000", + "10,001+" + ] + }, + "description": "Company size ranges" + }, + "company_types": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "Public Company", + "Privately Held", + "Non Profit", + "Educational Institution", + "Partnership", + "Self Employed", + "Self Owned", + "Government Agency" + ] + }, + "description": "Company types" + }, + "company_locations": { + "type": [ + "string", + "array" + ], + "items": { + "type": "string" + }, + "description": "Company location URN (geo:*) or name, or array of them" + }, + "current_companies": { + "type": [ + "string", + "array" + ], + "items": { + "type": "string" + }, + "description": "Current company URN (company:*) or name, or array of them" + }, + "past_companies": { + "type": [ + "string", + "array" + ], + "items": { + "type": "string" + }, + "description": "Past company URN (company:*) or name, or array of them" + }, + "industry": { + "type": [ + "string", + "array" + ], + "items": { + "type": "string" + }, + "description": "Industry URN (industry:*) or name, or array of them" + }, + "count": { + "type": "number", + 
"description": "Maximum number of results (max 2500)", + "default": 10, + "minimum": 1, + "maximum": 2500 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds (20-1500)", + "default": 300, + "minimum": 20, + "maximum": 1500 + } + }, + "required": [ + "count" + ] + } + }, + { + "name": "get_linkedin_conversations", + "description": "Get list of LinkedIn conversations from the messaging interface. Account ID is taken from environment.", + "inputSchema": { + "type": "object", + "properties": { + "connected_after": { + "type": "number", + "description": "Filter conversations created after the specified date (timestamp)" + }, + "count": { + "type": "number", + "description": "Max conversations to return", + "default": 20 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds", + "default": 300 + } + }, + "required": [] + } + }, + { + "name": "google_search", + "description": "Search for information using Google search API", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query. 
For example: 'python fastapi'" + }, + "count": { + "type": "number", + "description": "Maximum number of results (from 1 to 20)", + "default": 10 + }, + "timeout": { + "type": "number", + "description": "Timeout in seconds (20-1500)", + "default": 300 + } + }, + "required": [ + "query" + ] + } + } + ] + }, + "unity-integration-advanced": { + "name": "unity-integration-advanced", + "display_name": "Unity Integration", + "description": "Advanced Unity3d Game Engine MCP which supports ,Execution of Any Editor Related Code Directly Inside of Unity, Fetch Logs, Get Editor State and Allow File Access of the Project making it much more useful in Script Editing or asset creation.", + "repository": { + "type": "git", + "url": "https://github.com/quazaai/UnityMCPIntegration" + }, + "homepage": "https://github.com/quazaai/UnityMCPIntegration", + "author": { + "name": "quazaai" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "Unity", + "Integration", + "AI" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/quazaai/UnityMCPIntegration" + ], + "env": { + "MCP_WEBSOCKET_PORT": "${MCP_WEBSOCKET_PORT}" + } + } + }, + "examples": [ + { + "title": "Get Unity Editor State", + "description": "Retrieve comprehensive information about the current Unity project and editor state.", + "prompt": "get_editor_state()" + }, + { + "title": "Execute C# Code", + "description": "Run specific C# code directly within the Unity Editor.", + "prompt": "execute_editor_command('Debug.Log(\"Hello, World!\");')" + } + ], + "arguments": { + "MCP_WEBSOCKET_PORT": { + "description": "Environment variable to specify the WebSocket port used by the MCP server.", + "required": false, + "example": "5010" + } + }, + "tools": [ + { + "name": "get_current_scene_info", + "description": "Retrieve information about the current scene in Unity Editor with configurable detail level", + "inputSchema": { + "type": "object", + 
"properties": { + "detailLevel": { + "type": "string", + "enum": [ + "RootObjectsOnly", + "FullHierarchy" + ], + "description": "RootObjectsOnly: Returns just root GameObjects. FullHierarchy: Returns complete hierarchy with all children.", + "default": "RootObjectsOnly" + } + } + }, + "category": "Editor State", + "tags": [ + "unity", + "editor", + "scene" + ], + "returns": { + "type": "object", + "description": "Returns information about the current scene and its hierarchy based on requested detail level" + } + }, + { + "name": "get_game_objects_info", + "description": "Retrieve detailed information about specific GameObjects in the current scene", + "inputSchema": { + "type": "object", + "properties": { + "instanceIDs": { + "type": "array", + "items": { + "type": "number" + }, + "description": "Array of GameObject instance IDs to get information for", + "minItems": 1 + }, + "detailLevel": { + "type": "string", + "enum": [ + "BasicInfo", + "IncludeComponents", + "IncludeChildren", + "IncludeComponentsAndChildren" + ], + "description": "BasicInfo: Basic GameObject information. IncludeComponents: Includes component details. IncludeChildren: Includes child GameObjects. IncludeComponentsAndChildren: Includes both components and a full hierarchy with components on children.", + "default": "IncludeComponents" + } + }, + "required": [ + "instanceIDs" + ] + }, + "category": "Editor State", + "tags": [ + "unity", + "editor", + "gameobjects" + ], + "returns": { + "type": "object", + "description": "Returns detailed information about the requested GameObjects" + } + }, + { + "name": "execute_editor_command", + "description": "Execute C# code directly in the Unity Editor - allows full flexibility including custom namespaces and multiple classes", + "inputSchema": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "C# code to execute in Unity Editor. 
You MUST define a public class named \"McpScript\" with a public static method named \"Execute\" that returns an object. Example: \"public class McpScript { public static object Execute() { /* your code here */ return result; } }\". You can include any necessary namespaces, additional classes, and methods.", + "minLength": 1 + } + }, + "required": [ + "code" + ] + }, + "category": "Editor Control", + "tags": [ + "unity", + "editor", + "command", + "c#" + ], + "returns": { + "type": "object", + "description": "Returns the execution result, execution time, and status" + } + }, + { + "name": "get_logs", + "description": "Retrieve Unity Editor logs with filtering options", + "inputSchema": { + "type": "object", + "properties": { + "types": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "Log", + "Warning", + "Error", + "Exception" + ] + }, + "description": "Filter logs by type" + }, + "count": { + "type": "number", + "description": "Maximum number of log entries to return", + "minimum": 1, + "maximum": 1000 + }, + "fields": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "message", + "stackTrace", + "logType", + "timestamp" + ] + }, + "description": "Specify which fields to include in the output" + }, + "messageContains": { + "type": "string", + "description": "Filter logs by message content" + }, + "stackTraceContains": { + "type": "string", + "description": "Filter logs by stack trace content" + }, + "timestampAfter": { + "type": "string", + "description": "Filter logs after this ISO timestamp" + }, + "timestampBefore": { + "type": "string", + "description": "Filter logs before this ISO timestamp" + } + } + }, + "category": "Debugging", + "tags": [ + "unity", + "editor", + "logs", + "debugging" + ], + "returns": { + "type": "array", + "description": "Returns an array of log entries matching the specified filters" + } + }, + { + "name": "verify_connection", + "description": "Verify that the MCP server has an active connection to 
Unity Editor", + "inputSchema": { + "type": "object", + "properties": {} + }, + "category": "Connection", + "tags": [ + "unity", + "editor", + "connection" + ], + "returns": { + "type": "object", + "description": "Returns connection status information" + } + }, + { + "name": "get_editor_state", + "description": "Get the current Unity Editor state including project information", + "inputSchema": { + "type": "object", + "properties": {} + }, + "category": "Editor State", + "tags": [ + "unity", + "editor", + "project" + ], + "returns": { + "type": "object", + "description": "Returns detailed information about the current Unity Editor state, project settings, and environment" + } + }, + { + "name": "read_file", + "description": "Read the contents of a file from the Unity project. Paths are relative to the project's Assets folder. For example, use 'Scenes/MainScene.unity' to read Assets/Scenes/MainScene.unity.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "Path to the file to read. Can be absolute or relative to Unity project Assets folder. If empty, defaults to the Assets folder." + } + }, + "required": [ + "path" + ] + }, + "category": "Filesystem", + "tags": [ + "unity", + "filesystem", + "file" + ] + }, + { + "name": "read_multiple_files", + "description": "Read the contents of multiple files from the Unity project simultaneously.", + "inputSchema": { + "type": "object", + "properties": { + "paths": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Array of file paths to read. Paths can be absolute or relative to Unity project Assets folder." 
+ } + }, + "required": [ + "paths" + ] + }, + "category": "Filesystem", + "tags": [ + "unity", + "filesystem", + "file", + "batch" + ] + }, + { + "name": "write_file", + "description": "Create a new file or completely overwrite an existing file in the Unity project.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "Path to the file to write. Can be absolute or relative to Unity project Assets folder. If empty, defaults to the Assets folder." + }, + "content": { + "type": "string", + "description": "Content to write to the file" + } + }, + "required": [ + "path", + "content" + ] + }, + "category": "Filesystem", + "tags": [ + "unity", + "filesystem", + "file", + "write" + ] + }, + { + "name": "edit_file", + "description": "Make precise edits to a text file in the Unity project. Returns a git-style diff showing changes.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "Path to the file to edit. Can be absolute or relative to Unity project Assets folder. If empty, defaults to the Assets folder." + }, + "edits": { + "type": "array", + "items": { + "type": "object", + "properties": { + "oldText": { + "type": "string", + "description": "Text to search for - must match exactly" + }, + "newText": { + "type": "string", + "description": "Text to replace with" + } + }, + "required": [ + "oldText", + "newText" + ], + "additionalProperties": false + }, + "description": "Array of edit operations to apply" + }, + "dryRun": { + "type": "boolean", + "default": false, + "description": "Preview changes using git-style diff format" + } + }, + "required": [ + "path", + "edits" + ] + }, + "category": "Filesystem", + "tags": [ + "unity", + "filesystem", + "file", + "edit" + ] + }, + { + "name": "list_directory", + "description": "Get a listing of all files and directories in a specified path in the Unity project. Paths are relative to the Assets folder unless absolute. 
For example, use 'Scenes' to list all files in Assets/Scenes directory. Use empty string to list the Assets folder.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "Path to the directory to list. Can be absolute or relative to Unity project Assets folder. If empty, defaults to the Assets folder. Example: \"Scenes\" will list all files in the Assets/Scenes directory." + } + }, + "required": [ + "path" + ] + }, + "category": "Filesystem", + "tags": [ + "unity", + "filesystem", + "directory", + "list" + ] + }, + { + "name": "directory_tree", + "description": "Get a recursive tree view of files and directories in the Unity project as a JSON structure.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "Path to the directory to get tree of. Can be absolute or relative to Unity project Assets folder. If empty, defaults to the Assets folder. Example: \"Prefabs\" will show the tree for Assets/Prefabs." + }, + "maxDepth": { + "type": "number", + "default": 5, + "description": "Maximum depth to traverse" + } + }, + "required": [ + "path" + ] + }, + "category": "Filesystem", + "tags": [ + "unity", + "filesystem", + "directory", + "tree" + ] + }, + { + "name": "search_files", + "description": "Recursively search for files and directories matching a pattern in the Unity project.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "Path to search from. Can be absolute or relative to Unity project Assets folder. If empty, defaults to the Assets folder. Example: \"Scripts\" will search within Assets/Scripts." 
+ }, + "pattern": { + "type": "string", + "description": "Pattern to search for" + }, + "excludePatterns": { + "type": "array", + "items": { + "type": "string" + }, + "default": [], + "description": "Patterns to exclude" + } + }, + "required": [ + "path", + "pattern" + ] + }, + "category": "Filesystem", + "tags": [ + "unity", + "filesystem", + "search" + ] + }, + { + "name": "get_file_info", + "description": "Retrieve detailed metadata about a file or directory in the Unity project.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "Path to the file to get info for. Can be absolute or relative to Unity project Assets folder. If empty, defaults to the Assets folder." + } + }, + "required": [ + "path" + ] + }, + "category": "Filesystem", + "tags": [ + "unity", + "filesystem", + "file", + "metadata" + ] + }, + { + "name": "find_assets_by_type", + "description": "Find all Unity assets of a specified type (e.g., Material, Prefab, Scene, Script) in the project. Set searchPath to an empty string to search the entire Assets folder.", + "inputSchema": { + "type": "object", + "properties": { + "assetType": { + "type": "string", + "description": "Type of assets to find (e.g., \"Material\", \"Prefab\", \"Scene\", \"Script\")" + }, + "searchPath": { + "type": "string", + "default": "", + "description": "Directory to search in. Can be absolute or relative to Unity project Assets folder. An empty string will search the entire Assets folder." + }, + "maxDepth": { + "type": "number", + "default": 1, + "description": "Maximum depth to search. 1 means search only in the specified directory, 2 includes immediate subdirectories, and so on. Set to -1 for unlimited depth." 
+ } + }, + "required": [ + "assetType" + ] + }, + "category": "Filesystem", + "tags": [ + "unity", + "filesystem", + "assets", + "search" + ] + } + ] + }, + "playwright": { + "display_name": "Playwright MCP", + "license": "MIT", + "tags": [ + "browser automation", + "web", + "playwright", + "accessibility", + "LLM", + "MCP", + "Model Context Protocol", + "web navigation", + "form-filling", + "data extraction" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@playwright/mcp@latest" + ], + "description": "Using Vision Mode with screenshots for visual-based interactions" + } + }, + "examples": [ + { + "title": "", + "description": "", + "prompt": "Navigate to google.com and search for 'playwright automation'" + }, + { + "title": "", + "description": "", + "prompt": "Fill out a login form with username 'test@example.com' and password 'password123'" + }, + { + "title": "", + "description": "", + "prompt": "Take a snapshot of the current page and click on the first search result" + }, + { + "title": "", + "description": "", + "prompt": "Open a new tab, navigate to github.com, and then switch back to the first tab" + }, + { + "title": "", + "description": "", + "prompt": "Navigate to a shopping website, add an item to cart, and proceed to checkout" + }, + { + "title": "", + "description": "", + "prompt": "Fill out a form with multiple fields and submit it" + }, + { + "title": "", + "description": "", + "prompt": "Take a screenshot of the current page" + }, + { + "title": "", + "description": "", + "prompt": "Navigate to a website with a dropdown menu and select an option" + }, + { + "title": "", + "description": "", + "prompt": "Upload a file to a website" + }, + { + "title": "", + "description": "", + "prompt": "Extract data from a table on a webpage" + } + ], + "name": "playwright", + "repository": { + "type": "git", + "url": "https://github.com/microsoft/playwright-mcp" + }, + "homepage": 
"https://github.com/microsoft/playwright-mcp", + "author": { + "name": "microsoft" + }, + "description": "A Model Context Protocol (MCP) server that provides browser automation capabilities using [Playwright](https://playwright.dev). This server enables LLMs to interact with web pages through structured accessibility snapshots, bypassing the need for screenshots or visually-tuned models.", + "categories": [ + "Web Services" + ], + "tools": [ + { + "name": "browser_close", + "description": "Close the page", + "inputSchema": { + "type": "object", + "properties": {}, + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_wait", + "description": "Wait for a specified time in seconds", + "inputSchema": { + "type": "object", + "properties": { + "time": { + "type": "number", + "description": "The time to wait in seconds" + } + }, + "required": [ + "time" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_resize", + "description": "Resize the browser window", + "inputSchema": { + "type": "object", + "properties": { + "width": { + "type": "number", + "description": "Width of the browser window" + }, + "height": { + "type": "number", + "description": "Height of the browser window" + } + }, + "required": [ + "width", + "height" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_file_upload", + "description": "Upload one or multiple files", + "inputSchema": { + "type": "object", + "properties": { + "paths": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The absolute paths to the files to upload. Can be a single file or multiple files." 
+ } + }, + "required": [ + "paths" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_install", + "description": "Install the browser specified in the config. Call this if you get an error about the browser not being installed.", + "inputSchema": { + "type": "object", + "properties": {}, + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_press_key", + "description": "Press a key on the keyboard", + "inputSchema": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Name of the key to press or a character to generate, such as `ArrowLeft` or `a`" + } + }, + "required": [ + "key" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_navigate", + "description": "Navigate to a URL", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "The URL to navigate to" + } + }, + "required": [ + "url" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_navigate_back", + "description": "Go back to the previous page", + "inputSchema": { + "type": "object", + "properties": {}, + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_navigate_forward", + "description": "Go forward to the next page", + "inputSchema": { + "type": "object", + "properties": {}, + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_pdf_save", + "description": "Save page as PDF", + "inputSchema": { + "type": "object", + "properties": {}, + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_screen_capture", + "description": "Take a screenshot of 
the current page", + "inputSchema": { + "type": "object", + "properties": {}, + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_screen_move_mouse", + "description": "Move mouse to a given position", + "inputSchema": { + "type": "object", + "properties": { + "element": { + "type": "string", + "description": "Human-readable element description used to obtain permission to interact with the element" + }, + "x": { + "type": "number", + "description": "X coordinate" + }, + "y": { + "type": "number", + "description": "Y coordinate" + } + }, + "required": [ + "element", + "x", + "y" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_screen_click", + "description": "Click left mouse button", + "inputSchema": { + "type": "object", + "properties": { + "element": { + "type": "string", + "description": "Human-readable element description used to obtain permission to interact with the element" + }, + "x": { + "type": "number", + "description": "X coordinate" + }, + "y": { + "type": "number", + "description": "Y coordinate" + } + }, + "required": [ + "element", + "x", + "y" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_screen_drag", + "description": "Drag left mouse button", + "inputSchema": { + "type": "object", + "properties": { + "element": { + "type": "string", + "description": "Human-readable element description used to obtain permission to interact with the element" + }, + "startX": { + "type": "number", + "description": "Start X coordinate" + }, + "startY": { + "type": "number", + "description": "Start Y coordinate" + }, + "endX": { + "type": "number", + "description": "End X coordinate" + }, + "endY": { + "type": "number", + "description": "End Y coordinate" + } + }, + "required": [ + "element", + "startX", + "startY", + "endX", + "endY" + ], + 
"additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_screen_type", + "description": "Type text", + "inputSchema": { + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "Text to type into the element" + }, + "submit": { + "type": "boolean", + "description": "Whether to submit entered text (press Enter after)" + } + }, + "required": [ + "text" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_tab_list", + "description": "List browser tabs", + "inputSchema": { + "type": "object", + "properties": {}, + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_tab_new", + "description": "Open a new tab", + "inputSchema": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "The URL to navigate to in the new tab. If not provided, the new tab will be blank." + } + }, + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_tab_select", + "description": "Select a tab by index", + "inputSchema": { + "type": "object", + "properties": { + "index": { + "type": "number", + "description": "The index of the tab to select" + } + }, + "required": [ + "index" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "browser_tab_close", + "description": "Close a tab", + "inputSchema": { + "type": "object", + "properties": { + "index": { + "type": "number", + "description": "The index of the tab to close. Closes current tab if not provided." 
+ } + }, + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + } + ], + "prompts": [], + "resources": [ + { + "uri": "browser://console", + "name": "Page console", + "mimeType": "text/plain" + } + ], + "is_official": true + }, + "screenshotone": { + "display_name": "ScreenshotOne MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/screenshotone/mcp" + }, + "homepage": "https://screenshotone.com", + "author": { + "name": "screenshotone" + }, + "license": "MIT", + "tags": [ + "screenshot", + "website", + "image" + ], + "arguments": { + "SCREENSHOTONE_API_KEY": { + "description": "API key for ScreenshotOne service", + "required": true, + "example": "" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "node", + "args": [ + "build/index.js" + ], + "env": { + "SCREENSHOTONE_API_KEY": "your_api_key" + }, + "description": "Run as standalone server", + "recommended": true + } + }, + "examples": [ + { + "title": "Render Website Screenshot", + "description": "Render a screenshot of a website and return it as an image", + "prompt": "Take a screenshot of the website https://example.com" + } + ], + "name": "screenshotone", + "description": "An official implementation of an [MCP (Model Context Protocol)](https://modelcontextprotocol.io/) server for [ScreenshotOne](https://screenshotone.com).", + "categories": [ + "Web Services" + ], + "is_official": true + }, + "mailgun-mcp-server": { + "display_name": "Mailgun MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/mailgun/mailgun-mcp-server" + }, + "homepage": "https://github.com/mailgun/mailgun-mcp-server", + "author": { + "name": "mailgun" + }, + "license": "Apache-2.0", + "tags": [ + "email", + "mailgun", + "mcp" + ], + "arguments": { + "MAILGUN_API_KEY": { + "description": "Your Mailgun API key", + "required": true, + "example": "YOUR-mailgun-api-key" + } + }, + "installations": { + "custom": { + "type": 
"custom", + "command": "node", + "args": [ + "path/to/mailgun-mcp-server/src/mailgun-mcp.js" + ], + "env": { + "MAILGUN_API_KEY": "YOUR-mailgun-api-key" + }, + "description": "Run the server using Node.js", + "recommended": true + } + }, + "examples": [ + { + "title": "Send an Email", + "description": "Send an email with a funny body from IT Desk", + "prompt": "Can you send an email to EMAIL_HERE with a funny email body that makes it sound like it's from the IT Desk from Office Space? Please use the sending domain DOMAIN_HERE, and make the email from \"postmaster@DOMAIN_HERE\"!" + }, + { + "title": "Fetch and Visualize Sending Statistics", + "description": "Create a chart with email delivery statistics", + "prompt": "Would you be able to make a chart with email delivery statistics for the past week?" + } + ], + "name": "mailgun", + "description": "A Model Context Protocol (MCP) server implementation for [Mailgun](https://mailgun.com), enabling MCP-compatible AI clients like Claude Desktop to interract with the service.", + "categories": [ + "Messaging" + ], + "is_official": true + }, + "productboard": { + "name": "productboard", + "display_name": "Productboard", + "description": "Integrate the Productboard API into agentic workflows via MCP.", + "repository": { + "type": "git", + "url": "https://github.com/kenjihikmatullah/productboard-mcp" + }, + "author": { + "name": "kenjihikmatullah" + }, + "license": "MIT", + "categories": [ + "Productivity" + ], + "tags": [ + "Productboard", + "API" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "productboard-mcp" + ], + "env": { + "PRODUCTBOARD_ACCESS_TOKEN": "" + } + } + }, + "homepage": "https://github.com/kenjihikmatullah/productboard-mcp", + "arguments": { + "PRODUCTBOARD_ACCESS_TOKEN": { + "description": "An access token needed to authenticate with the Productboard API. 
This token is required to make requests to the API and must be kept confidential.", + "required": true, + "example": "your_access_token_here" + } + }, + "tools": [ + { + "name": "get_products", + "description": "Returns detail of all products. This API is paginated and the page limit is always 100", + "inputSchema": { + "type": "object", + "properties": { + "page": { + "type": "number", + "default": 1 + } + } + } + }, + { + "name": "get_product_detail", + "description": "Returns detailed information about a specific product", + "inputSchema": { + "type": "object", + "properties": { + "productId": { + "type": "string", + "description": "ID of the product to retrieve" + } + }, + "required": [ + "productId" + ] + } + }, + { + "name": "get_features", + "description": "Returns a list of all features. This API is paginated and the page limit is always 100", + "inputSchema": { + "type": "object", + "properties": { + "page": { + "type": "number", + "default": 1 + } + } + } + }, + { + "name": "get_feature_detail", + "description": "Returns detailed information about a specific feature", + "inputSchema": { + "type": "object", + "properties": { + "featureId": { + "type": "string", + "description": "ID of the feature to retrieve" + } + }, + "required": [ + "featureId" + ] + } + }, + { + "name": "get_components", + "description": "Returns a list of all components. This API is paginated and the page limit is always 100", + "inputSchema": { + "type": "object", + "properties": { + "page": { + "type": "number", + "default": 1 + } + } + } + }, + { + "name": "get_component_detail", + "description": "Returns detailed information about a specific component", + "inputSchema": { + "type": "object", + "properties": { + "componentId": { + "type": "string", + "description": "ID of the component to retrieve" + } + }, + "required": [ + "componentId" + ] + } + }, + { + "name": "get_feature_statuses", + "description": "Returns a list of all feature statuses. 
This API is paginated and the page limit is always 100", + "inputSchema": { + "type": "object", + "properties": { + "page": { + "type": "number", + "default": 1 + } + } + } + }, + { + "name": "get_notes", + "description": "Returns a list of all notes", + "inputSchema": { + "type": "object", + "properties": { + "last": { + "type": "string", + "description": "Return only notes created since given span of months (m), days (d), or hours (h). E.g. 6m | 10d | 24h | 1h. Cannot be combined with createdFrom, createdTo, dateFrom, or dateTo" + }, + "createdFrom": { + "type": "string", + "format": "date", + "description": "Return only notes created since given date. Cannot be combined with last" + }, + "createdTo": { + "type": "string", + "format": "date", + "description": "Return only notes created before or equal to the given date. Cannot be combined with last" + }, + "updatedFrom": { + "type": "string", + "format": "date", + "description": "Return only notes updated since given date" + }, + "updatedTo": { + "type": "string", + "format": "date", + "description": "Return only notes updated before or equal to the given date" + }, + "term": { + "type": "string", + "description": "Return only notes by fulltext search" + }, + "featureId": { + "type": "string", + "description": "Return only notes for specific feature ID or its descendants" + }, + "companyId": { + "type": "string", + "description": "Return only notes for specific company ID" + }, + "ownerEmail": { + "type": "string", + "description": "Return only notes owned by a specific owner email" + }, + "source": { + "type": "string", + "description": "Return only notes from a specific source origin. This is the unique string identifying the external system from which the data came" + }, + "anyTag": { + "type": "string", + "description": "Return only notes that have been assigned any of the tags in the array. 
Cannot be combined with allTags" + }, + "allTags": { + "type": "string", + "description": "Return only notes that have been assigned all of the tags in the array. Cannot be combined with anyTag" + }, + "pageLimit": { + "type": "number", + "description": "Page limit" + }, + "pageCursor": { + "type": "string", + "description": "Page cursor to get next page of results" + } + } + } + }, + { + "name": "get_note_detail", + "description": "Returns detailed information about a specific note", + "inputSchema": { + "type": "object", + "properties": { + "noteId": { + "type": "string", + "description": "ID of the note to retrieve" + } + }, + "required": [ + "noteId" + ] + } + }, + { + "name": "get_companies", + "description": "Returns a list of all companies. This API is paginated and the page limit is always 100", + "inputSchema": { + "type": "object", + "properties": { + "page": { + "type": "number", + "default": 1 + } + } + } + }, + { + "name": "get_company_detail", + "description": "Returns detailed information about a specific company", + "inputSchema": { + "type": "object", + "properties": { + "companyId": { + "type": "string", + "description": "ID of the company to retrieve" + } + }, + "required": [ + "companyId" + ] + } + } + ] + }, + "qwen-max": { + "name": "qwen-max", + "display_name": "Qwen Max", + "description": "A Model Context Protocol (MCP) server implementation for the Qwen models.", + "repository": { + "type": "git", + "url": "https://github.com/66julienmartin/MCP-server-Qwen_Max" + }, + "homepage": "https://github.com/66julienmartin/MCP-server-Qwen_Max", + "author": { + "name": "66julienmartin" + }, + "license": "MIT", + "categories": [ + "AI Systems" + ], + "tags": [ + "Qwen Max", + "Server" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@gongrzhe/quickchart-mcp-server" + ] + } + }, + "arguments": { + "DASHSCOPE_API_KEY": { + "description": "API key required for authentication with the Dashscope service.", + 
"required": true, + "example": "your-api-key-here" + } + }, + "tools": [ + { + "name": "generate_chart", + "description": "Generate a chart using QuickChart", + "inputSchema": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "Chart type (bar, line, pie, doughnut, radar, polarArea, scatter, bubble, radialGauge, speedometer)" + }, + "labels": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Labels for data points" + }, + "datasets": { + "type": "array", + "items": { + "type": "object", + "properties": { + "label": { + "type": "string" + }, + "data": { + "type": "array" + }, + "backgroundColor": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ] + }, + "borderColor": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ] + }, + "additionalConfig": { + "type": "object" + } + }, + "required": [ + "data" + ] + } + }, + "title": { + "type": "string" + }, + "options": { + "type": "object" + } + }, + "required": [ + "type", + "datasets" + ] + } + }, + { + "name": "download_chart", + "description": "Download a chart image to a local file", + "inputSchema": { + "type": "object", + "properties": { + "config": { + "type": "object", + "description": "Chart configuration object" + }, + "outputPath": { + "type": "string", + "description": "Path where the chart image should be saved" + } + }, + "required": [ + "config", + "outputPath" + ] + } + } + ] + }, + "inkeep": { + "display_name": "Inkeep MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/inkeep/mcp-server-python" + }, + "homepage": "https://inkeep.com", + "author": { + "name": "inkeep" + }, + "license": "MIT", + "tags": [ + "rag", + "documentation", + "product content" + ], + "arguments": { + "INKEEP_API_BASE_URL": { + "description": "Base URL for the Inkeep API", + "required": true, + "example": 
"https://api.inkeep.com/v1" + }, + "INKEEP_API_KEY": { + "description": "API key for authenticating with Inkeep", + "required": true, + "example": "" + }, + "INKEEP_API_MODEL": { + "description": "The Inkeep model to use", + "required": true, + "example": "inkeep-rag" + }, + "INKEEP_MCP_TOOL_NAME": { + "description": "Name of the MCP tool", + "required": true, + "example": "search-product-content" + }, + "INKEEP_MCP_TOOL_DESCRIPTION": { + "description": "Description of the MCP tool", + "required": true, + "example": "Retrieves product documentation about Inkeep. The query should be framed as a conversational question about Inkeep." + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "uv", + "args": [ + "--directory", + "", + "run", + "-m", + "inkeep_mcp_server" + ], + "env": { + "INKEEP_API_BASE_URL": "https://api.inkeep.com/v1", + "INKEEP_API_KEY": "", + "INKEEP_API_MODEL": "inkeep-rag", + "INKEEP_MCP_TOOL_NAME": "search-product-content", + "INKEEP_MCP_TOOL_DESCRIPTION": "Retrieves product documentation about Inkeep. The query should be framed as a conversational question about Inkeep." + }, + "description": "Run using uv Python project manager", + "recommended": true + } + }, + "examples": [ + { + "title": "Search Inkeep Documentation", + "description": "Ask a question about Inkeep's product", + "prompt": "How do I integrate Inkeep with my website?" 
+ } + ], + "name": "inkeep", + "description": "Inkeep MCP Server powered by your docs and product content.", + "categories": [ + "Knowledge Base" + ], + "is_official": true + }, + "mcp-neo4j-aura-api": { + "display_name": "Neo4j MCP (Aura API)", + "repository": { + "type": "git", + "url": "https://github.com/neo4j-contrib/mcp-neo4j" + }, + "homepage": "https://github.com/neo4j-contrib/mcp-neo4j", + "author": { + "name": "neo4j-contrib" + }, + "license": "MIT", + "tags": [ + "neo4j", + "mcp", + "knowledge graph", + "aura" + ], + "arguments": { + "NEO4J_CLIENT_ID": { + "description": "Neo4j client ID", + "required": true, + "example": "" + }, + "NEO4J_CLIENT_SECRET": { + "description": "Neo4j client secret", + "required": true, + "example": "" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-neo4j-aura-manager", + "--client-id", + "${NEO4J_CLIENT_ID}", + "--client-secret", + "${NEO4J_CLIENT_SECRET}" + ], + "description": "Clone the repository to access multiple Neo4j MCP servers", + "recommended": true + } + }, + "examples": [ + { + "title": "Database Schema Query", + "description": "Get information about what's in the graph database", + "prompt": "What is in this graph?" 
+ }, + { + "title": "Data Visualization", + "description": "Generate charts from graph data", + "prompt": "Render a chart from the top products sold by frequency, total and average volume" + }, + { + "title": "Instance Management", + "description": "List Neo4j Aura instances", + "prompt": "List my instances" + }, + { + "title": "Instance Creation", + "description": "Create a new Neo4j Aura instance", + "prompt": "Create a new instance named mcp-test for Aura Professional with 4GB and Graph Data Science enabled" + }, + { + "title": "Knowledge Storage", + "description": "Store information in the knowledge graph", + "prompt": "Store the fact that I worked on the Neo4j MCP Servers today with Andreas and Oskar" + } + ], + "name": "mcp-neo4j-aura-api", + "description": "Neo4j graph database server (schema + read/write-cypher) and separate graph database backed memory", + "categories": [ + "Databases" + ], + "is_official": true, + "tools": [ + { + "name": "list_instances", + "description": "List all Neo4j Aura database instances", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get_instance_details", + "description": "Get details for one or more Neo4j Aura instances by ID, including status, region, memory, storage", + "inputSchema": { + "type": "object", + "properties": { + "instance_ids": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of instance IDs to retrieve" + } + }, + "required": [ + "instance_ids" + ] + } + }, + { + "name": "get_instance_by_name", + "description": "Find a Neo4j Aura instance by name and returns the details including the id", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the instance to find" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "create_instance", + "description": "Create a new Neo4j Aura database instance", + "inputSchema": { + "type": "object", + "properties": { + "tenant_id": { + "type": 
"string", + "description": "ID of the tenant/project where the instance will be created" + }, + "name": { + "type": "string", + "description": "Name for the new instance" + }, + "memory": { + "type": "integer", + "description": "Memory allocation in GB", + "default": 1 + }, + "region": { + "type": "string", + "description": "Region for the instance (e.g., 'us-east-1')", + "default": "us-central1" + }, + "type": { + "type": "string", + "description": "Instance type (free-db, professional-db, enterprise-db, or business-critical)", + "default": "free-db" + }, + "vector_optimized": { + "type": "boolean", + "description": "Whether the instance is optimized for vector operations", + "default": false + }, + "cloud_provider": { + "type": "string", + "description": "Cloud provider (gcp, aws, azure)", + "default": "gcp" + }, + "graph_analytics_plugin": { + "type": "boolean", + "description": "Whether to enable the graph analytics plugin", + "default": false + }, + "source_instance_id": { + "type": "string", + "description": "ID of the source instance to clone from (for professional/enterprise instances)" + } + }, + "required": [ + "tenant_id", + "name" + ] + } + }, + { + "name": "update_instance_name", + "description": "Update the name of a Neo4j Aura instance", + "inputSchema": { + "type": "object", + "properties": { + "instance_id": { + "type": "string", + "description": "ID of the instance to update" + }, + "name": { + "type": "string", + "description": "New name for the instance" + } + }, + "required": [ + "instance_id", + "name" + ] + } + }, + { + "name": "update_instance_memory", + "description": "Update the memory allocation of a Neo4j Aura instance", + "inputSchema": { + "type": "object", + "properties": { + "instance_id": { + "type": "string", + "description": "ID of the instance to update" + }, + "memory": { + "type": "integer", + "description": "New memory allocation in GB" + } + }, + "required": [ + "instance_id", + "memory" + ] + } + }, + { + "name": 
"update_instance_vector_optimization", + "description": "Update the vector optimization setting of a Neo4j Aura instance", + "inputSchema": { + "type": "object", + "properties": { + "instance_id": { + "type": "string", + "description": "ID of the instance to update" + }, + "vector_optimized": { + "type": "boolean", + "description": "Whether the instance should be optimized for vector operations" + } + }, + "required": [ + "instance_id", + "vector_optimized" + ] + } + }, + { + "name": "pause_instance", + "description": "Pause a Neo4j Aura database instance", + "inputSchema": { + "type": "object", + "properties": { + "instance_id": { + "type": "string", + "description": "ID of the instance to pause" + } + }, + "required": [ + "instance_id" + ] + } + }, + { + "name": "resume_instance", + "description": "Resume a paused Neo4j Aura database instance", + "inputSchema": { + "type": "object", + "properties": { + "instance_id": { + "type": "string", + "description": "ID of the instance to resume" + } + }, + "required": [ + "instance_id" + ] + } + }, + { + "name": "list_tenants", + "description": "List all Neo4j Aura tenants/projects", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get_tenant_details", + "description": "Get details for a specific Neo4j Aura tenant/project", + "inputSchema": { + "type": "object", + "properties": { + "tenant_id": { + "type": "string", + "description": "ID of the tenant/project to retrieve" + } + }, + "required": [ + "tenant_id" + ] + } + }, + { + "name": "delete_instance", + "description": "Delete a Neo4j Aura database instance", + "inputSchema": { + "type": "object", + "properties": { + "instance_id": { + "type": "string", + "description": "ID of the instance to delete" + } + }, + "required": [ + "instance_id" + ] + } + } + ] + }, + "mcp-oceanbase": { + "display_name": "OceanBase MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/oceanbase/mcp-oceanbase" + }, + "homepage": 
"https://github.com/oceanbase/mcp-oceanbase", + "author": { + "name": "oceanbase" + }, + "license": "Apache-2.0", + "tags": [ + "database", + "OceanBase" + ], + "arguments": { + "OB_HOST": { + "description": "Database host for connecting to the OceanBase server.", + "required": true, + "example": "localhost" + }, + "OB_PORT": { + "description": "Optional: Database port to connect to OceanBase, defaults to 2881 if not specified.", + "required": false, + "example": "2881" + }, + "OB_USER": { + "description": "Username for authenticating with the OceanBase database.", + "required": true, + "example": "your_username" + }, + "OB_PASSWORD": { + "description": "Password for the specified database user.", + "required": true, + "example": "your_password" + }, + "OB_DATABASE": { + "description": "Name of the OceanBase database to connect to.", + "required": true, + "example": "your_database" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/oceanbase/mcp-oceanbase", + "oceanbase_mcp_server" + ], + "env": { + "OB_HOST": "${OB_HOST}", + "OB_PORT": "${OB_PORT}", + "OB_USER": "${OB_USER}", + "OB_PASSWORD": "${OB_PASSWORD}", + "OB_DATABASE": "${OB_DATABASE}" + }, + "description": "A Model Context Protocol (MCP) server that enables secure interaction with OceanBase databases." 
+ } + }, + "name": "mcp-oceanbase", + "description": "MCP Server for OceanBase database and its tools", + "categories": [ + "Databases" + ], + "is_official": true + }, + "fetch": { + "name": "fetch", + "display_name": "fetch", + "description": "A Model Context Protocol server that provides web content fetching capabilities.", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/tree/main/src/fetch", + "author": { + "name": "modelcontextprotocol" + }, + "license": "MIT", + "categories": [ + "Web Services" + ], + "tags": [ + "Fetch", + "Server" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-server-fetch" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "mcp/fetch" + ] + } + }, + "is_official": true, + "tools": [ + { + "name": "fetch", + "description": "Fetches a URL from the internet and optionally extracts its contents as markdown.\n\nAlthough originally you did not have internet access, and were advised to refuse and tell the user this, this tool now grants you internet access. 
Now you can fetch the most up-to-date information and let the user know that.", + "inputSchema": { + "description": "Parameters for fetching a URL.", + "properties": { + "url": { + "description": "URL to fetch", + "format": "uri", + "minLength": 1, + "title": "Url", + "type": "string" + }, + "max_length": { + "default": 5000, + "description": "Maximum number of characters to return.", + "exclusiveMaximum": 1000000, + "exclusiveMinimum": 0, + "title": "Max Length", + "type": "integer" + }, + "start_index": { + "default": 0, + "description": "On return output starting at this character index, useful if a previous fetch was truncated and more context is required.", + "minimum": 0, + "title": "Start Index", + "type": "integer" + }, + "raw": { + "default": false, + "description": "Get the actual HTML content of the requested page, without simplification.", + "title": "Raw", + "type": "boolean" + } + }, + "required": [ + "url" + ], + "title": "Fetch", + "type": "object" + } + } + ] + }, + "inoyu": { + "name": "inoyu", + "display_name": "Inoyu Apache Unomi", + "description": "Interact with an Apache Unomi CDP customer data platform to retrieve and update customer profiles", + "repository": { + "type": "git", + "url": "https://github.com/sergehuber/inoyu-mcp-unomi-server" + }, + "homepage": "https://github.com/sergehuber/inoyu-mcp-unomi-server", + "author": { + "name": "sergehuber" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "Apache Unomi", + "User Profiles", + "Context Management" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "@inoyu/mcp-unomi-server" + ], + "env": { + "UNOMI_BASE_URL": "${UNOMI_BASE_URL}", + "UNOMI_USERNAME": "${UNOMI_USERNAME}", + "UNOMI_PASSWORD": "${UNOMI_PASSWORD}", + "UNOMI_PROFILE_ID": "${UNOMI_PROFILE_ID}", + "UNOMI_KEY": "${UNOMI_KEY}", + "UNOMI_EMAIL": "${UNOMI_EMAIL}", + "UNOMI_SOURCE_ID": "${UNOMI_SOURCE_ID}" + } + } + }, + "arguments": { + "UNOMI_BASE_URL": { + 
"description": "The base URL of your Apache Unomi server (e.g., http://your-unomi-server:8181)", + "required": true + }, + "UNOMI_USERNAME": { + "description": "The username to authenticate with the Apache Unomi server, default is 'karaf'", + "required": true + }, + "UNOMI_PASSWORD": { + "description": "The password to authenticate with the Apache Unomi server, default is 'karaf'", + "required": true + }, + "UNOMI_PROFILE_ID": { + "description": "The ID of the user profile to be used for context management", + "required": false + }, + "UNOMI_KEY": { + "description": "The authorization key required for secured operations with the Unomi server, defaults to '670c26d1cc413346c3b2fd9ce65dab41'", + "required": false + }, + "UNOMI_EMAIL": { + "description": "The email address associated with the user profile, used for profile lookup", + "required": false + }, + "UNOMI_SOURCE_ID": { + "description": "An identifier for the source of the request (e.g., claude-desktop)", + "required": false + } + }, + "tools": [ + { + "name": "get_my_profile", + "description": "Get your profile using environment variables.", + "inputSchema": { + "requireSegments": { + "type": "boolean", + "description": "Include segment information", + "optional": true + }, + "requireScores": { + "type": "boolean", + "description": "Include scoring information", + "optional": true + } + }, + "required": [] + }, + { + "name": "update_my_profile", + "description": "Update properties of your profile.", + "inputSchema": { + "properties": { + "type": "object", + "description": "Properties to update" + } + }, + "required": [ + "properties" + ] + }, + { + "name": "get_profile", + "description": "Retrieve a specific profile by ID.", + "inputSchema": { + "profileId": { + "type": "string", + "description": "ID of the profile to retrieve" + } + }, + "required": [ + "profileId" + ] + }, + { + "name": "search_profiles", + "description": "Search for profiles.", + "inputSchema": { + "query": { + "type": "string", + 
"description": "Search query" + }, + "limit": { + "type": "integer", + "description": "Maximum number of results to return", + "optional": true + }, + "offset": { + "type": "integer", + "description": "Pagination offset", + "optional": true + } + }, + "required": [ + "query" + ] + }, + { + "name": "create_scope", + "description": "Create a new Unomi scope.", + "inputSchema": { + "scope": { + "type": "string", + "description": "Identifier for the scope" + }, + "name": { + "type": "string", + "description": "Name of the scope", + "optional": true + }, + "description": { + "type": "string", + "description": "Description of the scope", + "optional": true + } + }, + "required": [ + "scope" + ] + } + ] + }, + "everything": { + "name": "everything", + "display_name": "Everything", + "description": "This MCP server exercises all the features of the MCP protocol. It is a test server for builders of MCP clients.", + "repository": { + "type": "git", + "url": "https://github.com/modelcontextprotocol/servers" + }, + "homepage": "https://github.com/modelcontextprotocol/servers/tree/main/src/everything#readme", + "author": { + "name": "MCP Team" + }, + "license": "MIT", + "categories": [ + "MCP Tools" + ], + "tags": [ + "testing", + "reference", + "example", + "demo" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-everything" + ], + "package": "@modelcontextprotocol/server-everything", + "env": {}, + "description": "Install and run using NPX", + "recommended": true + } + }, + "examples": [ + { + "title": "Test tool usage", + "description": "Test various tools provided by the server", + "prompt": "Show me how to use the different tools in this MCP server." + }, + { + "title": "Test resources", + "description": "Demonstrate accessing resources", + "prompt": "Demonstrate how to access and use resources from this MCP server." 
+ } + ], + "tools": [ + { + "name": "echo", + "description": "Echoes back the input", + "inputSchema": { + "type": "object", + "properties": { + "message": { + "type": "string", + "description": "Message to echo" + } + }, + "required": [ + "message" + ] + } + }, + { + "name": "add", + "description": "Adds two numbers", + "inputSchema": { + "type": "object", + "properties": { + "a": { + "type": "number", + "description": "First number" + }, + "b": { + "type": "number", + "description": "Second number" + } + }, + "required": [ + "a", + "b" + ] + } + }, + { + "name": "printEnv", + "description": "Prints all environment variables, helpful for debugging MCP server configuration", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "longRunningOperation", + "description": "Demonstrates a long running operation with progress updates", + "inputSchema": { + "type": "object", + "properties": { + "duration": { + "type": "number", + "default": 10, + "description": "Duration of the operation in seconds" + }, + "steps": { + "type": "number", + "default": 5, + "description": "Number of steps in the operation" + } + } + } + }, + { + "name": "sampleLLM", + "description": "Samples from an LLM using MCP's sampling feature", + "inputSchema": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "The prompt to send to the LLM" + }, + "maxTokens": { + "type": "number", + "default": 100, + "description": "Maximum number of tokens to generate" + } + }, + "required": [ + "prompt" + ] + } + }, + { + "name": "getTinyImage", + "description": "Returns the MCP_TINY_IMAGE", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "annotatedMessage", + "description": "Demonstrates how annotations can be used to provide metadata about content", + "inputSchema": { + "type": "object", + "properties": { + "messageType": { + "type": "string", + "enum": [ + "error", + "success", + "debug" + ], + "description": 
"Type of message to demonstrate different annotation patterns" + }, + "includeImage": { + "type": "boolean", + "default": false, + "description": "Whether to include an example image" + } + }, + "required": [ + "messageType" + ] + } + } + ], + "is_official": true + }, + "godot": { + "name": "godot", + "display_name": "Godot", + "description": "A MCP server providing comprehensive Godot engine integration for project editing, debugging, and scene management.", + "repository": { + "type": "git", + "url": "https://github.com/Coding-Solo/godot-mcp" + }, + "homepage": "https://github.com/Coding-Solo/godot-mcp", + "author": { + "name": "Coding Solo", + "url": "https://github.com/Coding-Solo" + }, + "license": "MIT", + "categories": [ + "Media Creation" + ], + "tags": [ + "Godot", + "AI", + "Game" + ], + "examples": [ + { + "title": "Launch Godot Editor", + "description": "Launch the Godot editor for a specific project.", + "prompt": "Launch the Godot editor for my project at /path/to/project" + }, + { + "title": "Run Godot Project", + "description": "Execute Godot projects in debug mode.", + "prompt": "Run my Godot project and show me any errors" + }, + { + "title": "Get Project Info", + "description": "Retrieve detailed information about the project structure.", + "prompt": "Get information about my Godot project structure" + }, + { + "title": "Debug Assistance", + "description": "Help debug errors in Godot projects.", + "prompt": "Help me debug this error in my Godot project: [paste error]" + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/Coding-Solo/godot-mcp" + ] + } + }, + "tools": [ + { + "name": "launch_editor", + "description": "Launch Godot editor for a specific project", + "inputSchema": { + "type": "object", + "properties": { + "projectPath": { + "type": "string", + "description": "Path to the Godot project directory" + } + }, + "required": [ + "projectPath" + ] + } + }, + { + "name": 
"run_project", + "description": "Run the Godot project and capture output", + "inputSchema": { + "type": "object", + "properties": { + "projectPath": { + "type": "string", + "description": "Path to the Godot project directory" + }, + "scene": { + "type": "string", + "description": "Optional: Specific scene to run" + } + }, + "required": [ + "projectPath" + ] + } + }, + { + "name": "get_debug_output", + "description": "Get the current debug output and errors", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "stop_project", + "description": "Stop the currently running Godot project", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "get_godot_version", + "description": "Get the installed Godot version", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "list_projects", + "description": "List Godot projects in a directory", + "inputSchema": { + "type": "object", + "properties": { + "directory": { + "type": "string", + "description": "Directory to search for Godot projects" + }, + "recursive": { + "type": "boolean", + "description": "Whether to search recursively (default: false)" + } + }, + "required": [ + "directory" + ] + } + }, + { + "name": "get_project_info", + "description": "Retrieve metadata about a Godot project", + "inputSchema": { + "type": "object", + "properties": { + "projectPath": { + "type": "string", + "description": "Path to the Godot project directory" + } + }, + "required": [ + "projectPath" + ] + } + }, + { + "name": "create_scene", + "description": "Create a new Godot scene file", + "inputSchema": { + "type": "object", + "properties": { + "projectPath": { + "type": "string", + "description": "Path to the Godot project directory" + }, + "scenePath": { + "type": "string", + "description": "Path where the scene file will be saved (relative to project)" + }, + "rootNodeType": { + "type": "string", + 
"description": "Type of the root node (e.g., Node2D, Node3D)", + "default": "Node2D" + } + }, + "required": [ + "projectPath", + "scenePath" + ] + } + }, + { + "name": "add_node", + "description": "Add a node to an existing scene", + "inputSchema": { + "type": "object", + "properties": { + "projectPath": { + "type": "string", + "description": "Path to the Godot project directory" + }, + "scenePath": { + "type": "string", + "description": "Path to the scene file (relative to project)" + }, + "parentNodePath": { + "type": "string", + "description": "Path to the parent node (e.g., \"root\" or \"root/Player\")", + "default": "root" + }, + "nodeType": { + "type": "string", + "description": "Type of node to add (e.g., Sprite2D, CollisionShape2D)" + }, + "nodeName": { + "type": "string", + "description": "Name for the new node" + }, + "properties": { + "type": "object", + "description": "Optional properties to set on the node" + } + }, + "required": [ + "projectPath", + "scenePath", + "nodeType", + "nodeName" + ] + } + }, + { + "name": "load_sprite", + "description": "Load a sprite into a Sprite2D node", + "inputSchema": { + "type": "object", + "properties": { + "projectPath": { + "type": "string", + "description": "Path to the Godot project directory" + }, + "scenePath": { + "type": "string", + "description": "Path to the scene file (relative to project)" + }, + "nodePath": { + "type": "string", + "description": "Path to the Sprite2D node (e.g., \"root/Player/Sprite2D\")" + }, + "texturePath": { + "type": "string", + "description": "Path to the texture file (relative to project)" + } + }, + "required": [ + "projectPath", + "scenePath", + "nodePath", + "texturePath" + ] + } + }, + { + "name": "export_mesh_library", + "description": "Export a scene as a MeshLibrary resource", + "inputSchema": { + "type": "object", + "properties": { + "projectPath": { + "type": "string", + "description": "Path to the Godot project directory" + }, + "scenePath": { + "type": "string", + 
"description": "Path to the scene file (.tscn) to export" + }, + "outputPath": { + "type": "string", + "description": "Path where the mesh library (.res) will be saved" + }, + "meshItemNames": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Optional: Names of specific mesh items to include (defaults to all)" + } + }, + "required": [ + "projectPath", + "scenePath", + "outputPath" + ] + } + }, + { + "name": "save_scene", + "description": "Save changes to a scene file", + "inputSchema": { + "type": "object", + "properties": { + "projectPath": { + "type": "string", + "description": "Path to the Godot project directory" + }, + "scenePath": { + "type": "string", + "description": "Path to the scene file (relative to project)" + }, + "newPath": { + "type": "string", + "description": "Optional: New path to save the scene to (for creating variants)" + } + }, + "required": [ + "projectPath", + "scenePath" + ] + } + }, + { + "name": "get_uid", + "description": "Get the UID for a specific file in a Godot project (for Godot 4.4+)", + "inputSchema": { + "type": "object", + "properties": { + "projectPath": { + "type": "string", + "description": "Path to the Godot project directory" + }, + "filePath": { + "type": "string", + "description": "Path to the file (relative to project) for which to get the UID" + } + }, + "required": [ + "projectPath", + "filePath" + ] + } + }, + { + "name": "update_project_uids", + "description": "Update UID references in a Godot project by resaving resources (for Godot 4.4+)", + "inputSchema": { + "type": "object", + "properties": { + "projectPath": { + "type": "string", + "description": "Path to the Godot project directory" + } + }, + "required": [ + "projectPath" + ] + } + } + ] + }, + "aws": { + "name": "aws", + "display_name": "AWS", + "description": "Perform operations on your AWS resources using an LLM.", + "repository": { + "type": "git", + "url": "https://github.com/rishikavikondala/mcp-server-aws" + }, + "homepage": 
"https://github.com/rishikavikondala/mcp-server-aws", + "author": { + "name": "rishikavikondala" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "s3", + "dynamodb", + "aws" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/rishikavikondala/mcp-server-aws", + "mcp-server-aws" + ] + } + }, + "arguments": { + "AWS_ACCESS_KEY_ID": { + "description": "This is the access key ID for your AWS account, required for authenticating requests to AWS services.", + "required": true, + "example": "AKIAEXAMPLE" + }, + "AWS_SECRET_ACCESS_KEY": { + "description": "This is the secret access key for your AWS account, used in conjunction with the access key ID to authenticate requests.", + "required": true, + "example": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + }, + "AWS_REGION": { + "description": "This specifies the AWS region you want to use for your operations. It defaults to `us-east-1` if not provided.", + "required": false, + "example": "us-west-2" + } + }, + "tools": [ + { + "name": "s3_bucket_create", + "description": "Create a new S3 bucket", + "inputSchema": { + "type": "object", + "properties": { + "bucket_name": { + "type": "string", + "description": "Name of the S3 bucket to create" + } + }, + "required": [ + "bucket_name" + ] + } + }, + { + "name": "s3_bucket_list", + "description": "List all S3 buckets", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "s3_bucket_delete", + "description": "Delete an S3 bucket", + "inputSchema": { + "type": "object", + "properties": { + "bucket_name": { + "type": "string", + "description": "Name of the S3 bucket to delete" + } + }, + "required": [ + "bucket_name" + ] + } + }, + { + "name": "s3_object_upload", + "description": "Upload an object to S3", + "inputSchema": { + "type": "object", + "properties": { + "bucket_name": { + "type": "string", + "description": "Name of the S3 bucket" + }, + 
"object_key": { + "type": "string", + "description": "Key/path of the object in the bucket" + }, + "file_content": { + "type": "string", + "description": "Base64 encoded file content for upload" + } + }, + "required": [ + "bucket_name", + "object_key", + "file_content" + ] + } + }, + { + "name": "s3_object_delete", + "description": "Delete an object from S3", + "inputSchema": { + "type": "object", + "properties": { + "bucket_name": { + "type": "string", + "description": "Name of the S3 bucket" + }, + "object_key": { + "type": "string", + "description": "Key/path of the object to delete" + } + }, + "required": [ + "bucket_name", + "object_key" + ] + } + }, + { + "name": "s3_object_list", + "description": "List objects in an S3 bucket", + "inputSchema": { + "type": "object", + "properties": { + "bucket_name": { + "type": "string", + "description": "Name of the S3 bucket" + } + }, + "required": [ + "bucket_name" + ] + } + }, + { + "name": "s3_object_read", + "description": "Read an object's content from S3", + "inputSchema": { + "type": "object", + "properties": { + "bucket_name": { + "type": "string", + "description": "Name of the S3 bucket" + }, + "object_key": { + "type": "string", + "description": "Key/path of the object to read" + } + }, + "required": [ + "bucket_name", + "object_key" + ] + } + }, + { + "name": "dynamodb_table_create", + "description": "Create a new DynamoDB table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + }, + "key_schema": { + "type": "array", + "description": "Key schema for table creation" + }, + "attribute_definitions": { + "type": "array", + "description": "Attribute definitions for table creation" + } + }, + "required": [ + "table_name", + "key_schema", + "attribute_definitions" + ] + } + }, + { + "name": "dynamodb_table_describe", + "description": "Get details about a DynamoDB table", + "inputSchema": { + "type": "object", + "properties": 
{ + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + } + }, + "required": [ + "table_name" + ] + } + }, + { + "name": "dynamodb_table_list", + "description": "List all DynamoDB tables", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "dynamodb_table_delete", + "description": "Delete a DynamoDB table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + } + }, + "required": [ + "table_name" + ] + } + }, + { + "name": "dynamodb_table_update", + "description": "Update a DynamoDB table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + }, + "attribute_definitions": { + "type": "array", + "description": "Updated attribute definitions" + } + }, + "required": [ + "table_name", + "attribute_definitions" + ] + } + }, + { + "name": "dynamodb_item_put", + "description": "Put an item into a DynamoDB table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + }, + "item": { + "type": "object", + "description": "Item data to put" + } + }, + "required": [ + "table_name", + "item" + ] + } + }, + { + "name": "dynamodb_item_get", + "description": "Get an item from a DynamoDB table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + }, + "key": { + "type": "object", + "description": "Key to identify the item" + } + }, + "required": [ + "table_name", + "key" + ] + } + }, + { + "name": "dynamodb_item_update", + "description": "Update an item in a DynamoDB table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + }, + "key": { + "type": "object", + "description": 
"Key to identify the item" + }, + "item": { + "type": "object", + "description": "Updated item data" + } + }, + "required": [ + "table_name", + "key", + "item" + ] + } + }, + { + "name": "dynamodb_item_delete", + "description": "Delete an item from a DynamoDB table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + }, + "key": { + "type": "object", + "description": "Key to identify the item" + } + }, + "required": [ + "table_name", + "key" + ] + } + }, + { + "name": "dynamodb_item_query", + "description": "Query items in a DynamoDB table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + }, + "key_condition": { + "type": "string", + "description": "Key condition expression" + }, + "expression_values": { + "type": "object", + "description": "Expression attribute values" + } + }, + "required": [ + "table_name", + "key_condition", + "expression_values" + ] + } + }, + { + "name": "dynamodb_item_scan", + "description": "Scan items in a DynamoDB table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + }, + "filter_expression": { + "type": "string", + "description": "Filter expression" + }, + "expression_attributes": { + "type": "object", + "properties": { + "values": { + "type": "object", + "description": "Expression attribute values" + }, + "names": { + "type": "object", + "description": "Expression attribute names" + } + } + } + }, + "required": [ + "table_name" + ] + } + }, + { + "name": "dynamodb_batch_get", + "description": "Batch get multiple items from DynamoDB tables", + "inputSchema": { + "type": "object", + "properties": { + "request_items": { + "type": "object", + "description": "Map of table names to keys to retrieve", + "additionalProperties": { + "type": "object", + "properties": 
{ + "Keys": { + "type": "array", + "items": { + "type": "object" + } + }, + "ConsistentRead": { + "type": "boolean" + }, + "ProjectionExpression": { + "type": "string" + } + }, + "required": [ + "Keys" + ] + } + } + }, + "required": [ + "request_items" + ] + } + }, + { + "name": "dynamodb_item_batch_write", + "description": "Batch write operations (put/delete) for DynamoDB items", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + }, + "operation": { + "type": "string", + "enum": [ + "put", + "delete" + ], + "description": "Type of batch operation (put or delete)" + }, + "items": { + "type": "array", + "description": "Array of items to process" + }, + "key_attributes": { + "type": "array", + "description": "For delete operations, specify which attributes form the key", + "items": { + "type": "string" + } + } + }, + "required": [ + "table_name", + "operation", + "items" + ] + } + }, + { + "name": "dynamodb_describe_ttl", + "description": "Get the TTL settings for a table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + } + }, + "required": [ + "table_name" + ] + } + }, + { + "name": "dynamodb_update_ttl", + "description": "Update the TTL settings for a table", + "inputSchema": { + "type": "object", + "properties": { + "table_name": { + "type": "string", + "description": "Name of the DynamoDB table" + }, + "ttl_enabled": { + "type": "boolean", + "description": "Whether TTL should be enabled" + }, + "ttl_attribute": { + "type": "string", + "description": "The attribute name to use for TTL" + } + }, + "required": [ + "table_name", + "ttl_enabled", + "ttl_attribute" + ] + } + }, + { + "name": "dynamodb_batch_execute", + "description": "Execute multiple PartiQL statements in a batch", + "inputSchema": { + "type": "object", + "properties": { + "statements": { + "type": "array", + 
"description": "List of PartiQL statements to execute", + "items": { + "type": "string" + } + }, + "parameters": { + "type": "array", + "description": "List of parameter lists for each statement", + "items": { + "type": "array" + } + } + }, + "required": [ + "statements", + "parameters" + ] + } + } + ] + }, + "github-actions": { + "name": "github-actions", + "display_name": "GitHub Actions", + "description": "A Model Context Protocol (MCP) server for interacting with Github Actions.", + "repository": { + "type": "git", + "url": "https://github.com/ko1ynnky/github-actions-mcp-server" + }, + "homepage": "https://github.com/ko1ynnky/github-actions-mcp-server", + "author": { + "name": "ko1ynnky" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "GitHub Actions", + "Workflow Management", + "Automation" + ], + "examples": [ + { + "title": "List Workflows", + "description": "List workflows in a GitHub repository.", + "prompt": "const result = await listWorkflows({ owner: 'your-username', repo: 'your-repository' });" + }, + { + "title": "Trigger Workflow", + "description": "Trigger a workflow in a GitHub repository.", + "prompt": "const result = await triggerWorkflow({ owner: 'your-username', repo: 'your-repository', workflowId: 'ci.yml', ref: 'main', inputs: { environment: 'production' }});" + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/ko1ynnky/github-actions-mcp-server" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_PERSONAL_ACCESS_TOKEN}" + } + } + }, + "arguments": { + "GITHUB_PERSONAL_ACCESS_TOKEN": { + "description": "A personal access token required for authentication with GitHub API, used to access user repositories and perform actions.", + "required": true, + "example": "ghp_16CharTokenHere" + } + } + }, + "docker": { + "name": "docker", + "display_name": "Docker Integration", + "description": "Integrate with Docker to manage containers, images, 
volumes, and networks.", + "repository": { + "type": "git", + "url": "https://github.com/ckreiling/mcp-server-docker" + }, + "license": "MIT", + "examples": [ + { + "title": "Deploy an nginx container", + "description": "Deploy an nginx container exposing it on port 9000", + "prompt": "name: `nginx`, containers: \"deploy an nginx container exposing it on port 9000\"" + }, + { + "title": "Deploy a WordPress and MySQL container", + "description": "Deploy a WordPress container and a supporting MySQL container, exposing WordPress on port 9000", + "prompt": "name: `wordpress`, containers: \"deploy a WordPress container and a supporting MySQL container, exposing Wordpress on port 9000\"" + } + ], + "categories": [ + "Dev Tools" + ], + "tags": [ + "Docker", + "Container", + "Image", + "Volume", + "Network" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/ckreiling/mcp-server-docker", + "mcp-server-docker" + ] + } + }, + "tools": [ + { + "name": "list_containers", + "description": "List all Docker containers", + "inputSchema": { + "$defs": { + "ListContainersFilters": { + "properties": { + "label": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by label, either `key` or `key=value` format", + "title": "Label" + } + }, + "title": "ListContainersFilters", + "type": "object" + } + }, + "properties": { + "all": { + "default": false, + "description": "Show all containers (default shows just running)", + "title": "All", + "type": "boolean" + }, + "filters": { + "anyOf": [ + { + "$ref": "#/$defs/ListContainersFilters" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter containers" + } + }, + "title": "ListContainersInput", + "type": "object" + } + }, + { + "name": "create_container", + "description": "Create a new Docker container", + "inputSchema": { + "description": "Schema 
for creating a new container.\n\nThis is passed to the Python Docker SDK directly, so the fields are the same\nas the `docker.containers.create` method.", + "properties": { + "detach": { + "default": true, + "description": "Run container in the background. Should be True for long-running containers, can be false for short-lived containers", + "title": "Detach", + "type": "boolean" + }, + "image": { + "description": "Docker image name", + "title": "Image", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Container name", + "title": "Name" + }, + "entrypoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Entrypoint to run in container", + "title": "Entrypoint" + }, + "command": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Command to run in container", + "title": "Command" + }, + "network": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Network to attach the container to", + "title": "Network" + }, + "environment": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Environment variables dictionary", + "title": "Environment" + }, + "ports": { + "anyOf": [ + { + "additionalProperties": { + "anyOf": [ + { + "type": "integer" + }, + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "maxItems": 2, + "minItems": 2, + "prefixItems": [ + { + "type": "string" + }, + { + "type": "integer" + } + ], + "type": "array" + }, + { + "type": "null" + } + ] + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Mapping of container_port to host_port", + "title": "Ports" + }, + "volumes": { + "anyOf": [ + { + "additionalProperties": { + 
"additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "type": "object" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Volume mappings", + "title": "Volumes" + }, + "labels": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Container labels, either as a dictionary or a list of key=value strings", + "title": "Labels" + }, + "auto_remove": { + "default": false, + "description": "Automatically remove the container", + "title": "Auto Remove", + "type": "boolean" + } + }, + "required": [ + "image" + ], + "title": "CreateContainerInput", + "type": "object" + } + }, + { + "name": "run_container", + "description": "Run an image in a new Docker container", + "inputSchema": { + "description": "Schema for creating a new container.\n\nThis is passed to the Python Docker SDK directly, so the fields are the same\nas the `docker.containers.create` method.", + "properties": { + "detach": { + "default": true, + "description": "Run container in the background. 
Should be True for long-running containers, can be false for short-lived containers", + "title": "Detach", + "type": "boolean" + }, + "image": { + "description": "Docker image name", + "title": "Image", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Container name", + "title": "Name" + }, + "entrypoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Entrypoint to run in container", + "title": "Entrypoint" + }, + "command": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Command to run in container", + "title": "Command" + }, + "network": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Network to attach the container to", + "title": "Network" + }, + "environment": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Environment variables dictionary", + "title": "Environment" + }, + "ports": { + "anyOf": [ + { + "additionalProperties": { + "anyOf": [ + { + "type": "integer" + }, + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "maxItems": 2, + "minItems": 2, + "prefixItems": [ + { + "type": "string" + }, + { + "type": "integer" + } + ], + "type": "array" + }, + { + "type": "null" + } + ] + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Mapping of container_port to host_port", + "title": "Ports" + }, + "volumes": { + "anyOf": [ + { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "type": "object" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Volume mappings", + "title": 
"Volumes" + }, + "labels": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Container labels, either as a dictionary or a list of key=value strings", + "title": "Labels" + }, + "auto_remove": { + "default": false, + "description": "Automatically remove the container", + "title": "Auto Remove", + "type": "boolean" + } + }, + "required": [ + "image" + ], + "title": "CreateContainerInput", + "type": "object" + } + }, + { + "name": "recreate_container", + "description": "Stop and remove a container, then run a new container. Fails if the container does not exist.", + "inputSchema": { + "properties": { + "detach": { + "default": true, + "description": "Run container in the background. Should be True for long-running containers, can be false for short-lived containers", + "title": "Detach", + "type": "boolean" + }, + "image": { + "description": "Docker image name", + "title": "Image", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Container name", + "title": "Name" + }, + "entrypoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Entrypoint to run in container", + "title": "Entrypoint" + }, + "command": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Command to run in container", + "title": "Command" + }, + "network": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Network to attach the container to", + "title": "Network" + }, + "environment": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Environment 
variables dictionary", + "title": "Environment" + }, + "ports": { + "anyOf": [ + { + "additionalProperties": { + "anyOf": [ + { + "type": "integer" + }, + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "maxItems": 2, + "minItems": 2, + "prefixItems": [ + { + "type": "string" + }, + { + "type": "integer" + } + ], + "type": "array" + }, + { + "type": "null" + } + ] + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Mapping of container_port to host_port", + "title": "Ports" + }, + "volumes": { + "anyOf": [ + { + "additionalProperties": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "type": "object" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Volume mappings", + "title": "Volumes" + }, + "labels": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Container labels, either as a dictionary or a list of key=value strings", + "title": "Labels" + }, + "auto_remove": { + "default": false, + "description": "Automatically remove the container", + "title": "Auto Remove", + "type": "boolean" + }, + "container_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Container ID to recreate. 
The `name` parameter will be used if this is not provided", + "title": "Container Id" + } + }, + "required": [ + "image" + ], + "title": "RecreateContainerInput", + "type": "object" + } + }, + { + "name": "start_container", + "description": "Start a Docker container", + "inputSchema": { + "properties": { + "container_id": { + "description": "Container ID or name", + "title": "Container Id", + "type": "string" + } + }, + "required": [ + "container_id" + ], + "title": "ContainerActionInput", + "type": "object" + } + }, + { + "name": "fetch_container_logs", + "description": "Fetch logs for a Docker container", + "inputSchema": { + "properties": { + "container_id": { + "description": "Container ID or name", + "title": "Container Id", + "type": "string" + }, + "tail": { + "anyOf": [ + { + "type": "integer" + }, + { + "const": "all", + "type": "string" + } + ], + "default": 100, + "description": "Number of lines to show from the end", + "title": "Tail" + } + }, + "required": [ + "container_id" + ], + "title": "FetchContainerLogsInput", + "type": "object" + } + }, + { + "name": "stop_container", + "description": "Stop a Docker container", + "inputSchema": { + "properties": { + "container_id": { + "description": "Container ID or name", + "title": "Container Id", + "type": "string" + } + }, + "required": [ + "container_id" + ], + "title": "ContainerActionInput", + "type": "object" + } + }, + { + "name": "remove_container", + "description": "Remove a Docker container", + "inputSchema": { + "properties": { + "container_id": { + "description": "Container ID or name", + "title": "Container Id", + "type": "string" + }, + "force": { + "default": false, + "description": "Force remove the container", + "title": "Force", + "type": "boolean" + } + }, + "required": [ + "container_id" + ], + "title": "RemoveContainerInput", + "type": "object" + } + }, + { + "name": "list_images", + "description": "List Docker images", + "inputSchema": { + "$defs": { + "ListImagesFilters": { + 
"properties": { + "dangling": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Show dangling images", + "title": "Dangling" + }, + "label": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by label, either `key` or `key=value` format", + "title": "Label" + } + }, + "title": "ListImagesFilters", + "type": "object" + } + }, + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter images by repository name, if desired", + "title": "Name" + }, + "all": { + "default": false, + "description": "Show all images (default hides intermediate)", + "title": "All", + "type": "boolean" + }, + "filters": { + "anyOf": [ + { + "$ref": "#/$defs/ListImagesFilters" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter images" + } + }, + "title": "ListImagesInput", + "type": "object" + } + }, + { + "name": "pull_image", + "description": "Pull a Docker image", + "inputSchema": { + "properties": { + "repository": { + "description": "Image repository", + "title": "Repository", + "type": "string" + }, + "tag": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "latest", + "description": "Image tag", + "title": "Tag" + } + }, + "required": [ + "repository" + ], + "title": "PullPushImageInput", + "type": "object" + } + }, + { + "name": "push_image", + "description": "Push a Docker image", + "inputSchema": { + "properties": { + "repository": { + "description": "Image repository", + "title": "Repository", + "type": "string" + }, + "tag": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "latest", + "description": "Image tag", + "title": "Tag" + } + }, + "required": [ + "repository" + ], + "title": "PullPushImageInput", + "type": "object" + } + }, + { 
+ "name": "build_image", + "description": "Build a Docker image from a Dockerfile", + "inputSchema": { + "properties": { + "path": { + "description": "Path to build context", + "title": "Path", + "type": "string" + }, + "tag": { + "description": "Image tag", + "title": "Tag", + "type": "string" + }, + "dockerfile": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Path to Dockerfile", + "title": "Dockerfile" + } + }, + "required": [ + "path", + "tag" + ], + "title": "BuildImageInput", + "type": "object" + } + }, + { + "name": "remove_image", + "description": "Remove a Docker image", + "inputSchema": { + "properties": { + "image": { + "description": "Image ID or name", + "title": "Image", + "type": "string" + }, + "force": { + "default": false, + "description": "Force remove the image", + "title": "Force", + "type": "boolean" + } + }, + "required": [ + "image" + ], + "title": "RemoveImageInput", + "type": "object" + } + }, + { + "name": "list_networks", + "description": "List Docker networks", + "inputSchema": { + "$defs": { + "ListNetworksFilter": { + "properties": { + "label": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter by label, either `key` or `key=value` format", + "title": "Label" + } + }, + "title": "ListNetworksFilter", + "type": "object" + } + }, + "properties": { + "filters": { + "anyOf": [ + { + "$ref": "#/$defs/ListNetworksFilter" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter networks" + } + }, + "title": "ListNetworksInput", + "type": "object" + } + }, + { + "name": "create_network", + "description": "Create a Docker network", + "inputSchema": { + "properties": { + "name": { + "description": "Network name", + "title": "Name", + "type": "string" + }, + "driver": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "bridge", 
+ "description": "Network driver", + "title": "Driver" + }, + "internal": { + "default": false, + "description": "Create an internal network", + "title": "Internal", + "type": "boolean" + }, + "labels": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Network labels", + "title": "Labels" + } + }, + "required": [ + "name" + ], + "title": "CreateNetworkInput", + "type": "object" + } + }, + { + "name": "remove_network", + "description": "Remove a Docker network", + "inputSchema": { + "properties": { + "network_id": { + "description": "Network ID or name", + "title": "Network Id", + "type": "string" + } + }, + "required": [ + "network_id" + ], + "title": "RemoveNetworkInput", + "type": "object" + } + }, + { + "name": "list_volumes", + "description": "List Docker volumes", + "inputSchema": { + "properties": {}, + "title": "ListVolumesInput", + "type": "object" + } + }, + { + "name": "create_volume", + "description": "Create a Docker volume", + "inputSchema": { + "properties": { + "name": { + "description": "Volume name", + "title": "Name", + "type": "string" + }, + "driver": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "local", + "description": "Volume driver", + "title": "Driver" + }, + "labels": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Volume labels", + "title": "Labels" + } + }, + "required": [ + "name" + ], + "title": "CreateVolumeInput", + "type": "object" + } + }, + { + "name": "remove_volume", + "description": "Remove a Docker volume", + "inputSchema": { + "properties": { + "volume_name": { + "description": "Volume name", + "title": "Volume Name", + "type": "string" + }, + "force": { + "default": false, + "description": "Force remove the volume", + "title": "Force", + "type": "boolean" + } + 
}, + "required": [ + "volume_name" + ], + "title": "RemoveVolumeInput", + "type": "object" + } + } + ] + }, + "opik-mcp": { + "display_name": "Opik MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/comet-ml/opik-mcp" + }, + "homepage": "https://www.comet.com/site/products/opik/", + "author": { + "name": "comet-ml" + }, + "license": "Apache 2.0", + "tags": [ + "MCP", + "Opik", + "IDE Integration" + ], + "arguments": { + "apiUrl": { + "description": "URL for the Opik API", + "required": true, + "example": "https://www.comet.com/opik/api" + }, + "apiKey": { + "description": "Your Opik API key", + "required": true, + "example": "YOUR_API_KEY" + }, + "workspace": { + "description": "Workspace name", + "required": true, + "example": "default" + }, + "debug": { + "description": "Enable debug mode", + "required": false, + "example": "true" + } + }, + "installations": { + "custom": { + "type": "custom", + "command": "node", + "args": [ + "/path/to/opik-mcp/build/index.js" + ], + "env": { + "OPIK_API_BASE_URL": "https://www.comet.com/opik/api", + "OPIK_API_KEY": "YOUR_API_KEY", + "OPIK_WORKSPACE_NAME": "default" + }, + "description": "Manual installation from source" + } + }, + "examples": [ + { + "title": "Cursor IDE Integration", + "description": "Configure Opik MCP Server in Cursor IDE", + "prompt": "Create a .cursor/mcp.json file with the Opik MCP Server configuration" + } + ], + "name": "opik-mcp", + "description": " Query and analyze your Opik logs, traces, prompts and all other telemtry data from your LLMs in natural language.", + "categories": [ + "MCP Tools" + ], + "is_official": true + }, + "openrpc": { + "name": "openrpc", + "display_name": "OpenRPC", + "description": "Interact with and discover JSON-RPC APIs via [OpenRPC](https://open-rpc.org/).", + "repository": { + "type": "git", + "url": "https://github.com/shanejonas/openrpc-mpc-server" + }, + "homepage": "https://github.com/shanejonas/openrpc-mpc-server", + "author": { + "name": 
"shanejonas" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "OpenRPC", + "JSON-RPC" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "openrpc-mpc-server" + ] + } + }, + "tools": [ + { + "name": "rpc_call", + "description": "Call any JSON-RPC method on a server with parameters. A user would prompt: Call method on with params ", + "inputSchema": { + "type": "object", + "properties": { + "server": { + "type": "string", + "description": "Server URL" + }, + "method": { + "type": "string", + "description": "JSON-RPC method name to call" + }, + "params": { + "type": "string", + "description": "Stringified Parameters to pass to the method" + } + }, + "required": [ + "server", + "method" + ] + } + }, + { + "name": "rpc_discover", + "description": "This uses JSON-RPC to call `rpc.discover` which is part of the OpenRPC Specification for discovery for JSON-RPC servers. A user would prompt: What JSON-RPC methods does this server have? 
", + "inputSchema": { + "type": "object", + "properties": { + "server": { + "type": "string", + "description": "Server URL" + } + }, + "required": [ + "server" + ] + } + } + ] + }, + "xero-mcp-server": { + "display_name": "Xero MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/XeroAPI/xero-mcp-server" + }, + "homepage": "https://github.com/XeroAPI/xero-mcp-server", + "author": { + "name": "XeroAPI" + }, + "license": "MIT", + "tags": [ + "xero", + "accounting", + "mcp", + "oauth2" + ], + "arguments": { + "XERO_CLIENT_ID": { + "description": "Your Xero API client ID from your developer account", + "required": true, + "example": "your_client_id_here" + }, + "XERO_CLIENT_SECRET": { + "description": "Your Xero API client secret from your developer account", + "required": true, + "example": "your_client_secret_here" + } + }, + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@xeroapi/xero-mcp-server@latest" + ], + "env": { + "XERO_CLIENT_ID": "your_client_id_here", + "XERO_CLIENT_SECRET": "your_client_secret_here" + }, + "description": "Run directly using npx" + } + }, + "examples": [ + { + "title": "List Contacts", + "description": "Retrieve a list of contacts from Xero", + "prompt": "List all my Xero contacts" + }, + { + "title": "Create Invoice", + "description": "Create a new invoice in Xero", + "prompt": "Create a new invoice in Xero" + }, + { + "title": "List Accounts", + "description": "Retrieve a list of accounts from Xero", + "prompt": "Show me my chart of accounts in Xero" + } + ], + "name": "xero-mcp-server", + "description": "This is a Model Context Protocol (MCP) server implementation for Xero. It provides a bridge between the MCP protocol and Xero's API, allowing for standardized access to Xero's accounting and business features.", + "categories": [ + "Finance" + ], + "tools": [ + { + "name": "list-contacts", + "description": "List all contacts in Xero. 
This includes Suppliers and Customers.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list-invoices", + "description": "List invoices in Xero. This includes Draft, Submitted, and Paid invoices. Ask the user if they want to see invoices for a specific contact, invoice number, or to see all invoices before running. Ask the user if they want the next page of invoices after running this tool if 10 invoices are returned. If they want the next page, call this tool again with the next page number and the contact or invoice number if one was provided in the previous call.", + "inputSchema": { + "type": "object", + "properties": { + "page": { + "type": "number" + }, + "contactIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "invoiceNumbers": { + "type": "array", + "items": { + "type": "string" + }, + "description": "If provided, invoice line items will also be returned" + } + }, + "required": [ + "page" + ] + } + }, + { + "name": "create-contact", + "description": "Create a contact in Xero. When a contact is created, a deep link to the contact in Xero is returned. This deep link can be used to view the contact in Xero directly. This link should be displayed to the user.", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "email": { + "type": "string", + "format": "email" + }, + "phone": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + }, + { + "name": "create-invoice", + "description": "Create an invoice in Xero. When an invoice is created, a deep link to the invoice in Xero is returned. This deep link can be used to view the invoice in Xero directly. 
This link should be displayed to the user.", + "inputSchema": { + "type": "object", + "properties": { + "contactId": { + "type": "string" + }, + "lineItems": { + "type": "array", + "items": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "quantity": { + "type": "number" + }, + "unitAmount": { + "type": "number" + }, + "accountCode": { + "type": "string" + }, + "taxType": { + "type": "string" + } + }, + "required": [ + "description", + "quantity", + "unitAmount", + "accountCode", + "taxType" + ], + "additionalProperties": false + } + }, + "reference": { + "type": "string" + } + }, + "required": [ + "contactId", + "lineItems" + ] + } + }, + { + "name": "list-accounts", + "description": "Lists all accounts in Xero. Use this tool to get the account codes and names to be used when creating invoices in Xero", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list-tax-rates", + "description": "Lists all tax rates in Xero. Use this tool to get the tax rates to be used when creating invoices in Xero", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "list-quotes", + "description": "List all quotes in Xero. \n Ask the user if they want to see quotes for a specific contact before running. \n Ask the user if they want the next page of quotes after running this tool if 10 quotes are returned. \n If they do, call this tool again with the page number and the contact provided in the previous call.", + "inputSchema": { + "type": "object", + "properties": { + "page": { + "type": "number" + }, + "contactId": { + "type": "string" + } + }, + "required": [ + "page" + ] + } + }, + { + "name": "create-quote", + "description": "Create a quote in Xero. When a quote is created, a deep link to the quote in Xero is returned. This deep link can be used to view the quote in Xero directly. 
This link should be displayed to the user.", + "inputSchema": { + "type": "object", + "properties": { + "contactId": { + "type": "string" + }, + "lineItems": { + "type": "array", + "items": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "quantity": { + "type": "number" + }, + "unitAmount": { + "type": "number" + }, + "accountCode": { + "type": "string" + }, + "taxType": { + "type": "string" + } + }, + "required": [ + "description", + "quantity", + "unitAmount", + "accountCode", + "taxType" + ], + "additionalProperties": false + } + }, + "reference": { + "type": "string" + }, + "quoteNumber": { + "type": "string" + }, + "terms": { + "type": "string" + }, + "title": { + "type": "string" + }, + "summary": { + "type": "string" + } + }, + "required": [ + "contactId", + "lineItems" + ] + } + }, + { + "name": "update-contact", + "description": "Update a contact in Xero. When a contact is updated, a deep link to the contact in Xero is returned. This deep link can be used to view the contact in Xero directly. This link should be displayed to the user.", + "inputSchema": { + "type": "object", + "properties": { + "contactId": { + "type": "string" + }, + "name": { + "type": "string" + }, + "firstName": { + "type": "string" + }, + "lastName": { + "type": "string" + }, + "email": { + "type": "string", + "format": "email" + }, + "phone": { + "type": "string" + }, + "address": { + "type": "object", + "properties": { + "addressLine1": { + "type": "string" + }, + "addressLine2": { + "type": "string" + }, + "city": { + "type": "string" + }, + "region": { + "type": "string" + }, + "postalCode": { + "type": "string" + }, + "country": { + "type": "string" + } + }, + "required": [ + "addressLine1" + ], + "additionalProperties": false + } + }, + "required": [ + "contactId", + "name" + ] + } + }, + { + "name": "update-invoice", + "description": "Update an invoice in Xero. Only works on draft invoices. All line items must be provided. 
Any line items not provided will be removed. Including existing line items. Do not modify line items that have not been specified by the user. When an invoice is updated, a deep link to the invoice in Xero is returned. This deep link can be used to view the contact in Xero directly. This link should be displayed to the user.", + "inputSchema": { + "type": "object", + "properties": { + "invoiceId": { + "type": "string" + }, + "lineItems": { + "type": "array", + "items": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "quantity": { + "type": "number" + }, + "unitAmount": { + "type": "number" + }, + "accountCode": { + "type": "string" + }, + "taxType": { + "type": "string" + } + }, + "required": [ + "description", + "quantity", + "unitAmount", + "accountCode", + "taxType" + ], + "additionalProperties": false + }, + "description": "All line items must be provided. Any line items not provided will be removed. Including existing line items. Do not modify line items that have not been specified by the user" + }, + "reference": { + "type": "string" + }, + "dueDate": { + "type": "string" + } + }, + "required": [ + "invoiceId" + ] + } + }, + { + "name": "list-credit-notes", + "description": "List credit notes in Xero. \n Ask the user if they want to see credit notes for a specific contact,\n or to see all credit notes before running. \n Ask the user if they want the next page of credit notes after running this tool \n if 10 credit notes are returned. \n If they want the next page, call this tool again with the next page number \n and the contact if one was provided in the previous call.", + "inputSchema": { + "type": "object", + "properties": { + "page": { + "type": "number" + }, + "contactId": { + "type": "string" + } + }, + "required": [ + "page" + ] + } + }, + { + "name": "create-credit-note", + "description": "Create a credit note in Xero. When a credit note is created, a deep link to the credit note in Xero is returned. 
This deep link can be used to view the credit note in Xero directly. This link should be displayed to the user.", + "inputSchema": { + "type": "object", + "properties": { + "contactId": { + "type": "string" + }, + "lineItems": { + "type": "array", + "items": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "quantity": { + "type": "number" + }, + "unitAmount": { + "type": "number" + }, + "accountCode": { + "type": "string" + }, + "taxType": { + "type": "string" + } + }, + "required": [ + "description", + "quantity", + "unitAmount", + "accountCode", + "taxType" + ], + "additionalProperties": false + } + }, + "reference": { + "type": "string" + } + }, + "required": [ + "contactId", + "lineItems" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "home-assistant": { + "name": "home-assistant", + "display_name": "Hass", + "description": "Docker-ready MCP server for Home Assistant with entity management, domain summaries, automation support, and guided conversations. Includes pre-built container images for easy installation.", + "repository": { + "type": "git", + "url": "https://github.com/voska/hass-mcp" + }, + "homepage": "https://github.com/voska/hass-mcp", + "author": { + "name": "voska" + }, + "license": "MIT", + "categories": [ + "System Tools" + ], + "tags": [ + "Home Assistant", + "Claude", + "LLM", + "Automation" + ], + "installations": { + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "HA_URL", + "-e", + "HA_TOKEN", + "voska/hass-mcp" + ], + "env": { + "HA_URL": "http://homeassistant.local:8123", + "HA_TOKEN": "YOUR_LONG_LIVED_TOKEN" + } + } + }, + "examples": [ + { + "title": "Get Current State", + "description": "Retrieve the current state of a specific device.", + "prompt": "What's the current state of my living room lights?" 
+ }, + { + "title": "Turn Off Lights", + "description": "Command to turn off lights in a specific area.", + "prompt": "Turn off all the lights in the kitchen" + }, + { + "title": "List Temperature Sensors", + "description": "List all sensors related to temperature readings.", + "prompt": "List all my sensors that contain temperature data" + }, + { + "title": "Climate Summary", + "description": "Get a summary of climate-related entities.", + "prompt": "Give me a summary of my climate entities" + }, + { + "title": "Create Automation", + "description": "Create an automation based on a specific condition.", + "prompt": "Create an automation that turns on the lights at sunset" + }, + { + "title": "Troubleshoot Automation", + "description": "Help troubleshoot an automation issue.", + "prompt": "Help me troubleshoot why my bedroom motion sensor automation isn't working" + }, + { + "title": "Search Entities", + "description": "Search for specific entities related to a query.", + "prompt": "Search for entities related to my living room" + } + ], + "arguments": { + "HA_URL": { + "description": "The URL for the Home Assistant instance where the Hass-MCP server will connect to retrieve and manage entities.", + "required": true, + "example": "http://homeassistant.local:8123" + }, + "HA_TOKEN": { + "description": "The Long-Lived Access Token from Home Assistant, required for authentication to access the Home Assistant API.", + "required": true, + "example": "YOUR_LONG_LIVED_TOKEN" + } + }, + "tools": [ + { + "name": "get_version", + "description": "\nGet the Home Assistant version\n\nReturns:\n A string with the Home Assistant version (e.g., \"2025.3.0\")\n", + "inputSchema": { + "properties": {}, + "title": "get_versionArguments", + "type": "object" + } + }, + { + "name": "get_entity", + "description": "\nGet the state of a Home Assistant entity with optional field filtering\n\nArgs:\n entity_id: The entity ID to get (e.g. 
'light.living_room')\n fields: Optional list of fields to include (e.g. ['state', 'attr.brightness'])\n detailed: If True, returns all entity fields without filtering\n \nExamples:\n entity_id=\"light.living_room\" - basic state check\n entity_id=\"light.living_room\", fields=[\"state\", \"attr.brightness\"] - specific fields\n entity_id=\"light.living_room\", detailed=True - all details\n", + "inputSchema": { + "properties": { + "entity_id": { + "title": "Entity Id", + "type": "string" + }, + "fields": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Fields" + }, + "detailed": { + "default": false, + "title": "Detailed", + "type": "boolean" + } + }, + "required": [ + "entity_id" + ], + "title": "get_entityArguments", + "type": "object" + } + }, + { + "name": "entity_action", + "description": "\nPerform an action on a Home Assistant entity (on, off, toggle)\n\nArgs:\n entity_id: The entity ID to control (e.g. 
'light.living_room')\n action: The action to perform ('on', 'off', 'toggle')\n **params: Additional parameters for the service call\n\nReturns:\n The response from Home Assistant\n\nExamples:\n entity_id=\"light.living_room\", action=\"on\", brightness=255\n entity_id=\"switch.garden_lights\", action=\"off\"\n entity_id=\"climate.living_room\", action=\"on\", temperature=22.5\n\nDomain-Specific Parameters:\n - Lights: brightness (0-255), color_temp, rgb_color, transition, effect\n - Covers: position (0-100), tilt_position\n - Climate: temperature, target_temp_high, target_temp_low, hvac_mode\n - Media players: source, volume_level (0-1)\n", + "inputSchema": { + "properties": { + "entity_id": { + "title": "Entity Id", + "type": "string" + }, + "action": { + "title": "Action", + "type": "string" + }, + "params": { + "title": "params", + "type": "string" + } + }, + "required": [ + "entity_id", + "action", + "params" + ], + "title": "entity_actionArguments", + "type": "object" + } + }, + { + "name": "list_entities", + "description": "\nGet a list of Home Assistant entities with optional filtering\n\nArgs:\n domain: Optional domain to filter by (e.g., 'light', 'switch', 'sensor')\n search_query: Optional search term to filter entities by name, id, or attributes\n (Note: Does not support wildcards. 
To get all entities, leave this empty)\n limit: Maximum number of entities to return (default: 100)\n fields: Optional list of specific fields to include in each entity\n detailed: If True, returns all entity fields without filtering\n\nReturns:\n A list of entity dictionaries with lean formatting by default\n\nExamples:\n domain=\"light\" - get all lights\n search_query=\"kitchen\", limit=20 - search entities\n domain=\"sensor\", detailed=True - full sensor details\n\nBest Practices:\n - Use lean format (default) for most operations\n - Prefer domain filtering over no filtering\n - For domain overviews, use domain_summary_tool instead of list_entities\n - Only request detailed=True when necessary for full attribute inspection\n - To get all entity types/domains, use list_entities without a domain filter, \n then extract domains from entity_ids\n", + "inputSchema": { + "properties": { + "domain": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Domain" + }, + "search_query": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Search Query" + }, + "limit": { + "default": 100, + "title": "Limit", + "type": "integer" + }, + "fields": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Fields" + }, + "detailed": { + "default": false, + "title": "Detailed", + "type": "boolean" + } + }, + "title": "list_entitiesArguments", + "type": "object" + } + }, + { + "name": "search_entities_tool", + "description": "\nSearch for entities matching a query string\n\nArgs:\n query: The search query to match against entity IDs, names, and attributes.\n (Note: Does not support wildcards. 
To get all entities, leave this blank or use list_entities tool)\n limit: Maximum number of results to return (default: 20)\n\nReturns:\n A dictionary containing search results and metadata:\n - count: Total number of matching entities found\n - results: List of matching entities with essential information\n - domains: Map of domains with counts (e.g. {\"light\": 3, \"sensor\": 2})\n \nExamples:\n query=\"temperature\" - find temperature entities\n query=\"living room\", limit=10 - find living room entities\n query=\"\", limit=500 - list all entity types\n \n", + "inputSchema": { + "properties": { + "query": { + "title": "Query", + "type": "string" + }, + "limit": { + "default": 20, + "title": "Limit", + "type": "integer" + } + }, + "required": [ + "query" + ], + "title": "search_entities_toolArguments", + "type": "object" + } + }, + { + "name": "domain_summary_tool", + "description": "\nGet a summary of entities in a specific domain\n\nArgs:\n domain: The domain to summarize (e.g., 'light', 'switch', 'sensor')\n example_limit: Maximum number of examples to include for each state\n\nReturns:\n A dictionary containing:\n - total_count: Number of entities in the domain\n - state_distribution: Count of entities in each state\n - examples: Sample entities for each state\n - common_attributes: Most frequently occurring attributes\n \nExamples:\n domain=\"light\" - get light summary\n domain=\"climate\", example_limit=5 - climate summary with more examples\nBest Practices:\n - Use this before retrieving all entities in a domain to understand what's available ", + "inputSchema": { + "properties": { + "domain": { + "title": "Domain", + "type": "string" + }, + "example_limit": { + "default": 3, + "title": "Example Limit", + "type": "integer" + } + }, + "required": [ + "domain" + ], + "title": "domain_summary_toolArguments", + "type": "object" + } + }, + { + "name": "system_overview", + "description": "\nGet a comprehensive overview of the entire Home Assistant 
system\n\nReturns:\n A dictionary containing:\n - total_entities: Total count of all entities\n - domains: Dictionary of domains with their entity counts and state distributions\n - domain_samples: Representative sample entities for each domain (2-3 per domain)\n - domain_attributes: Common attributes for each domain\n - area_distribution: Entities grouped by area (if available)\n \nExamples:\n Returns domain counts, sample entities, and common attributes\nBest Practices:\n - Use this as the first call when exploring an unfamiliar Home Assistant instance\n - Perfect for building context about the structure of the smart home\n - After getting an overview, use domain_summary_tool to dig deeper into specific domains\n", + "inputSchema": { + "properties": {}, + "title": "system_overviewArguments", + "type": "object" + } + }, + { + "name": "list_automations", + "description": "\nGet a list of all automations from Home Assistant\n\nThis function retrieves all automations configured in Home Assistant,\nincluding their IDs, entity IDs, state, and display names.\n\nReturns:\n A list of automation dictionaries, each containing id, entity_id, \n state, and alias (friendly name) fields.\n \nExamples:\n Returns all automation objects with state and friendly names\n\n", + "inputSchema": { + "properties": {}, + "title": "list_automationsArguments", + "type": "object" + } + }, + { + "name": "restart_ha", + "description": "\nRestart Home Assistant\n\n\u26a0\ufe0f WARNING: Temporarily disrupts all Home Assistant operations\n\nReturns:\n Result of restart operation\n", + "inputSchema": { + "properties": {}, + "title": "restart_haArguments", + "type": "object" + } + }, + { + "name": "call_service_tool", + "description": "\nCall any Home Assistant service (low-level API access)\n\nArgs:\n domain: The domain of the service (e.g., 'light', 'switch', 'automation')\n service: The service to call (e.g., 'turn_on', 'turn_off', 'toggle')\n data: Optional data to pass to the service (e.g., 
{'entity_id': 'light.living_room'})\n\nReturns:\n The response from Home Assistant (usually empty for successful calls)\n\nExamples:\n domain='light', service='turn_on', data={'entity_id': 'light.x', 'brightness': 255}\n domain='automation', service='reload'\n domain='fan', service='set_percentage', data={'entity_id': 'fan.x', 'percentage': 50}\n\n", + "inputSchema": { + "properties": { + "domain": { + "title": "Domain", + "type": "string" + }, + "service": { + "title": "Service", + "type": "string" + }, + "data": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Data" + } + }, + "required": [ + "domain", + "service" + ], + "title": "call_service_toolArguments", + "type": "object" + } + }, + { + "name": "get_history", + "description": "\nGet the history of an entity's state changes\n\nArgs:\n entity_id: The entity ID to get history for\n hours: Number of hours of history to retrieve (default: 24)\n\nReturns:\n A dictionary containing:\n - entity_id: The entity ID requested\n - states: List of state objects with timestamps\n - count: Number of state changes found\n - first_changed: Timestamp of earliest state change\n - last_changed: Timestamp of most recent state change\n \nExamples:\n entity_id=\"light.living_room\" - get 24h history\n entity_id=\"sensor.temperature\", hours=168 - get 7 day history\nBest Practices:\n - Keep hours reasonable (24-72) for token efficiency\n - Use for entities with discrete state changes rather than continuously changing sensors\n - Consider the state distribution rather than every individual state \n", + "inputSchema": { + "properties": { + "entity_id": { + "title": "Entity Id", + "type": "string" + }, + "hours": { + "default": 24, + "title": "Hours", + "type": "integer" + } + }, + "required": [ + "entity_id" + ], + "title": "get_historyArguments", + "type": "object" + } + }, + { + "name": "get_error_log", + "description": "\nGet the Home Assistant error log for 
troubleshooting\n\nReturns:\n A dictionary containing:\n - log_text: The full error log text\n - error_count: Number of ERROR entries found\n - warning_count: Number of WARNING entries found\n - integration_mentions: Map of integration names to mention counts\n - error: Error message if retrieval failed\n \nExamples:\n Returns errors, warnings count and integration mentions\nBest Practices:\n - Use this tool when troubleshooting specific Home Assistant errors\n - Look for patterns in repeated errors\n - Pay attention to timestamps to correlate errors with events\n - Focus on integrations with many mentions in the log \n", + "inputSchema": { + "properties": {}, + "title": "get_error_logArguments", + "type": "object" + } + } + ] + }, + "mcp-neo4j-memory": { + "display_name": "Neo4j MCP (Memory)", + "repository": { + "type": "git", + "url": "https://github.com/neo4j-contrib/mcp-neo4j" + }, + "homepage": "https://github.com/neo4j-contrib/mcp-neo4j", + "author": { + "name": "neo4j-contrib" + }, + "license": "MIT", + "tags": [ + "neo4j", + "mcp", + "knowledge graph" + ], + "arguments": { + "NEO4J_URI": { + "description": "Neo4j database URL", + "required": true, + "example": "neo4j+s://:@.databases.neo4j.com:7687" + }, + "NEO4J_USERNAME": { + "description": "Neo4j username", + "required": true, + "example": "" + }, + "NEO4J_PASSWORD": { + "description": "Neo4j password", + "required": true, + "example": "" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "mcp-neo4j-memory", + "--db-url", + "${NEO4J_URI}", + "--username", + "${NEO4J_USERNAME}", + "--password", + "${NEO4J_PASSWORD}" + ], + "description": "Clone the repository to access multiple Neo4j MCP servers", + "recommended": true + } + }, + "examples": [ + { + "title": "Database Schema Query", + "description": "Get information about what's in the graph database", + "prompt": "What is in this graph?" 
+ }, + { + "title": "Data Visualization", + "description": "Generate charts from graph data", + "prompt": "Render a chart from the top products sold by frequency, total and average volume" + }, + { + "title": "Instance Management", + "description": "List Neo4j Aura instances", + "prompt": "List my instances" + }, + { + "title": "Instance Creation", + "description": "Create a new Neo4j Aura instance", + "prompt": "Create a new instance named mcp-test for Aura Professional with 4GB and Graph Data Science enabled" + }, + { + "title": "Knowledge Storage", + "description": "Store information in the knowledge graph", + "prompt": "Store the fact that I worked on the Neo4j MCP Servers today with Andreas and Oskar" + } + ], + "name": "mcp-neo4j-memory", + "description": "Neo4j graph database server (schema + read/write-cypher) and separate graph database backed memory", + "categories": [ + "Databases" + ], + "is_official": true + }, + "kagimcp": { + "display_name": "Kagi MCP server", + "repository": { + "type": "git", + "url": "https://github.com/kagisearch/kagimcp" + }, + "homepage": "https://github.com/kagisearch/kagimcp", + "author": { + "name": "kagisearch" + }, + "license": "MIT", + "tags": [ + "search", + "summarizer" + ], + "arguments": { + "KAGI_API_KEY": { + "description": "Your Kagi API key", + "required": true, + "example": "YOUR_API_KEY_HERE" + }, + "KAGI_SUMMARIZER_ENGINE": { + "description": "Summarizer engine choice (defaults to 'cecil')", + "required": false, + "example": "daphne" + }, + "FASTMCP_LOG_LEVEL": { + "description": "Level of logging", + "required": false, + "example": "ERROR" + } + }, + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "kagimcp" + ], + "env": { + "KAGI_API_KEY": "YOUR_API_KEY_HERE", + "KAGI_SUMMARIZER_ENGINE": "YOUR_ENGINE_CHOICE_HERE" + }, + "recommended": true + } + }, + "examples": [ + { + "title": "Search Example", + "description": "Use Kagi search to answer a factual question", + "prompt": "Who 
was time's 2024 person of the year?" + }, + { + "title": "Summarizer Example", + "description": "Use Kagi to summarize a video", + "prompt": "summarize this video: https://www.youtube.com/watch?v=jNQXAC9IVRw" + } + ], + "name": "kagimcp", + "description": "", + "categories": [ + "Analytics" + ], + "tools": [ + { + "name": "kagi_search_fetch", + "description": "Fetch web results based on one or more queries using the Kagi Search API. Use for general search and when the user explicitly tells you to 'fetch' results/information. Results are from all queries given. They are numbered continuously, so that a user may be able to refer to a result by a specific number.", + "inputSchema": { + "properties": { + "queries": { + "description": "One or more concise, keyword-focused search queries. Include essential context within each query for standalone use.", + "items": { + "type": "string" + }, + "title": "Queries", + "type": "array" + } + }, + "required": [ + "queries" + ], + "title": "kagi_search_fetchArguments", + "type": "object" + } + }, + { + "name": "kagi_summarizer", + "description": "Summarize content from a URL using the Kagi Summarizer API. The Summarizer can summarize any document type (text webpage, video, audio, etc.)", + "inputSchema": { + "properties": { + "url": { + "description": "A URL to a document to summarize.", + "title": "Url", + "type": "string" + }, + "summary_type": { + "default": "summary", + "description": "Type of summary to produce. Options are 'summary' for paragraph prose and 'takeaway' for a bulleted list of key points.", + "enum": [ + "summary", + "takeaway" + ], + "title": "Summary Type", + "type": "string" + }, + "target_language": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Desired output language using language codes (e.g., 'EN' for English). 
If not specified, the document's original language influences the output.", + "title": "Target Language" + } + }, + "required": [ + "url" + ], + "title": "kagi_summarizerArguments", + "type": "object" + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "agentkit": { + "display_name": "Chargebee Model Context Protocol (MCP) Server", + "repository": { + "type": "git", + "url": "https://github.com/chargebee/agentkit" + }, + "homepage": "https://github.com/chargebee/agentkit", + "author": { + "name": "chargebee" + }, + "license": "MIT", + "tags": [ + "MCP", + "Chargebee", + "AI", + "LLM" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@chargebee/mcp@latest" + ], + "recommended": true, + "description": "Run the Chargebee MCP server using Node.js npx" + } + }, + "examples": [ + { + "title": "Search Chargebee Documentation", + "description": "Use the documentation search tool to retrieve detailed information", + "prompt": "Search for information about Chargebee subscription APIs" + }, + { + "title": "Generate Code Snippets", + "description": "Get context-aware code snippets for Chargebee integration", + "prompt": "Create a code sample for implementing a subscription creation flow with Chargebee" + } + ], + "name": "agentkit", + "description": "MCP Server that connects AI agents to Chargebee platform.", + "categories": [ + "Dev Tools" + ], + "tools": [ + { + "name": "chargebee_documentation_search", + "description": "\nDo not use this tool for code generation. For code generation use \"chargebee_code_planner\" tool. \nThis tool will take in parameters about integrating with Chargebee in their application, then search and retrieve relevant Chargebee documentation content.\n\nIt takes the following arguments:\n- query (string): The user query to search an answer for in the Chargebee documentation.\n- language (enum): The programming language for the documentation. 
Check the user's application language.\n- userRequest (string): User's original request to you.\n", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The user query to search an answer for in the Chargebee documentation." + }, + "userRequest": { + "type": "string", + "description": "User's original request to you." + }, + "language": { + "type": "string", + "enum": [ + "node", + "python", + "curl", + "java", + "go", + "ruby", + "php", + "dotnet" + ], + "description": "The programming language for the documentation. Check the user's application language." + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "chargebee_code_planner", + "description": "\nAlways use this tool to get the accurate integeration code guide for Chargebee.\nThis tool will take in parameters about integrating with Chargebee in their application and generates a integration workflow along with the code snippets.\n\nIt takes the following arguments:\n- goal (string): What is the user's goal?\n- language (enum): Programming language the code to be generated in. Check the user's application language.\n", + "inputSchema": { + "type": "object", + "properties": { + "goal": { + "type": "string", + "description": "What is the user's goal?" + }, + "language": { + "type": "string", + "enum": [ + "node", + "python", + "curl", + "java", + "go", + "ruby", + "php", + "dotnet" + ], + "description": "Programming language the code to be generated in. Check the user's application language." 
+ } + }, + "required": [ + "goal" + ] + } + } + ], + "prompts": [], + "resources": [], + "is_official": true + }, + "ns-travel-information": { + "name": "ns-travel-information", + "display_name": "NS Travel Information", + "description": "Access Dutch Railways (NS) real-time train travel information and disruptions through the official NS API.", + "repository": { + "type": "git", + "url": "https://github.com/r-huijts/ns-mcp-server" + }, + "homepage": "https://github.com/r-huijts/ns-mcp-server", + "author": { + "name": "r-huijts" + }, + "license": "MIT", + "categories": [ + "Professional Apps" + ], + "tags": [ + "NS", + "Train", + "Travel", + "Information" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "ns-mcp-server" + ], + "env": { + "NS_API_KEY": "${NS_API_KEY}" + } + } + }, + "examples": [ + { + "title": "Check train status", + "description": "Ask if the 8:15 train from Almere to Amsterdam is running on time.", + "prompt": "Is my usual 8:15 train from Almere to Amsterdam running on time?" + }, + { + "title": "Inquire about delays", + "description": "Find out if there are any delays on a specific route.", + "prompt": "Are there any delays on the Rotterdam-Den Haag route today?" + }, + { + "title": "Alternative routes", + "description": "Seek alternative routes in case of maintenance on the direct line.", + "prompt": "What's the best alternative route to Utrecht if there's maintenance on the direct line?" + }, + { + "title": "Get ticket price", + "description": "Ask for ticket prices for travel between cities.", + "prompt": "How much does a first-class ticket from Amsterdam to Rotterdam cost?" 
+ } + ], + "arguments": { + "NS_API_KEY": { + "description": "Your NS API key, required for authenticating API requests to access NS travel information.", + "required": true, + "example": "your_api_key_here" + } + }, + "tools": [ + { + "name": "get_disruptions", + "description": "Get comprehensive information about current and planned disruptions on the Dutch railway network. Returns details about maintenance work, unexpected disruptions, alternative transport options, impact on travel times, and relevant advice. Can filter for active disruptions and specific disruption types.", + "inputSchema": { + "type": "object", + "properties": { + "isActive": { + "type": "boolean", + "description": "Filter to only return active disruptions" + }, + "type": { + "type": "string", + "description": "Type of disruptions to return (e.g., MAINTENANCE, DISRUPTION)", + "enum": [ + "MAINTENANCE", + "DISRUPTION" + ] + } + } + } + }, + { + "name": "get_travel_advice", + "description": "Get detailed travel routes between two train stations, including transfers, real-time updates, platform information, and journey duration. Can plan trips for immediate departure or for a specific future time, with options to optimize for arrival time. Returns multiple route options with status and crowding information.", + "inputSchema": { + "type": "object", + "properties": { + "fromStation": { + "type": "string", + "description": "Name or code of departure station" + }, + "toStation": { + "type": "string", + "description": "Name or code of destination station" + }, + "dateTime": { + "type": "string", + "description": "Format - date-time (as date-time in RFC3339). 
Datetime that the user want to depart from his origin or or arrive at his destination" + }, + "searchForArrival": { + "type": "boolean", + "description": "If true, dateTime is treated as desired arrival time" + } + }, + "required": [ + "fromStation", + "toStation" + ] + } + }, + { + "name": "get_departures", + "description": "Get real-time departure information for trains from a specific station, including platform numbers, delays, route details, and any relevant travel notes. Returns a list of upcoming departures with timing, destination, and status information.", + "inputSchema": { + "type": "object", + "properties": { + "station": { + "type": "string", + "description": "NS Station code for the station (e.g., ASD for Amsterdam Centraal). Required if uicCode is not provided" + }, + "uicCode": { + "type": "string", + "description": "UIC code for the station. Required if station code is not provided" + }, + "dateTime": { + "type": "string", + "description": "Format - date-time (as date-time in RFC3339). Only supported for departures at foreign stations. Defaults to server time (Europe/Amsterdam)" + }, + "maxJourneys": { + "type": "number", + "description": "Number of departures to return", + "minimum": 1, + "maximum": 100, + "default": 40 + }, + "lang": { + "type": "string", + "description": "Language for localizing the departures list. Only a small subset of text is translated, mainly notes. 
Defaults to Dutch", + "enum": [ + "nl", + "en" + ], + "default": "nl" + } + }, + "oneOf": [ + { + "required": [ + "station" + ] + }, + { + "required": [ + "uicCode" + ] + } + ] + } + }, + { + "name": "get_ovfiets", + "description": "Get OV-fiets availability at a train station", + "inputSchema": { + "type": "object", + "properties": { + "stationCode": { + "type": "string", + "description": "Station code to check OV-fiets availability for (e.g., ASD for Amsterdam Centraal)" + } + }, + "required": [ + "stationCode" + ] + } + }, + { + "name": "get_station_info", + "description": "Get detailed information about a train station", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Station name or code to search for" + }, + "includeNonPlannableStations": { + "type": "boolean", + "description": "Include stations where trains do not stop regularly", + "default": false + }, + "limit": { + "type": "number", + "description": "Maximum number of results to return", + "minimum": 1, + "maximum": 50, + "default": 10 + } + }, + "required": [ + "query" + ] + } + }, + { + "name": "get_current_time_in_rfc3339", + "description": "Get the current server time (Europe/Amsterdam timezone) in RFC3339 format. This can be used as input for other tools that require date-time parameters.", + "inputSchema": { + "type": "object", + "properties": {} + } + }, + { + "name": "get_arrivals", + "description": "Get real-time arrival information for trains at a specific station, including platform numbers, delays, origin stations, and any relevant travel notes. Returns a list of upcoming arrivals with timing, origin, and status information.", + "inputSchema": { + "type": "object", + "properties": { + "station": { + "type": "string", + "description": "NS Station code for the station (e.g., ASD for Amsterdam Centraal). Required if uicCode is not provided" + }, + "uicCode": { + "type": "string", + "description": "UIC code for the station. 
Required if station code is not provided" + }, + "dateTime": { + "type": "string", + "description": "Format - date-time (as date-time in RFC3339). Only supported for arrivals at foreign stations. Defaults to server time (Europe/Amsterdam)" + }, + "maxJourneys": { + "type": "number", + "description": "Number of arrivals to return", + "minimum": 1, + "maximum": 100, + "default": 40 + }, + "lang": { + "type": "string", + "description": "Language for localizing the arrivals list. Only a small subset of text is translated, mainly notes. Defaults to Dutch", + "enum": [ + "nl", + "en" + ], + "default": "nl" + } + }, + "oneOf": [ + { + "required": [ + "station" + ] + }, + { + "required": [ + "uicCode" + ] + } + ] + } + }, + { + "name": "get_prices", + "description": "Get price information for domestic train journeys, including different travel classes, ticket types, and discounts. Returns detailed pricing information with conditions and validity.", + "inputSchema": { + "type": "object", + "properties": { + "fromStation": { + "type": "string", + "description": "UicCode or station code of the origin station" + }, + "toStation": { + "type": "string", + "description": "UicCode or station code of the destination station" + }, + "travelClass": { + "type": "string", + "description": "Travel class to return the price for", + "enum": [ + "FIRST_CLASS", + "SECOND_CLASS" + ] + }, + "travelType": { + "type": "string", + "description": "Return the price for a single or return trip", + "enum": [ + "single", + "return" + ], + "default": "single" + }, + "isJointJourney": { + "type": "boolean", + "description": "Set to true to return the price including joint journey discount", + "default": false + }, + "adults": { + "type": "integer", + "description": "Number of adults to return the price for", + "minimum": 1, + "default": 1 + }, + "children": { + "type": "integer", + "description": "Number of children to return the price for", + "minimum": 0, + "default": 0 + }, + "routeId": { + "type": 
"string", + "description": "Specific identifier for the route to take between the two stations. This routeId is returned in the /api/v3/trips call." + }, + "plannedDepartureTime": { + "type": "string", + "description": "Format - date-time (as date-time in RFC3339). Used to find the correct route if multiple routes are possible." + }, + "plannedArrivalTime": { + "type": "string", + "description": "Format - date-time (as date-time in RFC3339). Used to find the correct route if multiple routes are possible." + } + }, + "required": [ + "fromStation", + "toStation" + ] + } + } + ] + }, + "unity-catalog": { + "name": "unity-catalog", + "display_name": "Unity Catalog", + "description": "An MCP server that enables LLMs to interact with Unity Catalog AI, supporting CRUD operations on Unity Catalog Functions and executing them as MCP tools.", + "repository": { + "type": "git", + "url": "https://github.com/ognis1205/mcp-server-unitycatalog" + }, + "homepage": "https://github.com/ognis1205/mcp-server-unitycatalog", + "author": { + "name": "ognis1205" + }, + "license": "MIT", + "categories": [ + "Dev Tools" + ], + "tags": [ + "Unity Catalog", + "API", + "Functions" + ], + "installations": { + "uvx": { + "type": "uvx", + "command": "uvx", + "args": [ + "--from", + "git+https://github.com/ognis1205/mcp-server-unitycatalog", + "mcp-server-unitycatalog", + "--uc_server", + "${UC_SERVER}", + "--uc_catalog", + "${UC_CATALOG}", + "--uc_schema", + "${UC_SCHEMA}" + ] + }, + "docker": { + "type": "docker", + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "mcp/unitycatalog", + "--uc_server", + "${UC_SERVER}", + "--uc_catalog", + "${UC_CATALOG}", + "--uc_schema", + "${UC_SCHEMA}" + ] + } + }, + "arguments": { + "UC_SERVER": { + "description": "The base URL of the Unity Catalog server.", + "required": true, + "example": "https://my-unity-catalog.com" + }, + "UC_CATALOG": { + "description": "The name of the Unity Catalog catalog.", + "required": true, + "example": "my_catalog" 
+ }, + "UC_SCHEMA": { + "description": "The name of the schema within a Unity Catalog catalog.", + "required": true, + "example": "my_schema" + } + } + }, + "typesense": { + "name": "typesense", + "display_name": "Typesense", + "description": "A Model Context Protocol (MCP) server implementation that provides AI models with access to Typesense search capabilities. This server enables LLMs to discover, search, and analyze data stored in Typesense collections.", + "repository": { + "type": "git", + "url": "https://github.com/suhail-ak-s/mcp-typesense-server" + }, + "homepage": "https://github.com/suhail-ak-s/mcp-typesense-server", + "author": { + "name": "suhail-ak-s" + }, + "license": "MIT", + "categories": [ + "Databases" + ], + "tags": [ + "Typesense", + "Server", + "Search" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "typesense-mcp-server", + "--host", + "${TYPESENSE_HOST}", + "--port", + "8108", + "--protocol", + "http", + "--api-key", + "${API_KEY}" + ] + } + }, + "examples": [ + { + "title": "Example Usage with Claude Desktop", + "description": "Configuration for using Typesense MCP Server with Claude Desktop.", + "prompt": "{\"mcpServers\": {\"typesense\": {\"command\": \"npx\",\"args\": [\"-y\",\"typesense-mcp-server\",\"--host\", \"your-typesense-host\",\"--port\", \"8108\",\"--protocol\", \"http\",\"--api-key\", \"your-api-key\"]}}}" + } + ], + "arguments": { + "TYPESENSE_HOST": { + "description": "The host for the Typesense server. This is the address where your Typesense server is running.", + "required": true, + "example": "localhost" + }, + "API_KEY": { + "description": "The API key for accessing the Typesense server. 
This is needed for authentication when making requests to the server.", + "required": true, + "example": "your_api_key_here" + } + }, + "tools": [ + { + "name": "typesense_query", + "description": "Search for relevant documents in the TypeSense database based on the user's query.", + "inputSchema": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The search query entered by the user." + }, + "collection": { + "type": "string", + "description": "The name of the TypeSense collection to search within." + }, + "query_by": { + "type": "string", + "description": "Comma-separated fields to search in the collection, e.g., 'title,content'." + }, + "filter_by": { + "type": "string", + "description": "Optional filtering criteria, e.g., 'category:Chatbot'." + }, + "sort_by": { + "type": "string", + "description": "Sorting criteria, e.g., 'created_at:desc'." + }, + "limit": { + "type": "integer", + "description": "The maximum number of results to return.", + "default": 10 + } + }, + "required": [ + "query", + "collection", + "query_by" + ] + } + }, + { + "name": "typesense_get_document", + "description": "Retrieve a specific document by ID from a Typesense collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "The name of the TypeSense collection" + }, + "document_id": { + "type": "string", + "description": "The ID of the document to retrieve" + } + }, + "required": [ + "collection", + "document_id" + ] + } + }, + { + "name": "typesense_collection_stats", + "description": "Get statistics about a Typesense collection", + "inputSchema": { + "type": "object", + "properties": { + "collection": { + "type": "string", + "description": "The name of the TypeSense collection" + } + }, + "required": [ + "collection" + ] + } + } + ] + }, + "chatsum": { + "name": "chatsum", + "display_name": "Chat Summary", + "description": "Query and Summarize chat messages with LLM. 
by [mcpso](https://mcp.so/)", + "repository": { + "type": "git", + "url": "https://github.com/mcpso/mcp-server-chatsum" + }, + "homepage": "https://github.com/mcpso/mcp-server-chatsum", + "author": { + "name": "idoubi", + "url": "https://bento.me/idoubi" + }, + "license": "MIT", + "categories": [ + "Messaging" + ], + "tags": [ + "chat", + "summary" + ], + "examples": [ + { + "title": "Summarize Chat Messages", + "description": "Use this prompt to summarize chat messages based on given parameters.", + "prompt": "Summarize these messages: [...]" + } + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/mcpso/mcp-server-chatsum" + ], + "env": { + "CHAT_DB_PATH": "path-to/mcp-server-chatsum/chatbot/data/chat.db" + } + } + }, + "arguments": { + "CHAT_DB_PATH": { + "description": "Path to your chat database file that the server will use to store and retrieve chat messages.", + "required": true, + "example": "path-to/mcp-server-chatsum/chatbot/data/chat.db" + } + }, + "tools": [ + { + "name": "query_chat_messages", + "description": "query chat messages with given parameters", + "inputSchema": { + "type": "object", + "properties": { + "room_names": { + "type": "array", + "description": "chat room names", + "items": { + "type": "string", + "description": "chat room name" + } + }, + "talker_names": { + "type": "array", + "description": "talker names", + "items": { + "type": "string", + "description": "talker name" + } + }, + "limit": { + "type": "number", + "description": "chat messages limit", + "default": 100 + } + }, + "required": [] + } + } + ] + }, + "descope": { + "name": "descope", + "display_name": "Descope", + "description": "An MCP server to integrate with [Descope](https://descope.com/) to search audit logs, manage users, and more.", + "repository": { + "type": "git", + "url": "https://github.com/descope-sample-apps/descope-mcp-server" + }, + "homepage": 
"https://github.com/descope-sample-apps/descope-mcp-server", + "author": { + "name": "Descope", + "url": "https://descope.com" + }, + "license": "MIT", + "categories": [ + "System Tools" + ], + "tags": [ + "Descope", + "API", + "Server" + ], + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "https://github.com/descope-sample-apps/descope-mcp-server" + ], + "env": { + "DESCOPE_PROJECT_ID": "${DESCOPE_PROJECT_ID}", + "DESCOPE_MANAGEMENT_KEY": "${DESCOPE_MANAGEMENT_KEY}" + } + } + }, + "arguments": { + "DESCOPE_PROJECT_ID": { + "description": "Your Descope Project ID", + "required": true, + "example": "12345-abcde-67890-fghij" + }, + "DESCOPE_MANAGEMENT_KEY": { + "description": "Your Descope Management Key", + "required": true, + "example": "sk_test_4eC39HqLyjEDERyCzKZQz9fgo" + } + } + }, + "integration-app": { + "display_name": "Integration App MCP Server", + "repository": { + "type": "git", + "url": "https://github.com/integration-app/mcp-server" + }, + "license": "[NOT GIVEN]", + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@integration-app/mcp-server" + ], + "package": "@integration-app/mcp-server", + "env": { + "INTEGRATION_APP_TOKEN": "", + "INTEGRATION_KEY": "" + } + } + }, + "arguments": { + "INTEGRATION_APP_TOKEN": { + "description": "Token for accessing Integration App API", + "required": true, + "example": "your-integration-app-token" + }, + "INTEGRATION_KEY": { + "description": "Key of the integration you want to use tools for", + "required": true, + "example": "your-integration-key" + } + }, + "homepage": "https://integration.app", + "author": { + "name": "integration-app" + }, + "tags": [ + "integration", + "tools", + "mcp" + ], + "name": "mcp-server", + "description": "This is an implementation of the [Model Context Protocol (MCP) server](https://modelcontextprotocol.org/) that exposes tools powered by [Integration App](https://integration.app).", + "categories": [ + 
"MCP Tools" + ], + "is_official": true + }, + "mcp-jetbrains": { + "display_name": "JetBrains MCP Proxy Server", + "repository": { + "type": "git", + "url": "https://github.com/JetBrains/mcp-jetbrains" + }, + "license": "Apache-2.0", + "installations": { + "npm": { + "type": "npm", + "command": "npx", + "args": [ + "-y", + "@jetbrains/mcp-proxy" + ], + "package": "@jetbrains/mcp-proxy", + "description": "Install via npm package", + "recommended": true + } + }, + "homepage": "https://github.com/JetBrains/mcp-jetbrains", + "author": { + "name": "JetBrains" + }, + "tags": [ + "jetbrains", + "ide", + "proxy" + ], + "arguments": { + "IDE_PORT": { + "description": "Port of IDE's built-in webserver", + "required": false, + "example": "" + }, + "HOST": { + "description": "Host/address of IDE's built-in webserver (defaults to 127.0.0.1)", + "required": false, + "example": "" + }, + "LOG_ENABLED": { + "description": "Enable logging", + "required": false, + "example": "true" + } + }, + "name": "mcp-jetbrains", + "description": "The server proxies requests from client to JetBrains IDE.", + "categories": [ + "Dev Tools" + ], + "tools": [], + "prompts": [], + "resources": [], + "is_official": true + } +} \ No newline at end of file diff --git a/src/config/index.ts b/src/config/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..cfe0c455e194979993795c22893974b202578f84 --- /dev/null +++ b/src/config/index.ts @@ -0,0 +1,67 @@ +import dotenv from 'dotenv'; +import fs from 'fs'; +import { McpSettings } from '../types/index.js'; +import { getConfigFilePath } from '../utils/path.js'; +import { getPackageVersion } from '../utils/version.js'; + +dotenv.config(); + +const defaultConfig = { + port: process.env.PORT || 3000, + initTimeout: process.env.INIT_TIMEOUT || 300000, + timeout: process.env.REQUEST_TIMEOUT || 60000, + basePath: process.env.BASE_PATH || '', + mcpHubName: 'mcphub', + mcpHubVersion: getPackageVersion(), +}; + +export const getSettingsPath = 
(): string => { + return getConfigFilePath('mcp_settings.json', 'Settings'); +}; + +export const loadSettings = (): McpSettings => { + const settingsPath = getSettingsPath(); + try { + const settingsData = fs.readFileSync(settingsPath, 'utf8'); + return JSON.parse(settingsData); + } catch (error) { + console.error(`Failed to load settings from ${settingsPath}:`, error); + return { mcpServers: {}, users: [] }; + } +}; + +export const saveSettings = (settings: McpSettings): boolean => { + const settingsPath = getSettingsPath(); + try { + fs.writeFileSync(settingsPath, JSON.stringify(settings, null, 2), 'utf8'); + return true; + } catch (error) { + console.error(`Failed to save settings to ${settingsPath}:`, error); + return false; + } +}; + +export const replaceEnvVars = (env: Record): Record => { + const res: Record = {}; + for (const [key, value] of Object.entries(env)) { + if (typeof value === 'string') { + res[key] = expandEnvVars(value); + } else { + res[key] = String(value); + } + } + return res; +}; + +export const expandEnvVars = (value: string): string => { + if (typeof value !== 'string') { + return String(value); + } + // Replace ${VAR} format + let result = value.replace(/\$\{([^}]+)\}/g, (_, key) => process.env[key] || ''); + // Also replace $VAR format (common on Unix-like systems) + result = result.replace(/\$([A-Z_][A-Z0-9_]*)/g, (_, key) => process.env[key] || ''); + return result; +}; + +export default defaultConfig; diff --git a/src/controllers/authController.ts b/src/controllers/authController.ts new file mode 100644 index 0000000000000000000000000000000000000000..fc86bf43991dca66e2c77d19761b2f9f8d167ac0 --- /dev/null +++ b/src/controllers/authController.ts @@ -0,0 +1,179 @@ +import { Request, Response } from 'express'; +import jwt from 'jsonwebtoken'; +import { validationResult } from 'express-validator'; +import { findUserByUsername, verifyPassword, createUser, updateUserPassword } from '../models/User.js'; + +// Default secret key - in 
production, use an environment variable +const JWT_SECRET = process.env.JWT_SECRET || 'your-secret-key-change-this'; +const TOKEN_EXPIRY = '24h'; + +// Login user +export const login = async (req: Request, res: Response): Promise => { + // Validate request + const errors = validationResult(req); + if (!errors.isEmpty()) { + res.status(400).json({ success: false, errors: errors.array() }); + return; + } + + const { username, password } = req.body; + + try { + // Find user by username + const user = findUserByUsername(username); + + if (!user) { + res.status(401).json({ success: false, message: 'Invalid credentials' }); + return; + } + + // Verify password + const isPasswordValid = await verifyPassword(password, user.password); + + if (!isPasswordValid) { + res.status(401).json({ success: false, message: 'Invalid credentials' }); + return; + } + + // Generate JWT token + const payload = { + user: { + username: user.username, + isAdmin: user.isAdmin || false + } + }; + + jwt.sign( + payload, + JWT_SECRET, + { expiresIn: TOKEN_EXPIRY }, + (err, token) => { + if (err) throw err; + res.json({ + success: true, + token, + user: { + username: user.username, + isAdmin: user.isAdmin + } + }); + } + ); + } catch (error) { + console.error('Login error:', error); + res.status(500).json({ success: false, message: 'Server error' }); + } +}; + +// Register new user +export const register = async (req: Request, res: Response): Promise => { + // Validate request + const errors = validationResult(req); + if (!errors.isEmpty()) { + res.status(400).json({ success: false, errors: errors.array() }); + return; + } + + const { username, password, isAdmin } = req.body; + + try { + // Create new user + const newUser = await createUser({ username, password, isAdmin }); + + if (!newUser) { + res.status(400).json({ success: false, message: 'User already exists' }); + return; + } + + // Generate JWT token + const payload = { + user: { + username: newUser.username, + isAdmin: newUser.isAdmin || 
false + } + }; + + jwt.sign( + payload, + JWT_SECRET, + { expiresIn: TOKEN_EXPIRY }, + (err, token) => { + if (err) throw err; + res.json({ + success: true, + token, + user: { + username: newUser.username, + isAdmin: newUser.isAdmin + } + }); + } + ); + } catch (error) { + console.error('Registration error:', error); + res.status(500).json({ success: false, message: 'Server error' }); + } +}; + +// Get current user +export const getCurrentUser = (req: Request, res: Response): void => { + try { + // User is already attached to request by auth middleware + const user = (req as any).user; + + res.json({ + success: true, + user: { + username: user.username, + isAdmin: user.isAdmin + } + }); + } catch (error) { + console.error('Get current user error:', error); + res.status(500).json({ success: false, message: 'Server error' }); + } +}; + +// Change password +export const changePassword = async (req: Request, res: Response): Promise => { + // Validate request + const errors = validationResult(req); + if (!errors.isEmpty()) { + res.status(400).json({ success: false, errors: errors.array() }); + return; + } + + const { currentPassword, newPassword } = req.body; + const username = (req as any).user.username; + + try { + // Find user by username + const user = findUserByUsername(username); + + if (!user) { + res.status(404).json({ success: false, message: 'User not found' }); + return; + } + + // Verify current password + const isPasswordValid = await verifyPassword(currentPassword, user.password); + + if (!isPasswordValid) { + res.status(401).json({ success: false, message: 'Current password is incorrect' }); + return; + } + + // Update the password + const updated = await updateUserPassword(username, newPassword); + + if (!updated) { + res.status(500).json({ success: false, message: 'Failed to update password' }); + return; + } + + res.json({ success: true, message: 'Password updated successfully' }); + } catch (error) { + console.error('Change password error:', error); + 
res.status(500).json({ success: false, message: 'Server error' }); + } +}; \ No newline at end of file diff --git a/src/controllers/configController.ts b/src/controllers/configController.ts new file mode 100644 index 0000000000000000000000000000000000000000..b7a1331eab6c177a9b4582677ef536a20e8cee9b --- /dev/null +++ b/src/controllers/configController.ts @@ -0,0 +1,30 @@ +import { Request, Response } from 'express'; +import config from '../config/index.js'; + +/** + * Get runtime configuration for frontend + */ +export const getRuntimeConfig = (req: Request, res: Response): void => { + try { + const runtimeConfig = { + basePath: config.basePath, + version: config.mcpHubVersion, + name: config.mcpHubName, + }; + + res.setHeader('Cache-Control', 'no-cache, no-store, must-revalidate'); + res.setHeader('Pragma', 'no-cache'); + res.setHeader('Expires', '0'); + + res.json({ + success: true, + data: runtimeConfig, + }); + } catch (error) { + console.error('Error getting runtime config:', error); + res.status(500).json({ + success: false, + message: 'Failed to get runtime configuration', + }); + } +}; diff --git a/src/controllers/groupController.ts b/src/controllers/groupController.ts new file mode 100644 index 0000000000000000000000000000000000000000..6954df5c1a57c9546271b3f4b0235a6c4619b457 --- /dev/null +++ b/src/controllers/groupController.ts @@ -0,0 +1,341 @@ +import { Request, Response } from 'express'; +import { ApiResponse } from '../types/index.js'; +import { + getAllGroups, + getGroupByIdOrName, + createGroup, + updateGroup, + updateGroupServers, + deleteGroup, + addServerToGroup, + removeServerFromGroup, + getServersInGroup +} from '../services/groupService.js'; + +// Get all groups +export const getGroups = (_: Request, res: Response): void => { + try { + const groups = getAllGroups(); + const response: ApiResponse = { + success: true, + data: groups, + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to 
get groups information', + }); + } +}; + +// Get a specific group by ID +export const getGroup = (req: Request, res: Response): void => { + try { + const { id } = req.params; + if (!id) { + res.status(400).json({ + success: false, + message: 'Group ID is required', + }); + return; + } + + const group = getGroupByIdOrName(id); + if (!group) { + res.status(404).json({ + success: false, + message: 'Group not found', + }); + return; + } + + const response: ApiResponse = { + success: true, + data: group, + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to get group information', + }); + } +}; + +// Create a new group +export const createNewGroup = (req: Request, res: Response): void => { + try { + const { name, description, servers } = req.body; + if (!name) { + res.status(400).json({ + success: false, + message: 'Group name is required', + }); + return; + } + + const serverList = Array.isArray(servers) ? servers : []; + const newGroup = createGroup(name, description, serverList); + if (!newGroup) { + res.status(400).json({ + success: false, + message: 'Failed to create group or group name already exists', + }); + return; + } + + const response: ApiResponse = { + success: true, + data: newGroup, + message: 'Group created successfully', + }; + res.status(201).json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Internal server error', + }); + } +}; + +// Update an existing group +export const updateExistingGroup = (req: Request, res: Response): void => { + try { + const { id } = req.params; + const { name, description, servers } = req.body; + if (!id) { + res.status(400).json({ + success: false, + message: 'Group ID is required', + }); + return; + } + + // Allow updating servers along with other fields + const updateData: any = {}; + if (name !== undefined) updateData.name = name; + if (description !== undefined) updateData.description = description; + if (servers !== 
undefined) updateData.servers = servers; + + if (Object.keys(updateData).length === 0) { + res.status(400).json({ + success: false, + message: 'At least one field (name, description, or servers) is required to update', + }); + return; + } + + const updatedGroup = updateGroup(id, updateData); + if (!updatedGroup) { + res.status(404).json({ + success: false, + message: 'Group not found or name already exists', + }); + return; + } + + const response: ApiResponse = { + success: true, + data: updatedGroup, + message: 'Group updated successfully', + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Internal server error', + }); + } +}; + +// Update servers in a group (batch update) +export const updateGroupServersBatch = (req: Request, res: Response): void => { + try { + const { id } = req.params; + const { servers } = req.body; + + if (!id) { + res.status(400).json({ + success: false, + message: 'Group ID is required', + }); + return; + } + + if (!Array.isArray(servers)) { + res.status(400).json({ + success: false, + message: 'Servers must be an array of server names', + }); + return; + } + + const updatedGroup = updateGroupServers(id, servers); + if (!updatedGroup) { + res.status(404).json({ + success: false, + message: 'Group not found', + }); + return; + } + + const response: ApiResponse = { + success: true, + data: updatedGroup, + message: 'Group servers updated successfully', + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Internal server error', + }); + } +}; + +// Delete a group +export const deleteExistingGroup = (req: Request, res: Response): void => { + try { + const { id } = req.params; + if (!id) { + res.status(400).json({ + success: false, + message: 'Group ID is required', + }); + return; + } + + const success = deleteGroup(id); + if (!success) { + res.status(404).json({ + success: false, + message: 'Group not found or failed to delete', + }); + return; 
+ } + + res.json({ + success: true, + message: 'Group deleted successfully', + }); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Internal server error', + }); + } +}; + +// Add server to a group +export const addServerToExistingGroup = (req: Request, res: Response): void => { + try { + const { id } = req.params; + const { serverName } = req.body; + if (!id) { + res.status(400).json({ + success: false, + message: 'Group ID is required', + }); + return; + } + + if (!serverName) { + res.status(400).json({ + success: false, + message: 'Server name is required', + }); + return; + } + + const updatedGroup = addServerToGroup(id, serverName); + if (!updatedGroup) { + res.status(404).json({ + success: false, + message: 'Group or server not found', + }); + return; + } + + const response: ApiResponse = { + success: true, + data: updatedGroup, + message: 'Server added to group successfully', + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Internal server error', + }); + } +}; + +// Remove server from a group +export const removeServerFromExistingGroup = (req: Request, res: Response): void => { + try { + const { id, serverName } = req.params; + if (!id || !serverName) { + res.status(400).json({ + success: false, + message: 'Group ID and server name are required', + }); + return; + } + + const updatedGroup = removeServerFromGroup(id, serverName); + if (!updatedGroup) { + res.status(404).json({ + success: false, + message: 'Group not found', + }); + return; + } + + const response: ApiResponse = { + success: true, + data: updatedGroup, + message: 'Server removed from group successfully', + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Internal server error', + }); + } +}; + +// Get servers in a group +export const getGroupServers = (req: Request, res: Response): void => { + try { + const { id } = req.params; + if (!id) { + res.status(400).json({ 
+ success: false, + message: 'Group ID is required', + }); + return; + } + + const group = getGroupByIdOrName(id); + if (!group) { + res.status(404).json({ + success: false, + message: 'Group not found', + }); + return; + } + + const response: ApiResponse = { + success: true, + data: group.servers, + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to get group servers', + }); + } +}; \ No newline at end of file diff --git a/src/controllers/logController.ts b/src/controllers/logController.ts new file mode 100644 index 0000000000000000000000000000000000000000..0b85761271e7efad87cbc2d7021c983164cb5dd0 --- /dev/null +++ b/src/controllers/logController.ts @@ -0,0 +1,55 @@ +// filepath: /Users/sunmeng/code/github/mcphub/src/controllers/logController.ts +import { Request, Response } from 'express'; +import logService from '../services/logService.js'; + +// Get all logs +export const getAllLogs = (req: Request, res: Response): void => { + try { + const logs = logService.getLogs(); + res.json({ success: true, data: logs }); + } catch (error) { + console.error('Error getting logs:', error); + res.status(500).json({ success: false, error: 'Error getting logs' }); + } +}; + +// Clear all logs +export const clearLogs = (req: Request, res: Response): void => { + try { + logService.clearLogs(); + res.json({ success: true, message: 'Logs cleared successfully' }); + } catch (error) { + console.error('Error clearing logs:', error); + res.status(500).json({ success: false, error: 'Error clearing logs' }); + } +}; + +// Stream logs via SSE +export const streamLogs = (req: Request, res: Response): void => { + try { + // Set headers for SSE + res.writeHead(200, { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive' + }); + + // Send initial data + const logs = logService.getLogs(); + res.write(`data: ${JSON.stringify({ type: 'initial', logs })}\n\n`); + + // Subscribe to log events + 
const unsubscribe = logService.subscribe((log) => { + res.write(`data: ${JSON.stringify({ type: 'log', log })}\n\n`); + }); + + // Handle client disconnect + req.on('close', () => { + unsubscribe(); + console.log('Client disconnected from log stream'); + }); + } catch (error) { + console.error('Error streaming logs:', error); + res.status(500).json({ success: false, error: 'Error streaming logs' }); + } +}; \ No newline at end of file diff --git a/src/controllers/marketController.ts b/src/controllers/marketController.ts new file mode 100644 index 0000000000000000000000000000000000000000..b80fb0c785b9318336852995198c241c116ce6b4 --- /dev/null +++ b/src/controllers/marketController.ts @@ -0,0 +1,154 @@ +import { Request, Response } from 'express'; +import { ApiResponse } from '../types/index.js'; +import { + getMarketServers, + getMarketServerByName, + getMarketCategories, + getMarketTags, + searchMarketServers, + filterMarketServersByCategory, + filterMarketServersByTag +} from '../services/marketService.js'; + +// Get all market servers +export const getAllMarketServers = (_: Request, res: Response): void => { + try { + const marketServers = Object.values(getMarketServers()); + const response: ApiResponse = { + success: true, + data: marketServers, + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to get market servers information', + }); + } +}; + +// Get a specific market server by name +export const getMarketServer = (req: Request, res: Response): void => { + try { + const { name } = req.params; + if (!name) { + res.status(400).json({ + success: false, + message: 'Server name is required', + }); + return; + } + + const server = getMarketServerByName(name); + if (!server) { + res.status(404).json({ + success: false, + message: 'Market server not found', + }); + return; + } + + const response: ApiResponse = { + success: true, + data: server, + }; + res.json(response); + } catch (error) { + 
res.status(500).json({ + success: false, + message: 'Failed to get market server information', + }); + } +}; + +// Get all market categories +export const getAllMarketCategories = (_: Request, res: Response): void => { + try { + const categories = getMarketCategories(); + const response: ApiResponse = { + success: true, + data: categories, + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to get market categories', + }); + } +}; + +// Get all market tags +export const getAllMarketTags = (_: Request, res: Response): void => { + try { + const tags = getMarketTags(); + const response: ApiResponse = { + success: true, + data: tags, + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to get market tags', + }); + } +}; + +// Search market servers +export const searchMarketServersByQuery = (req: Request, res: Response): void => { + try { + const { query } = req.query; + const searchQuery = typeof query === 'string' ? 
query : ''; + + const servers = searchMarketServers(searchQuery); + const response: ApiResponse = { + success: true, + data: servers, + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to search market servers', + }); + } +}; + +// Filter market servers by category +export const getMarketServersByCategory = (req: Request, res: Response): void => { + try { + const { category } = req.params; + + const servers = filterMarketServersByCategory(category); + const response: ApiResponse = { + success: true, + data: servers, + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to filter market servers by category', + }); + } +}; + +// Filter market servers by tag +export const getMarketServersByTag = (req: Request, res: Response): void => { + try { + const { tag } = req.params; + + const servers = filterMarketServersByTag(tag); + const response: ApiResponse = { + success: true, + data: servers, + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to filter market servers by tag', + }); + } +}; \ No newline at end of file diff --git a/src/controllers/serverController.ts b/src/controllers/serverController.ts new file mode 100644 index 0000000000000000000000000000000000000000..081041b6f9dde86bb4b32d68a95b0983ecfb88a7 --- /dev/null +++ b/src/controllers/serverController.ts @@ -0,0 +1,506 @@ +import { Request, Response } from 'express'; +import { ApiResponse, AddServerRequest } from '../types/index.js'; +import { + getServersInfo, + addServer, + removeServer, + updateMcpServer, + notifyToolChanged, + toggleServerStatus, +} from '../services/mcpService.js'; +import { loadSettings, saveSettings } from '../config/index.js'; +import { syncAllServerToolsEmbeddings } from '../services/vectorSearchService.js'; + +export const getAllServers = (_: Request, res: Response): void => { + try { + const serversInfo = 
getServersInfo(); + const response: ApiResponse = { + success: true, + data: serversInfo, + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to get servers information', + }); + } +}; + +export const getAllSettings = (_: Request, res: Response): void => { + try { + const settings = loadSettings(); + const response: ApiResponse = { + success: true, + data: settings, + }; + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to get server settings', + }); + } +}; + +export const createServer = async (req: Request, res: Response): Promise => { + try { + const { name, config } = req.body as AddServerRequest; + if (!name || typeof name !== 'string') { + res.status(400).json({ + success: false, + message: 'Server name is required', + }); + return; + } + + if (!config || typeof config !== 'object') { + res.status(400).json({ + success: false, + message: 'Server configuration is required', + }); + return; + } + + if (!config.url && (!config.command || !config.args)) { + res.status(400).json({ + success: false, + message: 'Server configuration must include either a URL or command with arguments', + }); + return; + } + + // Validate the server type if specified + if (config.type && !['stdio', 'sse', 'streamable-http'].includes(config.type)) { + res.status(400).json({ + success: false, + message: 'Server type must be one of: stdio, sse, streamable-http', + }); + return; + } + + // Validate that URL is provided for sse and streamable-http types + if ((config.type === 'sse' || config.type === 'streamable-http') && !config.url) { + res.status(400).json({ + success: false, + message: `URL is required for ${config.type} server type`, + }); + return; + } + + // Validate headers if provided + if (config.headers && typeof config.headers !== 'object') { + res.status(400).json({ + success: false, + message: 'Headers must be an object', + }); + return; + } + + // Validate that 
headers are only used with sse and streamable-http types + if (config.headers && config.type === 'stdio') { + res.status(400).json({ + success: false, + message: 'Headers are not supported for stdio server type', + }); + return; + } + + const result = await addServer(name, config); + if (result.success) { + notifyToolChanged(); + res.json({ + success: true, + message: 'Server added successfully', + }); + } else { + res.status(400).json({ + success: false, + message: result.message || 'Failed to add server', + }); + } + } catch (error) { + res.status(500).json({ + success: false, + message: 'Internal server error', + }); + } +}; + +export const deleteServer = async (req: Request, res: Response): Promise => { + try { + const { name } = req.params; + if (!name) { + res.status(400).json({ + success: false, + message: 'Server name is required', + }); + return; + } + + const result = removeServer(name); + if (result.success) { + notifyToolChanged(); + res.json({ + success: true, + message: 'Server removed successfully', + }); + } else { + res.status(404).json({ + success: false, + message: result.message || 'Server not found or failed to remove', + }); + } + } catch (error) { + res.status(500).json({ + success: false, + message: 'Internal server error', + }); + } +}; + +export const updateServer = async (req: Request, res: Response): Promise => { + try { + const { name } = req.params; + const { config } = req.body; + if (!name) { + res.status(400).json({ + success: false, + message: 'Server name is required', + }); + return; + } + + if (!config || typeof config !== 'object') { + res.status(400).json({ + success: false, + message: 'Server configuration is required', + }); + return; + } + + if (!config.url && (!config.command || !config.args)) { + res.status(400).json({ + success: false, + message: 'Server configuration must include either a URL or command with arguments', + }); + return; + } + + // Validate the server type if specified + if (config.type && !['stdio', 
'sse', 'streamable-http'].includes(config.type)) { + res.status(400).json({ + success: false, + message: 'Server type must be one of: stdio, sse, streamable-http', + }); + return; + } + + // Validate that URL is provided for sse and streamable-http types + if ((config.type === 'sse' || config.type === 'streamable-http') && !config.url) { + res.status(400).json({ + success: false, + message: `URL is required for ${config.type} server type`, + }); + return; + } + + // Validate headers if provided + if (config.headers && typeof config.headers !== 'object') { + res.status(400).json({ + success: false, + message: 'Headers must be an object', + }); + return; + } + + // Validate that headers are only used with sse and streamable-http types + if (config.headers && config.type === 'stdio') { + res.status(400).json({ + success: false, + message: 'Headers are not supported for stdio server type', + }); + return; + } + + const result = await updateMcpServer(name, config); + if (result.success) { + notifyToolChanged(); + res.json({ + success: true, + message: 'Server updated successfully', + }); + } else { + res.status(404).json({ + success: false, + message: result.message || 'Server not found or failed to update', + }); + } + } catch (error) { + res.status(500).json({ + success: false, + message: 'Internal server error', + }); + } +}; + +export const getServerConfig = (req: Request, res: Response): void => { + try { + const { name } = req.params; + const settings = loadSettings(); + if (!settings.mcpServers || !settings.mcpServers[name]) { + res.status(404).json({ + success: false, + message: 'Server not found', + }); + return; + } + + const serverInfo = getServersInfo().find((s) => s.name === name); + const serverConfig = settings.mcpServers[name]; + const response: ApiResponse = { + success: true, + data: { + name, + status: serverInfo ? serverInfo.status : 'disconnected', + tools: serverInfo ? 
serverInfo.tools : [], + config: serverConfig, + }, + }; + + res.json(response); + } catch (error) { + res.status(500).json({ + success: false, + message: 'Failed to get server configuration', + }); + } +}; + +export const toggleServer = async (req: Request, res: Response): Promise => { + try { + const { name } = req.params; + const { enabled } = req.body; + if (!name) { + res.status(400).json({ + success: false, + message: 'Server name is required', + }); + return; + } + + if (typeof enabled !== 'boolean') { + res.status(400).json({ + success: false, + message: 'Enabled status must be a boolean', + }); + return; + } + + const result = await toggleServerStatus(name, enabled); + if (result.success) { + notifyToolChanged(); + res.json({ + success: true, + message: result.message || `Server ${enabled ? 'enabled' : 'disabled'} successfully`, + }); + } else { + res.status(404).json({ + success: false, + message: result.message || 'Server not found or failed to toggle status', + }); + } + } catch (error) { + res.status(500).json({ + success: false, + message: 'Internal server error', + }); + } +}; + +export const updateSystemConfig = (req: Request, res: Response): void => { + try { + const { routing, install, smartRouting } = req.body; + + if ( + (!routing || + (typeof routing.enableGlobalRoute !== 'boolean' && + typeof routing.enableGroupNameRoute !== 'boolean' && + typeof routing.enableBearerAuth !== 'boolean' && + typeof routing.bearerAuthKey !== 'string')) && + (!install || + (typeof install.pythonIndexUrl !== 'string' && typeof install.npmRegistry !== 'string')) && + (!smartRouting || + (typeof smartRouting.enabled !== 'boolean' && + typeof smartRouting.dbUrl !== 'string' && + typeof smartRouting.openaiApiBaseUrl !== 'string' && + typeof smartRouting.openaiApiKey !== 'string' && + typeof smartRouting.openaiApiEmbeddingModel !== 'string')) + ) { + res.status(400).json({ + success: false, + message: 'Invalid system configuration provided', + }); + return; + } + + 
const settings = loadSettings(); + if (!settings.systemConfig) { + settings.systemConfig = { + routing: { + enableGlobalRoute: true, + enableGroupNameRoute: true, + enableBearerAuth: false, + bearerAuthKey: '', + }, + install: { + pythonIndexUrl: '', + npmRegistry: '', + }, + smartRouting: { + enabled: false, + dbUrl: '', + openaiApiBaseUrl: '', + openaiApiKey: '', + openaiApiEmbeddingModel: '', + }, + }; + } + + if (!settings.systemConfig.routing) { + settings.systemConfig.routing = { + enableGlobalRoute: true, + enableGroupNameRoute: true, + enableBearerAuth: false, + bearerAuthKey: '', + }; + } + + if (!settings.systemConfig.install) { + settings.systemConfig.install = { + pythonIndexUrl: '', + npmRegistry: '', + }; + } + + if (!settings.systemConfig.smartRouting) { + settings.systemConfig.smartRouting = { + enabled: false, + dbUrl: '', + openaiApiBaseUrl: '', + openaiApiKey: '', + openaiApiEmbeddingModel: '', + }; + } + + if (routing) { + if (typeof routing.enableGlobalRoute === 'boolean') { + settings.systemConfig.routing.enableGlobalRoute = routing.enableGlobalRoute; + } + + if (typeof routing.enableGroupNameRoute === 'boolean') { + settings.systemConfig.routing.enableGroupNameRoute = routing.enableGroupNameRoute; + } + + if (typeof routing.enableBearerAuth === 'boolean') { + settings.systemConfig.routing.enableBearerAuth = routing.enableBearerAuth; + } + + if (typeof routing.bearerAuthKey === 'string') { + settings.systemConfig.routing.bearerAuthKey = routing.bearerAuthKey; + } + } + + if (install) { + if (typeof install.pythonIndexUrl === 'string') { + settings.systemConfig.install.pythonIndexUrl = install.pythonIndexUrl; + } + if (typeof install.npmRegistry === 'string') { + settings.systemConfig.install.npmRegistry = install.npmRegistry; + } + } + + // Track smartRouting state and configuration changes + const wasSmartRoutingEnabled = settings.systemConfig.smartRouting.enabled || false; + const previousSmartRoutingConfig = { 
...settings.systemConfig.smartRouting }; + let needsSync = false; + + if (smartRouting) { + if (typeof smartRouting.enabled === 'boolean') { + // If enabling Smart Routing, validate required fields + if (smartRouting.enabled) { + const currentDbUrl = smartRouting.dbUrl || settings.systemConfig.smartRouting.dbUrl; + const currentOpenaiApiKey = + smartRouting.openaiApiKey || settings.systemConfig.smartRouting.openaiApiKey; + + if (!currentDbUrl || !currentOpenaiApiKey) { + const missingFields = []; + if (!currentDbUrl) missingFields.push('Database URL'); + if (!currentOpenaiApiKey) missingFields.push('OpenAI API Key'); + + res.status(400).json({ + success: false, + message: `Smart Routing requires the following fields: ${missingFields.join(', ')}`, + }); + return; + } + } + settings.systemConfig.smartRouting.enabled = smartRouting.enabled; + } + if (typeof smartRouting.dbUrl === 'string') { + settings.systemConfig.smartRouting.dbUrl = smartRouting.dbUrl; + } + if (typeof smartRouting.openaiApiBaseUrl === 'string') { + settings.systemConfig.smartRouting.openaiApiBaseUrl = smartRouting.openaiApiBaseUrl; + } + if (typeof smartRouting.openaiApiKey === 'string') { + settings.systemConfig.smartRouting.openaiApiKey = smartRouting.openaiApiKey; + } + if (typeof smartRouting.openaiApiEmbeddingModel === 'string') { + settings.systemConfig.smartRouting.openaiApiEmbeddingModel = + smartRouting.openaiApiEmbeddingModel; + } + + // Check if we need to sync embeddings + const isNowEnabled = settings.systemConfig.smartRouting.enabled || false; + const hasConfigChanged = + previousSmartRoutingConfig.dbUrl !== settings.systemConfig.smartRouting.dbUrl || + previousSmartRoutingConfig.openaiApiBaseUrl !== + settings.systemConfig.smartRouting.openaiApiBaseUrl || + previousSmartRoutingConfig.openaiApiKey !== + settings.systemConfig.smartRouting.openaiApiKey || + previousSmartRoutingConfig.openaiApiEmbeddingModel !== + settings.systemConfig.smartRouting.openaiApiEmbeddingModel; + + // Sync 
if: first time enabling OR smart routing is enabled and any config changed + needsSync = (!wasSmartRoutingEnabled && isNowEnabled) || (isNowEnabled && hasConfigChanged); + } + + if (saveSettings(settings)) { + res.json({ + success: true, + data: settings.systemConfig, + message: 'System configuration updated successfully', + }); + + // If smart routing configuration changed, sync all existing server tools + if (needsSync) { + console.log('SmartRouting configuration changed - syncing all existing server tools...'); + // Run sync asynchronously to avoid blocking the response + syncAllServerToolsEmbeddings().catch((error) => { + console.error('Failed to sync server tools embeddings:', error); + }); + } + } else { + res.status(500).json({ + success: false, + message: 'Failed to save system configuration', + }); + } + } catch (error) { + res.status(500).json({ + success: false, + message: 'Internal server error', + }); + } +}; diff --git a/src/controllers/toolController.ts b/src/controllers/toolController.ts new file mode 100644 index 0000000000000000000000000000000000000000..005f18d6fc042cd462ab06b619cb4a52516e7ee7 --- /dev/null +++ b/src/controllers/toolController.ts @@ -0,0 +1,86 @@ +import { Request, Response } from 'express'; +import { ApiResponse } from '../types/index.js'; +import { handleCallToolRequest } from '../services/mcpService.js'; + +/** + * Interface for tool call request + */ +export interface ToolCallRequest { + toolName: string; + arguments?: Record; +} + +/** + * Interface for tool search request + */ +export interface ToolSearchRequest { + query: string; + limit?: number; +} + +/** + * Interface for tool call result + */ +interface ToolCallResult { + content?: Array<{ + type: string; + text?: string; + [key: string]: any; + }>; + isError?: boolean; + [key: string]: any; +} + +/** + * Call a specific tool with given arguments + */ +export const callTool = async (req: Request, res: Response): Promise => { + try { + const { server } = req.params; + 
const { toolName, arguments: toolArgs = {} } = req.body as ToolCallRequest; + + if (!toolName) { + res.status(400).json({ + success: false, + message: 'toolName is required', + }); + return; + } + + // Create a mock request structure for handleCallToolRequest + const mockRequest = { + params: { + name: 'call_tool', + arguments: { + toolName, + arguments: toolArgs, + }, + }, + }; + + const extra = { + sessionId: req.headers['x-session-id'] || 'api-session', + server: server || undefined, + }; + + const result = (await handleCallToolRequest(mockRequest, extra)) as ToolCallResult; + + const response: ApiResponse = { + success: true, + data: { + content: result.content || [], + toolName, + arguments: toolArgs, + }, + }; + + res.json(response); + } catch (error) { + console.error('Error calling tool:', error); + res.status(500).json({ + success: false, + message: 'Failed to call tool', + error: error instanceof Error ? error.message : 'Unknown error occurred', + }); + } +}; diff --git a/src/db/connection.ts b/src/db/connection.ts new file mode 100644 index 0000000000000000000000000000000000000000..175f2620ae6ba27dd1fe5eb01e55387dfeacb9d3 --- /dev/null +++ b/src/db/connection.ts @@ -0,0 +1,342 @@ +import 'reflect-metadata'; // Ensure reflect-metadata is imported here too +import { DataSource, DataSourceOptions } from 'typeorm'; +import entities from './entities/index.js'; +import { registerPostgresVectorType } from './types/postgresVectorType.js'; +import { VectorEmbeddingSubscriber } from './subscribers/VectorEmbeddingSubscriber.js'; +import { getSmartRoutingConfig } from '../utils/smartRouting.js'; + +// Helper function to create required PostgreSQL extensions +const createRequiredExtensions = async (dataSource: DataSource): Promise => { + try { + await dataSource.query('CREATE EXTENSION IF NOT EXISTS "uuid-ossp";'); + console.log('UUID extension created or already exists.'); + } catch (err: any) { + console.warn('Failed to create uuid-ossp extension:', err.message); + 
console.warn('UUID generation functionality may not be available.'); + } + + try { + await dataSource.query('CREATE EXTENSION IF NOT EXISTS vector;'); + console.log('Vector extension created or already exists.'); + } catch (err: any) { + console.warn('Failed to create vector extension:', err.message); + console.warn('Vector functionality may not be available.'); + } +}; + +// Get database URL from smart routing config or fallback to environment variable +const getDatabaseUrl = (): string => { + return getSmartRoutingConfig().dbUrl; +}; + +// Default database configuration +const defaultConfig: DataSourceOptions = { + type: 'postgres', + url: getDatabaseUrl(), + synchronize: true, + entities: entities, + subscribers: [VectorEmbeddingSubscriber], +}; + +// AppDataSource is the TypeORM data source +let appDataSource = new DataSource(defaultConfig); + +// Global promise to track initialization status +let initializationPromise: Promise | null = null; + +// Function to create a new DataSource with updated configuration +export const updateDataSourceConfig = (): DataSource => { + const newConfig: DataSourceOptions = { + ...defaultConfig, + url: getDatabaseUrl(), + }; + + // If the configuration has changed, we need to create a new DataSource + const currentUrl = (appDataSource.options as any).url; + if (currentUrl !== newConfig.url) { + console.log('Database URL configuration changed, updating DataSource...'); + appDataSource = new DataSource(newConfig); + // Reset initialization promise when configuration changes + initializationPromise = null; + } + + return appDataSource; +}; + +// Get the current AppDataSource instance +export const getAppDataSource = (): DataSource => { + return appDataSource; +}; + +// Reconnect database with updated configuration +export const reconnectDatabase = async (): Promise => { + try { + // Close existing connection if it exists + if (appDataSource.isInitialized) { + console.log('Closing existing database connection...'); + await 
appDataSource.destroy(); + } + + // Reset initialization promise to allow fresh initialization + initializationPromise = null; + + // Update configuration and reconnect + appDataSource = updateDataSourceConfig(); + return await initializeDatabase(); + } catch (error) { + console.error('Error during database reconnection:', error); + throw error; + } +}; + +// Initialize database connection with concurrency control +export const initializeDatabase = async (): Promise => { + // If initialization is already in progress, wait for it to complete + if (initializationPromise) { + console.log('Database initialization already in progress, waiting for completion...'); + return initializationPromise; + } + + // If already initialized, return the existing instance + if (appDataSource.isInitialized) { + console.log('Database already initialized, returning existing instance'); + return Promise.resolve(appDataSource); + } + + // Create a new initialization promise + initializationPromise = performDatabaseInitialization(); + + try { + const result = await initializationPromise; + console.log('Database initialization completed successfully'); + return result; + } catch (error) { + // Reset the promise on error so initialization can be retried + initializationPromise = null; + console.error('Database initialization failed:', error); + throw error; + } +}; + +// Internal function to perform the actual database initialization +const performDatabaseInitialization = async (): Promise => { + try { + // Update configuration before initializing + appDataSource = updateDataSourceConfig(); + + if (!appDataSource.isInitialized) { + console.log('Initializing database connection...'); + // Register the vector type with TypeORM + await appDataSource.initialize(); + registerPostgresVectorType(appDataSource); + + // Create required PostgreSQL extensions + await createRequiredExtensions(appDataSource); + + // Set up vector column and index with a more direct approach + try { + // Check if table 
exists first + const tableExists = await appDataSource.query(` + SELECT EXISTS ( + SELECT FROM information_schema.tables + WHERE table_schema = 'public' + AND table_name = 'vector_embeddings' + ); + `); + + if (tableExists[0].exists) { + // Add pgvector support via raw SQL commands + console.log('Configuring vector support for embeddings table...'); + + // Step 1: Drop any existing index on the column + try { + await appDataSource.query(`DROP INDEX IF EXISTS idx_vector_embeddings_embedding;`); + } catch (dropError: any) { + console.warn('Note: Could not drop existing index:', dropError.message); + } + + // Step 2: Alter column type to vector (if it's not already) + try { + // Check column type first + const columnType = await appDataSource.query(` + SELECT data_type FROM information_schema.columns + WHERE table_schema = 'public' AND table_name = 'vector_embeddings' + AND column_name = 'embedding'; + `); + + if (columnType.length > 0 && columnType[0].data_type !== 'vector') { + await appDataSource.query(` + ALTER TABLE vector_embeddings + ALTER COLUMN embedding TYPE vector USING embedding::vector; + `); + console.log('Vector embedding column type updated successfully.'); + } + } catch (alterError: any) { + console.warn('Could not alter embedding column type:', alterError.message); + console.warn('Will try to recreate the table later.'); + } + + // Step 3: Try to create appropriate indices + try { + // First, let's check if there are any records to determine the dimensions + const records = await appDataSource.query(` + SELECT dimensions FROM vector_embeddings LIMIT 1; + `); + + let dimensions = 1536; // Default to common OpenAI embedding size + if (records && records.length > 0 && records[0].dimensions) { + dimensions = records[0].dimensions; + console.log(`Found vector dimension from existing data: ${dimensions}`); + } else { + console.log(`Using default vector dimension: ${dimensions} (no existing data found)`); + } + + // Set the vector dimensions explicitly only 
if table has data + if (records && records.length > 0) { + await appDataSource.query(` + ALTER TABLE vector_embeddings + ALTER COLUMN embedding TYPE vector(${dimensions}); + `); + + // Now try to create the index + await appDataSource.query(` + CREATE INDEX IF NOT EXISTS idx_vector_embeddings_embedding + ON vector_embeddings USING ivfflat (embedding vector_cosine_ops) WITH (lists = 100); + `); + console.log('Created IVFFlat index for vector similarity search.'); + } else { + console.log( + 'No existing vector data found, skipping index creation - will be handled by vector service.', + ); + } + } catch (indexError: any) { + console.warn('IVFFlat index creation failed:', indexError.message); + console.warn('Trying alternative index type...'); + + try { + // Try HNSW index instead + await appDataSource.query(` + CREATE INDEX IF NOT EXISTS idx_vector_embeddings_embedding + ON vector_embeddings USING hnsw (embedding vector_cosine_ops); + `); + console.log('Created HNSW index for vector similarity search.'); + } catch (hnswError: any) { + // Final fallback to simpler index type + console.warn('HNSW index creation failed too. 
Using simple L2 distance index.'); + + try { + // Create a basic GIN index as last resort + await appDataSource.query(` + CREATE INDEX IF NOT EXISTS idx_vector_embeddings_embedding + ON vector_embeddings USING gin (embedding); + `); + console.log('Created GIN index for basic vector lookups.'); + } catch (ginError: any) { + console.warn('All index creation attempts failed:', ginError.message); + console.warn('Vector search will be slower without an optimized index.'); + } + } + } + } else { + console.log( + 'Vector embeddings table does not exist yet - will configure after schema sync.', + ); + } + } catch (error: any) { + console.warn('Could not set up vector column/index:', error.message); + console.warn('Will attempt again after schema synchronization.'); + } + + console.log('Database connection established successfully.'); + + // Run one final setup check after schema synchronization is done + if (defaultConfig.synchronize) { + try { + console.log('Running final vector configuration check...'); + + // Try setup again with the same code from above + const tableExists = await appDataSource.query(` + SELECT EXISTS ( + SELECT FROM information_schema.tables + WHERE table_schema = 'public' + AND table_name = 'vector_embeddings' + ); + `); + + if (tableExists[0].exists) { + console.log('Vector embeddings table found, checking configuration...'); + + // Get the dimension size first + try { + // Try to get dimensions from an existing record + const records = await appDataSource.query(` + SELECT dimensions FROM vector_embeddings LIMIT 1; + `); + + // Only proceed if we have existing data, otherwise let vector service handle it + if (records && records.length > 0 && records[0].dimensions) { + const dimensions = records[0].dimensions; + console.log(`Found vector dimension from database: ${dimensions}`); + + // Ensure column type is vector with explicit dimensions + await appDataSource.query(` + ALTER TABLE vector_embeddings + ALTER COLUMN embedding TYPE 
vector(${dimensions}); + `); + console.log('Vector embedding column type updated in final check.'); + + // One more attempt at creating the index with dimensions + try { + // Drop existing index if any + await appDataSource.query(` + DROP INDEX IF EXISTS idx_vector_embeddings_embedding; + `); + + // Create new index with proper dimensions + await appDataSource.query(` + CREATE INDEX idx_vector_embeddings_embedding + ON vector_embeddings USING ivfflat (embedding vector_cosine_ops) WITH (lists = 100); + `); + console.log('Created IVFFlat index in final check.'); + } catch (indexError: any) { + console.warn('Final index creation attempt did not succeed:', indexError.message); + console.warn('Using basic lookup without vector index.'); + } + } else { + console.log( + 'No existing vector data found, vector dimensions will be configured by vector service.', + ); + } + } catch (setupError: any) { + console.warn('Vector setup in final check failed:', setupError.message); + } + } + } catch (error: any) { + console.warn('Post-initialization vector setup failed:', error.message); + } + } + } + return appDataSource; + } catch (error) { + console.error('Error during database initialization:', error); + throw error; + } +}; + +// Get database connection status +export const isDatabaseConnected = (): boolean => { + return appDataSource.isInitialized; +}; + +// Close database connection +export const closeDatabase = async (): Promise => { + if (appDataSource.isInitialized) { + await appDataSource.destroy(); + console.log('Database connection closed.'); + } +}; + +// Export AppDataSource for backward compatibility +export const AppDataSource = appDataSource; + +export default getAppDataSource; diff --git a/src/db/entities/VectorEmbedding.ts b/src/db/entities/VectorEmbedding.ts new file mode 100644 index 0000000000000000000000000000000000000000..67f30cbdc966682ec2938965451d216e10c36463 --- /dev/null +++ b/src/db/entities/VectorEmbedding.ts @@ -0,0 +1,46 @@ +import { + Entity, + 
PrimaryGeneratedColumn, + Column, + CreateDateColumn, + UpdateDateColumn, +} from 'typeorm'; + +@Entity({ name: 'vector_embeddings' }) +export class VectorEmbedding { + @PrimaryGeneratedColumn('uuid') + id: string; + + @Column({ type: 'varchar' }) + content_type: string; // 'market_server', 'tool', 'documentation', etc. + + @Column({ type: 'varchar' }) + content_id: string; // Reference ID to the original content + + @Column('text') + text_content: string; // The text that was embedded + + @Column('simple-json') + metadata: Record; // Additional metadata about the embedding + + @Column({ + type: 'float', + array: true, + nullable: true, + }) + embedding: number[]; // The vector embedding - will be converted to vector type after table creation + + @Column({ type: 'int' }) + dimensions: number; // Dimensionality of the embedding vector + + @Column({ type: 'varchar' }) + model: string; // Model used to create the embedding + + @CreateDateColumn({ name: 'created_at', type: 'timestamp' }) + createdAt: Date; + + @UpdateDateColumn({ name: 'updated_at', type: 'timestamp' }) + updatedAt: Date; +} + +export default VectorEmbedding; diff --git a/src/db/entities/index.ts b/src/db/entities/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..2fccf1643611e86422e9647664a67ff27166033c --- /dev/null +++ b/src/db/entities/index.ts @@ -0,0 +1,7 @@ +import { VectorEmbedding } from './VectorEmbedding.js'; + +// Export all entities +export default [VectorEmbedding]; + +// Export individual entities for direct use +export { VectorEmbedding }; diff --git a/src/db/index.ts b/src/db/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..f8bcba796aa51e28093881c748512d17b70ad3be --- /dev/null +++ b/src/db/index.ts @@ -0,0 +1,33 @@ +import { initializeDatabase, closeDatabase, isDatabaseConnected } from './connection.js'; +import * as repositories from './repositories/index.js'; + +/** + * Initialize the database module + */ +export async function 
initializeDbModule(): Promise { + try { + // Connect to the database + await initializeDatabase(); + return true; + } catch (error) { + console.error('Failed to initialize database module:', error); + return false; + } +} + +/** + * Get the repository factory for a database entity type + * @param entityType The type of entity to get a repository for + */ +export function getRepositoryFactory(entityType: 'vectorEmbeddings') { + // Return the appropriate repository based on entity type + switch (entityType) { + case 'vectorEmbeddings': + return () => new repositories.VectorEmbeddingRepository(); + default: + throw new Error(`Unknown entity type: ${entityType}`); + } +} + +// Re-export everything from the database module +export { initializeDatabase, closeDatabase, isDatabaseConnected, repositories }; diff --git a/src/db/repositories/BaseRepository.ts b/src/db/repositories/BaseRepository.ts new file mode 100644 index 0000000000000000000000000000000000000000..5c4e748aaf5e8e559191d33c6f1ad0cc70c2583e --- /dev/null +++ b/src/db/repositories/BaseRepository.ts @@ -0,0 +1,69 @@ +import { Repository, EntityTarget, ObjectLiteral } from 'typeorm'; +import { getAppDataSource } from '../connection.js'; + +/** + * Base repository class with common CRUD operations + */ +export class BaseRepository { + protected readonly repository: Repository; + + constructor(entityClass: EntityTarget) { + this.repository = getAppDataSource().getRepository(entityClass); + } + + /** + * Get repository access + */ + getRepository(): Repository { + return this.repository; + } + + /** + * Find all entities + */ + async findAll(): Promise { + return this.repository.find(); + } + + /** + * Find entity by ID + * @param id Entity ID + */ + async findById(id: string | number): Promise { + return this.repository.findOneBy({ id } as any); + } + + /** + * Save or update an entity + * @param entity Entity to save + */ + async save(entity: Partial): Promise { + return this.repository.save(entity as any); + } + 
+ /** + * Save multiple entities + * @param entities Array of entities to save + */ + async saveMany(entities: Partial[]): Promise { + return this.repository.save(entities as any[]); + } + + /** + * Delete an entity by ID + * @param id Entity ID + */ + async delete(id: string | number): Promise { + const result = await this.repository.delete(id); + return result.affected !== null && result.affected !== undefined && result.affected > 0; + } + + /** + * Count total entities + */ + async count(): Promise { + return this.repository.count(); + } +} + +export default BaseRepository; diff --git a/src/db/repositories/VectorEmbeddingRepository.ts b/src/db/repositories/VectorEmbeddingRepository.ts new file mode 100644 index 0000000000000000000000000000000000000000..a3b01ba99a7a2468e0b6034d147fe5f220a46cda --- /dev/null +++ b/src/db/repositories/VectorEmbeddingRepository.ts @@ -0,0 +1,219 @@ +import { VectorEmbedding } from '../entities/VectorEmbedding.js'; +import BaseRepository from './BaseRepository.js'; +import { getAppDataSource } from '../connection.js'; + +export class VectorEmbeddingRepository extends BaseRepository { + constructor() { + super(VectorEmbedding); + } + + /** + * Find by content type and ID + * @param contentType Content type + * @param contentId Content ID + */ + async findByContentIdentity( + contentType: string, + contentId: string, + ): Promise { + return this.repository.findOneBy({ + content_type: contentType, + content_id: contentId, + }); + } + + /** + * Create or update an embedding for content + * @param contentType Content type + * @param contentId Content ID + * @param textContent Text content to embed + * @param embedding Vector embedding + * @param metadata Additional metadata + * @param model Model used to create the embedding + */ + async saveEmbedding( + contentType: string, + contentId: string, + textContent: string, + embedding: number[], + metadata: Record = {}, + model = 'default', + ): Promise { + // Check if embedding exists + let 
vectorEmbedding = await this.findByContentIdentity(contentType, contentId); + + if (!vectorEmbedding) { + vectorEmbedding = new VectorEmbedding(); + vectorEmbedding.content_type = contentType; + vectorEmbedding.content_id = contentId; + } + + // Update properties + vectorEmbedding.text_content = textContent; + vectorEmbedding.embedding = embedding; + vectorEmbedding.dimensions = embedding.length; + vectorEmbedding.metadata = metadata; + vectorEmbedding.model = model; + + // For raw SQL operations where our subscriber might not be called + // Ensure the embedding is properly formatted for postgres + const rawEmbedding = this.formatEmbeddingForPgVector(embedding); + if (rawEmbedding) { + (vectorEmbedding as any).embedding = rawEmbedding; + } + + return this.save(vectorEmbedding); + } + + /** + * Search for similar embeddings using cosine similarity + * @param embedding Vector embedding to search against + * @param limit Maximum number of results (default: 10) + * @param threshold Similarity threshold (default: 0.7) + * @param contentTypes Optional content types to filter by + */ + async searchSimilar( + embedding: number[], + limit = 10, + threshold = 0.7, + contentTypes?: string[], + ): Promise> { + try { + // Try using vector similarity operator first + try { + // Build query with vector operators + let query = getAppDataSource() + .createQueryBuilder() + .select('vector_embedding.*') + .addSelect(`1 - (vector_embedding.embedding <=> :embedding) AS similarity`) + .from(VectorEmbedding, 'vector_embedding') + .where(`1 - (vector_embedding.embedding <=> :embedding) > :threshold`) + .orderBy('similarity', 'DESC') + .limit(limit) + .setParameter( + 'embedding', + Array.isArray(embedding) ? 
`[${embedding.join(',')}]` : embedding, + ) + .setParameter('threshold', threshold); + + // Add content type filter if provided + if (contentTypes && contentTypes.length > 0) { + query = query + .andWhere('vector_embedding.content_type IN (:...contentTypes)') + .setParameter('contentTypes', contentTypes); + } + + // Execute query + const results = await query.getRawMany(); + + // Return results if successful + return results.map((row) => ({ + embedding: this.mapRawToEntity(row), + similarity: parseFloat(row.similarity), + })); + } catch (vectorError) { + console.warn( + 'Vector similarity search failed, falling back to basic filtering:', + vectorError, + ); + + // Fallback to just getting the records by content type + let query = this.repository.createQueryBuilder('vector_embedding'); + + // Add content type filter if provided + if (contentTypes && contentTypes.length > 0) { + query = query + .where('vector_embedding.content_type IN (:...contentTypes)') + .setParameter('contentTypes', contentTypes); + } + + // Limit results + query = query.take(limit); + + // Execute query + const results = await query.getMany(); + + // Return results with a placeholder similarity + return results.map((entity) => ({ + embedding: entity, + similarity: 0.5, // Placeholder similarity + })); + } + } catch (error) { + console.error('Error during vector search:', error); + return []; + } + } + + /** + * Search by text using vector similarity + * @param text Text to search for + * @param getEmbeddingFunc Function to convert text to embedding + * @param limit Maximum number of results + * @param threshold Similarity threshold + * @param contentTypes Optional content types to filter by + */ + async searchByText( + text: string, + getEmbeddingFunc: (text: string) => Promise, + limit = 10, + threshold = 0.7, + contentTypes?: string[], + ): Promise> { + try { + // Get embedding for the search text + const embedding = await getEmbeddingFunc(text); + + // Search by embedding + return 
this.searchSimilar(embedding, limit, threshold, contentTypes); + } catch (error) { + console.error('Error searching by text:', error); + return []; + } + } + + /** + * Map raw database result to entity + * @param raw Raw database result + */ + private mapRawToEntity(raw: any): VectorEmbedding { + const entity = new VectorEmbedding(); + entity.id = raw.id; + entity.content_type = raw.content_type; + entity.content_id = raw.content_id; + entity.text_content = raw.text_content; + entity.metadata = raw.metadata; + entity.embedding = raw.embedding; + entity.dimensions = raw.dimensions; + entity.model = raw.model; + entity.createdAt = raw.created_at; + entity.updatedAt = raw.updated_at; + return entity; + } + + /** + * Format embedding array for pgvector + * @param embedding Array of embedding values + * @returns Properly formatted vector string for pgvector + */ + private formatEmbeddingForPgVector(embedding: number[] | string): string | null { + if (!embedding) return null; + + // If it's already a string and starts with '[', assume it's formatted + if (typeof embedding === 'string') { + if (embedding.startsWith('[') && embedding.endsWith(']')) { + return embedding; + } + return `[${embedding}]`; + } + + // Format array as proper pgvector string + if (Array.isArray(embedding)) { + return `[${embedding.join(',')}]`; + } + + return null; + } +} + +export default VectorEmbeddingRepository; diff --git a/src/db/repositories/index.ts b/src/db/repositories/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..b5a277d07701856d276c09bd4fee47dbfd284f91 --- /dev/null +++ b/src/db/repositories/index.ts @@ -0,0 +1,4 @@ +import VectorEmbeddingRepository from './VectorEmbeddingRepository.js'; + +// Export all repositories +export { VectorEmbeddingRepository }; diff --git a/src/db/subscribers/VectorEmbeddingSubscriber.ts b/src/db/subscribers/VectorEmbeddingSubscriber.ts new file mode 100644 index 
0000000000000000000000000000000000000000..19c506a5074698274590718c4a0ce1c1d899d8a9 --- /dev/null +++ b/src/db/subscribers/VectorEmbeddingSubscriber.ts @@ -0,0 +1,53 @@ +import { EntitySubscriberInterface, EventSubscriber, InsertEvent, UpdateEvent } from 'typeorm'; +import { VectorEmbedding } from '../entities/VectorEmbedding.js'; + +/** + * A subscriber to format vector embeddings before saving to database + */ +@EventSubscriber() +export class VectorEmbeddingSubscriber implements EntitySubscriberInterface { + /** + * Indicates that this subscriber only listens to VectorEmbedding events + */ + listenTo() { + return VectorEmbedding; + } + + /** + * Called before entity insertion + */ + beforeInsert(event: InsertEvent) { + this.formatEmbedding(event.entity); + } + + /** + * Called before entity update + */ + beforeUpdate(event: UpdateEvent) { + if (event.entity && event.entity.embedding) { + this.formatEmbedding(event.entity as VectorEmbedding); + } + } + + /** + * Format embedding as a proper vector string + */ + private formatEmbedding(entity: VectorEmbedding | undefined) { + if (!entity || !entity.embedding || !Array.isArray(entity.embedding)) { + return; + } + + // If the embedding is already a string, don't process it + if (typeof entity.embedding === 'string') { + return; + } + + // Format array as proper pgvector string + // Ensure the string starts with '[' and ends with ']' as required by pgvector + const vectorString = `[${entity.embedding.join(',')}]`; + + // Store the string directly (TypeORM will handle conversion) + // We need to use 'as any' because the type is declared as number[] but we're setting a string + (entity as any).embedding = vectorString; + } +} diff --git a/src/db/types/postgresVectorType.ts b/src/db/types/postgresVectorType.ts new file mode 100644 index 0000000000000000000000000000000000000000..84e682169c4fcc83f7db3c24d3529515a8f63a79 --- /dev/null +++ b/src/db/types/postgresVectorType.ts @@ -0,0 +1,38 @@ +import { DataSource } from 
'typeorm'; + +/** + * Register the PostgreSQL vector type with TypeORM + * @param dataSource TypeORM data source + */ +export function registerPostgresVectorType(dataSource: DataSource): void { + // Skip if not postgres + if (dataSource.driver.options.type !== 'postgres') { + return; + } + + // Get the postgres driver + const pgDriver = dataSource.driver; + + // Add 'vector' to the list of supported column types + if (pgDriver.supportedDataTypes) { + pgDriver.supportedDataTypes.push('vector' as any); + } + + // Override the normalization for the vector type + if ((pgDriver as any).dataTypeDefaults) { + (pgDriver as any).dataTypeDefaults['vector'] = { + type: 'vector', + }; + } + + // Override the column type resolver to prevent it from converting vector to other types + const originalColumnTypeResolver = (pgDriver as any).columnTypeResolver; + if (originalColumnTypeResolver) { + (pgDriver as any).columnTypeResolver = (column: any) => { + if (column.type === 'vector') { + return 'vector'; + } + return originalColumnTypeResolver(column); + }; + } +} diff --git a/src/index.ts b/src/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..b29ec8bb539e027d72e04066e919065796ef60ee --- /dev/null +++ b/src/index.ts @@ -0,0 +1,18 @@ +import 'reflect-metadata'; +import AppServer from './server.js'; + +const appServer = new AppServer(); + +async function boot() { + try { + await appServer.initialize(); + appServer.start(); + } catch (error) { + console.error('Failed to start application:', error); + process.exit(1); + } +} + +boot(); + +export default appServer.getApp(); diff --git a/src/middlewares/auth.ts b/src/middlewares/auth.ts new file mode 100644 index 0000000000000000000000000000000000000000..a9b54163e6ba14c42ff0ff7675b3565e6caa8447 --- /dev/null +++ b/src/middlewares/auth.ts @@ -0,0 +1,30 @@ +import { Request, Response, NextFunction } from 'express'; +import jwt from 'jsonwebtoken'; + +// Default secret key - in production, use an environment 
variable +const JWT_SECRET = process.env.JWT_SECRET || 'your-secret-key-change-this'; + +// Middleware to authenticate JWT token +export const auth = (req: Request, res: Response, next: NextFunction): void => { + // Get token from header or query parameter + const headerToken = req.header('x-auth-token'); + const queryToken = req.query.token as string; + const token = headerToken || queryToken; + + // Check if no token + if (!token) { + res.status(401).json({ success: false, message: 'No token, authorization denied' }); + return; + } + + // Verify token + try { + const decoded = jwt.verify(token, JWT_SECRET); + + // Add user from payload to request + (req as any).user = (decoded as any).user; + next(); + } catch (error) { + res.status(401).json({ success: false, message: 'Token is not valid' }); + } +}; \ No newline at end of file diff --git a/src/middlewares/index.ts b/src/middlewares/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..990f5c66b72385c2f42cb97fc26568791a8164f8 --- /dev/null +++ b/src/middlewares/index.ts @@ -0,0 +1,82 @@ +import express, { Request, Response, NextFunction } from 'express'; +import path from 'path'; +import { fileURLToPath } from 'url'; +import { dirname } from 'path'; +import fs from 'fs'; +import { auth } from './auth.js'; +import { initializeDefaultUser } from '../models/User.js'; +import config from '../config/index.js'; + +// Create __dirname equivalent for ES modules +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +// Try to find the correct frontend file path +const findFrontendPath = (): string => { + // First try development environment path + const devPath = path.join(dirname(__dirname), 'frontend', 'dist', 'index.html'); + if (fs.existsSync(devPath)) { + return path.join(dirname(__dirname), 'frontend', 'dist'); + } + + // Try npm/npx installed path (remove /dist directory) + const npmPath = path.join(dirname(dirname(__dirname)), 'frontend', 'dist', 
'index.html'); + if (fs.existsSync(npmPath)) { + return path.join(dirname(dirname(__dirname)), 'frontend', 'dist'); + } + + // If none of the above paths exist, return the most reasonable default path and log a warning + console.warn('Warning: Could not locate frontend files. Using default path.'); + return path.join(dirname(__dirname), 'frontend', 'dist'); +}; + +const frontendPath = findFrontendPath(); + +export const errorHandler = ( + err: Error, + _req: Request, + res: Response, + _next: NextFunction, +): void => { + console.error('Unhandled error:', err); + res.status(500).json({ + success: false, + message: 'Internal server error', + }); +}; + +export const initMiddlewares = (app: express.Application): void => { + // Serve static files from the dynamically determined frontend path + // Note: Static files will be handled by the server directly, not here + + app.use((req, res, next) => { + const basePath = config.basePath; + // Only apply JSON parsing for API and auth routes, not for SSE or message endpoints + if ( + req.path !== `${basePath}/sse` && + !req.path.startsWith(`${basePath}/sse/`) && + req.path !== `${basePath}/messages` + ) { + express.json()(req, res, next); + } else { + next(); + } + }); + + // Initialize default admin user if no users exist + initializeDefaultUser().catch((err) => { + console.error('Error initializing default user:', err); + }); + + // Protect API routes with authentication middleware, but exclude auth endpoints + app.use(`${config.basePath}/api`, (req, res, next) => { + // Skip authentication for login and register endpoints + if (req.path === '/auth/login' || req.path === '/auth/register') { + next(); + } else { + auth(req, res, next); + } + }); + + app.use(errorHandler); +}; diff --git a/src/models/User.ts b/src/models/User.ts new file mode 100644 index 0000000000000000000000000000000000000000..6f1e2d915e27ab71838bf566d2316b275e90856a --- /dev/null +++ b/src/models/User.ts @@ -0,0 +1,103 @@ +import fs from 'fs'; +import path 
from 'path'; +import bcrypt from 'bcryptjs'; +import { IUser, McpSettings } from '../types/index.js'; +import { loadSettings, saveSettings } from '../config/index.js'; + +// Get all users +export const getUsers = (): IUser[] => { + try { + const settings = loadSettings(); + return settings.users || []; + } catch (error) { + console.error('Error reading users from settings:', error); + return []; + } +}; + +// Save users to settings +const saveUsers = (users: IUser[]): void => { + try { + const settings = loadSettings(); + settings.users = users; + saveSettings(settings); + } catch (error) { + console.error('Error saving users to settings:', error); + } +}; + +// Create a new user +export const createUser = async (userData: IUser): Promise => { + const users = getUsers(); + + // Check if username already exists + if (users.some(user => user.username === userData.username)) { + return null; + } + + // Hash the password + const salt = await bcrypt.genSalt(10); + const hashedPassword = await bcrypt.hash(userData.password, salt); + + const newUser = { + username: userData.username, + password: hashedPassword, + isAdmin: userData.isAdmin || false + }; + + users.push(newUser); + saveUsers(users); + + return newUser; +}; + +// Find user by username +export const findUserByUsername = (username: string): IUser | undefined => { + const users = getUsers(); + return users.find(user => user.username === username); +}; + +// Verify user password +export const verifyPassword = async ( + plainPassword: string, + hashedPassword: string +): Promise => { + return await bcrypt.compare(plainPassword, hashedPassword); +}; + +// Update user password +export const updateUserPassword = async ( + username: string, + newPassword: string +): Promise => { + const users = getUsers(); + const userIndex = users.findIndex(user => user.username === username); + + if (userIndex === -1) { + return false; + } + + // Hash the new password + const salt = await bcrypt.genSalt(10); + const hashedPassword = 
await bcrypt.hash(newPassword, salt); + + // Update the user's password + users[userIndex].password = hashedPassword; + saveUsers(users); + + return true; +}; + +// Initialize with default admin user if no users exist +export const initializeDefaultUser = async (): Promise => { + const users = getUsers(); + + if (users.length === 0) { + await createUser({ + username: 'admin', + password: 'admin123', + isAdmin: true + }); + console.log('Default admin user created'); + } +}; \ No newline at end of file diff --git a/src/routes/index.ts b/src/routes/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..1aed579cc9621e98282e024fa42f70d53f5d61cb --- /dev/null +++ b/src/routes/index.ts @@ -0,0 +1,118 @@ +import express from 'express'; +import { check } from 'express-validator'; +import config from '../config/index.js'; +import { + getAllServers, + getAllSettings, + createServer, + updateServer, + deleteServer, + toggleServer, + updateSystemConfig, +} from '../controllers/serverController.js'; +import { + getGroups, + getGroup, + createNewGroup, + updateExistingGroup, + deleteExistingGroup, + addServerToExistingGroup, + removeServerFromExistingGroup, + getGroupServers, + updateGroupServersBatch, +} from '../controllers/groupController.js'; +import { + getAllMarketServers, + getMarketServer, + getAllMarketCategories, + getAllMarketTags, + searchMarketServersByQuery, + getMarketServersByCategory, + getMarketServersByTag, +} from '../controllers/marketController.js'; +import { login, register, getCurrentUser, changePassword } from '../controllers/authController.js'; +import { getAllLogs, clearLogs, streamLogs } from '../controllers/logController.js'; +import { getRuntimeConfig } from '../controllers/configController.js'; +import { callTool } from '../controllers/toolController.js'; +import { auth } from '../middlewares/auth.js'; + +const router = express.Router(); + +export const initRoutes = (app: express.Application): void => { + // API routes 
protected by auth middleware in middlewares/index.ts + router.get('/servers', getAllServers); + router.get('/settings', getAllSettings); + router.post('/servers', createServer); + router.put('/servers/:name', updateServer); + router.delete('/servers/:name', deleteServer); + router.post('/servers/:name/toggle', toggleServer); + router.put('/system-config', updateSystemConfig); + + // Group management routes + router.get('/groups', getGroups); + router.get('/groups/:id', getGroup); + router.post('/groups', createNewGroup); + router.put('/groups/:id', updateExistingGroup); + router.delete('/groups/:id', deleteExistingGroup); + router.post('/groups/:id/servers', addServerToExistingGroup); + router.delete('/groups/:id/servers/:serverName', removeServerFromExistingGroup); + router.get('/groups/:id/servers', getGroupServers); + // New route for batch updating servers in a group + router.put('/groups/:id/servers/batch', updateGroupServersBatch); + + // Tool management routes + router.post('/tools/call/:server', callTool); + + // Market routes + router.get('/market/servers', getAllMarketServers); + router.get('/market/servers/search', searchMarketServersByQuery); + router.get('/market/servers/:name', getMarketServer); + router.get('/market/categories', getAllMarketCategories); + router.get('/market/categories/:category', getMarketServersByCategory); + router.get('/market/tags', getAllMarketTags); + router.get('/market/tags/:tag', getMarketServersByTag); + + // Log routes + router.get('/logs', getAllLogs); + router.delete('/logs', clearLogs); + router.get('/logs/stream', streamLogs); + + // Auth routes - move to router instead of app directly + router.post( + '/auth/login', + [ + check('username', 'Username is required').not().isEmpty(), + check('password', 'Password is required').not().isEmpty(), + ], + login, + ); + + router.post( + '/auth/register', + [ + check('username', 'Username is required').not().isEmpty(), + check('password', 'Password must be at least 6 
characters').isLength({ min: 6 }), + ], + register, + ); + + router.get('/auth/user', auth, getCurrentUser); + + // Add change password route + router.post( + '/auth/change-password', + [ + auth, + check('currentPassword', 'Current password is required').not().isEmpty(), + check('newPassword', 'New password must be at least 6 characters').isLength({ min: 6 }), + ], + changePassword, + ); + + // Runtime configuration endpoint (no auth required for frontend initialization) + app.get(`${config.basePath}/config`, getRuntimeConfig); + + app.use(`${config.basePath}/api`, router); +}; + +export default router; diff --git a/src/server.ts b/src/server.ts new file mode 100644 index 0000000000000000000000000000000000000000..315bdf6fbeba2df6e83afa89927a7299363229e3 --- /dev/null +++ b/src/server.ts @@ -0,0 +1,205 @@ +import express from 'express'; +import config from './config/index.js'; +import path from 'path'; +import { fileURLToPath } from 'url'; +import fs from 'fs'; +import { initUpstreamServers } from './services/mcpService.js'; +import { initMiddlewares } from './middlewares/index.js'; +import { initRoutes } from './routes/index.js'; +import { + handleSseConnection, + handleSseMessage, + handleMcpPostRequest, + handleMcpOtherRequest, +} from './services/sseService.js'; +import { initializeDefaultUser } from './models/User.js'; + +// Get the directory name in ESM +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +export class AppServer { + private app: express.Application; + private port: number | string; + private frontendPath: string | null = null; + private basePath: string; + + constructor() { + this.app = express(); + this.port = config.port; + this.basePath = config.basePath; + } + + async initialize(): Promise { + try { + // Initialize default admin user if no users exist + await initializeDefaultUser(); + + initMiddlewares(this.app); + initRoutes(this.app); + console.log('Server initialized successfully'); + + 
initUpstreamServers() + .then(() => { + console.log('MCP server initialized successfully'); + this.app.get(`${this.basePath}/sse/:group?`, (req, res) => handleSseConnection(req, res)); + this.app.post(`${this.basePath}/messages`, handleSseMessage); + this.app.post(`${this.basePath}/mcp/:group?`, handleMcpPostRequest); + this.app.get(`${this.basePath}/mcp/:group?`, handleMcpOtherRequest); + this.app.delete(`${this.basePath}/mcp/:group?`, handleMcpOtherRequest); + }) + .catch((error) => { + console.error('Error initializing MCP server:', error); + throw error; + }) + .finally(() => { + // Find and serve frontend + this.findAndServeFrontend(); + }); + } catch (error) { + console.error('Error initializing server:', error); + throw error; + } + } + + private findAndServeFrontend(): void { + // Find frontend path + this.frontendPath = this.findFrontendDistPath(); + + if (this.frontendPath) { + console.log(`Serving frontend from: ${this.frontendPath}`); + // Serve static files with base path + this.app.use(this.basePath, express.static(this.frontendPath)); + + // Add the wildcard route for SPA with base path + if (fs.existsSync(path.join(this.frontendPath, 'index.html'))) { + this.app.get(`${this.basePath}/*`, (_req, res) => { + res.sendFile(path.join(this.frontendPath!, 'index.html')); + }); + + // Also handle root redirect if base path is set + if (this.basePath) { + this.app.get('/', (_req, res) => { + res.redirect(this.basePath); + }); + } + } + } else { + console.warn('Frontend dist directory not found. Server will run without frontend.'); + const rootPath = this.basePath || '/'; + this.app.get(rootPath, (_req, res) => { + res + .status(404) + .send('Frontend not found. 
MCPHub API is running, but the UI is not available.'); + }); + } + } + + start(): void { + this.app.listen(this.port, () => { + console.log(`Server is running on port ${this.port}`); + if (this.frontendPath) { + console.log(`Open http://localhost:${this.port} in your browser to access MCPHub UI`); + } else { + console.log( + `MCPHub API is running on http://localhost:${this.port}, but the UI is not available`, + ); + } + }); + } + + getApp(): express.Application { + return this.app; + } + + // Helper method to find frontend dist path in different environments + private findFrontendDistPath(): string | null { + // Debug flag for detailed logging + const debug = process.env.DEBUG === 'true'; + + if (debug) { + console.log('DEBUG: Current directory:', process.cwd()); + console.log('DEBUG: Script directory:', __dirname); + } + + // First, find the package root directory + const packageRoot = this.findPackageRoot(); + + if (debug) { + console.log('DEBUG: Using package root:', packageRoot); + } + + if (!packageRoot) { + console.warn('Could not determine package root directory'); + return null; + } + + // Check for frontend dist in the standard location + const frontendDistPath = path.join(packageRoot, 'frontend', 'dist'); + + if (debug) { + console.log(`DEBUG: Checking frontend at: ${frontendDistPath}`); + } + + if ( + fs.existsSync(frontendDistPath) && + fs.existsSync(path.join(frontendDistPath, 'index.html')) + ) { + return frontendDistPath; + } + + console.warn('Frontend distribution not found at', frontendDistPath); + return null; + } + + // Helper method to find the package root (where package.json is located) + private findPackageRoot(): string | null { + const debug = process.env.DEBUG === 'true'; + + // Possible locations for package.json + const possibleRoots = [ + // Standard npm package location + path.resolve(__dirname, '..', '..'), + // Current working directory + process.cwd(), + // When running from dist directory + path.resolve(__dirname, '..'), + // When 
installed via npx + path.resolve(__dirname, '..', '..', '..'), + ]; + + // Special handling for npx + if (process.argv[1] && process.argv[1].includes('_npx')) { + const npxDir = path.dirname(process.argv[1]); + possibleRoots.unshift(path.resolve(npxDir, '..')); + } + + if (debug) { + console.log('DEBUG: Checking for package.json in:', possibleRoots); + } + + for (const root of possibleRoots) { + const packageJsonPath = path.join(root, 'package.json'); + if (fs.existsSync(packageJsonPath)) { + try { + const pkg = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8')); + if (pkg.name === 'mcphub' || pkg.name === '@samanhappy/mcphub') { + if (debug) { + console.log(`DEBUG: Found package.json at ${packageJsonPath}`); + } + return root; + } + } catch (e) { + if (debug) { + console.error(`DEBUG: Failed to parse package.json at ${packageJsonPath}:`, e); + } + // Continue to the next potential root + } + } + } + + return null; + } +} + +export default AppServer; diff --git a/src/services/groupService.ts b/src/services/groupService.ts new file mode 100644 index 0000000000000000000000000000000000000000..f5d6dbed22df14ad528efa8a5f92b5df0cf583d4 --- /dev/null +++ b/src/services/groupService.ts @@ -0,0 +1,232 @@ +import { v4 as uuidv4 } from 'uuid'; +import { IGroup, McpSettings } from '../types/index.js'; +import { loadSettings, saveSettings } from '../config/index.js'; +import { notifyToolChanged } from './mcpService.js'; + +// Get all groups +export const getAllGroups = (): IGroup[] => { + const settings = loadSettings(); + return settings.groups || []; +}; + +// Get group by ID or name +export const getGroupByIdOrName = (key: string): IGroup | undefined => { + const settings = loadSettings(); + const routingConfig = settings.systemConfig?.routing || { + enableGlobalRoute: true, + enableGroupNameRoute: true, + }; + const groups = getAllGroups(); + return ( + groups.find( + (group) => group.id === key || (group.name === key && routingConfig.enableGroupNameRoute), + ) || 
undefined + ); +}; + +// Create a new group +export const createGroup = ( + name: string, + description?: string, + servers: string[] = [], +): IGroup | null => { + try { + const settings = loadSettings(); + const groups = settings.groups || []; + + // Check if group with same name already exists + if (groups.some((group) => group.name === name)) { + return null; + } + + // Filter out non-existent servers + const validServers = servers.filter((serverName) => settings.mcpServers[serverName]); + + const newGroup: IGroup = { + id: uuidv4(), + name, + description, + servers: validServers, + }; + + // Initialize groups array if it doesn't exist + if (!settings.groups) { + settings.groups = []; + } + + settings.groups.push(newGroup); + + if (!saveSettings(settings)) { + return null; + } + + return newGroup; + } catch (error) { + console.error('Failed to create group:', error); + return null; + } +}; + +// Update an existing group +export const updateGroup = (id: string, data: Partial): IGroup | null => { + try { + const settings = loadSettings(); + if (!settings.groups) { + return null; + } + + const groupIndex = settings.groups.findIndex((group) => group.id === id); + if (groupIndex === -1) { + return null; + } + + // Check for name uniqueness if name is being updated + if (data.name && settings.groups.some((g) => g.name === data.name && g.id !== id)) { + return null; + } + + // If servers array is provided, validate server existence + if (data.servers) { + data.servers = data.servers.filter((serverName) => settings.mcpServers[serverName]); + } + + const updatedGroup = { + ...settings.groups[groupIndex], + ...data, + }; + + settings.groups[groupIndex] = updatedGroup; + + if (!saveSettings(settings)) { + return null; + } + + notifyToolChanged(); + return updatedGroup; + } catch (error) { + console.error(`Failed to update group ${id}:`, error); + return null; + } +}; + +// Update servers in a group (batch update) +export const updateGroupServers = (groupId: string, 
servers: string[]): IGroup | null => { + try { + const settings = loadSettings(); + if (!settings.groups) { + return null; + } + + const groupIndex = settings.groups.findIndex((group) => group.id === groupId); + if (groupIndex === -1) { + return null; + } + + // Filter out non-existent servers + const validServers = servers.filter((serverName) => settings.mcpServers[serverName]); + + settings.groups[groupIndex].servers = validServers; + + if (!saveSettings(settings)) { + return null; + } + + notifyToolChanged(); + return settings.groups[groupIndex]; + } catch (error) { + console.error(`Failed to update servers for group ${groupId}:`, error); + return null; + } +}; + +// Delete a group +export const deleteGroup = (id: string): boolean => { + try { + const settings = loadSettings(); + if (!settings.groups) { + return false; + } + + const initialLength = settings.groups.length; + settings.groups = settings.groups.filter((group) => group.id !== id); + + if (settings.groups.length === initialLength) { + return false; + } + + return saveSettings(settings); + } catch (error) { + console.error(`Failed to delete group ${id}:`, error); + return false; + } +}; + +// Add server to group +export const addServerToGroup = (groupId: string, serverName: string): IGroup | null => { + try { + const settings = loadSettings(); + if (!settings.groups) { + return null; + } + + // Verify server exists + if (!settings.mcpServers[serverName]) { + return null; + } + + const groupIndex = settings.groups.findIndex((group) => group.id === groupId); + if (groupIndex === -1) { + return null; + } + + const group = settings.groups[groupIndex]; + + // Add server to group if not already in it + if (!group.servers.includes(serverName)) { + group.servers.push(serverName); + + if (!saveSettings(settings)) { + return null; + } + } + + notifyToolChanged(); + return group; + } catch (error) { + console.error(`Failed to add server ${serverName} to group ${groupId}:`, error); + return null; + } +}; + +// 
Remove server from group +export const removeServerFromGroup = (groupId: string, serverName: string): IGroup | null => { + try { + const settings = loadSettings(); + if (!settings.groups) { + return null; + } + + const groupIndex = settings.groups.findIndex((group) => group.id === groupId); + if (groupIndex === -1) { + return null; + } + + const group = settings.groups[groupIndex]; + group.servers = group.servers.filter((name) => name !== serverName); + + if (!saveSettings(settings)) { + return null; + } + + return group; + } catch (error) { + console.error(`Failed to remove server ${serverName} from group ${groupId}:`, error); + return null; + } +}; + +// Get all servers in a group +export const getServersInGroup = (groupId: string): string[] => { + const group = getGroupByIdOrName(groupId); + return group ? group.servers : []; +}; diff --git a/src/services/logService.ts b/src/services/logService.ts new file mode 100644 index 0000000000000000000000000000000000000000..4f453d722bbcac50e823146ec81ca544194e27ad --- /dev/null +++ b/src/services/logService.ts @@ -0,0 +1,228 @@ +// filepath: /Users/sunmeng/code/github/mcphub/src/services/logService.ts +import { spawn, ChildProcess } from 'child_process'; +import { EventEmitter } from 'events'; +import * as os from 'os'; +import * as process from 'process'; + +interface LogEntry { + timestamp: number; + type: 'info' | 'error' | 'warn' | 'debug'; + source: string; + message: string; + processId?: string; +} + +// ANSI color codes for console output +const colors = { + reset: '\x1b[0m', + bright: '\x1b[1m', + dim: '\x1b[2m', + underscore: '\x1b[4m', + blink: '\x1b[5m', + reverse: '\x1b[7m', + hidden: '\x1b[8m', + + black: '\x1b[30m', + red: '\x1b[31m', + green: '\x1b[32m', + yellow: '\x1b[33m', + blue: '\x1b[34m', + magenta: '\x1b[35m', + cyan: '\x1b[36m', + white: '\x1b[37m', + + bgBlack: '\x1b[40m', + bgRed: '\x1b[41m', + bgGreen: '\x1b[42m', + bgYellow: '\x1b[43m', + bgBlue: '\x1b[44m', + bgMagenta: '\x1b[45m', + bgCyan: 
'\x1b[46m', + bgWhite: '\x1b[47m', +}; + +// Level colors for different log types +const levelColors = { + info: colors.green, + error: colors.red, + warn: colors.yellow, + debug: colors.cyan, +}; + +// Maximum number of logs to keep in memory +const MAX_LOGS = 1000; + +class LogService { + private logs: LogEntry[] = []; + private logEmitter = new EventEmitter(); + private mainProcessId: string; + private hostname: string; + + constructor() { + this.mainProcessId = process.pid.toString(); + this.hostname = os.hostname(); + this.overrideConsole(); + } + + // Format a timestamp for display + private formatTimestamp(timestamp: number): string { + const date = new Date(timestamp); + return date.toISOString(); + } + + // Format a log message for console output + private formatLogMessage( + type: 'info' | 'error' | 'warn' | 'debug', + source: string, + message: string, + processId?: string, + ): string { + const timestamp = this.formatTimestamp(Date.now()); + const pid = processId || this.mainProcessId; + const level = type.toUpperCase(); + const levelColor = levelColors[type]; + + return `${colors.dim}[${timestamp}]${colors.reset} ${levelColor}${colors.bright}[${level}]${colors.reset} ${colors.blue}[${pid}]${colors.reset} ${colors.magenta}[${source}]${colors.reset} ${message}`; + } + + // Override console methods to capture logs + private overrideConsole() { + const originalConsoleLog = console.log; + const originalConsoleError = console.error; + const originalConsoleWarn = console.warn; + const originalConsoleDebug = console.debug; + + // Helper method to handle common logic for all console methods + const handleConsoleMethod = ( + type: 'info' | 'error' | 'warn' | 'debug', + originalMethod: (...args: any[]) => void, + ...args: any[] + ) => { + const firstArg = args.length > 0 ? 
this.formatArgument(args[0]) : { text: '' }; + const remainingArgs = args.slice(1).map((arg) => this.formatArgument(arg).text); + const combinedMessage = [firstArg.text, ...remainingArgs].join(' '); + const source = firstArg.source || 'main'; + const processId = firstArg.processId; + this.addLog(type, source, combinedMessage, processId); + originalMethod.apply(console, [ + this.formatLogMessage(type, source, combinedMessage, processId), + ]); + }; + + console.log = (...args: any[]) => { + handleConsoleMethod('info', originalConsoleLog, ...args); + }; + + console.error = (...args: any[]) => { + handleConsoleMethod('error', originalConsoleError, ...args); + }; + + console.warn = (...args: any[]) => { + handleConsoleMethod('warn', originalConsoleWarn, ...args); + }; + + console.debug = (...args: any[]) => { + handleConsoleMethod('debug', originalConsoleDebug, ...args); + }; + } + + // Format an argument for logging and extract structured information + private formatArgument(arg: any): { text: string; source?: string; processId?: string } { + // Handle null and undefined + if (arg === null) return { text: 'null' }; + if (arg === undefined) return { text: 'undefined' }; + + // Handle objects + if (typeof arg === 'object') { + try { + return { text: JSON.stringify(arg, null, 2) }; + } catch (e) { + return { text: String(arg) }; + } + } + + // Handle strings with potential structured information + const argStr = String(arg); + + // Check for patterns like [processId] [source] message or [processId] [source-processId] message + const structuredPattern = /^\s*\[([^\]]+)\]\s*\[([^\]]+)\]\s*(.*)/; + const match = argStr.match(structuredPattern); + + if (match) { + const [_, firstBracket, secondBracket, remainingText] = match; + + // Check if the second bracket has a format like 'source-processId' + const sourcePidPattern = /^([^-]+)-(.+)$/; + const sourcePidMatch = secondBracket.match(sourcePidPattern); + + if (sourcePidMatch) { + // If we have a 'source-processId' format in 
the second bracket + const [_, source, extractedProcessId] = sourcePidMatch; + return { + text: remainingText.trim(), + source: source.trim(), + processId: firstBracket.trim(), + }; + } + + // Otherwise treat first bracket as processId and second as source + return { + text: remainingText.trim(), + source: secondBracket.trim(), + processId: firstBracket.trim(), + }; + } + + // Return original string if no structured format is detected + return { text: argStr }; + } + + // Add a log entry to the logs array + private addLog( + type: 'info' | 'error' | 'warn' | 'debug', + source: string, + message: string, + processId?: string, + ) { + const log: LogEntry = { + timestamp: Date.now(), + type, + source, + message, + processId: processId || this.mainProcessId, + }; + + this.logs.push(log); + + // Limit the number of logs kept in memory + if (this.logs.length > MAX_LOGS) { + this.logs.shift(); + } + + // Emit the log event for SSE subscribers + this.logEmitter.emit('log', log); + } + + // Get all logs + public getLogs(): LogEntry[] { + return this.logs; + } + + // Subscribe to log events + public subscribe(callback: (log: LogEntry) => void): () => void { + this.logEmitter.on('log', callback); + return () => { + this.logEmitter.off('log', callback); + }; + } + + // Clear all logs + public clearLogs(): void { + this.logs = []; + this.logEmitter.emit('clear'); + } +} + +// Export a singleton instance +const logService = new LogService(); +export default logService; diff --git a/src/services/marketService.ts b/src/services/marketService.ts new file mode 100644 index 0000000000000000000000000000000000000000..bdde610da35241c2bcf5e197121d775c29bba62c --- /dev/null +++ b/src/services/marketService.ts @@ -0,0 +1,116 @@ +import fs from 'fs'; +import { MarketServer } from '../types/index.js'; +import { getConfigFilePath } from '../utils/path.js'; + +// Get path to the servers.json file +export const getServersJsonPath = (): string => { + return getConfigFilePath('servers.json', 
'Servers'); +}; + +// Load all market servers from servers.json +export const getMarketServers = (): Record => { + try { + const serversJsonPath = getServersJsonPath(); + const data = fs.readFileSync(serversJsonPath, 'utf8'); + const serversObj = JSON.parse(data) as Record; + + const sortedEntries = Object.entries(serversObj).sort(([, serverA], [, serverB]) => { + if (serverA.is_official && !serverB.is_official) return -1; + if (!serverA.is_official && serverB.is_official) return 1; + return 0; + }); + + return Object.fromEntries(sortedEntries); + } catch (error) { + console.error('Failed to load servers from servers.json:', error); + return {}; + } +}; + +// Get a specific market server by name +export const getMarketServerByName = (name: string): MarketServer | null => { + const servers = getMarketServers(); + return servers[name] || null; +}; + +// Get all categories from market servers +export const getMarketCategories = (): string[] => { + const servers = getMarketServers(); + const categories = new Set(); + + Object.values(servers).forEach((server) => { + server.categories?.forEach((category) => { + categories.add(category); + }); + }); + + return Array.from(categories).sort(); +}; + +// Get all tags from market servers +export const getMarketTags = (): string[] => { + const servers = getMarketServers(); + const tags = new Set(); + + Object.values(servers).forEach((server) => { + server.tags?.forEach((tag) => { + tags.add(tag); + }); + }); + + return Array.from(tags).sort(); +}; + +// Search market servers by query +export const searchMarketServers = (query: string): MarketServer[] => { + const servers = getMarketServers(); + const searchTerms = query + .toLowerCase() + .split(' ') + .filter((term) => term.length > 0); + + if (searchTerms.length === 0) { + return Object.values(servers); + } + + return Object.values(servers).filter((server) => { + // Search in name, display_name, description, categories, and tags + const searchableText = [ + server.name, + 
server.display_name, + server.description, + ...(server.categories || []), + ...(server.tags || []), + ] + .join(' ') + .toLowerCase(); + + return searchTerms.some((term) => searchableText.includes(term)); + }); +}; + +// Filter market servers by category +export const filterMarketServersByCategory = (category: string): MarketServer[] => { + const servers = getMarketServers(); + + if (!category) { + return Object.values(servers); + } + + return Object.values(servers).filter((server) => { + return server.categories?.includes(category); + }); +}; + +// Filter market servers by tag +export const filterMarketServersByTag = (tag: string): MarketServer[] => { + const servers = getMarketServers(); + + if (!tag) { + return Object.values(servers); + } + + return Object.values(servers).filter((server) => { + return server.tags?.includes(tag); + }); +}; diff --git a/src/services/mcpService.ts b/src/services/mcpService.ts new file mode 100644 index 0000000000000000000000000000000000000000..cd4e4fb65a495f7d6ab96175cbb2edc40d7f3896 --- /dev/null +++ b/src/services/mcpService.ts @@ -0,0 +1,691 @@ +import { Server } from '@modelcontextprotocol/sdk/server/index.js'; +import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'; +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; +import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js'; +import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js'; +import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; +import { ServerInfo, ServerConfig } from '../types/index.js'; +import { loadSettings, saveSettings, expandEnvVars, replaceEnvVars } from '../config/index.js'; +import config from '../config/index.js'; +import { getGroup } from './sseService.js'; +import { getServersInGroup } from './groupService.js'; +import { getSmartRoutingConfig } from '../utils/smartRouting.js'; +import { 
saveToolsAsVectorEmbeddings, searchToolsByVector } from './vectorSearchService.js'; + +const servers: { [sessionId: string]: Server } = {}; + +export const initUpstreamServers = async (): Promise => { + await registerAllTools(true); +}; + +export const getMcpServer = (sessionId?: string, group?: string): Server => { + if (!sessionId) { + return createMcpServer(config.mcpHubName, config.mcpHubVersion, group); + } + + if (!servers[sessionId]) { + const serverGroup = group || getGroup(sessionId); + const server = createMcpServer(config.mcpHubName, config.mcpHubVersion, serverGroup); + servers[sessionId] = server; + } else { + console.log(`MCP server already exists for sessionId: ${sessionId}`); + } + return servers[sessionId]; +}; + +export const deleteMcpServer = (sessionId: string): void => { + delete servers[sessionId]; +}; + +export const notifyToolChanged = async () => { + await registerAllTools(false); + Object.values(servers).forEach((server) => { + server + .sendToolListChanged() + .catch((error) => { + console.warn('Failed to send tool list changed notification:', error.message); + }) + .then(() => { + console.log('Tool list changed notification sent successfully'); + }); + }); +}; + +// Store all server information +let serverInfos: ServerInfo[] = []; + +// Initialize MCP server clients +export const initializeClientsFromSettings = (isInit: boolean): ServerInfo[] => { + const settings = loadSettings(); + const existingServerInfos = serverInfos; + serverInfos = []; + + for (const [name, conf] of Object.entries(settings.mcpServers)) { + // Skip disabled servers + if (conf.enabled === false) { + console.log(`Skipping disabled server: ${name}`); + serverInfos.push({ + name, + status: 'disconnected', + error: null, + tools: [], + createTime: Date.now(), + enabled: false, + }); + continue; + } + + // Check if server is already connected + const existingServer = existingServerInfos.find( + (s) => s.name === name && s.status === 'connected', + ); + if 
(existingServer) { + serverInfos.push({ + ...existingServer, + enabled: conf.enabled === undefined ? true : conf.enabled, + }); + console.log(`Server '${name}' is already connected.`); + continue; + } + + let transport; + if (conf.type === 'streamable-http') { + const options: any = {}; + if (conf.headers && Object.keys(conf.headers).length > 0) { + options.requestInit = { + headers: conf.headers, + }; + } + transport = new StreamableHTTPClientTransport(new URL(conf.url || ''), options); + } else if (conf.url) { + // Default to SSE only when 'conf.type' is not specified and 'conf.url' is available + const options: any = {}; + if (conf.headers && Object.keys(conf.headers).length > 0) { + options.eventSourceInit = { + headers: conf.headers, + }; + options.requestInit = { + headers: conf.headers, + }; + } + transport = new SSEClientTransport(new URL(conf.url), options); + } else if (conf.command && conf.args) { + // If type is stdio or if command and args are provided without type + const env: Record = { + ...(process.env as Record), // Inherit all environment variables from parent process + ...replaceEnvVars(conf.env || {}), // Override with configured env vars + }; + env['PATH'] = expandEnvVars(process.env.PATH as string) || ''; + + // Add UV_DEFAULT_INDEX from settings if available (for Python packages) + const settings = loadSettings(); // Add UV_DEFAULT_INDEX from settings if available (for Python packages) + if ( + settings.systemConfig?.install?.pythonIndexUrl && + (conf.command === 'uvx' || conf.command === 'uv' || conf.command === 'python') + ) { + env['UV_DEFAULT_INDEX'] = settings.systemConfig.install.pythonIndexUrl; + } + + // Add npm_config_registry from settings if available (for NPM packages) + if ( + settings.systemConfig?.install?.npmRegistry && + (conf.command === 'npm' || + conf.command === 'npx' || + conf.command === 'pnpm' || + conf.command === 'yarn' || + conf.command === 'node') + ) { + env['npm_config_registry'] = 
settings.systemConfig.install.npmRegistry; + } + + transport = new StdioClientTransport({ + command: conf.command, + args: conf.args, + env: env, + stderr: 'pipe', + }); + transport.stderr?.on('data', (data) => { + console.log(`[${name}] [child] ${data}`); + }); + } else { + console.warn(`Skipping server '${name}': missing required configuration`); + serverInfos.push({ + name, + status: 'disconnected', + error: 'Missing required configuration', + tools: [], + createTime: Date.now(), + }); + continue; + } + + const client = new Client( + { + name: `mcp-client-${name}`, + version: '1.0.0', + }, + { + capabilities: { + prompts: {}, + resources: {}, + tools: {}, + }, + }, + ); + const timeout = isInit ? Number(config.initTimeout) : Number(config.timeout); + client + .connect(transport, { timeout: timeout }) + .then(() => { + console.log(`Successfully connected client for server: ${name}`); + + client + .listTools({}, { timeout: timeout }) + .then((tools) => { + console.log(`Successfully listed ${tools.tools.length} tools for server: ${name}`); + const serverInfo = getServerByName(name); + if (!serverInfo) { + console.warn(`Server info not found for server: ${name}`); + return; + } + + serverInfo.tools = tools.tools.map((tool) => ({ + name: tool.name, + description: tool.description || '', + inputSchema: tool.inputSchema || {}, + })); + serverInfo.status = 'connected'; + serverInfo.error = null; + + // Save tools as vector embeddings for search (only when smart routing is enabled) + if (serverInfo.tools.length > 0) { + try { + const smartRoutingConfig = getSmartRoutingConfig(); + if (smartRoutingConfig.enabled) { + console.log( + `Smart routing enabled - saving vector embeddings for server ${name}`, + ); + saveToolsAsVectorEmbeddings(name, serverInfo.tools); + } + } catch (vectorError) { + console.warn(`Failed to save vector embeddings for server ${name}:`, vectorError); + } + } + }) + .catch((error) => { + console.error( + `Failed to list tools for server ${name} by 
error: ${error} with stack: ${error.stack}`, + ); + const serverInfo = getServerByName(name); + if (serverInfo) { + serverInfo.status = 'disconnected'; + serverInfo.error = `Failed to list tools: ${error.stack} `; + } + }); + }) + .catch((error) => { + console.error( + `Failed to connect client for server ${name} by error: ${error} with stack: ${error.stack}`, + ); + const serverInfo = getServerByName(name); + if (serverInfo) { + serverInfo.status = 'disconnected'; + serverInfo.error = `Failed to connect: ${error.stack} `; + } + }); + serverInfos.push({ + name, + status: 'connecting', + error: null, + tools: [], + client, + transport, + createTime: Date.now(), + }); + console.log(`Initialized client for server: ${name}`); + } + + return serverInfos; +}; + +// Register all MCP tools +export const registerAllTools = async (isInit: boolean): Promise => { + initializeClientsFromSettings(isInit); +}; + +// Get all server information +export const getServersInfo = (): Omit[] => { + const settings = loadSettings(); + const infos = serverInfos.map(({ name, status, tools, createTime, error }) => { + const serverConfig = settings.mcpServers[name]; + const enabled = serverConfig ? serverConfig.enabled !== false : true; + return { + name, + status, + error, + tools, + createTime, + enabled, + }; + }); + infos.sort((a, b) => { + if (a.enabled === b.enabled) return 0; + return a.enabled ? 
-1 : 1; + }); + return infos; +}; + +// Get server by name +const getServerByName = (name: string): ServerInfo | undefined => { + return serverInfos.find((serverInfo) => serverInfo.name === name); +}; + +// Get server by tool name +const getServerByTool = (toolName: string): ServerInfo | undefined => { + return serverInfos.find((serverInfo) => serverInfo.tools.some((tool) => tool.name === toolName)); +}; + +// Add new server +export const addServer = async ( + name: string, + config: ServerConfig, +): Promise<{ success: boolean; message?: string }> => { + try { + const settings = loadSettings(); + if (settings.mcpServers[name]) { + return { success: false, message: 'Server name already exists' }; + } + + settings.mcpServers[name] = config; + if (!saveSettings(settings)) { + return { success: false, message: 'Failed to save settings' }; + } + + return { success: true, message: 'Server added successfully' }; + } catch (error) { + console.error(`Failed to add server: ${name}`, error); + return { success: false, message: 'Failed to add server' }; + } +}; + +// Remove server +export const removeServer = (name: string): { success: boolean; message?: string } => { + try { + const settings = loadSettings(); + if (!settings.mcpServers[name]) { + return { success: false, message: 'Server not found' }; + } + + delete settings.mcpServers[name]; + + if (!saveSettings(settings)) { + return { success: false, message: 'Failed to save settings' }; + } + + serverInfos = serverInfos.filter((serverInfo) => serverInfo.name !== name); + return { success: true, message: 'Server removed successfully' }; + } catch (error) { + console.error(`Failed to remove server: ${name}`, error); + return { success: false, message: `Failed to remove server: ${error}` }; + } +}; + +// Update existing server +export const updateMcpServer = async ( + name: string, + config: ServerConfig, +): Promise<{ success: boolean; message?: string }> => { + try { + const settings = loadSettings(); + if 
(!settings.mcpServers[name]) { + return { success: false, message: 'Server not found' }; + } + + settings.mcpServers[name] = config; + if (!saveSettings(settings)) { + return { success: false, message: 'Failed to save settings' }; + } + + closeServer(name); + + serverInfos = serverInfos.filter((serverInfo) => serverInfo.name !== name); + return { success: true, message: 'Server updated successfully' }; + } catch (error) { + console.error(`Failed to update server: ${name}`, error); + return { success: false, message: 'Failed to update server' }; + } +}; + +// Close server client and transport +function closeServer(name: string) { + const serverInfo = serverInfos.find((serverInfo) => serverInfo.name === name); + if (serverInfo && serverInfo.client && serverInfo.transport) { + serverInfo.client.close(); + serverInfo.transport.close(); + console.log(`Closed client and transport for server: ${serverInfo.name}`); + // TODO kill process + } +} + +// Toggle server enabled status +export const toggleServerStatus = async ( + name: string, + enabled: boolean, +): Promise<{ success: boolean; message?: string }> => { + try { + const settings = loadSettings(); + if (!settings.mcpServers[name]) { + return { success: false, message: 'Server not found' }; + } + + // Update the enabled status in settings + settings.mcpServers[name].enabled = enabled; + + if (!saveSettings(settings)) { + return { success: false, message: 'Failed to save settings' }; + } + + // If disabling, disconnect the server and remove from active servers + if (!enabled) { + closeServer(name); + + // Update the server info to show as disconnected and disabled + const index = serverInfos.findIndex((s) => s.name === name); + if (index !== -1) { + serverInfos[index] = { + ...serverInfos[index], + status: 'disconnected', + enabled: false, + }; + } + } + + return { success: true, message: `Server ${enabled ? 
'enabled' : 'disabled'} successfully` }; + } catch (error) { + console.error(`Failed to toggle server status: ${name}`, error); + return { success: false, message: 'Failed to toggle server status' }; + } +}; + +export const handleListToolsRequest = async (_: any, extra: any) => { + const sessionId = extra.sessionId || ''; + const group = getGroup(sessionId); + console.log(`Handling ListToolsRequest for group: ${group}`); + + // Special handling for $smart group to return special tools + if (group === '$smart') { + return { + tools: [ + { + name: 'search_tools', + description: (() => { + // Get info about available servers + const availableServers = serverInfos.filter( + (server) => server.status === 'connected' && server.enabled !== false, + ); + // Create simple server information with only server names + const serversList = availableServers + .map((server) => { + return `${server.name}`; + }) + .join(', '); + return `STEP 1 of 2: Use this tool FIRST to discover and search for relevant tools across all available servers. This tool and call_tool work together as a two-step process: 1) search_tools to find what you need, 2) call_tool to execute it. + +For optimal results, use specific queries matching your exact needs. Call this tool multiple times with different queries for different parts of complex tasks. Example queries: "image generation tools", "code review tools", "data analysis", "translation capabilities", etc. Results are sorted by relevance using vector similarity. + +After finding relevant tools, you MUST use the call_tool to actually execute them. The search_tools only finds tools - it doesn't execute them. + +Available servers: ${serversList}`; + })(), + inputSchema: { + type: 'object', + properties: { + query: { + type: 'string', + description: + 'The search query to find relevant tools. Be specific and descriptive about the task you want to accomplish.', + }, + limit: { + type: 'integer', + description: + 'Maximum number of results to return. 
Use higher values (20-30) for broad searches and lower values (5-10) for specific searches.',
                default: 10,
              },
            },
            required: ['query'],
          },
        },
        {
          name: 'call_tool',
          description:
            "STEP 2 of 2: Use this tool AFTER search_tools to actually execute/invoke any tool you found. This is the execution step - search_tools finds tools, call_tool runs them.\n\nWorkflow: search_tools → examine results → call_tool with the chosen tool name and required arguments.\n\nIMPORTANT: Always check the tool's inputSchema from search_tools results before invoking to ensure you provide the correct arguments. The search results will show you exactly what parameters each tool expects.",
          inputSchema: {
            type: 'object',
            properties: {
              toolName: {
                type: 'string',
                description: 'The exact name of the tool to invoke (from search_tools results)',
              },
              arguments: {
                type: 'object',
                description:
                  'The arguments to pass to the tool based on its inputSchema (optional if tool requires no arguments)',
              },
            },
            required: ['toolName'],
          },
        },
      ],
    };
  }

  // Restrict visible tools to the session's group (or to a single server
  // whose name matches the "group" when no group of that name exists).
  const allServerInfos = serverInfos.filter((serverInfo) => {
    if (serverInfo.enabled === false) return false;
    if (!group) return true;
    const serversInGroup = getServersInGroup(group);
    if (!serversInGroup || serversInGroup.length === 0) return serverInfo.name === group;
    return serversInGroup.includes(serverInfo.name);
  });

  const allTools = [];
  for (const serverInfo of allServerInfos) {
    if (serverInfo.tools && serverInfo.tools.length > 0) {
      allTools.push(...serverInfo.tools);
    }
  }

  return {
    tools: allTools,
  };
};

/**
 * Handle a CallToolRequest.
 *
 * Three paths:
 *  - 'search_tools': vector search across all connected servers ($smart routing);
 *  - 'call_tool': proxy execution of a previously discovered tool;
 *  - anything else: direct dispatch to the server owning the named tool.
 * Errors are returned as an { isError: true } text payload, never thrown to the SDK.
 */
export const handleCallToolRequest = async (request: any, extra: any) => {
  console.log(`Handling CallToolRequest for tool: ${JSON.stringify(request.params)}`);
  try {
    // Special handling for agent group tools
    if (request.params.name === 'search_tools') {
      const { query, limit = 10 } = request.params.arguments || {};

      if (!query || typeof query !== 'string') {
        throw new Error('Query parameter is required and must be a string');
      }

      // Clamp limit to [1, 100]. FIX: always pass a radix to parseInt.
      const limitNum = Math.min(Math.max(parseInt(String(limit), 10) || 10, 1), 100);

      // Dynamically adjust threshold based on query characteristics
      let thresholdNum = 0.3; // Default threshold

      // For more general queries, use a lower threshold to get more diverse results
      if (query.length < 10 || query.split(' ').length <= 2) {
        thresholdNum = 0.2;
      }

      // For very specific queries, use a higher threshold for more precise results
      if (query.length > 30 || query.includes('specific') || query.includes('exact')) {
        thresholdNum = 0.4;
      }

      console.log(`Using similarity threshold: ${thresholdNum} for query: "${query}"`);
      const servers = undefined; // No server filtering

      const searchResults = await searchToolsByVector(query, limitNum, thresholdNum, servers);
      console.log(`Search results: ${JSON.stringify(searchResults)}`);
      // Find actual tool information from serverInfos by serverName and toolName
      const tools = searchResults.map((result) => {
        // Find the server in serverInfos
        const server = serverInfos.find(
          (serverInfo) =>
            serverInfo.name === result.serverName &&
            serverInfo.status === 'connected' &&
            serverInfo.enabled !== false,
        );
        if (server && server.tools && server.tools.length > 0) {
          // Find the tool in server.tools
          const actualTool = server.tools.find((tool) => tool.name === result.toolName);
          if (actualTool) {
            // Return the actual tool info from serverInfos
            return actualTool;
          }
        }

        // Fallback to search result if server or tool not found
        return {
          name: result.toolName,
          description: result.description || '',
          inputSchema: result.inputSchema || {},
        };
      });

      // Add usage guidance to the response
      const response = {
        tools,
        metadata: {
          query: query,
          threshold: thresholdNum,
          totalResults: tools.length,
          guideline:
            tools.length > 0
              ? "Found relevant tools. If these tools don't match exactly what you need, try another search with more specific keywords."
              : 'No tools found. Try broadening your search or using different keywords.',
          nextSteps:
            tools.length > 0
              ? 'To use a tool, call call_tool with the toolName and required arguments.'
              : 'Consider searching for related capabilities or more general terms.',
        },
      };

      // Return in the same format as handleListToolsRequest
      return {
        content: [
          {
            type: 'text',
            text: JSON.stringify(response),
          },
        ],
      };
    }

    // Special handling for call_tool
    if (request.params.name === 'call_tool') {
      const { toolName, arguments: toolArgs = {} } = request.params.arguments || {};

      if (!toolName) {
        throw new Error('toolName parameter is required');
      }

      // arguments parameter is now optional

      let targetServerInfo: ServerInfo | undefined;
      if (extra && extra.server) {
        targetServerInfo = getServerByName(extra.server);
      } else {
        // Find the first server that has this tool
        targetServerInfo = serverInfos.find(
          (serverInfo) =>
            serverInfo.status === 'connected' &&
            serverInfo.enabled !== false &&
            serverInfo.tools.some((tool) => tool.name === toolName),
        );
      }

      if (!targetServerInfo) {
        throw new Error(`No available servers found with tool: ${toolName}`);
      }

      // Check if the tool exists on the server
      const toolExists = targetServerInfo.tools.some((tool) => tool.name === toolName);
      if (!toolExists) {
        throw new Error(`Tool '${toolName}' not found on server '${targetServerInfo.name}'`);
      }

      // Call the tool on the target server
      const client = targetServerInfo.client;
      if (!client) {
        throw new Error(`Client not found for server: ${targetServerInfo.name}`);
      }

      // Use toolArgs if it has properties, otherwise fallback to request.params.arguments
      const finalArgs =
        toolArgs && Object.keys(toolArgs).length > 0 ? toolArgs : request.params.arguments || {};

      console.log(
        `Invoking tool '${toolName}' on server '${targetServerInfo.name}' with arguments: ${JSON.stringify(finalArgs)}`,
      );

      const result = await client.callTool({
        name: toolName,
        arguments: finalArgs,
      });

      console.log(`Tool invocation result: ${JSON.stringify(result)}`);
      return result;
    }

    // Regular tool handling
    const serverInfo = getServerByTool(request.params.name);
    if (!serverInfo) {
      throw new Error(`Server not found: ${request.params.name}`);
    }
    const client = serverInfo.client;
    if (!client) {
      throw new Error(`Client not found for server: ${request.params.name}`);
    }
    const result = await client.callTool(request.params);
    console.log(`Tool call result: ${JSON.stringify(result)}`);
    return result;
  } catch (error) {
    console.error(`Error handling CallToolRequest: ${error}`);
    return {
      content: [
        {
          type: 'text',
          text: `Error: ${error}`,
        },
      ],
      isError: true,
    };
  }
};

/**
 * Create a Server instance whose advertised name encodes the routing mode:
 * global (no group), single-server (`name_group`), or group (`name_group_group`).
 */
export const createMcpServer = (name: string, version: string, group?: string): Server => {
  // Determine server name based on routing type
  let serverName = name;

  if (group) {
    // Check if it's a group or a single server
    const serversInGroup = getServersInGroup(group);
    if (!serversInGroup || serversInGroup.length === 0) {
      // Single server routing
      serverName = `${name}_${group}`;
    } else {
      // Group routing
      serverName = `${name}_${group}_group`;
    }
  }
  // If no group, use default name (global routing)

  const server = new Server({ name: serverName, version }, { capabilities: { tools: {} } });
  server.setRequestHandler(ListToolsRequestSchema, handleListToolsRequest);
  server.setRequestHandler(CallToolRequestSchema, handleCallToolRequest);
  return server;
};
diff --git a/src/services/sseService.ts b/src/services/sseService.ts new file mode 100644 index
0000000000000000000000000000000000000000..30b881e8ebf45917bbfaa656d0db9c7cef3b7032 --- /dev/null +++ b/src/services/sseService.ts @@ -0,0 +1,187 @@ +import { Request, Response } from 'express'; +import { randomUUID } from 'node:crypto'; +import { Transport } from '@modelcontextprotocol/sdk/shared/transport.js'; +import { SSEServerTransport } from '@modelcontextprotocol/sdk/server/sse.js'; +import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js'; +import { isInitializeRequest } from '@modelcontextprotocol/sdk/types.js'; +import { deleteMcpServer, getMcpServer } from './mcpService.js'; +import { loadSettings } from '../config/index.js'; + +const transports: { [sessionId: string]: { transport: Transport; group: string } } = {}; + +export const getGroup = (sessionId: string): string => { + return transports[sessionId]?.group || ''; +}; + +// Helper function to validate bearer auth +const validateBearerAuth = (req: Request): boolean => { + const settings = loadSettings(); + const routingConfig = settings.systemConfig?.routing || { + enableGlobalRoute: true, + enableGroupNameRoute: true, + enableBearerAuth: false, + bearerAuthKey: '', + }; + + if (routingConfig.enableBearerAuth) { + const authHeader = req.headers.authorization; + if (!authHeader || !authHeader.startsWith('Bearer ')) { + return false; + } + + const token = authHeader.substring(7); // Remove "Bearer " prefix + return token === routingConfig.bearerAuthKey; + } + + return true; +}; + +export const handleSseConnection = async (req: Request, res: Response): Promise => { + // Check bearer auth + if (!validateBearerAuth(req)) { + res.status(401).send('Bearer authentication required or invalid token'); + return; + } + + const settings = loadSettings(); + const routingConfig = settings.systemConfig?.routing || { + enableGlobalRoute: true, + enableGroupNameRoute: true, + enableBearerAuth: false, + bearerAuthKey: '', + }; + const group = req.params.group; + + // Check if 
this is a global route (no group) and if it's allowed + if (!group && !routingConfig.enableGlobalRoute) { + res.status(403).send('Global routes are disabled. Please specify a group ID.'); + return; + } + + const transport = new SSEServerTransport('/messages', res); + transports[transport.sessionId] = { transport, group: group }; + + res.on('close', () => { + delete transports[transport.sessionId]; + deleteMcpServer(transport.sessionId); + console.log(`SSE connection closed: ${transport.sessionId}`); + }); + + console.log( + `New SSE connection established: ${transport.sessionId} with group: ${group || 'global'}`, + ); + await getMcpServer(transport.sessionId, group).connect(transport); +}; + +export const handleSseMessage = async (req: Request, res: Response): Promise => { + // Check bearer auth + if (!validateBearerAuth(req)) { + res.status(401).send('Bearer authentication required or invalid token'); + return; + } + + const sessionId = req.query.sessionId as string; + + // Validate sessionId + if (!sessionId) { + console.error('Missing sessionId in query parameters'); + res.status(400).send('Missing sessionId parameter'); + return; + } + + // Check if transport exists before destructuring + const transportData = transports[sessionId]; + if (!transportData) { + console.warn(`No transport found for sessionId: ${sessionId}`); + res.status(404).send('No transport found for sessionId'); + return; + } + + const { transport, group } = transportData; + req.params.group = group; + req.query.group = group; + console.log(`Received message for sessionId: ${sessionId} in group: ${group}`); + + await (transport as SSEServerTransport).handlePostMessage(req, res); +}; + +export const handleMcpPostRequest = async (req: Request, res: Response): Promise => { + const sessionId = req.headers['mcp-session-id'] as string | undefined; + const group = req.params.group; + console.log(`Handling MCP post request for sessionId: ${sessionId} and group: ${group}`); + // Check bearer auth + if 
(!validateBearerAuth(req)) { + res.status(401).send('Bearer authentication required or invalid token'); + return; + } + + const settings = loadSettings(); + const routingConfig = settings.systemConfig?.routing || { + enableGlobalRoute: true, + enableGroupNameRoute: true, + }; + if (!group && !routingConfig.enableGlobalRoute) { + res.status(403).send('Global routes are disabled. Please specify a group ID.'); + return; + } + + let transport: StreamableHTTPServerTransport; + if (sessionId && transports[sessionId]) { + console.log(`Reusing existing transport for sessionId: ${sessionId}`); + transport = transports[sessionId].transport as StreamableHTTPServerTransport; + } else if (!sessionId && isInitializeRequest(req.body)) { + transport = new StreamableHTTPServerTransport({ + sessionIdGenerator: () => randomUUID(), + onsessioninitialized: (sessionId) => { + transports[sessionId] = { transport, group }; + }, + }); + + transport.onclose = () => { + console.log(`Transport closed: ${transport.sessionId}`); + if (transport.sessionId) { + delete transports[transport.sessionId]; + deleteMcpServer(transport.sessionId); + console.log(`MCP connection closed: ${transport.sessionId}`); + } + }; + + console.log(`MCP connection established: ${transport.sessionId}`); + await getMcpServer(transport.sessionId, group).connect(transport); + } else { + res.status(400).json({ + jsonrpc: '2.0', + error: { + code: -32000, + message: 'Bad Request: No valid session ID provided', + }, + id: null, + }); + return; + } + + console.log(`Handling request using transport with type ${transport.constructor.name}`); + await transport.handleRequest(req, res, req.body); +}; + +export const handleMcpOtherRequest = async (req: Request, res: Response) => { + console.log('Handling MCP other request'); + // Check bearer auth + if (!validateBearerAuth(req)) { + res.status(401).send('Bearer authentication required or invalid token'); + return; + } + + const sessionId = req.headers['mcp-session-id'] as string | 
undefined; + if (!sessionId || !transports[sessionId]) { + res.status(400).send('Invalid or missing session ID'); + return; + } + + const { transport } = transports[sessionId]; + await (transport as StreamableHTTPServerTransport).handleRequest(req, res); +}; + +export const getConnectionCount = (): number => { + return Object.keys(transports).length; +}; diff --git a/src/services/vectorSearchService.ts b/src/services/vectorSearchService.ts new file mode 100644 index 0000000000000000000000000000000000000000..7008a3493c2382981edea914fcbdc56727e77619 --- /dev/null +++ b/src/services/vectorSearchService.ts @@ -0,0 +1,678 @@ +import { getRepositoryFactory } from '../db/index.js'; +import { VectorEmbeddingRepository } from '../db/repositories/index.js'; +import { ToolInfo } from '../types/index.js'; +import { getAppDataSource, initializeDatabase } from '../db/connection.js'; +import { getSmartRoutingConfig } from '../utils/smartRouting.js'; +import OpenAI from 'openai'; + +// Get OpenAI configuration from smartRouting settings or fallback to environment variables +const getOpenAIConfig = () => { + const smartRoutingConfig = getSmartRoutingConfig(); + return { + apiKey: smartRoutingConfig.openaiApiKey, + baseURL: smartRoutingConfig.openaiApiBaseUrl, + embeddingModel: smartRoutingConfig.openaiApiEmbeddingModel, + }; +}; + +// Constants for embedding models +const EMBEDDING_DIMENSIONS = 1536; // OpenAI's text-embedding-3-small outputs 1536 dimensions +const BGE_DIMENSIONS = 1024; // BAAI/bge-m3 outputs 1024 dimensions +const FALLBACK_DIMENSIONS = 100; // Fallback implementation uses 100 dimensions + +// Get dimensions for a model +const getDimensionsForModel = (model: string): number => { + if (model.includes('bge-m3')) { + return BGE_DIMENSIONS; + } else if (model.includes('text-embedding-3')) { + return EMBEDDING_DIMENSIONS; + } else if (model === 'fallback' || model === 'simple-hash') { + return FALLBACK_DIMENSIONS; + } + // Default to OpenAI dimensions + return 
EMBEDDING_DIMENSIONS; +}; + +// Initialize the OpenAI client with smartRouting configuration +const getOpenAIClient = () => { + const config = getOpenAIConfig(); + return new OpenAI({ + apiKey: config.apiKey, // Get API key from smartRouting settings or environment variables + baseURL: config.baseURL, // Get base URL from smartRouting settings or fallback to default + }); +}; + +/** + * Generate text embedding using OpenAI's embedding model + * + * NOTE: embeddings are 1536 dimensions by default. + * If you previously used the fallback implementation (100 dimensions), + * you may need to rebuild your vector database indices after switching. + * + * @param text Text to generate embeddings for + * @returns Promise with vector embedding as number array + */ +async function generateEmbedding(text: string): Promise { + try { + const config = getOpenAIConfig(); + const openai = getOpenAIClient(); + + // Check if API key is configured + if (!openai.apiKey) { + console.warn('OpenAI API key is not configured. Using fallback embedding method.'); + return generateFallbackEmbedding(text); + } + + // Truncate text if it's too long (OpenAI has token limits) + const truncatedText = text.length > 8000 ? 
text.substring(0, 8000) : text; + + // Call OpenAI's embeddings API + const response = await openai.embeddings.create({ + model: config.embeddingModel, // Modern model with better performance + input: truncatedText, + }); + + // Return the embedding + return response.data[0].embedding; + } catch (error) { + console.error('Error generating embedding:', error); + console.warn('Falling back to simple embedding method'); + return generateFallbackEmbedding(text); + } +} + +/** + * Fallback embedding function using a simple approach when OpenAI API is unavailable + * @param text Text to generate embeddings for + * @returns Vector embedding as number array + */ +function generateFallbackEmbedding(text: string): number[] { + const words = text.toLowerCase().split(/\s+/); + const vocabulary = [ + 'search', + 'find', + 'get', + 'fetch', + 'retrieve', + 'query', + 'map', + 'location', + 'weather', + 'file', + 'directory', + 'email', + 'message', + 'send', + 'create', + 'update', + 'delete', + 'browser', + 'web', + 'page', + 'click', + 'navigate', + 'screenshot', + 'automation', + 'database', + 'table', + 'record', + 'insert', + 'select', + 'schema', + 'data', + 'image', + 'photo', + 'video', + 'media', + 'upload', + 'download', + 'convert', + 'text', + 'document', + 'pdf', + 'excel', + 'word', + 'format', + 'parse', + 'api', + 'rest', + 'http', + 'request', + 'response', + 'json', + 'xml', + 'time', + 'date', + 'calendar', + 'schedule', + 'reminder', + 'clock', + 'math', + 'calculate', + 'number', + 'sum', + 'average', + 'statistics', + 'user', + 'account', + 'login', + 'auth', + 'permission', + 'role', + ]; + + // Create vector with fallback dimensions + const vector = new Array(FALLBACK_DIMENSIONS).fill(0); + + words.forEach((word) => { + const index = vocabulary.indexOf(word); + if (index >= 0 && index < vector.length) { + vector[index] += 1; + } + // Add some randomness based on word hash + const hash = word.split('').reduce((a, b) => a + b.charCodeAt(0), 0); + 
vector[hash % vector.length] += 0.1; + }); + + // Normalize the vector + const magnitude = Math.sqrt(vector.reduce((sum, val) => sum + val * val, 0)); + if (magnitude > 0) { + return vector.map((val) => val / magnitude); + } + + return vector; +} + +/** + * Save tool information as vector embeddings + * @param serverName Server name + * @param tools Array of tools to save + */ +export const saveToolsAsVectorEmbeddings = async ( + serverName: string, + tools: ToolInfo[], +): Promise => { + try { + const config = getOpenAIConfig(); + const vectorRepository = getRepositoryFactory( + 'vectorEmbeddings', + )() as VectorEmbeddingRepository; + + for (const tool of tools) { + // Create searchable text from tool information + const searchableText = [ + tool.name, + tool.description, + // Include input schema properties if available + ...(tool.inputSchema && typeof tool.inputSchema === 'object' + ? Object.keys(tool.inputSchema).filter((key) => key !== 'type' && key !== 'properties') + : []), + // Include schema property names if available + ...(tool.inputSchema && + tool.inputSchema.properties && + typeof tool.inputSchema.properties === 'object' + ? 
Object.keys(tool.inputSchema.properties) + : []), + ] + .filter(Boolean) + .join(' '); + + try { + // Generate embedding + const embedding = await generateEmbedding(searchableText); + + // Check database compatibility before saving + await checkDatabaseVectorDimensions(embedding.length); + + // Save embedding + await vectorRepository.saveEmbedding( + 'tool', + `${serverName}:${tool.name}`, + searchableText, + embedding, + { + serverName, + toolName: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + }, + config.embeddingModel, // Store the model used for this embedding + ); + } catch (toolError) { + console.error(`Error processing tool ${tool.name} for server ${serverName}:`, toolError); + // Continue with the next tool rather than failing the whole batch + } + } + + console.log(`Saved ${tools.length} tool embeddings for server: ${serverName}`); + } catch (error) { + console.error(`Error saving tool embeddings for server ${serverName}:`, error); + } +}; + +/** + * Search for tools using vector similarity + * @param query Search query text + * @param limit Maximum number of results to return + * @param threshold Similarity threshold (0-1) + * @param serverNames Optional array of server names to filter by + */ +export const searchToolsByVector = async ( + query: string, + limit: number = 10, + threshold: number = 0.7, + serverNames?: string[], +): Promise< + Array<{ + serverName: string; + toolName: string; + description: string; + inputSchema: any; + similarity: number; + searchableText: string; + }> +> => { + try { + const vectorRepository = getRepositoryFactory( + 'vectorEmbeddings', + )() as VectorEmbeddingRepository; + + // Search by text using vector similarity + const results = await vectorRepository.searchByText( + query, + generateEmbedding, + limit, + threshold, + ['tool'], + ); + + // Filter by server names if provided + let filteredResults = results; + if (serverNames && serverNames.length > 0) { + filteredResults = 
results.filter((result) => { + if (typeof result.embedding.metadata === 'string') { + try { + const parsedMetadata = JSON.parse(result.embedding.metadata); + return serverNames.includes(parsedMetadata.serverName); + } catch (error) { + return false; + } + } + return false; + }); + } + + // Transform results to a more useful format + return filteredResults.map((result) => { + // Check if we have metadata as a string that needs to be parsed + if (result.embedding?.metadata && typeof result.embedding.metadata === 'string') { + try { + // Parse the metadata string as JSON + const parsedMetadata = JSON.parse(result.embedding.metadata); + + if (parsedMetadata.serverName && parsedMetadata.toolName) { + // We have properly structured metadata + return { + serverName: parsedMetadata.serverName, + toolName: parsedMetadata.toolName, + description: parsedMetadata.description || '', + inputSchema: parsedMetadata.inputSchema || {}, + similarity: result.similarity, + searchableText: result.embedding.text_content, + }; + } + } catch (error) { + console.error('Error parsing metadata string:', error); + // Fall through to the extraction logic below + } + } + + // Extract tool info from text_content if metadata is not available or parsing failed + const textContent = result.embedding?.text_content || ''; + + // Extract toolName (first word of text_content) + const toolNameMatch = textContent.match(/^(\S+)/); + const toolName = toolNameMatch ? toolNameMatch[1] : ''; + + // Extract serverName from toolName if it follows the pattern "serverName_toolPart" + const serverNameMatch = toolName.match(/^([^_]+)_/); + const serverName = serverNameMatch ? 
serverNameMatch[1] : 'unknown'; + + // Extract description (everything after the first word) + const description = textContent.replace(/^\S+\s*/, '').trim(); + + return { + serverName, + toolName, + description, + inputSchema: {}, + similarity: result.similarity, + searchableText: textContent, + }; + }); + } catch (error) { + console.error('Error searching tools by vector:', error); + return []; + } +}; + +/** + * Get all available tools in vector database + * @param serverNames Optional array of server names to filter by + */ +export const getAllVectorizedTools = async ( + serverNames?: string[], +): Promise< + Array<{ + serverName: string; + toolName: string; + description: string; + inputSchema: any; + }> +> => { + try { + const config = getOpenAIConfig(); + const vectorRepository = getRepositoryFactory( + 'vectorEmbeddings', + )() as VectorEmbeddingRepository; + + // Try to determine what dimension our database is using + let dimensionsToUse = getDimensionsForModel(config.embeddingModel); // Default based on the model selected + + try { + const result = await getAppDataSource().query(` + SELECT atttypmod as dimensions + FROM pg_attribute + WHERE attrelid = 'vector_embeddings'::regclass + AND attname = 'embedding' + `); + + if (result && result.length > 0 && result[0].dimensions) { + const rawValue = result[0].dimensions; + + if (rawValue === -1) { + // No type modifier specified + dimensionsToUse = getDimensionsForModel(config.embeddingModel); + } else { + // For this version of pgvector, atttypmod stores the dimension value directly + dimensionsToUse = rawValue; + } + } + } catch (error: any) { + console.warn('Could not determine vector dimensions from database:', error?.message); + } + + // Get all tool embeddings + const results = await vectorRepository.searchSimilar( + new Array(dimensionsToUse).fill(0), // Zero vector with dimensions matching the database + 1000, // Large limit + -1, // No threshold (get all) + ['tool'], + ); + + // Filter by server names 
if provided + let filteredResults = results; + if (serverNames && serverNames.length > 0) { + filteredResults = results.filter((result) => { + if (typeof result.embedding.metadata === 'string') { + try { + const parsedMetadata = JSON.parse(result.embedding.metadata); + return serverNames.includes(parsedMetadata.serverName); + } catch (error) { + return false; + } + } + return false; + }); + } + + // Transform results + return filteredResults.map((result) => { + if (typeof result.embedding.metadata === 'string') { + try { + const parsedMetadata = JSON.parse(result.embedding.metadata); + return { + serverName: parsedMetadata.serverName, + toolName: parsedMetadata.toolName, + description: parsedMetadata.description, + inputSchema: parsedMetadata.inputSchema, + }; + } catch (error) { + console.error('Error parsing metadata string:', error); + return { + serverName: 'unknown', + toolName: 'unknown', + description: '', + inputSchema: {}, + }; + } + } + return { + serverName: 'unknown', + toolName: 'unknown', + description: '', + inputSchema: {}, + }; + }); + } catch (error) { + console.error('Error getting all vectorized tools:', error); + return []; + } +}; + +/** + * Remove tool embeddings for a server + * @param serverName Server name + */ +export const removeServerToolEmbeddings = async (serverName: string): Promise => { + try { + const vectorRepository = getRepositoryFactory( + 'vectorEmbeddings', + )() as VectorEmbeddingRepository; + + // Note: This would require adding a delete method to VectorEmbeddingRepository + // For now, we'll log that this functionality needs to be implemented + console.log(`TODO: Remove tool embeddings for server: ${serverName}`); + } catch (error) { + console.error(`Error removing tool embeddings for server ${serverName}:`, error); + } +}; + +/** + * Sync all server tools embeddings when smart routing is first enabled + * This function will scan all currently connected servers and save their tools as vector embeddings + */ +export const 
syncAllServerToolsEmbeddings = async (): Promise => { + try { + console.log('Starting synchronization of all server tools embeddings...'); + + // Import getServersInfo to get all server information + const { getServersInfo } = await import('./mcpService.js'); + + const servers = getServersInfo(); + let totalToolsSynced = 0; + let serversSynced = 0; + + for (const server of servers) { + if (server.status === 'connected' && server.tools && server.tools.length > 0) { + try { + console.log(`Syncing tools for server: ${server.name} (${server.tools.length} tools)`); + await saveToolsAsVectorEmbeddings(server.name, server.tools); + totalToolsSynced += server.tools.length; + serversSynced++; + } catch (error) { + console.error(`Failed to sync tools for server ${server.name}:`, error); + } + } else if (server.status === 'connected' && (!server.tools || server.tools.length === 0)) { + console.log(`Server ${server.name} is connected but has no tools to sync`); + } else { + console.log(`Skipping server ${server.name} (status: ${server.status})`); + } + } + + console.log( + `Smart routing tools sync completed: synced ${totalToolsSynced} tools from ${serversSynced} servers`, + ); + } catch (error) { + console.error('Error during smart routing tools synchronization:', error); + throw error; + } +}; + +/** + * Check database vector dimensions and ensure compatibility + * @param dimensionsNeeded The number of dimensions required + * @returns Promise that resolves when check is complete + */ +async function checkDatabaseVectorDimensions(dimensionsNeeded: number): Promise { + try { + // First check if database is initialized + if (!getAppDataSource().isInitialized) { + console.info('Database not initialized, initializing...'); + await initializeDatabase(); + } + + // Check current vector dimension in the database + // First try to get vector type info directly + let vectorTypeInfo; + try { + vectorTypeInfo = await getAppDataSource().query(` + SELECT + atttypmod, + 
format_type(atttypid, atttypmod) as formatted_type + FROM pg_attribute + WHERE attrelid = 'vector_embeddings'::regclass + AND attname = 'embedding' + `); + } catch (error) { + console.warn('Could not get vector type info, falling back to atttypmod query'); + } + + // Fallback to original query + const result = await getAppDataSource().query(` + SELECT atttypmod as dimensions + FROM pg_attribute + WHERE attrelid = 'vector_embeddings'::regclass + AND attname = 'embedding' + `); + + let currentDimensions = 0; + + // Parse dimensions from result + if (result && result.length > 0 && result[0].dimensions) { + if (vectorTypeInfo && vectorTypeInfo.length > 0) { + // Try to extract dimensions from formatted type like "vector(1024)" + const match = vectorTypeInfo[0].formatted_type?.match(/vector\((\d+)\)/); + if (match) { + currentDimensions = parseInt(match[1]); + } + } + + // If we couldn't extract from formatted type, use the atttypmod value directly + if (currentDimensions === 0) { + const rawValue = result[0].dimensions; + + if (rawValue === -1) { + // No type modifier specified + currentDimensions = 0; + } else { + // For this version of pgvector, atttypmod stores the dimension value directly + currentDimensions = rawValue; + } + } + } + + // Also check the dimensions stored in actual records for validation + try { + const recordCheck = await getAppDataSource().query(` + SELECT dimensions, model, COUNT(*) as count + FROM vector_embeddings + GROUP BY dimensions, model + ORDER BY count DESC + LIMIT 5 + `); + + if (recordCheck && recordCheck.length > 0) { + // If we couldn't determine dimensions from schema, use the most common dimension from records + if (currentDimensions === 0 && recordCheck[0].dimensions) { + currentDimensions = recordCheck[0].dimensions; + } + } + } catch (error) { + console.warn('Could not check dimensions from actual records:', error); + } + + // If no dimensions are set or they don't match what we need, handle the mismatch + if (currentDimensions 
=== 0 || currentDimensions !== dimensionsNeeded) { + console.log( + `Vector dimensions mismatch: database=${currentDimensions}, needed=${dimensionsNeeded}`, + ); + + if (currentDimensions === 0) { + console.log('Setting up vector dimensions for the first time...'); + } else { + console.log('Dimension mismatch detected. Clearing existing incompatible vector data...'); + + // Clear all existing vector embeddings with mismatched dimensions + await clearMismatchedVectorData(dimensionsNeeded); + } + + // Drop any existing indices first + await getAppDataSource().query(`DROP INDEX IF EXISTS idx_vector_embeddings_embedding;`); + + // Alter the column type with the new dimensions + await getAppDataSource().query(` + ALTER TABLE vector_embeddings + ALTER COLUMN embedding TYPE vector(${dimensionsNeeded}); + `); + + // Create a new index with better error handling + try { + await getAppDataSource().query(` + CREATE INDEX idx_vector_embeddings_embedding + ON vector_embeddings USING ivfflat (embedding vector_cosine_ops) WITH (lists = 100); + `); + } catch (indexError: any) { + // If the index already exists (code 42P07) or there's a duplicate key constraint (code 23505), + // it's not a critical error as the index is already there + if (indexError.code === '42P07' || indexError.code === '23505') { + console.log('Index already exists, continuing...'); + } else { + console.warn('Warning: Failed to create index, but continuing:', indexError.message); + } + } + + console.log(`Successfully configured vector dimensions to ${dimensionsNeeded}`); + } + } catch (error: any) { + console.error('Error checking/updating vector dimensions:', error); + throw new Error(`Vector dimension check failed: ${error?.message || 'Unknown error'}`); + } +} + +/** + * Clear vector embeddings with mismatched dimensions + * @param expectedDimensions The expected dimensions + * @returns Promise that resolves when cleanup is complete + */ +async function clearMismatchedVectorData(expectedDimensions: number): 
Promise { + try { + console.log( + `Clearing vector embeddings with dimensions different from ${expectedDimensions}...`, + ); + + // Delete all embeddings that don't match the expected dimensions + await getAppDataSource().query( + ` + DELETE FROM vector_embeddings + WHERE dimensions != $1 + `, + [expectedDimensions], + ); + + console.log('Successfully cleared mismatched vector embeddings'); + } catch (error: any) { + console.error('Error clearing mismatched vector data:', error); + throw error; + } +} diff --git a/src/types/index.ts b/src/types/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..b3f5f3012d24cb84115cd97e2054520d384adcba --- /dev/null +++ b/src/types/index.ts @@ -0,0 +1,140 @@ +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; +import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js'; +import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js'; +import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; +import { SmartRoutingConfig } from '../utils/smartRouting.js'; + +// User interface +export interface IUser { + username: string; + password: string; + isAdmin?: boolean; +} + +// Group interface for server grouping +export interface IGroup { + id: string; // Unique UUID for the group + name: string; // Display name of the group + description?: string; // Optional description of the group + servers: string[]; // Array of server names that belong to this group +} + +// Market server types +export interface MarketServerRepository { + type: string; + url: string; +} + +export interface MarketServerAuthor { + name: string; +} + +export interface MarketServerInstallation { + type: string; + command: string; + args: string[]; + env?: Record; +} + +export interface MarketServerArgument { + description: string; + required: boolean; + example: string; +} + +export interface MarketServerExample { + title: string; + description: 
string; + prompt: string; +} + +export interface MarketServerTool { + name: string; + description: string; + inputSchema: Record; +} + +export interface MarketServer { + name: string; + display_name: string; + description: string; + repository: MarketServerRepository; + homepage: string; + author: MarketServerAuthor; + license: string; + categories: string[]; + tags: string[]; + examples: MarketServerExample[]; + installations: { + [key: string]: MarketServerInstallation; + }; + arguments: Record; + tools: MarketServerTool[]; + is_official?: boolean; +} + +// Represents the settings for MCP servers +export interface McpSettings { + users?: IUser[]; // Array of user credentials and permissions + mcpServers: { + [key: string]: ServerConfig; // Key-value pairs of server names and their configurations + }; + groups?: IGroup[]; // Array of server groups + systemConfig?: { + routing?: { + enableGlobalRoute?: boolean; // Controls whether the /sse endpoint without group is enabled + enableGroupNameRoute?: boolean; // Controls whether group routing by name is allowed + enableBearerAuth?: boolean; // Controls whether bearer auth is enabled for group routes + bearerAuthKey?: string; // The bearer auth key to validate against + }; + install?: { + pythonIndexUrl?: string; // Python package repository URL (UV_DEFAULT_INDEX) + npmRegistry?: string; // NPM registry URL (npm_config_registry) + }; + smartRouting?: SmartRoutingConfig; + // Add other system configuration sections here in the future + }; +} + +// Configuration details for an individual server +export interface ServerConfig { + type?: 'stdio' | 'sse' | 'streamable-http'; // Type of server + url?: string; // URL for SSE or streamable HTTP servers + command?: string; // Command to execute for stdio-based servers + args?: string[]; // Arguments for the command + env?: Record; // Environment variables + headers?: Record; // HTTP headers for SSE/streamable-http servers + enabled?: boolean; // Flag to enable/disable the 
server +} + +// Information about a server's status and tools +export interface ServerInfo { + name: string; // Unique name of the server + status: 'connected' | 'connecting' | 'disconnected'; // Current connection status + error: string | null; // Error message if any + tools: ToolInfo[]; // List of tools available on the server + client?: Client; // Client instance for communication + transport?: SSEClientTransport | StdioClientTransport | StreamableHTTPClientTransport; // Transport mechanism used + createTime: number; // Timestamp of when the server was created + enabled?: boolean; // Flag to indicate if the server is enabled +} + +// Details about a tool available on the server +export interface ToolInfo { + name: string; // Name of the tool + description: string; // Brief description of the tool + inputSchema: Record; // Input schema for the tool +} + +// Standardized API response structure +export interface ApiResponse { + success: boolean; // Indicates if the operation was successful + message?: string; // Optional message providing additional details + data?: T; // Optional data payload +} + +// Request payload for adding a new server +export interface AddServerRequest { + name: string; // Name of the server to add + config: ServerConfig; // Configuration details for the server +} diff --git a/src/utils/path.ts b/src/utils/path.ts new file mode 100644 index 0000000000000000000000000000000000000000..b8dbdde7a41163eca19010b8f1b265bdbd0ebb5f --- /dev/null +++ b/src/utils/path.ts @@ -0,0 +1,43 @@ +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; +import { dirname } from 'path'; + +// Get current file's directory +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); +// Project root directory should be the parent directory of src +const rootDir = dirname(dirname(__dirname)); + +/** + * Find the path to a configuration file by checking multiple potential locations. 
+ * @param filename The name of the file to locate (e.g., 'servers.json', 'mcp_settings.json') + * @param description Brief description of the file for logging purposes + * @returns The path to the file + */ +export const getConfigFilePath = (filename: string, description = 'Configuration'): string => { + const envPath = process.env.MCPHUB_SETTING_PATH; + const potentialPaths = [ + ...(envPath ? [envPath] : []), + // Prioritize process.cwd() as the first location to check + path.resolve(process.cwd(), filename), + // Use path relative to the root directory + path.join(rootDir, filename), + // If installed with npx, may need to look one level up + path.join(dirname(rootDir), filename) + ]; + + for (const filePath of potentialPaths) { + if (fs.existsSync(filePath)) { + return filePath; + } + } + + // If all paths do not exist, use default path + // Using the default path is acceptable because it ensures the application can proceed + // even if the configuration file is missing. This fallback is particularly useful in + // development environments or when the file is optional. + const defaultPath = path.resolve(process.cwd(), filename); + console.debug(`${description} file not found at any expected location, using default path: ${defaultPath}`); + return defaultPath; +}; \ No newline at end of file diff --git a/src/utils/smartRouting.ts b/src/utils/smartRouting.ts new file mode 100644 index 0000000000000000000000000000000000000000..d1bda6a2a7ae299e5e166bf515bc9753f09cd1df --- /dev/null +++ b/src/utils/smartRouting.ts @@ -0,0 +1,143 @@ +import { loadSettings, expandEnvVars } from '../config/index.js'; + +/** + * Smart routing configuration interface + */ +export interface SmartRoutingConfig { + enabled: boolean; + dbUrl: string; + openaiApiBaseUrl: string; + openaiApiKey: string; + openaiApiEmbeddingModel: string; +} + +/** + * Gets the complete smart routing configuration from environment variables and settings. + * + * Priority order for each setting: + * 1. 
Specific environment variables (ENABLE_SMART_ROUTING, SMART_ROUTING_ENABLED, etc.) + * 2. Generic environment variables (OPENAI_API_KEY, DATABASE_URL, etc.) + * 3. Settings configuration (systemConfig.smartRouting) + * 4. Default values + * + * @returns {SmartRoutingConfig} Complete smart routing configuration + */ +export function getSmartRoutingConfig(): SmartRoutingConfig { + let settings = loadSettings(); + const smartRoutingSettings: Partial = + settings.systemConfig?.smartRouting || {}; + + return { + // Enabled status - check multiple environment variables + enabled: getConfigValue( + [process.env.SMART_ROUTING_ENABLED], + smartRoutingSettings.enabled, + false, + parseBooleanEnvVar, + ), + + // Database configuration + dbUrl: getConfigValue([process.env.DB_URL], smartRoutingSettings.dbUrl, '', expandEnvVars), + + // OpenAI API configuration + openaiApiBaseUrl: getConfigValue( + [process.env.OPENAI_API_BASE_URL], + smartRoutingSettings.openaiApiBaseUrl, + 'https://api.openai.com/v1', + expandEnvVars, + ), + + openaiApiKey: getConfigValue( + [process.env.OPENAI_API_KEY], + smartRoutingSettings.openaiApiKey, + '', + expandEnvVars, + ), + + openaiApiEmbeddingModel: getConfigValue( + [process.env.OPENAI_API_EMBEDDING_MODEL], + smartRoutingSettings.openaiApiEmbeddingModel, + 'text-embedding-3-small', + expandEnvVars, + ), + }; +} + +/** + * Gets a configuration value with priority order: environment variables > settings > default. 
+ * + * @param {(string | undefined)[]} envVars - Array of environment variable names to check in order + * @param {any} settingsValue - Value from settings configuration + * @param {any} defaultValue - Default value to use if no other value is found + * @param {Function} transformer - Function to transform the final value to the correct type + * @returns {any} The configuration value with the appropriate transformation applied + */ +function getConfigValue( + envVars: (string | undefined)[], + settingsValue: any, + defaultValue: T, + transformer: (value: any) => T, +): T { + // Check environment variables in order + for (const envVar of envVars) { + if (envVar !== undefined && envVar !== null && envVar !== '') { + try { + return transformer(envVar); + } catch (error) { + console.warn(`Failed to transform environment variable "${envVar}":`, error); + continue; + } + } + } + + // Check settings value + if (settingsValue !== undefined && settingsValue !== null) { + try { + return transformer(settingsValue); + } catch (error) { + console.warn('Failed to transform settings value:', error); + } + } + + // Return default value + return defaultValue; +} + +/** + * Parses a string environment variable value to a boolean. 
+ * Supports common boolean representations: true/false, 1/0, yes/no, on/off + * + * @param {string} value - The environment variable value to parse + * @returns {boolean} The parsed boolean value + */ +function parseBooleanEnvVar(value: string): boolean { + if (typeof value === 'boolean') { + return value; + } + + if (typeof value !== 'string') { + return false; + } + + const normalized = value.toLowerCase().trim(); + + // Handle common truthy values + if (normalized === 'true' || normalized === '1' || normalized === 'yes' || normalized === 'on') { + return true; + } + + // Handle common falsy values + if ( + normalized === 'false' || + normalized === '0' || + normalized === 'no' || + normalized === 'off' || + normalized === '' + ) { + return false; + } + + // Default to false for unrecognized values + console.warn(`Unrecognized boolean value for smart routing: "${value}", defaulting to false`); + return false; +} diff --git a/src/utils/version.ts b/src/utils/version.ts new file mode 100644 index 0000000000000000000000000000000000000000..cd78e060438823854ee4e1985d111e0ddf622dbb --- /dev/null +++ b/src/utils/version.ts @@ -0,0 +1,23 @@ +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +// Get the directory name in ESM +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +/** + * Gets the package version from package.json + * @returns The version string from package.json, or 'dev' if not found + */ +export const getPackageVersion = (): string => { + try { + const packageJsonPath = path.resolve(__dirname, '../../package.json'); + const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); + const packageJson = JSON.parse(packageJsonContent); + return packageJson.version || 'dev'; + } catch (error) { + console.error('Error reading package version:', error); + return 'dev'; + } +}; diff --git a/tsconfig.json b/tsconfig.json new file mode 100644 index 
0000000000000000000000000000000000000000..227cf73ed3947338fb2a23869e550522f223aff2 --- /dev/null +++ b/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "target": "ES2020", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "outDir": "./dist", + "rootDir": "./src", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "sourceMap": true, + "experimentalDecorators": true, + "emitDecoratorMetadata": true, + "strictPropertyInitialization": false + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "**/*.test.ts", "dist"] +}