diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..d600b6c76dd93f7b2472160d42b2797cae50c8e5
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,25 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
+
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000000000000000000000000000000000000..a78447ebf932f1bb3a5b124b472bea8b3a86f80f
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,7 @@
+[*]
+charset = utf-8
+insert_final_newline = true
+end_of_line = lf
+indent_style = space
+indent_size = 2
+max_line_length = 80
\ No newline at end of file
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000000000000000000000000000000000000..c56d875694b48331939a8946dc1c8b956fca0e0d
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,33 @@
+# A comma-separated list of access keys. Example: `ACCESS_KEYS="ABC123,JUD71F,HUWE3"`. Leave blank for unrestricted access.
+ACCESS_KEYS=""
+
+# The timeout in hours for access key validation. Set to 0 to require validation on every page load.
+ACCESS_KEY_TIMEOUT_HOURS="24"
+
+# The default model ID for WebLLM with F16 shaders.
+WEBLLM_DEFAULT_F16_MODEL_ID="Qwen3-1.7B-q4f16_1-MLC"
+
+# The default model ID for WebLLM with F32 shaders.
+WEBLLM_DEFAULT_F32_MODEL_ID="Qwen3-1.7B-q4f32_1-MLC"
+
+# The default model ID for Wllama.
+WLLAMA_DEFAULT_MODEL_ID="qwen-3-0.6b"
+
+# The base URL for the internal OpenAI compatible API. Example: `INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL="https://api.openai.com/v1"`. Leave blank to disable internal OpenAI compatible API.
+INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL=""
+
+# The access key for the internal OpenAI compatible API.
+INTERNAL_OPENAI_COMPATIBLE_API_KEY=""
+
+# The model for the internal OpenAI compatible API.
+INTERNAL_OPENAI_COMPATIBLE_API_MODEL=""
+
+# The name of the internal OpenAI compatible API, displayed in the UI.
+INTERNAL_OPENAI_COMPATIBLE_API_NAME="Internal API"
+
+# The type of inference to use by default. The possible values are:
+# "browser" -> In the browser (Private)
+# "openai" -> Remote Server (API)
+# "horde" -> AI Horde (Pre-configured)
+# "internal" -> $INTERNAL_OPENAI_COMPATIBLE_API_NAME
+DEFAULT_INFERENCE_TYPE="browser"
diff --git a/.github/hf-space-config.yml b/.github/hf-space-config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6d9d5b00160bb6ad09b8a92ab74ab6cd0a360b3b
--- /dev/null
+++ b/.github/hf-space-config.yml
@@ -0,0 +1,11 @@
+title: MiniSearch
+emoji: 👌🔍
+colorFrom: yellow
+colorTo: yellow
+sdk: docker
+short_description: Minimalist web-searching app with browser-based AI assistant
+pinned: true
+custom_headers:
+ cross-origin-embedder-policy: require-corp
+ cross-origin-opener-policy: same-origin
+ cross-origin-resource-policy: cross-origin
diff --git a/.github/workflows/ai-review.yml b/.github/workflows/ai-review.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3ea93f7fd75468eac5a545dd6d98aa66bf543e6f
--- /dev/null
+++ b/.github/workflows/ai-review.yml
@@ -0,0 +1,136 @@
+name: Review Pull Request with AI
+
+on:
+ pull_request:
+ types: [opened, synchronize, reopened]
+ branches: ["main"]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ ai-review:
+ if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-ai-review') }}
+ continue-on-error: true
+ runs-on: ubuntu-latest
+ name: AI Review
+ permissions:
+ pull-requests: write
+ contents: read
+ timeout-minutes: 30
+ steps:
+ - name: Checkout Repository
+ uses: actions/checkout@v4
+
+ - name: Create temporary directory
+ run: mkdir -p /tmp/pr_review
+
+ - name: Process PR description
+ id: process_pr
+ run: |
+ PR_BODY_ESCAPED=$(cat << 'EOF'
+ ${{ github.event.pull_request.body }}
+ EOF
+ )
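+          # Strip markdown links from the PR body, keeping only the link text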
+ PROCESSED_BODY=$(echo "$PR_BODY_ESCAPED" | sed -E 's/\[(.*?)\]\(.*?\)/\1/g')
+ echo "$PROCESSED_BODY" > /tmp/pr_review/processed_body.txt
+
+ - name: Fetch branches and output the diff
+ run: |
+ git fetch origin main:main
+ git fetch origin pull/${{ github.event.pull_request.number }}/head:pr-branch
+ git diff main..pr-branch > /tmp/pr_review/diff.txt
+
+ - name: Prepare review request
+ id: prepare_request
+ run: |
+ PR_TITLE=$(echo "${{ github.event.pull_request.title }}" | sed 's/[()]/\\&/g')
+ DIFF_CONTENT=$(cat /tmp/pr_review/diff.txt)
+ PROCESSED_BODY=$(cat /tmp/pr_review/processed_body.txt)
+
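+          # Build the OpenRouter chat-completions request body as JSON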
+ jq -n \
+ --arg model "${{ vars.OPENROUTER_MODEL }}" \
+ --arg http_referer "${{ github.event.repository.html_url }}" \
+ --arg title "${{ github.event.repository.name }}" \
+ --arg system "You are an experienced developer reviewing a Pull Request. You focus only on what matters and provide concise, actionable feedback.
+
+ Review Context:
+ Repository Name: \"${{ github.event.repository.name }}\"
+ Repository Description: \"${{ github.event.repository.description }}\"
+ Branch: \"${{ github.event.pull_request.head.ref }}\"
+ PR Title: \"$PR_TITLE\"
+
+ Guidelines:
+ 1. Only comment on issues that:
+ - Could cause bugs or security issues
+ - Significantly impact performance
+ - Make the code harder to maintain
+ - Violate critical best practices
+
+ 2. For each issue:
+ - Point to the specific line/file
+ - Explain why it's a problem
+ - Suggest a concrete fix
+
+ 3. Praise exceptional solutions briefly, only if truly innovative
+
+ 4. Skip commenting on:
+ - Minor style issues
+ - Obvious changes
+ - Working code that could be marginally improved
+ - Things that are just personal preference
+
+ Remember:
+ Less is more. If the code is good and working, just say so, with a short message." \
+ --arg user "This is the description of the pull request:
+ \`\`\`markdown
+ $PROCESSED_BODY
+ \`\`\`
+
+ And here is the diff of the changes, for you to review:
+ \`\`\`diff
+ $DIFF_CONTENT
+ \`\`\`" \
+ '{
+ "model": $model,
+ "messages": [
+ {"role": "system", "content": $system},
+ {"role": "user", "content": $user}
+ ],
+ "temperature": 0.7,
+ "top_p": 0.9,
+ "min_p": 0.1,
+ "extra_headers": {
+ "HTTP-Referer": $http_referer,
+ "X-Title": $title
+ }
+ }' > /tmp/pr_review/request.json
+
+ - name: Get AI Review
+ id: ai_review
+ run: |
+ RESPONSE=$(curl -s https://openrouter.ai/api/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer ${{ secrets.OPENROUTER_API_KEY }}" \
+ -d @/tmp/pr_review/request.json)
+
+ echo "### Review" > /tmp/pr_review/response.txt
+ echo "" >> /tmp/pr_review/response.txt
+ echo "$RESPONSE" | jq -r '.choices[0].message.content' >> /tmp/pr_review/response.txt
+
+ - name: Find Comment
+ uses: peter-evans/find-comment@v3
+ id: find_comment
+ with:
+ issue-number: ${{ github.event.pull_request.number }}
+ comment-author: "github-actions[bot]"
+ body-includes: "### Review"
+
+ - name: Post or Update PR Review
+ uses: peter-evans/create-or-update-comment@v4
+ with:
+ comment-id: ${{ steps.find_comment.outputs.comment-id }}
+ issue-number: ${{ github.event.pull_request.number }}
+ body-path: /tmp/pr_review/response.txt
+ edit-mode: replace
diff --git a/.github/workflows/deploy-to-hugging-face.yml b/.github/workflows/deploy-to-hugging-face.yml
new file mode 100644
index 0000000000000000000000000000000000000000..43ff49b9d2f372d55da2a3c408f40b3ca0bca5fa
--- /dev/null
+++ b/.github/workflows/deploy-to-hugging-face.yml
@@ -0,0 +1,20 @@
+name: Deploy to Hugging Face
+
+on:
+ workflow_dispatch:
+
+jobs:
+ sync-to-hf:
+ name: Sync to Hugging Face Spaces
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ lfs: true
+ - uses: JacobLinCool/huggingface-sync@v1
+ with:
+ github: ${{ secrets.GITHUB_TOKEN }}
+ user: ${{ vars.HF_SPACE_OWNER }}
+ space: ${{ vars.HF_SPACE_NAME }}
+ token: ${{ secrets.HF_TOKEN }}
+ configuration: ".github/hf-space-config.yml"
diff --git a/.github/workflows/on-pull-request-to-main.yml b/.github/workflows/on-pull-request-to-main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6eae98e615c1c1f2c899a9a5f1d785dd3883ff62
--- /dev/null
+++ b/.github/workflows/on-pull-request-to-main.yml
@@ -0,0 +1,9 @@
+name: On Pull Request To Main
+on:
+ pull_request:
+ types: [opened, synchronize, reopened]
+ branches: ["main"]
+jobs:
+ test-lint-ping:
+ if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-test-lint-ping') }}
+ uses: ./.github/workflows/reusable-test-lint-ping.yml
diff --git a/.github/workflows/on-push-to-main.yml b/.github/workflows/on-push-to-main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8ce693215c4351bab8b54ccac302345e1202ba03
--- /dev/null
+++ b/.github/workflows/on-push-to-main.yml
@@ -0,0 +1,7 @@
+name: On Push To Main
+on:
+ push:
+ branches: ["main"]
+jobs:
+ test-lint-ping:
+ uses: ./.github/workflows/reusable-test-lint-ping.yml
diff --git a/.github/workflows/publish-docker-image.yml b/.github/workflows/publish-docker-image.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d9b2b4a3d7a51a316c5a72e8a2243ec20181c248
--- /dev/null
+++ b/.github/workflows/publish-docker-image.yml
@@ -0,0 +1,39 @@
+name: Publish Docker Image
+
+on:
+ workflow_dispatch:
+
+jobs:
+ build-and-push-image:
+ name: Publish Docker Image to GitHub Packages
+ runs-on: ubuntu-latest
+ env:
+ REGISTRY: ghcr.io
+ IMAGE_NAME: ${{ github.repository }}
+ permissions:
+ contents: read
+ packages: write
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ - name: Log in to the Container registry
+ uses: docker/login-action@v3
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Extract metadata (tags, labels) for Docker
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ - name: Build and push Docker Image
+ uses: docker/build-push-action@v6
+ with:
+ context: .
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ platforms: linux/amd64,linux/arm64
diff --git a/.github/workflows/reusable-test-lint-ping.yml b/.github/workflows/reusable-test-lint-ping.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9fdf4da857b8756180e2540d110b9d3fabe3369b
--- /dev/null
+++ b/.github/workflows/reusable-test-lint-ping.yml
@@ -0,0 +1,26 @@
+on:
+ workflow_call:
+jobs:
+ check-code-quality:
+ name: Check Code Quality
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 20
+ cache: "npm"
+ - run: npm ci --ignore-scripts
+ - run: npm test
+ - run: npm run lint
+ check-docker-container:
+ needs: [check-code-quality]
+ name: Check Docker Container
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - run: docker compose -f docker-compose.production.yml up -d
+ - name: Check if main page is available
+ run: until curl -s -o /dev/null -w "%{http_code}" localhost:7860 | grep 200; do sleep 1; done
+ timeout-minutes: 1
+ - run: docker compose -f docker-compose.production.yml down
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f1b26f1ea73cad18af0078381a02bbc532714a0a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+node_modules
+.DS_Store
+/client/dist
+/server/models
+.vscode
+/vite-build-stats.html
+.env
diff --git a/.husky/pre-commit b/.husky/pre-commit
new file mode 100644
index 0000000000000000000000000000000000000000..2312dc587f61186ccf0d627d678d851b9eef7b82
--- /dev/null
+++ b/.husky/pre-commit
@@ -0,0 +1 @@
+npx lint-staged
diff --git a/.npmrc b/.npmrc
new file mode 100644
index 0000000000000000000000000000000000000000..80bcbed90c4f2b3d895d5086dc775e1bd8b32b43
--- /dev/null
+++ b/.npmrc
@@ -0,0 +1 @@
+legacy-peer-deps = true
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..bc736b4098848a063a966af24b48a04b93b5f18e
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,95 @@
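+# Build stage: compile llama.cpp's llama-server and collect its shared libraries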
+FROM node:lts AS llama-builder
+
+ARG LLAMA_CPP_RELEASE_TAG="b5595"
+
+RUN apt-get update && apt-get install -y \
+ build-essential \
+ cmake \
+ ccache \
+ git \
+ curl
+
+RUN cd /tmp && \
+ git clone https://github.com/ggerganov/llama.cpp.git && \
+ cd llama.cpp && \
+ git checkout $LLAMA_CPP_RELEASE_TAG && \
+ cmake -B build -DGGML_NATIVE=OFF -DLLAMA_CURL=OFF && \
+ cmake --build build --config Release -j --target llama-server && \
+ mkdir -p /usr/local/lib/llama && \
+ find build -type f \( -name "libllama.so" -o -name "libmtmd.so" -o -name "libggml.so" -o -name "libggml-base.so" -o -name "libggml-cpu.so" \) -exec cp {} /usr/local/lib/llama/ \;
+
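+# Runtime stage: Node.js app, SearXNG, and the prebuilt llama-server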
+FROM node:lts
+
+ENV PORT=7860
+EXPOSE $PORT
+
+ARG USERNAME=node
+ARG HOME_DIR=/home/${USERNAME}
+ARG APP_DIR=${HOME_DIR}/app
+
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends \
+ python3 \
+ python3-venv && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN mkdir -p /usr/local/searxng /etc/searxng && \
+ chown -R ${USERNAME}:${USERNAME} /usr/local/searxng /etc/searxng && \
+ chmod 755 /etc/searxng
+
+WORKDIR /usr/local/searxng
+RUN python3 -m venv searxng-venv && \
+ chown -R ${USERNAME}:${USERNAME} /usr/local/searxng/searxng-venv && \
+ /usr/local/searxng/searxng-venv/bin/pip install --upgrade pip && \
+ /usr/local/searxng/searxng-venv/bin/pip install wheel setuptools pyyaml lxml
+
+RUN git clone https://github.com/searxng/searxng.git /usr/local/searxng/searxng-src && \
+ chown -R ${USERNAME}:${USERNAME} /usr/local/searxng/searxng-src
+
+ARG SEARXNG_SETTINGS_PATH="/etc/searxng/settings.yml"
+
+WORKDIR /usr/local/searxng/searxng-src
+RUN cp searx/settings.yml $SEARXNG_SETTINGS_PATH && \
+ chown ${USERNAME}:${USERNAME} $SEARXNG_SETTINGS_PATH && \
+ chmod 644 $SEARXNG_SETTINGS_PATH && \
+ sed -i 's/ultrasecretkey/'$(openssl rand -hex 32)'/g' $SEARXNG_SETTINGS_PATH && \
+ sed -i 's/- html/- json/' $SEARXNG_SETTINGS_PATH && \
+ /usr/local/searxng/searxng-venv/bin/pip install -e .
+
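+# Copy the llama-server binary and its shared libraries from the build stage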
+COPY --from=llama-builder /tmp/llama.cpp/build/bin/llama-server /usr/local/bin/
+COPY --from=llama-builder /usr/local/lib/llama/* /usr/local/lib/
+RUN ldconfig /usr/local/lib
+
+USER ${USERNAME}
+
+WORKDIR ${APP_DIR}
+
+ARG ACCESS_KEYS
+ARG ACCESS_KEY_TIMEOUT_HOURS
+ARG WEBLLM_DEFAULT_F16_MODEL_ID
+ARG WEBLLM_DEFAULT_F32_MODEL_ID
+ARG WLLAMA_DEFAULT_MODEL_ID
+ARG INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL
+ARG INTERNAL_OPENAI_COMPATIBLE_API_KEY
+ARG INTERNAL_OPENAI_COMPATIBLE_API_MODEL
+ARG INTERNAL_OPENAI_COMPATIBLE_API_NAME
+ARG DEFAULT_INFERENCE_TYPE
+ARG HOST
+ARG HMR_PORT
+ARG ALLOWED_HOSTS
+
+COPY --chown=${USERNAME}:${USERNAME} ./package.json ./package-lock.json ./.npmrc ./
+
+RUN npm ci
+
+COPY --chown=${USERNAME}:${USERNAME} . .
+
+RUN git config --global --add safe.directory ${APP_DIR} && \
+ npm run build
+
+HEALTHCHECK --interval=5m CMD curl -f http://localhost:7860/status || exit 1
+
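+# Launch SearXNG in the background, then start the app under pm2 and stream its logs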
+ENTRYPOINT [ "/bin/sh", "-c" ]
+
+CMD ["(cd /usr/local/searxng/searxng-src && /usr/local/searxng/searxng-venv/bin/python -m searx.webapp > /dev/null 2>&1) & (npx pm2 start ecosystem.config.cjs && npx pm2 logs)" ]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ac717fad69617c9d3f443a1d035bfea6d574c18e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,127 @@
+---
+title: MiniSearch
+emoji: 👌🔍
+colorFrom: yellow
+colorTo: yellow
+sdk: docker
+short_description: Minimalist web-searching app with browser-based AI assistant
+pinned: true
+custom_headers:
+ cross-origin-embedder-policy: require-corp
+ cross-origin-opener-policy: same-origin
+ cross-origin-resource-policy: cross-origin
+---
+
+# MiniSearch
+
+A minimalist web-searching app with an AI assistant that runs directly from your browser.
+
+Live demo: https://felladrin-minisearch.hf.space
+
+## Screenshot
+
+
+
+## Features
+
+- **Privacy-focused**: [No tracking, no ads, no data collection](https://docs.searxng.org/own-instance.html#how-does-searxng-protect-privacy)
+- **Easy to use**: Minimalist yet intuitive interface for all users
+- **Cross-platform**: Models run inside the browser, both on desktop and mobile
+- **Integrated**: Search from the browser address bar by setting it as the default search engine
+- **Efficient**: Models are loaded and cached only when needed
+- **Customizable**: Tweakable settings for search results and text generation
+- **Open-source**: [The code is available for inspection and contribution at GitHub](https://github.com/felladrin/MiniSearch)
+
+## Prerequisites
+
+- [Docker](https://docs.docker.com/get-docker/)
+
+## Getting started
+
+Here are the easiest ways to get started with MiniSearch. Pick the one that suits you best.
+
+**Option 1** - Use [MiniSearch's Docker Image](https://github.com/felladrin/MiniSearch/pkgs/container/minisearch) by running in your terminal:
+
+```bash
+docker run -p 7860:7860 ghcr.io/felladrin/minisearch:main
+```
+
+**Option 2** - Add MiniSearch's Docker Image to your existing Docker Compose file:
+
+```yaml
+services:
+ minisearch:
+ image: ghcr.io/felladrin/minisearch:main
+ ports:
+ - "7860:7860"
+```
+
+**Option 3** - Build from source by [downloading the repository files](https://github.com/felladrin/MiniSearch/archive/refs/heads/main.zip) and running:
+
+```bash
+docker compose -f docker-compose.production.yml up --build
+```
+
+Once the container is running, open http://localhost:7860 in your browser and start searching!
+
+## Frequently asked questions
+
+<details>
+  <summary>How do I search via the browser's address bar?</summary>
+
+  You can set MiniSearch as your browser's address-bar search engine using the pattern `http://localhost:7860/?q=%s`, in which your search term replaces `%s`.
+</details>
+
+<details>
+  <summary>How do I search via Raycast?</summary>
+
+  You can add this Quicklink to Raycast, so typing your query will open MiniSearch with the search results. You can also edit it to point to your own domain.
+</details>
+
+<details>
+  <summary>Can I use custom models via OpenAI-Compatible API?</summary>
+
+  Yes! For this, open the Menu and change the "AI Processing Location" to Remote server (API). Then configure the Base URL, and optionally set an API Key and a Model to use.
+</details>
+
+<details>
+  <summary>How do I restrict access to my MiniSearch instance via password?</summary>
+
+  Create a `.env` file and set a value for `ACCESS_KEYS`. Then reset the MiniSearch docker container.
+
+  For example, if you want to set the password to `PepperoniPizza`, then this is what you should add to your `.env`:
+
+  `ACCESS_KEYS="PepperoniPizza"`
+
+  You can find more examples in the `.env.example` file.
+</details>
+
+<details>
+  <summary>I want to serve MiniSearch to other users, allowing them to use my own OpenAI-Compatible API key, but without revealing it to them. Is it possible?</summary>
+
+  Yes! In MiniSearch, we call this text-generation feature "Internal OpenAI-Compatible API". To use it:
+
+  1. Set up your OpenAI-Compatible API endpoint by configuring the following environment variables in your `.env` file:
+     - `INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL`: The base URL for your API
+     - `INTERNAL_OPENAI_COMPATIBLE_API_KEY`: Your API access key
+     - `INTERNAL_OPENAI_COMPATIBLE_API_MODEL`: The model to use
+     - `INTERNAL_OPENAI_COMPATIBLE_API_NAME`: The name to display in the UI
+  2. Restart the MiniSearch server.
+  3. In the MiniSearch menu, select the new option (named as per your `INTERNAL_OPENAI_COMPATIBLE_API_NAME` setting) from the "AI Processing Location" dropdown.
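+
+  For reference, a minimal `.env` sketch (the key, model, and name below are hypothetical placeholders; the base URL matches the example from `.env.example`):
+
+  ```bash
+  # Hypothetical values; replace with your own endpoint, key, and model
+  INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL="https://api.openai.com/v1"
+  INTERNAL_OPENAI_COMPATIBLE_API_KEY="sk-..."
+  INTERNAL_OPENAI_COMPATIBLE_API_MODEL="gpt-4o-mini"
+  INTERNAL_OPENAI_COMPATIBLE_API_NAME="Our Team API"
+  # Optionally make this option the default inference type
+  DEFAULT_INFERENCE_TYPE="internal"
+  ```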
+</details>
+
+<details>
+  <summary>How can I contribute to the development of this tool?</summary>
+
+  Fork this repository and clone it. Then, start the development server by running the following command:
+
+  `docker compose up`
+
+  Make your changes, push them to your fork, and open a pull request! All contributions are welcome!
+</details>