diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..2f8f89bc3d44a794c7c55be76ab7c5421bdd9a77 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,26 @@ +# Ignore Git and GitHub files +.git +.github/ + +# Ignore Husky configuration files +.husky/ + +# Ignore documentation and metadata files +CONTRIBUTING.md +LICENSE +README.md + +# Ignore environment examples and sensitive info +.env +*.local +*.example + +# Ignore node modules, logs and cache files +**/*.log +**/node_modules +**/dist +**/build +**/.cache +logs +dist-ssr +.DS_Store diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000000000000000000000000000000000..5274ff0128c0fd56a256e7226df1c5e0e65a4284 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +indent_style = space +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 120 +indent_size = 2 + +[*.md] +trim_trailing_whitespace = false diff --git a/.env.example b/.env.example new file mode 100644 index 0000000000000000000000000000000000000000..7306f36172fada99fa6399326f1ace8d902ca404 --- /dev/null +++ b/.env.example @@ -0,0 +1,82 @@ +# Rename this file to .env once you have filled in the below environment variables! + +# Get your GROQ API Key here - +# https://console.groq.com/keys +# You only need this environment variable set if you want to use Groq models +GROQ_API_KEY= + +# Get your HuggingFace API Key here - +# https://huggingface.co/settings/tokens +# You only need this environment variable set if you want to use HuggingFace models +HuggingFace_API_KEY= + + +# Get your Open AI API Key by following these instructions - +# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key +# You only need this environment variable set if you want to use GPT models +OPENAI_API_KEY= + +# Get your Anthropic API Key in your account settings - +# https://console.anthropic.com/settings/keys +# You only need this environment variable set if you want to use Claude models +ANTHROPIC_API_KEY= + +# Get your OpenRouter API Key in your account settings - +# https://openrouter.ai/settings/keys +# You only need this environment variable set if you want to use OpenRouter models +OPEN_ROUTER_API_KEY= + +# Get your Google Generative AI API Key by following these instructions - +# https://console.cloud.google.com/apis/credentials +# You only need this environment variable set if you want to use Google Generative AI models +GOOGLE_GENERATIVE_AI_API_KEY= + +# You only need this environment variable set if you want to use oLLAMA models +# EXAMPLE http://localhost:11434 +OLLAMA_API_BASE_URL= + +# You only need this environment variable set if you want to use OpenAI Like models +OPENAI_LIKE_API_BASE_URL= + +# You only need this environment variable set if you want to use Together AI models +TOGETHER_API_BASE_URL= + +# You only need this environment variable set if you want to use DeepSeek models through their API +DEEPSEEK_API_KEY= + +# Get your OpenAI Like API Key +OPENAI_LIKE_API_KEY= + +# Get your Together API Key +TOGETHER_API_KEY= + +# Get your Mistral API Key by following these instructions - +# https://console.mistral.ai/api-keys/ +# You only need this environment variable set if you want to use Mistral models +MISTRAL_API_KEY= + +# Get the Cohere Api key by following these instructions - +# https://dashboard.cohere.com/api-keys +# You only need this environment variable set if you want to use Cohere models +COHERE_API_KEY= + 
+# Get LMStudio Base URL from LM Studio Developer Console +# Make sure to enable CORS +# Example: http://localhost:1234 +LMSTUDIO_API_BASE_URL= + +# Get your xAI API key +# https://x.ai/api +# You only need this environment variable set if you want to use xAI models +XAI_API_KEY= + +# Include this environment variable if you want more logging for debugging locally +VITE_LOG_LEVEL=debug + +# Example Context Values for qwen2.5-coder:32b +# +# DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM +# DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM +# DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM +# DEFAULT_NUM_CTX=6144 # Consumes 24GB of VRAM +DEFAULT_NUM_CTX= diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000000000000000000000000000000000000..37ebae5a853a5cc63510ac595da00ccd52786275 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,73 @@ +name: "Bug report" +description: Create a report to help us improve +body: + - type: markdown + attributes: + value: | + Thank you for reporting an issue :pray:. + + This issue tracker is for bugs and issues found with [Bolt.new](https://bolt.new). + If you experience issues related to WebContainer, please file an issue in our [WebContainer repo](https://github.com/stackblitz/webcontainer-core), or file an issue in our [StackBlitz core repo](https://github.com/stackblitz/core) for issues with StackBlitz. + + The more information you fill in, the better we can help you. + - type: textarea + id: description + attributes: + label: Describe the bug + description: Provide a clear and concise description of what you're running into. + validations: + required: true + - type: input + id: link + attributes: + label: Link to the Bolt URL that caused the error + description: Please do not delete it after reporting! + validations: + required: true + - type: textarea + id: steps + attributes: + label: Steps to reproduce + description: Describe the steps we have to take to reproduce the behavior. + placeholder: | + 1. Go to '...' + 2. Click on '....' + 3. Scroll down to '....' + 4. See error + validations: + required: true + - type: textarea + id: expected + attributes: + label: Expected behavior + description: Provide a clear and concise description of what you expected to happen. + validations: + required: true + - type: textarea + id: screenshots + attributes: + label: Screen Recording / Screenshot + description: If applicable, **please include a screen recording** (preferably) or screenshot showcasing the issue. This will assist us in resolving your issue quickly. + - type: textarea + id: platform + attributes: + label: Platform + value: | + - OS: [e.g. macOS, Windows, Linux] + - Browser: [e.g. Chrome, Safari, Firefox] + - Version: [e.g. 91.1] + - type: input + id: provider + attributes: + label: Provider Used + description: Tell us the provider you are using. + - type: input + id: model + attributes: + label: Model Used + description: Tell us the model you are using. + - type: textarea + id: additional + attributes: + label: Additional context + description: Add any other context about the problem here. 
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000000000000000000000000000000000..6cd1e7471ee097b93a2da26b9f5c0f80df3ad550 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,23 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' +--- + +**Is your feature request related to a problem? Please describe:** + + + +**Describe the solution you'd like:** + + + +**Describe alternatives you've considered:** + + + +**Additional context:** + + diff --git a/.github/actions/setup-and-build/action.yaml b/.github/actions/setup-and-build/action.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b27bc6fb8e30115679ea27e2d1600cbe8826d380 --- /dev/null +++ b/.github/actions/setup-and-build/action.yaml @@ -0,0 +1,32 @@ +name: Setup and Build +description: Generic setup action +inputs: + pnpm-version: + required: false + type: string + default: '9.4.0' + node-version: + required: false + type: string + default: '20.15.1' + +runs: + using: composite + + steps: + - uses: pnpm/action-setup@v4 + with: + version: ${{ inputs.pnpm-version }} + run_install: false + + - name: Set Node.js version to ${{ inputs.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ inputs.node-version }} + cache: pnpm + + - name: Install dependencies and build project + shell: bash + run: | + pnpm install + pnpm run build diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8ab236d587c6bdd122a48ea7f89cf49665845a99 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,27 @@ +name: CI/CD + +on: + push: + branches: + - master + pull_request: + +jobs: + test: + name: Test + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup and Build + uses: ./.github/actions/setup-and-build + + - name: Run type check + run: pnpm run typecheck + + # - name: Run ESLint + # run: pnpm run lint + + - name: Run tests + run: pnpm run test diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ceff508478171711302960255bdb54d1bdce4a6d --- /dev/null +++ b/.github/workflows/docs.yaml @@ -0,0 +1,33 @@ +name: Docs CI/CD + +on: + push: + branches: + - main +permissions: + contents: write +jobs: + build_docs: + runs-on: ubuntu-latest + defaults: + run: + working-directory: ./docs + steps: + - uses: actions/checkout@v4 + - name: Configure Git Credentials + run: | + git config user.name github-actions[bot] + git config user.email 41898282+github-actions[bot]@users.noreply.github.com + - uses: actions/setup-python@v5 + with: + python-version: 3.x + - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV + - uses: actions/cache@v4 + with: + key: mkdocs-material-${{ env.cache_id }} + path: .cache + restore-keys: | + mkdocs-material- + + - run: pip install mkdocs-material + - run: mkdocs gh-deploy --force \ No newline at end of file diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000000000000000000000000000000000000..c9eb890eb1af4fc7ed52e1297be4ff31edc5c560 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,25 @@ +name: Mark Stale Issues and Pull Requests + +on: + schedule: + - cron: '0 2 * * *' # Runs daily at 2:00 AM UTC + workflow_dispatch: # Allows manual triggering of the workflow + +jobs: + 
stale: + runs-on: ubuntu-latest + + steps: + - name: Mark stale issues and pull requests + uses: actions/stale@v8 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: "This issue has been marked as stale due to inactivity. If no further activity occurs, it will be closed in 7 days." + stale-pr-message: "This pull request has been marked as stale due to inactivity. If no further activity occurs, it will be closed in 7 days." + days-before-stale: 10 # Number of days before marking an issue or PR as stale + days-before-close: 4 # Number of days after being marked stale before closing + stale-issue-label: "stale" # Label to apply to stale issues + stale-pr-label: "stale" # Label to apply to stale pull requests + exempt-issue-labels: "pinned,important" # Issues with these labels won't be marked stale + exempt-pr-labels: "pinned,important" # PRs with these labels won't be marked stale + operations-per-run: 75 # Limits the number of actions per run to avoid API rate limits diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..7bbcc2ea3f3c5ef915a7832f0ac1268701fa5567 --- /dev/null +++ b/.gitignore @@ -0,0 +1,39 @@ +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +.vscode/* +.vscode/launch.json +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? + +/.history +/.cache +/build +.env.local +.env +.dev.vars +*.vars +.wrangler +_worker.bundle + +Modelfile +modelfiles + +# docs ignore +site diff --git a/.husky/pre-commit b/.husky/pre-commit new file mode 100644 index 0000000000000000000000000000000000000000..05fe9ee6912a73904b6bf43f787190c8acbd91c8 --- /dev/null +++ b/.husky/pre-commit @@ -0,0 +1,20 @@ +#!/bin/sh + +echo "πŸ” Running pre-commit hook to check the code looks good... πŸ”" + +export NVM_DIR="$HOME/.nvm" +[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # Load nvm if you're using i + +if ! pnpm typecheck; then + echo "❌ Type checking failed! Please review TypeScript types." + echo "Once you're done, don't forget to add your changes to the commit! πŸš€" + exit 1 +fi + +if ! pnpm lint; then + echo "❌ Linting failed! 'pnpm lint:fix' will help you fix the easy ones." + echo "Once you're done, don't forget to add your beautification to the commit! 🀩" + exit 1 +fi + +echo "πŸ‘ All good! Committing changes..." diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000000000000000000000000000000000000..3a08d6e2b8cd539347e0ece9923808c6a05c99f6 --- /dev/null +++ b/.prettierignore @@ -0,0 +1,2 @@ +pnpm-lock.yaml +.astro diff --git a/.prettierrc b/.prettierrc new file mode 100644 index 0000000000000000000000000000000000000000..8d3dfb047c8c168b52ced5119a342a3ff2339806 --- /dev/null +++ b/.prettierrc @@ -0,0 +1,8 @@ +{ + "printWidth": 120, + "singleQuote": true, + "useTabs": false, + "tabWidth": 2, + "semi": true, + "bracketSpacing": true +} diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 0000000000000000000000000000000000000000..427253d38b7e6af4c3f46184b762d21bfc69153b --- /dev/null +++ b/.tool-versions @@ -0,0 +1,2 @@ +nodejs 20.15.1 +pnpm 9.4.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..68215a289bb750a8bd4aa4ed5193168043307a2e --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,217 @@ +# Contributing to oTToDev + +First off, thank you for considering contributing to oTToDev! 
This fork aims to expand the capabilities of the original project by integrating multiple LLM providers and enhancing functionality. Every contribution helps make oTToDev a better tool for developers worldwide.
+
+## πŸ“‹ Table of Contents
+- [Code of Conduct](#code-of-conduct)
+- [How Can I Contribute?](#how-can-i-contribute)
+- [Pull Request Guidelines](#pull-request-guidelines)
+- [Coding Standards](#coding-standards)
+- [Development Setup](#development-setup)
+- [Deployment with Docker](#docker-deployment-documentation)
+- [Project Structure](#project-structure)
+
+## Code of Conduct
+
+This project and everyone participating in it is governed by our Code of Conduct. By participating, you are expected to uphold this code. Please report unacceptable behavior to the project maintainers.
+
+## How Can I Contribute?
+
+### 🐞 Reporting Bugs and Feature Requests
+- Check the issue tracker to avoid duplicates
+- Use the issue templates when available
+- Include as much relevant information as possible
+- For bugs, add steps to reproduce the issue
+
+### πŸ”§ Code Contributions
+1. Fork the repository
+2. Create a new branch for your feature/fix
+3. Write your code
+4. Submit a pull request
+
+### ✨ Becoming a Core Contributor
+We're looking for dedicated contributors to help maintain and grow this project. If you're interested in becoming a core contributor, please fill out our [Contributor Application Form](https://forms.gle/TBSteXSDCtBDwr5m7).
+
+## Pull Request Guidelines
+
+### πŸ“ PR Checklist
+- [ ] Branch from the main branch
+- [ ] Update documentation if needed
+- [ ] Manually verify all new functionality works as expected
+- [ ] Keep PRs focused and atomic
+
+### πŸ‘€ Review Process
+1. Manually test the changes
+2. At least one maintainer review required
+3. Address all review comments
+4. Maintain clean commit history
+
+## Coding Standards
+
+### πŸ’» General Guidelines
+- Follow existing code style
+- Comment complex logic
+- Keep functions focused and small
+- Use meaningful variable names
+- Lint your code. This repo contains a pre-commit hook that will verify your code is linted properly,
+so set up your IDE to do that for you!
+
+## Development Setup
+
+### πŸ”„ Initial Setup
+1. Clone the repository:
+```bash
+git clone https://github.com/coleam00/bolt.new-any-llm.git
+```
+
+2. Install dependencies:
+```bash
+pnpm install
+```
+
+3. Set up environment variables:
+   - Rename `.env.example` to `.env.local`
+   - Add your LLM API keys (only set the ones you plan to use):
+```bash
+GROQ_API_KEY=XXX
+HuggingFace_API_KEY=XXX
+OPENAI_API_KEY=XXX
+ANTHROPIC_API_KEY=XXX
+...
+```
+   - Optionally set debug level:
+```bash
+VITE_LOG_LEVEL=debug
+```
+
+   - Optionally set context size:
+```bash
+DEFAULT_NUM_CTX=32768
+```
+
+Some example context values for the qwen2.5-coder:32b model:
+
+* DEFAULT_NUM_CTX=32768 - Consumes 36GB of VRAM
+* DEFAULT_NUM_CTX=24576 - Consumes 32GB of VRAM
+* DEFAULT_NUM_CTX=12288 - Consumes 26GB of VRAM
+* DEFAULT_NUM_CTX=6144 - Consumes 24GB of VRAM
+
+A sketch of how this value might be read at runtime is shown at the end of this section.
+
+**Important**: Never commit your `.env.local` file to version control. It's already included in `.gitignore`.
+
+### πŸš€ Running the Development Server
+```bash
+pnpm run dev
+```
+
+**Note**: You will need Google Chrome Canary to run this locally if you use Chrome! It's an easy install and a good browser for web development anyway.
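For reference, here is a minimal sketch of how a `DEFAULT_NUM_CTX` value like the ones above might be read at runtime. This is an assumption about the implementation, not the actual provider code; the real default value and the file where this logic lives may differ.

```typescript
// Minimal sketch (assumption): parse DEFAULT_NUM_CTX from the environment and
// fall back to a default context size when it is unset.
const DEFAULT_NUM_CTX: number = process.env.DEFAULT_NUM_CTX
  ? Number.parseInt(process.env.DEFAULT_NUM_CTX, 10)
  : 32768;

// Larger values let the model see more of the conversation and project files,
// at the cost of VRAM (see the example values listed above).
```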
+ +## Testing + +Run the test suite with: + +```bash +pnpm test +``` + +## Deployment + +To deploy the application to Cloudflare Pages: + +```bash +pnpm run deploy +``` + +Make sure you have the necessary permissions and Wrangler is correctly configured for your Cloudflare account. + +# Docker Deployment Documentation + +This guide outlines various methods for building and deploying the application using Docker. + +## Build Methods + +### 1. Using Helper Scripts + +NPM scripts are provided for convenient building: + +```bash +# Development build +npm run dockerbuild + +# Production build +npm run dockerbuild:prod +``` + +### 2. Direct Docker Build Commands + +You can use Docker's target feature to specify the build environment: + +```bash +# Development build +docker build . --target bolt-ai-development + +# Production build +docker build . --target bolt-ai-production +``` + +### 3. Docker Compose with Profiles + +Use Docker Compose profiles to manage different environments: + +```bash +# Development environment +docker-compose --profile development up + +# Production environment +docker-compose --profile production up +``` + +## Running the Application + +After building using any of the methods above, run the container with: + +```bash +# Development +docker run -p 5173:5173 --env-file .env.local bolt-ai:development + +# Production +docker run -p 5173:5173 --env-file .env.local bolt-ai:production +``` + +## Deployment with Coolify + +[Coolify](https://github.com/coollabsio/coolify) provides a straightforward deployment process: + +1. Import your Git repository as a new project +2. Select your target environment (development/production) +3. Choose "Docker Compose" as the Build Pack +4. Configure deployment domains +5. Set the custom start command: + ```bash + docker compose --profile production up + ``` +6. Configure environment variables + - Add necessary AI API keys + - Adjust other environment variables as needed +7. Deploy the application + +## VS Code Integration + +The `docker-compose.yaml` configuration is compatible with VS Code dev containers: + +1. Open the command palette in VS Code +2. Select the dev container configuration +3. Choose the "development" profile from the context menu + +## Environment Files + +Ensure you have the appropriate `.env.local` file configured before running the containers. This file should contain: +- API keys +- Environment-specific configurations +- Other required environment variables + +## Notes + +- Port 5173 is exposed and mapped for both development and production environments +- Environment variables are loaded from `.env.local` +- Different profiles (development/production) can be used for different deployment scenarios +- The configuration supports both local development and production deployment diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..9ec95750844036b455be801554f2c50cf5bc0169 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,46 @@ +ARG BASE=node:20.18.0 +FROM ${BASE} AS base + +WORKDIR /app + +# Install dependencies (this step is cached as long as the dependencies don't change) +COPY package.json pnpm-lock.yaml ./ + +RUN corepack enable pnpm && pnpm install + +# Copy the rest of your app's source code +COPY . . 
+ +# Expose the port the app runs on +EXPOSE 5173 + +# Development image +FROM base AS bolt-ai-development + +# Define the same environment variables for development +ARG GROQ_API_KEY +ARG HuggingFace +ARG OPENAI_API_KEY +ARG ANTHROPIC_API_KEY +ARG OPEN_ROUTER_API_KEY +ARG GOOGLE_GENERATIVE_AI_API_KEY +ARG OLLAMA_API_BASE_URL +ARG TOGETHER_API_KEY +ARG TOGETHER_API_BASE_URL +ARG VITE_LOG_LEVEL=debug +ARG DEFAULT_NUM_CTX + +ENV GROQ_API_KEY=${GROQ_API_KEY} \ + HuggingFace_API_KEY=${HuggingFace_API_KEY} \ + OPENAI_API_KEY=${OPENAI_API_KEY} \ + ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \ + OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \ + GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \ + OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \ + TOGETHER_API_KEY=${TOGETHER_API_KEY} \ + TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \ + VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \ + DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX} + +RUN mkdir -p ${WORKDIR}/run +CMD pnpm run dev --host diff --git a/Dockerfile (copy) b/Dockerfile (copy) new file mode 100644 index 0000000000000000000000000000000000000000..06541d30317c979d615e8b4d93eb934581463f7b --- /dev/null +++ b/Dockerfile (copy) @@ -0,0 +1,83 @@ +ARG BASE=node:20.18.0 +FROM ${BASE} AS base + +WORKDIR /app + +# Install dependencies (this step is cached as long as the dependencies don't change) +COPY package.json pnpm-lock.yaml ./ + +RUN corepack enable pnpm && pnpm install + +# Copy the rest of your app's source code +COPY . . + +# Expose the port the app runs on +EXPOSE 5173 + +# Production image +FROM base AS bolt-ai-production + +# Define environment variables with default values or let them be overridden +ARG GROQ_API_KEY +ARG HuggingFace_API_KEY +ARG OPENAI_API_KEY +ARG ANTHROPIC_API_KEY +ARG OPEN_ROUTER_API_KEY +ARG GOOGLE_GENERATIVE_AI_API_KEY +ARG OLLAMA_API_BASE_URL +ARG TOGETHER_API_KEY +ARG TOGETHER_API_BASE_URL +ARG VITE_LOG_LEVEL=debug +ARG DEFAULT_NUM_CTX + +ENV WRANGLER_SEND_METRICS=false \ + GROQ_API_KEY=${GROQ_API_KEY} \ + HuggingFace_KEY=${HuggingFace_API_KEY} \ + OPENAI_API_KEY=${OPENAI_API_KEY} \ + ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \ + OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \ + GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \ + OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \ + TOGETHER_API_KEY=${TOGETHER_API_KEY} \ + TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \ + VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \ + DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX} + +# Pre-configure wrangler to disable metrics +RUN mkdir -p /root/.config/.wrangler && \ + echo '{"enabled":false}' > /root/.config/.wrangler/metrics.json + +RUN npm run build + +CMD [ "pnpm", "run", "dockerstart"] + +# Development image +FROM base AS bolt-ai-development + +# Define the same environment variables for development +ARG GROQ_API_KEY +ARG HuggingFace +ARG OPENAI_API_KEY +ARG ANTHROPIC_API_KEY +ARG OPEN_ROUTER_API_KEY +ARG GOOGLE_GENERATIVE_AI_API_KEY +ARG OLLAMA_API_BASE_URL +ARG TOGETHER_API_KEY +ARG TOGETHER_API_BASE_URL +ARG VITE_LOG_LEVEL=debug +ARG DEFAULT_NUM_CTX + +ENV GROQ_API_KEY=${GROQ_API_KEY} \ + HuggingFace_API_KEY=${HuggingFace_API_KEY} \ + OPENAI_API_KEY=${OPENAI_API_KEY} \ + ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \ + OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \ + GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \ + OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \ + TOGETHER_API_KEY=${TOGETHER_API_KEY} \ + TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \ + VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \ + DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX} + +RUN mkdir -p ${WORKDIR}/run 
+CMD pnpm run dev --host
diff --git a/FAQ.md b/FAQ.md
new file mode 100644
index 0000000000000000000000000000000000000000..3e267058c7c6a1534f8364da4bc01def8b27ef3c
--- /dev/null
+++ b/FAQ.md
@@ -0,0 +1,54 @@
+[![Bolt.new: AI-Powered Full-Stack Web Development in the Browser](./public/social_preview_index.jpg)](https://bolt.new)
+
+# Bolt.new Fork by Cole Medin - oTToDev
+
+## FAQ
+
+### How do I get the best results with oTToDev?
+
+- **Be specific about your stack**: If you want to use specific frameworks or libraries (like Astro, Tailwind, ShadCN, or any other popular JavaScript framework), mention them in your initial prompt to ensure Bolt scaffolds the project accordingly.
+
+- **Use the enhance prompt icon**: Before sending your prompt, try clicking the 'enhance' icon to have the AI model help you refine your prompt, then edit the results before submitting.
+
+- **Scaffold the basics first, then add features**: Make sure the basic structure of your application is in place before diving into more advanced functionality. This helps oTToDev understand the foundation of your project and ensures everything is wired up correctly before you build out the more advanced features.
+
+- **Batch simple instructions**: Save time by combining simple instructions into one message. For example, you can ask oTToDev to change the color scheme, add mobile responsiveness, and restart the dev server, all in one go, saving you time and reducing API credit consumption significantly.
+
+### Do you plan on merging oTToDev back into the official Bolt.new repo?
+
+More news on this coming early next month - stay tuned!
+
+### Why are there so many open issues/pull requests?
+
+oTToDev was started simply to showcase how to edit an open source project and to do something cool with local LLMs on my (@ColeMedin) YouTube channel! However, it quickly grew into a massive community project, and I am working hard to keep up with the demand by forming a team of maintainers and getting as many people involved as I can. That effort is going well and all of our maintainers are ABSOLUTE rockstars, but it still takes time to organize everything so we can efficiently get through all the issues and PRs. But rest assured, we are working hard and even working on some partnerships behind the scenes to really help this project take off!
+
+### How do local LLMs fare compared to larger models like Claude 3.5 Sonnet for oTToDev/Bolt.new?
+
+As much as the gap is quickly closing between open-source and massive closed-source models, you're still going to get the best results with the very large models like GPT-4o, Claude 3.5 Sonnet, and DeepSeek Coder V2 236b. This is one of the big tasks we have at hand - figuring out how to prompt better, use agents, and improve the platform as a whole to make it work better for even the smaller local LLMs!
+
+### I'm getting the error: "There was an error processing this request"
+
+If you see this error within oTToDev, that is just the application telling you there is a problem at a high level, and this could mean a number of different things. To find the actual error, please check BOTH the terminal where you started the application (with Docker or pnpm) and the developer console in the browser. For most browsers, you can access the developer console by pressing F12 or right-clicking anywhere in the browser and selecting "Inspect". Then go to the "Console" tab in the top right.
+
+### I'm getting the error: "x-api-key header missing"
+
+We have seen this error a couple of times, and for some reason just restarting the Docker container has fixed it. It seems to be Ollama-specific. Another thing to try is running oTToDev with whichever of Docker or pnpm you didn't use the first time. We are still on the hunt for why this happens once in a while!
+
+### I'm getting a blank preview when oTToDev runs my app!
+
+We promise you that we are constantly testing new PRs coming into oTToDev and the preview is core functionality, so the application is not broken! When you get a blank preview, or no preview at all, this is generally because the LLM hallucinated bad code or incorrect commands. We are working on making this more transparent so it is obvious. Sometimes the error will appear in the developer console too, so check that as well.
+
+### How to add an LLM
+
+To make new LLMs available to use in this version of Bolt.new, head on over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object that has the model ID for the name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider.
+
+By default, Anthropic, OpenAI, Groq, and Ollama are implemented as providers, but the YouTube video for this repo covers how to extend this to work with more providers if you wish!
+
+When you add a new model to the MODEL_LIST array, it will immediately be available to use when you run the app locally or reload it. For Ollama models, make sure you have the model installed already before trying to use it here! A minimal sketch of what such an entry might look like is shown further below.
+
+### Everything works but the results are bad
+
+This goes to the point above about how local LLMs are getting very powerful, but you are still going to see better (sometimes much better) results with the largest LLMs like GPT-4o, Claude 3.5 Sonnet, and DeepSeek Coder V2 236b. If you are using smaller LLMs like Qwen-2.5-Coder, consider them more experimental and educational at this point. They can build smaller applications really well, which is super impressive for a local LLM, but for larger-scale applications you still want to use the larger LLMs!
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..79290241f979e4079f5fd7d6fdc9acac6614de2b
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 StackBlitz, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
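As a follow-up to the FAQ entry above on adding an LLM, here is a minimal sketch of what a MODEL_LIST entry might look like, assuming the shape described there (the model ID as the name, a label for the dropdown, and the provider). Treat it as illustrative only; the exact property names and types are defined in `app/utils/constants.ts`.

```typescript
// Illustrative only - check the existing entries in app/utils/constants.ts for
// the exact property names and types before adding your own model.
const exampleModel = {
  name: 'qwen2.5-coder:32b', // model ID, exactly as the provider's API expects it
  label: 'Qwen 2.5 Coder 32B', // text shown in the frontend model dropdown
  provider: 'Ollama', // must be one of the implemented providers
};

// For Ollama models, pull the model locally (e.g. `ollama pull qwen2.5-coder:32b`)
// before selecting it in the app.
```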
diff --git a/app/components/chat/APIKeyManager.tsx b/app/components/chat/APIKeyManager.tsx new file mode 100644 index 0000000000000000000000000000000000000000..28847bc19a8c103e783de33d2fe094b19ee7b6db --- /dev/null +++ b/app/components/chat/APIKeyManager.tsx @@ -0,0 +1,67 @@ +import React, { useState } from 'react'; +import { IconButton } from '~/components/ui/IconButton'; +import type { ProviderInfo } from '~/types/model'; + +interface APIKeyManagerProps { + provider: ProviderInfo; + apiKey: string; + setApiKey: (key: string) => void; + getApiKeyLink?: string; + labelForGetApiKey?: string; +} + +// eslint-disable-next-line @typescript-eslint/naming-convention +export const APIKeyManager: React.FC = ({ provider, apiKey, setApiKey }) => { + const [isEditing, setIsEditing] = useState(false); + const [tempKey, setTempKey] = useState(apiKey); + + const handleSave = () => { + setApiKey(tempKey); + setIsEditing(false); + }; + + return ( +
+
+ {provider?.name} API Key: + {!isEditing && ( +
+ + {apiKey ? 'β€’β€’β€’β€’β€’β€’β€’β€’' : 'Not set (will still work if set in .env file)'} + + setIsEditing(true)} title="Edit API Key"> +
+ +
+ )} +
+ + {isEditing ? ( +
+ setTempKey(e.target.value)} + className="flex-1 px-2 py-1 text-xs lg:text-sm rounded border border-bolt-elements-borderColor bg-bolt-elements-prompt-background text-bolt-elements-textPrimary focus:outline-none focus:ring-2 focus:ring-bolt-elements-focus" + /> + +
+ + setIsEditing(false)} title="Cancel"> +
+ +
+ ) : ( + <> + {provider?.getApiKeyLink && ( + window.open(provider?.getApiKeyLink)} title="Edit API Key"> + {provider?.labelForGetApiKey || 'Get API Key'} +
+ + )} + + )} +
+ ); +}; diff --git a/app/components/chat/Artifact.tsx b/app/components/chat/Artifact.tsx new file mode 100644 index 0000000000000000000000000000000000000000..682a4c761561b03308563d2b99760fce5a87e765 --- /dev/null +++ b/app/components/chat/Artifact.tsx @@ -0,0 +1,241 @@ +import { useStore } from '@nanostores/react'; +import { AnimatePresence, motion } from 'framer-motion'; +import { computed } from 'nanostores'; +import { memo, useEffect, useRef, useState } from 'react'; +import { createHighlighter, type BundledLanguage, type BundledTheme, type HighlighterGeneric } from 'shiki'; +import type { ActionState } from '~/lib/runtime/action-runner'; +import { workbenchStore } from '~/lib/stores/workbench'; +import { classNames } from '~/utils/classNames'; +import { cubicEasingFn } from '~/utils/easings'; +import { WORK_DIR } from '~/utils/constants'; + +const highlighterOptions = { + langs: ['shell'], + themes: ['light-plus', 'dark-plus'], +}; + +const shellHighlighter: HighlighterGeneric = + import.meta.hot?.data.shellHighlighter ?? (await createHighlighter(highlighterOptions)); + +if (import.meta.hot) { + import.meta.hot.data.shellHighlighter = shellHighlighter; +} + +interface ArtifactProps { + messageId: string; +} + +export const Artifact = memo(({ messageId }: ArtifactProps) => { + const userToggledActions = useRef(false); + const [showActions, setShowActions] = useState(false); + + const artifacts = useStore(workbenchStore.artifacts); + const artifact = artifacts[messageId]; + + const actions = useStore( + computed(artifact.runner.actions, (actions) => { + return Object.values(actions); + }), + ); + + const toggleActions = () => { + userToggledActions.current = true; + setShowActions(!showActions); + }; + + useEffect(() => { + if (actions.length && !showActions && !userToggledActions.current) { + setShowActions(true); + } + }, [actions]); + + return ( +
+
+ +
+ + {actions.length && ( + +
+
+
+
+ )} +
+
+ + {showActions && actions.length > 0 && ( + +
+
+ +
+ + )} + +
+ ); +}); + +interface ShellCodeBlockProps { + classsName?: string; + code: string; +} + +function ShellCodeBlock({ classsName, code }: ShellCodeBlockProps) { + return ( +
+ ); +} + +interface ActionListProps { + actions: ActionState[]; +} + +const actionVariants = { + hidden: { opacity: 0, y: 20 }, + visible: { opacity: 1, y: 0 }, +}; + +function openArtifactInWorkbench(filePath: any) { + if (workbenchStore.currentView.get() !== 'code') { + workbenchStore.currentView.set('code'); + } + + workbenchStore.setSelectedFile(`${WORK_DIR}/${filePath}`); +} + +const ActionList = memo(({ actions }: ActionListProps) => { + return ( + +
    + {actions.map((action, index) => { + const { status, type, content } = action; + const isLast = index === actions.length - 1; + + return ( + +
    +
    + {status === 'running' ? ( + <> + {type !== 'start' ? ( +
    + ) : ( +
    + )} + + ) : status === 'pending' ? ( +
    + ) : status === 'complete' ? ( +
    + ) : status === 'failed' || status === 'aborted' ? ( +
    + ) : null} +
    + {type === 'file' ? ( +
    + Create{' '} + openArtifactInWorkbench(action.filePath)} + > + {action.filePath} + +
    + ) : type === 'shell' ? ( +
    + Run command +
    + ) : type === 'start' ? ( + { + e.preventDefault(); + workbenchStore.currentView.set('preview'); + }} + className="flex items-center w-full min-h-[28px]" + > + Start Application + + ) : null} +
    + {(type === 'shell' || type === 'start') && ( + + )} +
    + ); + })} +
+
+ ); +}); + +function getIconColor(status: ActionState['status']) { + switch (status) { + case 'pending': { + return 'text-bolt-elements-textTertiary'; + } + case 'running': { + return 'text-bolt-elements-loader-progress'; + } + case 'complete': { + return 'text-bolt-elements-icon-success'; + } + case 'aborted': { + return 'text-bolt-elements-textSecondary'; + } + case 'failed': { + return 'text-bolt-elements-icon-error'; + } + default: { + return undefined; + } + } +} diff --git a/app/components/chat/AssistantMessage.tsx b/app/components/chat/AssistantMessage.tsx new file mode 100644 index 0000000000000000000000000000000000000000..a5698e9756cf692027952b1184464940c3b473fb --- /dev/null +++ b/app/components/chat/AssistantMessage.tsx @@ -0,0 +1,14 @@ +import { memo } from 'react'; +import { Markdown } from './Markdown'; + +interface AssistantMessageProps { + content: string; +} + +export const AssistantMessage = memo(({ content }: AssistantMessageProps) => { + return ( +
+ {content} +
+ ); +}); diff --git a/app/components/chat/BaseChat.module.scss b/app/components/chat/BaseChat.module.scss new file mode 100644 index 0000000000000000000000000000000000000000..cf530a112dadfe17a597e9494c0de2bd892373ff --- /dev/null +++ b/app/components/chat/BaseChat.module.scss @@ -0,0 +1,123 @@ +.BaseChat { + &[data-chat-visible='false'] { + --workbench-inner-width: 100%; + --workbench-left: 0; + + .Chat { + --at-apply: bolt-ease-cubic-bezier; + transition-property: transform, opacity; + transition-duration: 0.3s; + will-change: transform, opacity; + transform: translateX(-50%); + opacity: 0; + } + } +} + +.Chat { + opacity: 1; +} + +.RayContainer { + --gradient-opacity: 0.85; + --ray-gradient: radial-gradient(rgba(83, 196, 255, var(--gradient-opacity)) 0%, rgba(43, 166, 255, 0) 100%); + transition: opacity 0.25s linear; + position: fixed; + inset: 0; + pointer-events: none; + user-select: none; +} + +.LightRayOne { + width: 480px; + height: 680px; + transform: rotate(80deg); + top: -540px; + left: 250px; + filter: blur(110px); + position: absolute; + border-radius: 100%; + background: var(--ray-gradient); +} + +.LightRayTwo { + width: 110px; + height: 400px; + transform: rotate(-20deg); + top: -280px; + left: 350px; + mix-blend-mode: overlay; + opacity: 0.6; + filter: blur(60px); + position: absolute; + border-radius: 100%; + background: var(--ray-gradient); +} + +.LightRayThree { + width: 400px; + height: 370px; + top: -350px; + left: 200px; + mix-blend-mode: overlay; + opacity: 0.6; + filter: blur(21px); + position: absolute; + border-radius: 100%; + background: var(--ray-gradient); +} + +.LightRayFour { + position: absolute; + width: 330px; + height: 370px; + top: -330px; + left: 50px; + mix-blend-mode: overlay; + opacity: 0.5; + filter: blur(21px); + border-radius: 100%; + background: var(--ray-gradient); +} + +.LightRayFive { + position: absolute; + width: 110px; + height: 400px; + transform: rotate(-40deg); + top: -280px; + left: -10px; + mix-blend-mode: overlay; + opacity: 0.8; + filter: blur(60px); + border-radius: 100%; + background: var(--ray-gradient); +} + +.PromptEffectContainer { + --prompt-container-offset: 50px; + --prompt-line-stroke-width: 1px; + position: absolute; + pointer-events: none; + inset: calc(var(--prompt-container-offset) / -2); + width: calc(100% + var(--prompt-container-offset)); + height: calc(100% + var(--prompt-container-offset)); +} + +.PromptEffectLine { + width: calc(100% - var(--prompt-container-offset) + var(--prompt-line-stroke-width)); + height: calc(100% - var(--prompt-container-offset) + var(--prompt-line-stroke-width)); + x: calc(var(--prompt-container-offset) / 2 - var(--prompt-line-stroke-width) / 2); + y: calc(var(--prompt-container-offset) / 2 - var(--prompt-line-stroke-width) / 2); + rx: calc(8px - var(--prompt-line-stroke-width)); + fill: transparent; + stroke-width: var(--prompt-line-stroke-width); + stroke: url(#line-gradient); + stroke-dasharray: 35px 65px; + stroke-dashoffset: 10; +} + +.PromptShine { + fill: url(#shine-gradient); + mix-blend-mode: overlay; +} diff --git a/app/components/chat/BaseChat.tsx b/app/components/chat/BaseChat.tsx new file mode 100644 index 0000000000000000000000000000000000000000..8c7589a6805fa9b1b09434c8c637e43587ccf4a3 --- /dev/null +++ b/app/components/chat/BaseChat.tsx @@ -0,0 +1,534 @@ +/* + * @ts-nocheck + * Preventing TS checks with files presented in the video for a better presentation. 
+ */ +import type { Message } from 'ai'; +import React, { type RefCallback, useEffect, useState } from 'react'; +import { ClientOnly } from 'remix-utils/client-only'; +import { Menu } from '~/components/sidebar/Menu.client'; +import { IconButton } from '~/components/ui/IconButton'; +import { Workbench } from '~/components/workbench/Workbench.client'; +import { classNames } from '~/utils/classNames'; +import { MODEL_LIST, PROVIDER_LIST, initializeModelList } from '~/utils/constants'; +import { Messages } from './Messages.client'; +import { SendButton } from './SendButton.client'; +import { APIKeyManager } from './APIKeyManager'; +import Cookies from 'js-cookie'; +import * as Tooltip from '@radix-ui/react-tooltip'; + +import styles from './BaseChat.module.scss'; +import type { ProviderInfo } from '~/utils/types'; +import { ExportChatButton } from '~/components/chat/chatExportAndImport/ExportChatButton'; +import { ImportButtons } from '~/components/chat/chatExportAndImport/ImportButtons'; +import { ExamplePrompts } from '~/components/chat/ExamplePrompts'; + +import FilePreview from './FilePreview'; +import { ModelSelector } from '~/components/chat/ModelSelector'; +import { SpeechRecognitionButton } from '~/components/chat/SpeechRecognition'; + +const TEXTAREA_MIN_HEIGHT = 76; + +interface BaseChatProps { + textareaRef?: React.RefObject | undefined; + messageRef?: RefCallback | undefined; + scrollRef?: RefCallback | undefined; + showChat?: boolean; + chatStarted?: boolean; + isStreaming?: boolean; + messages?: Message[]; + description?: string; + enhancingPrompt?: boolean; + promptEnhanced?: boolean; + input?: string; + model?: string; + setModel?: (model: string) => void; + provider?: ProviderInfo; + setProvider?: (provider: ProviderInfo) => void; + handleStop?: () => void; + sendMessage?: (event: React.UIEvent, messageInput?: string) => void; + handleInputChange?: (event: React.ChangeEvent) => void; + enhancePrompt?: () => void; + importChat?: (description: string, messages: Message[]) => Promise; + exportChat?: () => void; + uploadedFiles?: File[]; + setUploadedFiles?: (files: File[]) => void; + imageDataList?: string[]; + setImageDataList?: (dataList: string[]) => void; +} + +export const BaseChat = React.forwardRef( + ( + { + textareaRef, + messageRef, + scrollRef, + showChat = true, + chatStarted = false, + isStreaming = false, + model, + setModel, + provider, + setProvider, + input = '', + enhancingPrompt, + handleInputChange, + promptEnhanced, + enhancePrompt, + sendMessage, + handleStop, + importChat, + exportChat, + uploadedFiles = [], + setUploadedFiles, + imageDataList = [], + setImageDataList, + messages, + }, + ref, + ) => { + const TEXTAREA_MAX_HEIGHT = chatStarted ? 
400 : 200; + const [apiKeys, setApiKeys] = useState>({}); + const [modelList, setModelList] = useState(MODEL_LIST); + const [isModelSettingsCollapsed, setIsModelSettingsCollapsed] = useState(false); + const [isListening, setIsListening] = useState(false); + const [recognition, setRecognition] = useState(null); + const [transcript, setTranscript] = useState(''); + + console.log(transcript); + useEffect(() => { + // Load API keys from cookies on component mount + try { + const storedApiKeys = Cookies.get('apiKeys'); + + if (storedApiKeys) { + const parsedKeys = JSON.parse(storedApiKeys); + + if (typeof parsedKeys === 'object' && parsedKeys !== null) { + setApiKeys(parsedKeys); + } + } + } catch (error) { + console.error('Error loading API keys from cookies:', error); + + // Clear invalid cookie data + Cookies.remove('apiKeys'); + } + + initializeModelList().then((modelList) => { + setModelList(modelList); + }); + + if (typeof window !== 'undefined' && ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window)) { + const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; + const recognition = new SpeechRecognition(); + recognition.continuous = true; + recognition.interimResults = true; + + recognition.onresult = (event) => { + const transcript = Array.from(event.results) + .map((result) => result[0]) + .map((result) => result.transcript) + .join(''); + + setTranscript(transcript); + + if (handleInputChange) { + const syntheticEvent = { + target: { value: transcript }, + } as React.ChangeEvent; + handleInputChange(syntheticEvent); + } + }; + + recognition.onerror = (event) => { + console.error('Speech recognition error:', event.error); + setIsListening(false); + }; + + setRecognition(recognition); + } + }, []); + + const startListening = () => { + if (recognition) { + recognition.start(); + setIsListening(true); + } + }; + + const stopListening = () => { + if (recognition) { + recognition.stop(); + setIsListening(false); + } + }; + + const handleSendMessage = (event: React.UIEvent, messageInput?: string) => { + if (sendMessage) { + sendMessage(event, messageInput); + + if (recognition) { + recognition.abort(); // Stop current recognition + setTranscript(''); // Clear transcript + setIsListening(false); + + // Clear the input by triggering handleInputChange with empty value + if (handleInputChange) { + const syntheticEvent = { + target: { value: '' }, + } as React.ChangeEvent; + handleInputChange(syntheticEvent); + } + } + } + }; + + const updateApiKey = (provider: string, key: string) => { + try { + const updatedApiKeys = { ...apiKeys, [provider]: key }; + setApiKeys(updatedApiKeys); + + // Save updated API keys to cookies with 30 day expiry and secure settings + Cookies.set('apiKeys', JSON.stringify(updatedApiKeys), { + expires: 30, // 30 days + secure: true, // Only send over HTTPS + sameSite: 'strict', // Protect against CSRF + path: '/', // Accessible across the site + }); + } catch (error) { + console.error('Error saving API keys to cookies:', error); + } + }; + + const handleFileUpload = () => { + const input = document.createElement('input'); + input.type = 'file'; + input.accept = 'image/*'; + + input.onchange = async (e) => { + const file = (e.target as HTMLInputElement).files?.[0]; + + if (file) { + const reader = new FileReader(); + + reader.onload = (e) => { + const base64Image = e.target?.result as string; + setUploadedFiles?.([...uploadedFiles, file]); + setImageDataList?.([...imageDataList, base64Image]); + }; + 
reader.readAsDataURL(file); + } + }; + + input.click(); + }; + + const handlePaste = async (e: React.ClipboardEvent) => { + const items = e.clipboardData?.items; + + if (!items) { + return; + } + + for (const item of items) { + if (item.type.startsWith('image/')) { + e.preventDefault(); + + const file = item.getAsFile(); + + if (file) { + const reader = new FileReader(); + + reader.onload = (e) => { + const base64Image = e.target?.result as string; + setUploadedFiles?.([...uploadedFiles, file]); + setImageDataList?.([...imageDataList, base64Image]); + }; + reader.readAsDataURL(file); + } + + break; + } + } + }; + + const baseChat = ( +
+
+
+
+
+
+
+
+ {() => } +
+
+ {!chatStarted && ( +
+

+ Where ideas begin +

+

+ Bring ideas to life in seconds or get help on existing projects. +

+
+ )} +
+ + {() => { + return chatStarted ? ( + + ) : null; + }} + +
+ + + + + + + + + + + + + + + + + + +
+
+ +
+ +
+ + {provider && ( + updateApiKey(provider.name, key)} + /> + )} +
+
+ { + setUploadedFiles?.(uploadedFiles.filter((_, i) => i !== index)); + setImageDataList?.(imageDataList.filter((_, i) => i !== index)); + }} + /> +
+