diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..352c2766ae163781a545ed1c61d355f67b7caeb8
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,29 @@
+# Source: https://github.com/a5chin/python-uv
+FROM debian:bookworm-slim AS builder
+
+ENV CARGO_HOME="/opt/.cargo"
+
+SHELL [ "/bin/bash", "-o", "pipefail", "-c" ]
+
+WORKDIR /opt
+
+# The installer requires curl (and certificates) to download the release archive
+# hadolint ignore=DL3008
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends ca-certificates curl
+
+# Run uv installer
+RUN curl -LsSf https://astral.sh/uv/install.sh | sh
+
+
+FROM mcr.microsoft.com/vscode/devcontainers/base:bookworm
+
+
+ENV CARGO_HOME="/opt/.cargo"
+ENV PATH="$CARGO_HOME/bin/:$PATH"
+ENV PYTHONUNBUFFERED=True
+ENV UV_LINK_MODE=copy
+
+WORKDIR /opt
+
+COPY --from=builder --chown=vscode: $CARGO_HOME $CARGO_HOME
\ No newline at end of file
diff --git a/.devcontainer/demo/README.md b/.devcontainer/demo/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0a828a009ad0305e961799679e923753ff52279b
--- /dev/null
+++ b/.devcontainer/demo/README.md
@@ -0,0 +1,15 @@
+# Langflow Demo Codespace Readme
+
+These instructions will walk you through the process of running a Langflow demo via GitHub Codespaces.
+
+## Setup
+
+### Create a Codespace in GitHub
+
+To set up the demo, simply navigate to the Langflow repo, click the "+" button, and select "Create new Codespace". This will automatically create a new codespace in your browser, which you can use for the demo.
+
+### Wait for everything to install
+
+After the codespace is opened, you should see a new Terminal window in VS Code where Langflow is installed. Once the install completes, `langflow` will launch the webserver and your application will be available via the forwarded devcontainer port.
+
+Note: VS Code should prompt you with a button to open the application once the forwarded port is available.
diff --git a/.devcontainer/demo/devcontainer.json b/.devcontainer/demo/devcontainer.json
new file mode 100644
index 0000000000000000000000000000000000000000..0fb998b8191c88cd1bc8360bfdfffee646837efb
--- /dev/null
+++ b/.devcontainer/demo/devcontainer.json
@@ -0,0 +1,33 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/universal
+{
+ "name": "Langflow Demo Container",
+ // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
+ "image": "mcr.microsoft.com/devcontainers/python:3.10",
+ "features": {
+ "ghcr.io/devcontainers/features/aws-cli:1": {},
+ "ghcr.io/devcontainers/features/docker-in-docker": {},
+ "ghcr.io/devcontainers/features/node": {}
+ },
+ "customizations": {
+ "vscode": {
+ "extensions": [
+ "actboy168.tasks",
+ "GitHub.copilot",
+ "ms-python.python",
+ "eamodio.gitlens",
+ "GitHub.vscode-pull-request-github"
+ ]
+ }
+ },
+ // Features to add to the dev container. More info: https://containers.dev/features.
+ // "features": {},
+ // Use 'forwardPorts' to make a list of ports inside the container available locally.
+ // "forwardPorts": [],
+ // Use 'postCreateCommand' to run commands after the container is created.
+ "postCreateCommand": "pipx install 'langflow>=0.0.33' && langflow --host 0.0.0.0"
+ // Configure tool-specific properties.
+ // "customizations": {},
+ // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+ // "remoteUser": "root"
+}
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000000000000000000000000000000000000..88c16501327c036537e27647827b64004d9256bc
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,51 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/universal
+{
+ "name": "Langflow Dev Container",
+ "build": {
+ "context": "..",
+ "dockerfile": "Dockerfile"
+ },
+ // Features to add to the dev container. More info: https://containers.dev/features.
+ "features": {
+ "ghcr.io/devcontainers/features/node": {},
+ "ghcr.io/dhoeric/features/hadolint:1": {}
+ },
+
+ // Use 'forwardPorts' to make a list of ports inside the container available locally.
+ // "forwardPorts": [],
+
+ // Use 'postCreateCommand' to run commands after the container is created.
+ "postCreateCommand": "make install_frontend && make install_backend",
+ "postStartCommand": "make init",
+
+ // Configure tool-specific properties.
+ "customizations": {
+ "vscode": {
+ "extensions": [
+ "charliermarsh.ruff",
+ "njpwerner.autodocstring",
+ "oderwat.indent-rainbow",
+ "exiasr.hadolint",
+ "actboy168.tasks",
+ "GitHub.copilot",
+ "ms-python.python",
+ "eamodio.gitlens",
+ "ms-vscode.makefile-tools",
+ "GitHub.vscode-pull-request-github"
+ ],
+ "settings": {
+ "terminal.integrated.defaultProfile.linux": "zsh",
+ "terminal.integrated.profiles.linux": {
+ "zsh": {
+ "path": "/bin/zsh"
+ }
+ }
+ }
+ }
+ },
+
+ // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+ // "remoteUser": "root"
+ "remoteUser": "vscode"
+}
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000000000000000000000000000000000000..dac588a2447ffa7ce341bd953070e508c9bbdc6a
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,104 @@
+# Description: Example of .env file
+# Usage: Copy this file to .env and change the values
+# according to your needs
+# Do not commit .env file to git
+# Do not change .env.example file
+
+# Config directory
+# Directory where files, logs and database will be stored
+# Example: LANGFLOW_CONFIG_DIR=~/.langflow
+LANGFLOW_CONFIG_DIR=
+
+# Save database in the config directory
+# Values: true, false
+# If false, the database will be saved in Langflow's root directory
+# This means that the database will be deleted when Langflow is uninstalled
+# and that the database will not be shared between different virtual environments
+# Example: LANGFLOW_SAVE_DB_IN_CONFIG_DIR=true
+LANGFLOW_SAVE_DB_IN_CONFIG_DIR=
+
+# Database URL
+# Postgres example: LANGFLOW_DATABASE_URL=postgresql://postgres:postgres@localhost:5432/langflow
+# SQLite example:
+LANGFLOW_DATABASE_URL=sqlite:///./langflow.db
+
+# Cache type
+LANGFLOW_LANGCHAIN_CACHE=SQLiteCache
+
+# Server host
+# Example: LANGFLOW_HOST=127.0.0.1
+LANGFLOW_HOST=
+
+# Worker processes
+# Example: LANGFLOW_WORKERS=1
+LANGFLOW_WORKERS=
+
+# Server port
+# Example: LANGFLOW_PORT=7860
+LANGFLOW_PORT=
+
+# Logging level
+# Example: LANGFLOW_LOG_LEVEL=critical
+LANGFLOW_LOG_LEVEL=
+
+# Path to the log file
+# Example: LANGFLOW_LOG_FILE=logs/langflow.log
+LANGFLOW_LOG_FILE=
+
+# Path to the frontend directory containing build files
+# Example: LANGFLOW_FRONTEND_PATH=/path/to/frontend/build/files
+LANGFLOW_FRONTEND_PATH=
+
+# Whether to open the browser after starting the server
+# Values: true, false
+# Example: LANGFLOW_OPEN_BROWSER=true
+LANGFLOW_OPEN_BROWSER=
+
+# Whether to remove API keys from the projects saved in the database
+# Values: true, false
+# Example: LANGFLOW_REMOVE_API_KEYS=false
+LANGFLOW_REMOVE_API_KEYS=
+
+# Whether to use RedisCache or ThreadingInMemoryCache or AsyncInMemoryCache
+# Values: async, memory, redis
+# Example: LANGFLOW_CACHE_TYPE=memory
+# If you want to use redis then the following environment variables must be set:
+# LANGFLOW_REDIS_HOST (default: localhost)
+# LANGFLOW_REDIS_PORT (default: 6379)
+# LANGFLOW_REDIS_DB (default: 0)
+# LANGFLOW_REDIS_CACHE_EXPIRE (default: 3600)
+LANGFLOW_CACHE_TYPE=
+
+# Set AUTO_LOGIN to false if you want to disable auto login
+# and use the login form to login. LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD
+# must be set if AUTO_LOGIN is set to false
+# Values: true, false
+LANGFLOW_AUTO_LOGIN=
+
+# Superuser username
+# Example: LANGFLOW_SUPERUSER=admin
+LANGFLOW_SUPERUSER=
+
+# Superuser password
+# Example: LANGFLOW_SUPERUSER_PASSWORD=123456
+LANGFLOW_SUPERUSER_PASSWORD=
+
+# Should store environment variables in the database
+# Values: true, false
+LANGFLOW_STORE_ENVIRONMENT_VARIABLES=
+
+# STORE_URL
+# Example: LANGFLOW_STORE_URL=https://api.langflow.store
+# LANGFLOW_STORE_URL=
+
+# DOWNLOAD_WEBHOOK_URL
+#
+# LANGFLOW_DOWNLOAD_WEBHOOK_URL=
+
+# LIKE_WEBHOOK_URL
+#
+# LANGFLOW_LIKE_WEBHOOK_URL=
+
+# Value must finish with slash /
+#BACKEND_URL=http://localhost:7860/
+BACKEND_URL=
\ No newline at end of file
diff --git a/.eslintrc.json b/.eslintrc.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc12c18c2c5b5de6342d9f869ba070d9578727e8
--- /dev/null
+++ b/.eslintrc.json
@@ -0,0 +1,90 @@
+{
+ "extends": [
+ "eslint:recommended",
+ "plugin:react/recommended",
+ "plugin:prettier/recommended"
+ ],
+ "plugins": [
+ "react",
+ "import-helpers",
+ "prettier"
+ ],
+ "parser": "@typescript-eslint/parser",
+ "parserOptions": {
+ "project": [
+ "./tsconfig.node.json",
+ "./tsconfig.json"
+ ],
+    "extraFileExtensions": [
+ ".mdx"
+ ],
+    "extensions": [
+ ".mdx"
+ ]
+ },
+ "env": {
+ "browser": true,
+ "es2021": true
+ },
+ "settings": {
+ "react": {
+ "version": "detect"
+ }
+ },
+ "rules": {
+ "no-console": "warn",
+ "no-self-assign": "warn",
+ "no-self-compare": "warn",
+ "complexity": [
+ "error",
+ {
+ "max": 15
+ }
+ ],
+ "indent": [
+ "error",
+ 2,
+ {
+ "SwitchCase": 1
+ }
+ ],
+ "no-dupe-keys": "error",
+ "no-invalid-regexp": "error",
+ "no-undef": "error",
+ "no-return-assign": "error",
+ "no-redeclare": "error",
+ "no-empty": "error",
+ "no-await-in-loop": "error",
+ "react/react-in-jsx-scope": 0,
+ "node/exports-style": [
+ "error",
+ "module.exports"
+ ],
+ "node/file-extension-in-import": [
+ "error",
+ "always"
+ ],
+ "node/prefer-global/buffer": [
+ "error",
+ "always"
+ ],
+ "node/prefer-global/console": [
+ "error",
+ "always"
+ ],
+ "node/prefer-global/process": [
+ "error",
+ "always"
+ ],
+ "node/prefer-global/url-search-params": [
+ "error",
+ "always"
+ ],
+ "node/prefer-global/url": [
+ "error",
+ "always"
+ ],
+ "node/prefer-promises/dns": "error",
+ "node/prefer-promises/fs": "error"
+ }
+}
diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..e2f73922f9a322d193e96e36feb9b2d89b92089c 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,35 +1,32 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+# Set the default behavior, in case people don't have core.autocrlf set.
+* text eol=lf
+# Explicitly declare text files you want to always be normalized and converted
+# to native line endings on checkout.
+*.c text
+*.h text
+*.py text
+*.js text
+*.jsx text
+*.ts text
+*.tsx text
+*.md text
+*.mdx text
+*.yml text
+*.yaml text
+*.xml text
+# *.csv is treated as binary (see the rule in the binary section below)
+*.json text
+*.sh text
+*.Dockerfile text
+Dockerfile text
+# Declare files that will always have CRLF line endings on checkout.
+*.sln text eol=crlf
+# Denote all files that are truly binary and should not be modified.
+*.png filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.ico binary
+# *.gif is handled by the LFS rule below
+*.mp4 binary
+*.svg binary
+*.csv binary
+*.gif filter=lfs diff=lfs merge=lfs -text
diff --git a/.github/ISSUE_TEMPLATE/bug-report.yaml b/.github/ISSUE_TEMPLATE/bug-report.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..94419e3ab91682a79033437917c5cecfa7bd9130
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.yaml
@@ -0,0 +1,120 @@
+name: "đ Bug Report"
+description: Submit a bug report to help us improve Langflow
+labels: [ "bug" ]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for taking the time to fill out this bug report!
+
+ - type: textarea
+ id: description
+ attributes:
+ label: Bug Description
+ description: A clear and concise description of what the bug is
+ placeholder: Tell us what you see!
+ validations:
+ required: true
+
+ - type: textarea
+ id: reproduction
+ validations:
+ required: true
+ attributes:
+ label: Reproduction
+ description: |
+ Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet.
+ If you have code snippets, error messages, or stack traces please provide them here as well.
+ Important! Use code tags to format your code correctly. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting
+ Do not use screenshots, as they are hard to read, and (more importantly) don't allow others to copy-and-paste your code.
+ placeholder: |
+ Steps to reproduce the behavior:
+
+ 1.
+ 2.
+ 3.
+
+ - type: textarea
+ id: expected-behavior
+ validations:
+ required: true
+ attributes:
+ label: Expected behavior
+ description: "A clear and concise description of what you would expect to happen."
+
+ - type: textarea
+ id: who-can-help
+ attributes:
+ label: Who can help?
+ description: |
+ Your issue will be replied to more quickly if you can figure out the right person to tag with @.
+ If you know the relevant code owner, please tag them. Otherwise, leave this blank and a core maintainer will direct the issue accordingly.
+
+ Please tag fewer than 3 people.
+
+ Specific Areas:
+
+ Frontend:
+ - @anovazzi1
+ - @Cristhianzl
+ - @lucaseduoli
+ - @igorrCarvalho
+
+ Backend:
+ - @italojohnny
+
+ Full Stack:
+ - @ogabrielluiz
+ - @nicoloboschi
+ - @zzzming
+ - @jordanrfrazier
+ - @mfortman11
+ - @NadirJ
+
+ placeholder: "@Username ..."
+
+ - type: markdown
+ attributes:
+ value: '## Environment'
+
+ - type: input
+ id: os
+ attributes:
+ label: Operating System
+ placeholder: ex. Ubuntu Linux 22.04
+ validations:
+ required: true
+
+ - type: input
+ id: langflow-version
+ attributes:
+ label: Langflow Version
+ placeholder: ex. 1.0.9
+ validations:
+ required: true
+
+ - type: dropdown
+ id: python-version
+ attributes:
+ label: "Python Version"
+ description: |
+
+ **Langflow requires Python version 3.10 or greater.**
+ options:
+ - "3.12"
+ - "3.11"
+ - "3.10"
+
+ - type: textarea
+ id: screenshot
+ attributes:
+ label: Screenshot
+ description: "If applicable, add screenshots to help explain your problem."
+ placeholder: "Paste your screenshot here."
+
+ - type: textarea
+ id: flow-file
+ attributes:
+ label: Flow File
+ description: "Add your flow if applicable to help replicate the problem."
+ placeholder: "Add your flow link here."
diff --git a/.github/ISSUE_TEMPLATE/feature-request.yaml b/.github/ISSUE_TEMPLATE/feature-request.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1a6862a62ae789d0ef190c8799437b32d2a165bc
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature-request.yaml
@@ -0,0 +1,28 @@
+name: "đ Feature Request"
+description: Submit a proposal/request for a new Langflow feature
+labels: [ "enhancement" ]
+body:
+ - type: textarea
+ id: feature-request
+ validations:
+ required: true
+ attributes:
+ label: Feature Request
+ description: |
+ A clear and concise description of the feature proposal. Please provide any relevant links to papers, code, or other resources that support your proposal.
+
+ - type: textarea
+ id: motivation
+ validations:
+ required: true
+ attributes:
+ label: Motivation
+ description: |
+ Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link it here as well.
+
+ - type: textarea
+ id: contribution
+ attributes:
+ label: Your Contribution
+ description: |
+ Is there any way that you could help, e.g., by submitting a PR? Make sure to read the CONTRIBUTING.md guidelines for Langflow before proceeding.
diff --git a/.github/ISSUE_TEMPLATE/work-in-progress.yaml b/.github/ISSUE_TEMPLATE/work-in-progress.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..213cbe8768605e7bc2f7f9b70bf8087858b6e6c8
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/work-in-progress.yaml
@@ -0,0 +1,58 @@
+name: Work in Progress
+description: Use this template to describe the new feature or improvement you are currently working on.
+labels: [enhancement]
+
+body:
+ - type: markdown
+ attributes:
+ value: |
+ ## Work in Progress
+
+ Thank you for contributing to our project! Please fill out the sections below to describe the new feature or improvement you are currently working on.
+
+ - type: input
+ id: title
+ attributes:
+ label: Title
+ description: Provide a concise title for your feature or improvement.
+ placeholder: "Short and descriptive title"
+ validations:
+ required: true
+
+ - type: dropdown
+ id: type
+ attributes:
+ label: Type
+ description: Is this a new feature or an improvement?
+ options:
+ - New Feature
+ - Improvement
+ validations:
+ required: true
+
+ - type: textarea
+ id: description
+ attributes:
+ label: Description
+ description: Provide a detailed description of the feature or improvement.
+ placeholder: "Explain the feature or improvement in detail"
+ validations:
+ required: true
+
+ - type: textarea
+ id: use-case
+ attributes:
+ label: Use Case
+ description: Describe the use case or user story that this feature or improvement addresses.
+ placeholder: "As a [user], I want to [do something] so that [benefit]."
+ validations:
+ required: false
+
+ - type: textarea
+ id: implementation
+ attributes:
+ label: Implementation Plan
+ description: Outline your plan for implementing this feature or improvement.
+ placeholder: "Describe how you plan to implement this feature or improvement."
+ validations:
+ required: false
diff --git a/.github/actions/install-playwright/action.yml b/.github/actions/install-playwright/action.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8f3d7bef1ab2a6e9b1f6d397c9de89c406951c8b
--- /dev/null
+++ b/.github/actions/install-playwright/action.yml
@@ -0,0 +1,76 @@
+name: Install Playwright
+description: Install Playwright and dependencies with cache
+
+# https://github.com/microsoft/playwright/issues/7249
+
+inputs:
+ working-directory:
+ description: Where to install Playwright
+ default: ./
+ browsers:
+ description: Browsers to install
+ default: chromium webkit firefox
+
+outputs:
+ version:
+ description: Installed version of Playwright
+ value: ${{ steps.version.outputs.version }}
+ cache-hit:
+ description: Whether cache for Playwright was found
+ value: ${{ steps.cache.outputs.cache-hit }}
+
+runs:
+ using: composite
+ steps:
+ - name: Get Playwright version
+ uses: actions/github-script@v7
+ id: version
+ with:
+ script: |
+ const fs = require('fs');
+ const path = require('path');
+
+ // Get working directory
+ const workingDirectory = "${{ inputs.working-directory }}";
+ console.debug("Specified working directory:", workingDirectory);
+ if (workingDirectory) process.chdir(workingDirectory);
+ console.debug("Actual working directory:", process.cwd());
+
+ // Read package.json
+ let version = "";
+ try {
+ const packageJson = JSON.parse(fs.readFileSync('package.json', 'utf8'));
+ version = (
+ packageJson.devDependencies?.['@playwright/test'] ||
+ packageJson.dependencies?.['@playwright/test'] ||
+ packageJson.dependencies?.['playwright'] ||
+ packageJson.devDependencies?.['playwright']
+ )?.replace(/[\^~]/g, '');
+ } catch (error) {
+ console.log(error.message);
+ }
+
+ console.debug("Version:", version);
+ if (version) {
+ core.exportVariable("PLAYWRIGHT_VERSION", version);
+ core.setOutput("version", version);
+ } else core.setFailed("Couldn't get Playwright version");
+
+ - name: Cache Playwright
+ id: cache
+ uses: actions/cache@v4
+ with:
+ path: ~/.cache/ms-playwright
+ key: playwright-${{ env.PLAYWRIGHT_VERSION }}
+
+ - name: Install Playwright and its dependencies
+ shell: bash
+ if: steps.cache.outputs.cache-hit != 'true'
+ working-directory: ${{ inputs.working-directory }}
+ run: npx playwright install ${{ inputs.browsers }} --with-deps
+
+ - name: Install just Playwright's dependencies
+ shell: bash
+ if: steps.cache.outputs.cache-hit == 'true'
+ working-directory: ${{ inputs.working-directory }}
+ run: npx playwright install-deps
diff --git a/.github/actions/poetry_caching/action.yml b/.github/actions/poetry_caching/action.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4bb6415acb7988ce288606797319b1abf802df43
--- /dev/null
+++ b/.github/actions/poetry_caching/action.yml
@@ -0,0 +1,99 @@
+# An action for setting up poetry install with caching.
+# Using a custom action since the default action does not
+# take poetry install groups into account.
+# Action code from:
+# https://github.com/actions/setup-python/issues/505#issuecomment-1273013236
+# Copy of https://github.com/langchain-ai/langchain/blob/2f8dd1a1619f25daa4737df4d378b1acd6ff83c4/.github/actions/poetry_setup/action.yml
+name: poetry-install-with-caching
+description: Poetry install with support for caching of dependency groups.
+
+inputs:
+ python-version:
+ description: Python version, supporting MAJOR.MINOR only
+ required: true
+
+ poetry-version:
+ description: Poetry version
+ required: true
+
+ cache-key:
+ description: Cache key to use for manual handling of caching
+ required: true
+
+ working-directory:
+ description: Directory whose poetry.lock file should be cached
+ required: true
+
+runs:
+ using: composite
+ steps:
+ - uses: actions/setup-python@v5
+ name: Setup python ${{ inputs.python-version }}
+ id: setup-python
+ with:
+ python-version: ${{ inputs.python-version }}
+
+ - uses: actions/cache@v4
+ id: cache-bin-poetry
+ name: Cache Poetry binary - Python ${{ inputs.python-version }}
+ env:
+ SEGMENT_DOWNLOAD_TIMEOUT_MIN: "1"
+ with:
+ path: |
+ /opt/pipx/venvs/poetry
+ # This step caches the poetry installation, so make sure it's keyed on the poetry version as well.
+ key: bin-poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-${{ inputs.poetry-version }}
+
+ - name: Refresh shell hashtable and fixup softlinks
+ if: steps.cache-bin-poetry.outputs.cache-hit == 'true'
+ shell: bash
+ env:
+ POETRY_VERSION: ${{ inputs.poetry-version }}
+ PYTHON_VERSION: ${{ inputs.python-version }}
+ run: |
+ set -eux
+
+ # Refresh the shell hashtable, to ensure correct `which` output.
+ hash -r
+
+ # `actions/cache@v3` doesn't always seem able to correctly unpack softlinks.
+ # Delete and recreate the softlinks pipx expects to have.
+ rm /opt/pipx/venvs/poetry/bin/python
+ cd /opt/pipx/venvs/poetry/bin
+ ln -s "$(which "python$PYTHON_VERSION")" python
+ chmod +x python
+ cd /opt/pipx_bin/
+ ln -s /opt/pipx/venvs/poetry/bin/poetry poetry
+ chmod +x poetry
+
+ # Ensure everything got set up correctly.
+ /opt/pipx/venvs/poetry/bin/python --version
+ /opt/pipx_bin/poetry --version
+
+ - name: Install poetry
+ if: steps.cache-bin-poetry.outputs.cache-hit != 'true'
+ shell: bash
+ env:
+ POETRY_VERSION: ${{ inputs.poetry-version || env.POETRY_VERSION }}
+ PYTHON_VERSION: ${{ inputs.python-version }}
+ # Install poetry using the python version installed by setup-python step.
+ run: |
+ pipx install "poetry==$POETRY_VERSION" --python '${{ steps.setup-python.outputs.python-path }}' --verbose
+ pipx ensurepath
+ # Ensure the poetry binary is available in the PATH.
+ # Test that the poetry binary is available.
+ poetry --version
+
+ - name: Restore pip and poetry cached dependencies
+ uses: actions/cache@v4
+ env:
+ SEGMENT_DOWNLOAD_TIMEOUT_MIN: "4"
+ WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}
+ with:
+ path: |
+ ~/.cache/pip
+ ~/.cache/pypoetry/virtualenvs
+ ~/.cache/pypoetry/cache
+ ~/.cache/pypoetry/artifacts
+ ${{ env.WORKDIR }}/.venv
+ key: py-deps-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles(format('{0}/**/poetry.lock', env.WORKDIR)) }}
diff --git a/.github/actions/setup-uv/action.yml b/.github/actions/setup-uv/action.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1b75878f3f836a5f3521c39591749dcb3c8a939e
--- /dev/null
+++ b/.github/actions/setup-uv/action.yml
@@ -0,0 +1,25 @@
+name: "Setup uv"
+description: "Checks out code, installs uv, and sets up Python environment"
+
+runs:
+ using: "composite"
+ steps:
+ - name: Install uv
+ uses: astral-sh/setup-uv@v3
+ with:
+ enable-cache: true
+ cache-dependency-glob: "uv.lock"
+
+ - name: "Set up Python"
+ uses: actions/setup-python@v5
+ with:
+ python-version-file: "pyproject.toml"
+
+ - name: Restore uv cache
+ uses: actions/cache@v4
+ with:
+ path: /tmp/.uv-cache
+ key: uv-${{ runner.os }}-${{ hashFiles('uv.lock') }}
+ restore-keys: |
+ uv-${{ runner.os }}-${{ hashFiles('uv.lock') }}
+ uv-${{ runner.os }}
diff --git a/.github/changes-filter.yaml b/.github/changes-filter.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..04cb094a25eaf249f39d4bbd5f9cf15791a6e356
--- /dev/null
+++ b/.github/changes-filter.yaml
@@ -0,0 +1,71 @@
+# https://github.com/dorny/paths-filter
+python:
+ - "src/backend/**"
+ - "src/backend/**.py"
+ - "pyproject.toml"
+ - "poetry.lock"
+ - "**/python_test.yml"
+components-changes:
+ - "src/backend/base/langflow/components/**"
+starter-projects-changes:
+ - "src/backend/base/langflow/initial_setup/**"
+frontend-tests:
+ - "src/frontend/tests/**"
+frontend:
+ - "src/frontend/**"
+ - "**/typescript_test.yml"
+docs:
+ - "docs/**"
+
+# Test categories and their associated paths
+starter-projects:
+ - "src/backend/base/langflow/initial_setup/**"
+ - "src/backend/base/langflow/components/**"
+ - "src/frontend/src/pages/MainPage/**"
+ - "src/frontend/src/utils/reactflowUtils.ts"
+ - "src/frontend/tests/extended/features/**"
+
+components:
+ - "src/frontend/src/components/**"
+ - "src/frontend/src/modals/**"
+ - "src/frontend/src/pages/FlowPage/**"
+ - "src/frontend/src/shared/**"
+ - "src/frontend/src/hooks/**"
+ - "src/frontend/src/CustomNodes/**"
+ - "src/frontend/src/style/**"
+ - "src/frontend/src/utils/styleUtils.ts"
+ - "src/backend/base/langflow/components/**"
+ - "src/frontend/tests/core/features/**"
+ - "src/frontend/tests/core/integrations/**"
+ - "src/frontend/tests/core/regression/**"
+ - "src/frontend/tests/extended/integrations/**"
+ - "src/frontend/tests/extended/features/**"
+ - "src/frontend/tests/extended/regression/**"
+
+workspace:
+ - "src/backend/base/langflow/inputs/**"
+ - "src/frontend/src/components/core/parameterRenderComponent/**"
+ - "src/frontend/src/CustomNodes/**"
+ - "src/frontend/src/modals/**"
+ - "src/frontend/src/style/**"
+ - "src/frontend/src/CustomEdges/**"
+ - "src/frontend/src/utils/reactflowUtils.ts"
+ - "src/frontend/src/utils/buildUtils.ts"
+ - "src/frontend/tests/core/features/**"
+ - "src/frontend/tests/core/unit/**"
+ - "src/frontend/tests/extended/features/**"
+ - "src/frontend/tests/core/regression/**"
+
+api:
+ - "src/backend/base/langflow/api/**"
+ - "src/frontend/src/controllers/**"
+ - "src/frontend/tests/core/features/**"
+ - "src/frontend/tests/extended/features/**"
+ - "src/frontend/tests/extended/regression/**"
+
+database:
+ - "src/backend/base/langflow/services/database/**"
+ - "src/backend/base/langflow/alembic/**"
+ - "src/frontend/src/controllers/**"
+ - "src/frontend/tests/core/features/**"
+ - "src/frontend/tests/extended/features/**"
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ecf2b5aac1a23b887b817b3798a7059e88bd8a81
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,11 @@
+# Set update schedule for GitHub Actions
+
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "monthly"
+ commit-message:
+ prefix: "build(deps):"
+ include: scope
diff --git a/.github/release.yml b/.github/release.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fd78aad923cddd872dee022a41a7f5c902de86d8
--- /dev/null
+++ b/.github/release.yml
@@ -0,0 +1,35 @@
+changelog:
+ categories:
+ - title: đ¨ Breaking Changes
+ description: Changes that break existing functionality
+ labels:
+ - breaking
+ - title: ⨠New Features
+ description: New features and enhancements
+ labels:
+ - enhancement
+ - title: đ Bug Fixes
+ description: Bug fixes and patches
+ labels:
+ - fix
+ - bug
+ - title: đ Documentation Updates
+ description: Changes to documentation
+ labels:
+ - documentation
+ - title: đ Maintenance Tasks
+ description: Maintenance tasks and housekeeping
+ labels:
+ - chore
+ - refactor
+ - style
+ - performance
+ - build
+ - title: â Tests
+ description: Changes to tests
+ labels:
+ - test
+ - title: Others
+ description: Other changes
+ labels:
+ - "*"
diff --git a/.github/semantic.yml b/.github/semantic.yml
new file mode 100644
index 0000000000000000000000000000000000000000..efa3b0285a1406bbf702b5fd09d17c3cd541d803
--- /dev/null
+++ b/.github/semantic.yml
@@ -0,0 +1,2 @@
+titleOnly: true
+targetUrl: https://www.conventionalcommits.org/en/v1.0.0/#summary
diff --git a/.github/workflows/auto-update.yml b/.github/workflows/auto-update.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1d46fa1758fd85dfb8b1452f02faed84d2cdff66
--- /dev/null
+++ b/.github/workflows/auto-update.yml
@@ -0,0 +1,13 @@
+name: Auto-update
+
+on:
+ push:
+ branches:
+ - main
+
+jobs:
+ Auto:
+ name: Auto-update
+ runs-on: ubuntu-latest
+ steps:
+ - uses: tibdex/auto-update@v2
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3a60d6fb0b2a9cf9e5217f6083a9ccc39af6e7aa
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,140 @@
+name: CI
+
+on:
+ workflow_call:
+ inputs:
+ python-versions:
+ description: "Python Versions"
+ required: false
+ type: string
+ default: "['3.10']"
+ frontend-tests-folder:
+ description: "Frontend Tests Folder"
+ required: false
+ type: string
+ default: "tests/core"
+ release:
+ description: "Release"
+ required: false
+ type: boolean
+ default: false
+ workflow_dispatch:
+ inputs:
+ branch:
+ description: "(Optional) Branch to checkout"
+ required: false
+ type: string
+ openai_api_key:
+ description: "OpenAI API Key"
+ required: false
+ type: string
+ store_api_key:
+ description: "Store API Key"
+ required: false
+ type: string
+ python-versions:
+ description: "Python Versions"
+ required: false
+ type: string
+ default: "['3.10']"
+ pull_request:
+ types: [synchronize, labeled]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ set-ci-condition:
+ name: Should Run CI
+ runs-on: ubuntu-latest
+ outputs:
+ should-run-ci: ${{ (contains( github.event.pull_request.labels.*.name, 'lgtm') && github.event.pull_request.draft == false) || (github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call') }}
+ steps:
+ # Do anything just to make the job run
+ - run: echo "Debug CI Condition"
+ - run: echo "Labels -> ${{ join(github.event.pull_request.labels.*.name, ',') }}"
+ - run: echo "IsDraft -> ${{ github.event.pull_request.draft }}"
+ - run: echo "Event name -> ${{ github.event_name }}"
+ path-filter:
+ needs: set-ci-condition
+ if: ${{ needs.set-ci-condition.outputs.should-run-ci == 'true' }}
+ name: Filter Paths
+ runs-on: ubuntu-latest
+ outputs:
+ python: ${{ steps.filter.outputs.python }}
+ frontend: ${{ steps.filter.outputs.frontend }}
+ docs: ${{ steps.filter.outputs.docs }}
+ frontend-tests: ${{ steps.filter.outputs.frontend-tests }}
+ components-changes: ${{ steps.filter.outputs.components-changes }}
+ starter-projects-changes: ${{ steps.filter.outputs.starter-projects-changes }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.branch || github.ref }}
+ - name: Filter Paths
+ id: filter
+ uses: dorny/paths-filter@v3
+ with:
+ filters: ./.github/changes-filter.yaml
+
+ test-backend:
+ needs: path-filter
+ name: Run Backend Tests
+ if: ${{ needs.path-filter.outputs.python == 'true'}}
+ uses: ./.github/workflows/python_test.yml
+ with:
+ python-versions: ${{ inputs.python-versions || '["3.10"]' }}
+
+ test-frontend:
+ needs: path-filter
+ name: Run Frontend Tests
+ if: ${{ needs.path-filter.outputs.frontend == 'true' || needs.path-filter.outputs.frontend-tests == 'true' || needs.path-filter.outputs.components-changes == 'true' || needs.path-filter.outputs.starter-projects-changes == 'true' }}
+ uses: ./.github/workflows/typescript_test.yml
+ with:
+ tests_folder: ${{ inputs.frontend-tests-folder }}
+ release: ${{ inputs.release || false }}
+ secrets:
+ OPENAI_API_KEY: "${{ secrets.OPENAI_API_KEY }}"
+ STORE_API_KEY: "${{ secrets.STORE_API_KEY }}"
+ ANTHROPIC_API_KEY: "${{ secrets.ANTHROPIC_API_KEY }}"
+ TAVILY_API_KEY: "${{ secrets.TAVILY_API_KEY }}"
+
+ lint-backend:
+ needs: path-filter
+ if: ${{ needs.path-filter.outputs.python == 'true'}}
+ name: Lint Backend
+ uses: ./.github/workflows/lint-py.yml
+
+ test-docs-build:
+ needs: path-filter
+ if: ${{ needs.path-filter.outputs.docs == 'true' }}
+ name: Test Docs Build
+ uses: ./.github/workflows/docs_test.yml
+
+ # https://github.com/langchain-ai/langchain/blob/master/.github/workflows/check_diffs.yml
+ ci_success:
+ name: "CI Success"
+ needs:
+ [
+ test-backend,
+ test-frontend,
+ lint-backend,
+ test-docs-build,
+ set-ci-condition,
+ ]
+
+ if: always()
+ runs-on: ubuntu-latest
+ env:
+ JOBS_JSON: ${{ toJSON(needs) }}
+ RESULTS_JSON: ${{ toJSON(needs.*.result) }}
+ EXIT_CODE: ${{!contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && needs.set-ci-condition.outputs.should-run-ci == 'true' && '0' || '1'}}
+ steps:
+ - name: "CI Success"
+ run: |
+ echo $JOBS_JSON
+ echo $RESULTS_JSON
+ echo "Exiting with $EXIT_CODE"
+ exit $EXIT_CODE
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c7975830dcd404588a648465512ffff7a38c9161
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,66 @@
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ 'dev', 'main' ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ 'dev' ]
+ schedule:
+ - cron: '17 2 * * 1'
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
+ timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'python', 'javascript' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+ # Use only 'java' to analyze code written in Java, Kotlin or both
+ # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both
+ # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v3
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+
+ # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+ # queries: security-extended,security-and-quality
+
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v3
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+
+ # If the Autobuild fails above, remove it and uncomment the following three lines.
+ # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
+
+ # - run: |
+ # echo "Run, Build Application using script"
+ # ./location_of_script_within_repo/buildscript.sh
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v3
+ with:
+ category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml
new file mode 100644
index 0000000000000000000000000000000000000000..34909e84d2ced32f3b530d1065d7902714f4bae6
--- /dev/null
+++ b/.github/workflows/codspeed.yml
@@ -0,0 +1,45 @@
+name: Run benchmarks
+
+on:
+ push:
+ paths:
+ - "src/backend/base/**"
+ - "src/backend/tests/performance/**"
+ branches:
+ - "main" # or "master"
+ pull_request:
+ paths:
+ - "src/backend/base/**"
+ - "src/backend/tests/performance/**"
+ - "!src/backend/base/langflow/components/**"
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ codspeed:
+ name: Run benchmarks
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version:
+ - "3.12"
+ steps:
+ - name: Check out the code at a specific ref
+ uses: actions/checkout@v4
+ - name: "Setup Environment"
+ uses: ./.github/actions/setup-uv
+ - name: Run benchmarks
+ uses: CodSpeedHQ/action@v3
+ with:
+ token: ${{ secrets.CODSPEED_TOKEN }}
+ run: |
+ uv run pytest src/backend/tests \
+ --ignore=src/backend/tests/integration \
+ --codspeed \
+ -m "not api_key_required" \
+ -n auto
+ - name: Minimize uv cache
+ run: uv cache prune --ci
diff --git a/.github/workflows/conventional-labels.yml b/.github/workflows/conventional-labels.yml
new file mode 100644
index 0000000000000000000000000000000000000000..54e9521eafcae7f3aeda25409b6880f1000f02cc
--- /dev/null
+++ b/.github/workflows/conventional-labels.yml
@@ -0,0 +1,28 @@
+# Warning, do not check out untrusted code with
+# the pull_request_target event.
+name: Label PRs with Conventional Commits
+on:
+ pull_request_target:
+ types: [opened, edited]
+
+jobs:
+ validate-pr:
+ name: Validate PR
+ runs-on: ubuntu-latest
+ steps:
+ - name: Validate the pull request
+ id: validate
+ uses: Namchee/conventional-pr@v0.15.5
+ with:
+ access_token: ${{ secrets.GITHUB_TOKEN }}
+ issue: false
+
+ label:
+ needs: validate-pr
+ name: Label PR
+ runs-on: ubuntu-latest
+ if: ${{ github.event.pull_request.user.type != 'Bot'}}
+ steps:
+ - uses: bcoe/conventional-release-labels@v1
+ with:
+ type_labels: '{"feat": "enhancement","fix": "bug","docs": "documentation","style": "style","refactor": "refactor","perf": "performance","test": "test","chore": "chore","build": "build"}'
diff --git a/.github/workflows/create-release.yml b/.github/workflows/create-release.yml
new file mode 100644
index 0000000000000000000000000000000000000000..98329a3ec047f6fff4fc6fa4ffc263a4c82928fe
--- /dev/null
+++ b/.github/workflows/create-release.yml
@@ -0,0 +1,36 @@
+name: Create Release
+on:
+ workflow_dispatch:
+ inputs:
+ version:
+ description: "Version to release"
+ required: true
+ type: string
+ ref:
+ description: "Commit to tag the release"
+ required: true
+ type: string
+ pre_release:
+ description: "Pre-release tag"
+ required: true
+ type: boolean
+
+jobs:
+ create_release:
+ name: Create Release Job
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/download-artifact@v4
+ with:
+ name: dist-main
+ path: dist
+ - name: Create Release Notes
+ uses: ncipollo/release-action@v1
+ with:
+ artifacts: "dist/*"
+ token: ${{ secrets.GITHUB_TOKEN }}
+ draft: false
+ generateReleaseNotes: true
+ prerelease: ${{ inputs.pre_release }}
+ tag: v${{ inputs.version }}
+ commit: ${{ inputs.ref }}
\ No newline at end of file
diff --git a/.github/workflows/deploy_gh-pages.yml b/.github/workflows/deploy_gh-pages.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d928c3605929aa622579f1d94f6dc2b74b8325aa
--- /dev/null
+++ b/.github/workflows/deploy_gh-pages.yml
@@ -0,0 +1,41 @@
+name: Deploy to GitHub Pages
+
+on:
+ push:
+ branches:
+ - main
+ paths:
+ - 'docs/**'
+ # Review gh actions docs if you want to further define triggers, paths, etc
+ # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#on
+
+jobs:
+ deploy:
+ name: Deploy to GitHub Pages
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 18
+ cache: npm
+ cache-dependency-path: ./docs/package-lock.json
+
+ - name: Install dependencies
+ run: cd docs && npm install --legacy-peer-deps
+ - name: Build website
+ run: cd docs && npm run build
+
+ # Popular action to deploy to GitHub Pages:
+ # Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus
+ - name: Deploy to GitHub Pages
+ uses: peaceiris/actions-gh-pages@v4
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ # Build output to publish to the `gh-pages` branch:
+ publish_dir: ./docs/build
+ # The following lines assign commit authorship to the official
+ # GH-Actions bot for deploys to `gh-pages` branch:
+ # https://github.com/actions/checkout/issues/13#issuecomment-724415212
+ # The GH actions bot is used by default if you didn't specify the two fields.
+ # You can swap them out with your own user credentials.
diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5307fffb8da0de57e71038d30c13df1177b518d1
--- /dev/null
+++ b/.github/workflows/docker-build.yml
@@ -0,0 +1,323 @@
+name: Docker Build and Push
+run-name: Docker Build and Push @${{ inputs.release_type }} by @${{ github.actor }}
+on:
+ workflow_call:
+ inputs:
+ main_version:
+ required: true
+ type: string
+ description: "Main version to tag images with. Required for both main and base releases."
+ base_version:
+ required: false
+ type: string
+ description: "Base version to tag images with. Required for base release type."
+ release_type:
+ required: true
+ type: string
+ description: "Release type. One of 'main', 'main-ep', 'base', 'nightly-main', 'nightly-base'."
+ pre_release:
+ required: false
+ type: boolean
+ default: false
+ ref:
+ required: false
+ type: string
+ description: "Ref to check out. If not specified, will default to the main version or current branch."
+
+ workflow_dispatch:
+ inputs:
+ main_version:
+ description: "Main version to tag images with. Required for both main and base releases."
+ required: false
+ type: string
+ base_version:
+ description: "Base version to tag images with. Required for base release type."
+ required: false
+ type: string
+ release_type:
+ description: "Type of release. One of 'main', 'main-ep', 'base', 'nightly-main', 'nightly-base'."
+ required: true
+ type: string
+ pre_release:
+ required: false
+ type: boolean
+ default: false
+ ref:
+ required: false
+ type: string
+ description: "Ref to check out. If not specified, will default to the main version or current branch."
+
+
+env:
+ POETRY_VERSION: "1.8.2"
+ TEST_TAG: "langflowai/langflow:test"
+
+jobs:
+ get-version:
+ name: Get Version
+ runs-on: ubuntu-latest
+ outputs:
+ version: ${{ steps.get-version-input.outputs.version || steps.get-version-base.outputs.version || steps.get-version-main.outputs.version }}
+ steps:
+ - name: Verify a main version exists
+ if: ${{ inputs.main_version == '' }}
+ run: |
+ # due to how we split packages, we need to have a main version to check out.
+ echo "Must specify a main version to check out."
+ exit 1
+
+ - name: Check out the code at a specific ref
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.ref || inputs.main_version || github.ref }}
+ persist-credentials: true
+
+ - name: Get Version to Tag
+ if: ${{ inputs.main_version != '' }}
+ id: get-version-input
+ run: |
+ # Produces the versions we will use to tag the docker images with.
+
+ if [[ "${{ inputs.release_type }}" == "base" && "${{ inputs.base_version }}" == '' ]]; then
+ echo "Must specify a base version for base release type."
+ exit 1
+ fi
+
+ if [[ "${{ inputs.release_type }}" == "nightly-base" && "${{ inputs.base_version }}" == '' ]]; then
+ echo "Must specify a base version for nightly-base release type."
+ exit 1
+ fi
+
+ if [[ "${{ inputs.release_type }}" == "main" && "${{ inputs.main_version }}" == '' ]]; then
+ echo "Must specify a main version for main release type."
+ exit 1
+ fi
+
+ if [[ "${{ inputs.release_type }}" == "main-ep" && "${{ inputs.main_version }}" == '' ]]; then
+ echo "Must specify a main version for main-ep release type."
+ exit 1
+ fi
+
+ if [[ "${{ inputs.release_type }}" == "nightly-main" && "${{ inputs.main_version }}" == '' ]]; then
+ echo "Must specify a main version for nightly-main release type."
+ exit 1
+ fi
+
+ if [[ "${{ inputs.release_type }}" == "base" || "${{ inputs.release_type }}" == "nightly-base" ]]; then
+ version=${{ inputs.base_version }}
+ echo "base version=${{ inputs.base_version }}"
+ echo version=$version
+ echo version=$version >> $GITHUB_OUTPUT
+ elif [[ "${{ inputs.release_type }}" == "main" || "${{ inputs.release_type }}" == "main-ep" || "${{ inputs.release_type }}" == "nightly-main" ]]; then
+ version=${{ inputs.main_version }}
+ echo version=$version
+ echo version=$version >> $GITHUB_OUTPUT
+ else
+ echo "No version or ref specified. Exiting the workflow."
+ exit 1
+ fi
+ - name: Get Version Base
+ if: ${{ inputs.base_version == '' && (inputs.release_type == 'base' || inputs.release_type == 'nightly-base') }}
+ id: get-version-base
+ run: |
+ version=$(uv tree | grep 'langflow-base' | awk '{print $3}' | sed 's/^v//')
+ if [ -z "$version" ]; then
+ echo "Failed to extract version from uv tree output"
+ exit 1
+ fi
+ echo version=$version
+ echo version=$version >> $GITHUB_OUTPUT
+ - name: Get Version Main
+ if: ${{ inputs.main_version == '' && (inputs.release_type == 'main' || inputs.release_type == 'main-ep' || inputs.release_type == 'nightly-main') }}
+ id: get-version-main
+ run: |
+ version=$(uv tree | grep 'langflow' | grep -v 'langflow-base' | awk '{print $2}' | sed 's/^v//')
+ echo version=$version
+ echo version=$version >> $GITHUB_OUTPUT
+ setup:
+ runs-on: ubuntu-latest
+ needs: get-version
+ outputs:
+ docker_tags: ${{ steps.set-vars.outputs.docker_tags }}
+ ghcr_tags: ${{ steps.set-vars.outputs.ghcr_tags }}
+ file: ${{ steps.set-vars.outputs.file }}
+ steps:
+ - name: Set Dockerfile and Tags
+ id: set-vars
+ run: |
+ nightly_suffix=''
+ if [[ "${{ inputs.release_type }}" == "nightly-base" || "${{ inputs.release_type }}" == "nightly-main" ]]; then
+ nightly_suffix="-nightly"
+ fi
+
+ if [[ "${{ inputs.release_type }}" == "base" || "${{ inputs.release_type }}" == "nightly-base" ]]; then
+ # LANGFLOW-BASE RELEASE
+ echo "docker_tags=langflowai/langflow${nightly_suffix}:base-${{ needs.get-version.outputs.version }},langflowai/langflow${nightly_suffix}:base-latest" >> $GITHUB_OUTPUT
+ echo "ghcr_tags=ghcr.io/langflow-ai/langflow${nightly_suffix}:base-${{ needs.get-version.outputs.version }},ghcr.io/langflow-ai/langflow${nightly_suffix}:base-latest" >> $GITHUB_OUTPUT
+ echo "file=./docker/build_and_push_base.Dockerfile" >> $GITHUB_OUTPUT
+ else
+ if [[ "${{ inputs.pre_release }}" == "true" ]]; then
+ # LANGFLOW-MAIN PRE-RELEASE
+ echo "docker_tags=langflowai/langflow${nightly_suffix}:${{ needs.get-version.outputs.version }}" >> $GITHUB_OUTPUT
+ echo "ghcr_tags=ghcr.io/langflow-ai/langflow${nightly_suffix}:${{ needs.get-version.outputs.version }}" >> $GITHUB_OUTPUT
+ echo "file=./docker/build_and_push.Dockerfile" >> $GITHUB_OUTPUT
+ elif [[ "${{ inputs.release_type }}" == "main-ep" ]]; then
+ # LANGFLOW-MAIN (ENTRYPOINT) RELEASE
+ echo "docker_tags=langflowai/langflow-ep${nightly_suffix}:${{ needs.get-version.outputs.version }},langflowai/langflow-ep${nightly_suffix}:latest" >> $GITHUB_OUTPUT
+ echo "ghcr_tags=ghcr.io/langflow-ai/langflow-ep${nightly_suffix}:${{ needs.get-version.outputs.version }},ghcr.io/langflow-ai/langflow-ep${nightly_suffix}:latest" >> $GITHUB_OUTPUT
+ echo "file=./docker/build_and_push_ep.Dockerfile" >> $GITHUB_OUTPUT
+ elif [[ "${{ inputs.release_type }}" == "main" || "${{ inputs.release_type }}" == "nightly-main" ]]; then
+ # LANGFLOW-MAIN RELEASE
+ echo "docker_tags=langflowai/langflow${nightly_suffix}:${{ needs.get-version.outputs.version }},langflowai/langflow${nightly_suffix}:latest" >> $GITHUB_OUTPUT
+ echo "ghcr_tags=ghcr.io/langflow-ai/langflow${nightly_suffix}:${{ needs.get-version.outputs.version }},ghcr.io/langflow-ai/langflow${nightly_suffix}:latest" >> $GITHUB_OUTPUT
+ echo "file=./docker/build_and_push.Dockerfile" >> $GITHUB_OUTPUT
+ else
+ echo "Invalid release type. Exiting the workflow."
+ exit 1
+ fi
+ fi
+ build:
+ runs-on: ubuntu-latest
+ needs: [get-version, setup]
+ steps:
+ - name: Check out the code at a specific ref
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.ref || inputs.main_version || github.ref }}
+ persist-credentials: true
+ - name: "Setup Environment"
+ uses: ./.github/actions/setup-uv
+ - name: Install the project
+ run: |
+ if [[ "${{ inputs.release_type }}" == "base" || "${{ inputs.release_type }}" == "nightly-base" ]]; then
+ uv sync --directory src/backend/base --no-dev --no-sources
+ else
+ uv sync --no-dev --no-sources
+ fi
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Build and Push to Docker Hub
+ uses: docker/build-push-action@v6
+ with:
+ context: .
+ push: true
+ file: ${{ needs.setup.outputs.file }}
+ tags: ${{ needs.setup.outputs.docker_tags }}
+ platforms: linux/amd64,linux/arm64
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+
+ - name: Login to Github Container Registry
+ uses: docker/login-action@v3
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.TEMP_GHCR_TOKEN }}
+
+ - name: Build and push to Github Container Registry
+ uses: docker/build-push-action@v6
+ with:
+ context: .
+ push: true
+ file: ${{ needs.setup.outputs.file }}
+ tags: ${{ needs.setup.outputs.ghcr_tags }}
+ platforms: linux/amd64,linux/arm64
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+
+ build_components:
+ if: ${{ inputs.release_type == 'main' }}
+ runs-on: ubuntu-latest
+ permissions:
+ packages: write
+ needs: [build, get-version]
+ strategy:
+ matrix:
+ component: [docker-backend, docker-frontend, ghcr-backend, ghcr-frontend]
+ include:
+ - component: docker-backend
+ dockerfile: ./docker/build_and_push_backend.Dockerfile
+ tags: langflowai/langflow-backend:${{ needs.get-version.outputs.version }},langflowai/langflow-backend:latest
+ langflow_image: langflowai/langflow:${{ needs.get-version.outputs.version }}
+ - component: docker-frontend
+ dockerfile: ./docker/frontend/build_and_push_frontend.Dockerfile
+ tags: langflowai/langflow-frontend:${{ needs.get-version.outputs.version }},langflowai/langflow-frontend:latest
+ langflow_image: langflowai/langflow:${{ needs.get-version.outputs.version }}
+ - component: ghcr-backend
+ dockerfile: ./docker/build_and_push_backend.Dockerfile
+ tags: ghcr.io/langflow-ai/langflow-backend:${{ needs.get-version.outputs.version }},ghcr.io/langflow-ai/langflow-backend:latest
+ langflow_image: ghcr.io/langflow-ai/langflow:${{ needs.get-version.outputs.version }}
+ - component: ghcr-frontend
+ dockerfile: ./docker/frontend/build_and_push_frontend.Dockerfile
+ tags: ghcr.io/langflow-ai/langflow-frontend:${{ needs.get-version.outputs.version }},ghcr.io/langflow-ai/langflow-frontend:latest
+ langflow_image: ghcr.io/langflow-ai/langflow:${{ needs.get-version.outputs.version }}
+ steps:
+ - name: Check out the code at a specific ref
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.ref || inputs.main_version || github.ref }}
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Login to Docker Hub
+ if: ${{ matrix.component == 'docker-backend' || matrix.component == 'docker-frontend' }}
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Login to Github Container Registry
+ if: ${{ matrix.component == 'ghcr-backend' || matrix.component == 'ghcr-frontend' }}
+ uses: docker/login-action@v3
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.TEMP_GHCR_TOKEN }}
+
+ - name: Wait for propagation (for backend)
+ run: sleep 120
+
+ - name: Build and push ${{ matrix.component }}
+ uses: docker/build-push-action@v6
+ with:
+ context: .
+ push: true
+ build-args: |
+ LANGFLOW_IMAGE=${{ matrix.langflow_image }}
+ file: ${{ matrix.dockerfile }}
+ tags: ${{ matrix.tags }}
+ # provenance: false will result in a single manifest for all platforms which makes the image pullable from arm64 machines via the emulation (e.g. Apple Silicon machines)
+ provenance: false
+
+ restart-space:
+ name: Restart HuggingFace Spaces
+ if: ${{ inputs.release_type == 'main' }}
+ runs-on: ubuntu-latest
+ needs: [build, get-version]
+ strategy:
+ matrix:
+ python-version:
+ - "3.12"
+ steps:
+ - name: Check out the code at a specific ref
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.ref || inputs.main_version || github.ref }}
+ - name: "Setup Environment"
+ uses: ./.github/actions/setup-uv
+
+ - name: Restart HuggingFace Spaces Build
+ run: |
+ uv run ./scripts/factory_restart_space.py --space "Langflow/Langflow" --token ${{ secrets.HUGGINGFACE_API_TOKEN }}
+
+
+
diff --git a/.github/workflows/docker_test.yml b/.github/workflows/docker_test.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ea4824ef3fb9b8cb0662ef826d23758f758829c3
--- /dev/null
+++ b/.github/workflows/docker_test.yml
@@ -0,0 +1,64 @@
+name: Test Docker images
+
+on:
+ push:
+ branches: [main]
+ paths:
+ - "docker/**"
+ - "poetry.lock"
+ - "pyproject.toml"
+ - "src/backend/**"
+ - ".github/workflows/docker_test.yml"
+ pull_request:
+ branches: [dev]
+ paths:
+ - "docker/**"
+ - "poetry.lock"
+ - "pyproject.toml"
+ - "src/**"
+ - ".github/workflows/docker_test.yml"
+
+env:
+ POETRY_VERSION: "1.8.2"
+
+jobs:
+ test-docker:
+ runs-on: ubuntu-latest
+ name: Test docker images
+ steps:
+ - uses: actions/checkout@v4
+ - name: Build image
+ run: |
+ docker build -t langflowai/langflow:latest-dev \
+ -f docker/build_and_push.Dockerfile \
+ .
+ - name: Test image
+ run: |
+ expected_version=$(cat pyproject.toml | grep version | head -n 1 | cut -d '"' -f 2)
+ version=$(docker run --rm --entrypoint bash langflowai/langflow:latest-dev -c "python -c 'from langflow.utils.version import get_version_info; print(get_version_info()[\"version\"])'")
+ if [ "$expected_version" != "$version" ]; then
+ echo "Expected version: $expected_version"
+ echo "Actual version: $version"
+ exit 1
+ fi
+
+ - name: Build backend image
+ run: |
+ docker build -t langflowai/langflow-backend:latest-dev \
+ --build-arg LANGFLOW_IMAGE=langflowai/langflow:latest-dev \
+ -f docker/build_and_push_backend.Dockerfile \
+ .
+ - name: Test backend image
+ run: |
+ expected_version=$(cat pyproject.toml | grep version | head -n 1 | cut -d '"' -f 2)
+ version=$(docker run --rm --entrypoint bash langflowai/langflow-backend:latest-dev -c "python -c 'from langflow.utils.version import get_version_info; print(get_version_info()[\"version\"])'")
+ if [ "$expected_version" != "$version" ]; then
+ echo "Expected version: $expected_version"
+ echo "Actual version: $version"
+ exit 1
+ fi
+ - name: Build frontend image
+ run: |
+ docker build -t langflowai/langflow-frontend:latest-dev \
+ -f docker/frontend/build_and_push_frontend.Dockerfile \
+ .
diff --git a/.github/workflows/docs_test.yml b/.github/workflows/docs_test.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0298c30fa4bb019e57734bdb9cde90011f5e87f5
--- /dev/null
+++ b/.github/workflows/docs_test.yml
@@ -0,0 +1,49 @@
+name: Test Docs Build
+
+on:
+ workflow_call:
+ workflow_dispatch:
+ inputs:
+ branch:
+ description: "(Optional) Branch to checkout"
+ required: false
+ type: string
+
+env:
+ NODE_VERSION: "21"
+
+jobs:
+ test-docs-build:
+ name: Test Docs Build
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.branch || github.ref }}
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ id: setup-node
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+
+ - name: Cache Node.js dependencies
+ uses: actions/cache@v4
+ id: npm-cache
+ with:
+ path: ~/.npm
+ key: ${{ runner.os }}-node-${{ hashFiles('docs/package-lock.json') }}
+ restore-keys: |
+ ${{ runner.os }}-node-
+
+ - name: Install Node.js dependencies
+ run: |
+ cd docs
+ npm install --legacy-peer-deps
+ if: ${{ steps.setup-node.outputs.cache-hit != 'true' }}
+
+ - name: Build Docs
+ run: |
+ cd docs
+ npm run build
diff --git a/.github/workflows/fetch_docs_notion.yml b/.github/workflows/fetch_docs_notion.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fcc572cb5038fef171d2a962fb4f696cf634b6ee
--- /dev/null
+++ b/.github/workflows/fetch_docs_notion.yml
@@ -0,0 +1,61 @@
+name: Fetch Docs from Notion
+
+on:
+ workflow_dispatch:
+
+env:
+ NODE_VERSION: "21"
+
+jobs:
+ fetch-docs:
+ name: Fetch Docs from Notion
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Get current date
+ id: date
+ run: echo "DATE=$(date +'%Y%m%d%H%M%S')" >> "$GITHUB_OUTPUT"
+
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ id: setup-node
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+
+ - name: Cache Node.js dependencies
+ uses: actions/cache@v4
+ id: npm-cache
+ with:
+ path: ~/.npm
+ key: ${{ runner.os }}-node-${{ hashFiles('docs/package-lock.json') }}
+ restore-keys: |
+ ${{ runner.os }}-node-
+
+ - name: Install Node.js dependencies
+ run: |
+ cd docs
+ npm install --legacy-peer-deps
+ if: ${{ steps.setup-node.outputs.cache-hit != 'true' }}
+
+ - name: Fetch Docs from Notion
+ run: |
+ cd docs
+ npm run pull
+ env:
+ NOTION_TOKEN: ${{ secrets.NOTION_TOKEN }}
+ NOTION_DOCS_ROOT_PAGE_ID: ${{ secrets.NOTION_DOCS_ROOT_PAGE_ID }}
+
+ - name: Create Pull Request
+ id: create_pr
+ uses: peter-evans/create-pull-request@v7
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ commit-message: Update docs from Notion
+ branch: update-docs-${{ steps.date.outputs.DATE }}
+ base: main
+ title: "docs: update docs from notion"
+ body: This PR updates the documentation from Notion.
+ labels: documentation
diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml
new file mode 100644
index 0000000000000000000000000000000000000000..df5480ac3606e7a16b53ccd4e559e09482ebbe08
--- /dev/null
+++ b/.github/workflows/integration_tests.yml
@@ -0,0 +1,50 @@
+name: Integration Tests
+
+on:
+ workflow_dispatch:
+ inputs:
+ ref:
+ description: "(Optional) ref to checkout"
+ required: false
+ type: string
+ workflow_call:
+ inputs:
+ python-versions:
+ description: "(Optional) Python versions to test"
+ required: true
+ type: string
+ default: "['3.10', '3.11', '3.12']"
+ ref:
+ description: "(Optional) ref to checkout"
+ required: false
+ type: string
+
+env:
+ POETRY_VERSION: "1.8.2"
+
+jobs:
+ integration-tests:
+ name: Run Integration Tests
+ runs-on: ubuntu-latest
+ strategy:
+ max-parallel: 1 # Currently, we can only run one at a time for collection-per-db-constraints
+ matrix:
+ python-version:
+ - "3.12"
+ - "3.11"
+ - "3.10"
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ ASTRA_DB_API_ENDPOINT: ${{ secrets.ASTRA_DB_API_ENDPOINT }}
+ ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.ASTRA_DB_APPLICATION_TOKEN }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.ref || github.ref }}
+ - name: "Setup Environment"
+ uses: ./.github/actions/setup-uv
+ - name: Run integration tests with api keys
+ timeout-minutes: 20
+ run: |
+ make integration_tests_api_keys
diff --git a/.github/workflows/js_autofix.yml b/.github/workflows/js_autofix.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fee655946511373947dd6d271d728ab415bd889d
--- /dev/null
+++ b/.github/workflows/js_autofix.yml
@@ -0,0 +1,45 @@
+name: autofix.ci
+
+on:
+ pull_request:
+ paths:
+ - "src/frontend/**"
+
+permissions:
+ contents: read
+
+env:
+ NODE_VERSION: "21"
+jobs:
+ autofix:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ id: setup-node
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+
+ - name: Cache Node.js dependencies
+ uses: actions/cache@v4
+ id: npm-cache
+ with:
+ path: ~/.npm
+ key: ${{ runner.os }}-node-${{ hashFiles('src/frontend/package-lock.json') }}
+ restore-keys: |
+ ${{ runner.os }}-node-
+
+ - name: Install Node.js dependencies
+ run: |
+ cd src/frontend
+ npm ci
+ if: ${{ steps.setup-node.outputs.cache-hit != 'true' }}
+ - name: Run Prettier
+ run: |
+ cd src/frontend
+ npm run format
+
+ - uses: autofix-ci/action@ff86a557419858bb967097bfc916833f5647fa8c
diff --git a/.github/workflows/lint-js.yml b/.github/workflows/lint-js.yml
new file mode 100644
index 0000000000000000000000000000000000000000..892347312f3df7685c7b65dedd86c2a6dae8455b
--- /dev/null
+++ b/.github/workflows/lint-js.yml
@@ -0,0 +1,53 @@
+name: Lint Frontend
+
+on:
+ workflow_call:
+ workflow_dispatch:
+ inputs:
+ branch:
+ description: "(Optional) Branch to checkout"
+ required: false
+ type: string
+
+
+env:
+ NODE_VERSION: "21"
+
+jobs:
+ run-linters:
+ name: Run Prettier
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.branch || github.ref }}
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ id: setup-node
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+
+ - name: Cache Node.js dependencies
+ uses: actions/cache@v4
+ id: npm-cache
+ with:
+ path: ~/.npm
+ key: ${{ runner.os }}-node-${{ hashFiles('src/frontend/package-lock.json') }}
+ restore-keys: |
+ ${{ runner.os }}-node-
+
+ - name: Install Node.js dependencies
+ run: |
+ cd src/frontend
+ npm install
+ if: ${{ steps.setup-node.outputs.cache-hit != 'true' }}
+
+ - name: Run Prettier
+ run: |
+ cd src/frontend
+ npm run check-format
diff --git a/.github/workflows/lint-py.yml b/.github/workflows/lint-py.yml
new file mode 100644
index 0000000000000000000000000000000000000000..937de410dd83e5fe60dc5d7fad43126859ed97e1
--- /dev/null
+++ b/.github/workflows/lint-py.yml
@@ -0,0 +1,41 @@
+name: Lint Python
+
+on:
+ workflow_call:
+ workflow_dispatch:
+ inputs:
+ branch:
+ description: "(Optional) Branch to checkout"
+ required: false
+ type: string
+env:
+ POETRY_VERSION: "1.8.2"
+
+
+jobs:
+ lint:
+ name: Run Mypy
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version:
+ - "3.12"
+ - "3.11"
+ - "3.10"
+ steps:
+ - name: Check out the code at a specific ref
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.branch || github.ref }}
+ persist-credentials: true
+ - name: "Setup Environment"
+ uses: ./.github/actions/setup-uv
+ - name: Install the project
+ run: uv sync --dev
+ - name: Run Mypy
+ run: |
+ uv run mypy --namespace-packages -p "langflow"
+ env:
+ GITHUB_TOKEN: ${{ secrets.github_token }}
+ - name: Minimize uv cache
+ run: uv cache prune --ci
diff --git a/.github/workflows/matchers/ruff.json b/.github/workflows/matchers/ruff.json
new file mode 100644
index 0000000000000000000000000000000000000000..53b21be23dfee94403e66d83c92a2359c6774719
--- /dev/null
+++ b/.github/workflows/matchers/ruff.json
@@ -0,0 +1,14 @@
+{
+ "problemMatcher": [
+ {
+ "owner": "ruff",
+ "pattern": [
+ {
+ "regexp": "^(Would reformat): (.+)$",
+ "message": 1,
+ "file": 2
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.github/workflows/nightly_build.yml b/.github/workflows/nightly_build.yml
new file mode 100644
index 0000000000000000000000000000000000000000..70f0c21b4ce0983d9e1c585ba09356d30ae0a4e5
--- /dev/null
+++ b/.github/workflows/nightly_build.yml
@@ -0,0 +1,204 @@
+name: Nightly Build
+
+on:
+ workflow_dispatch:
+ schedule:
+ # Run job at 6:30 UTC, 10.30pm PST, or 11.30pm PDT
+ - cron: "30 6 * * *"
+
+env:
+ POETRY_VERSION: "1.8.3"
+ PYTHON_VERSION: "3.12"
+
+jobs:
+ create-nightly-tag:
+ if: github.repository == 'langflow-ai/langflow'
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ shell: bash -ex -o pipefail {0}
+ permissions:
+ # Required to create tag
+ contents: write
+ outputs:
+ main_tag: ${{ steps.generate_main_tag.outputs.main_tag }}
+ base_tag: ${{ steps.set_base_tag.outputs.base_tag }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ persist-credentials: true
+ - name: "Setup Environment"
+ uses: ./.github/actions/setup-uv
+ - name: Install the project
+ run: uv sync --dev
+
+ - name: Generate main nightly tag
+ id: generate_main_tag
+ run: |
+ # NOTE: This outputs the tag with the `v` prefix.
+ MAIN_TAG="$(uv run ./scripts/ci/pypi_nightly_tag.py main)"
+ echo "main_tag=$MAIN_TAG" >> $GITHUB_OUTPUT
+ echo "main_tag=$MAIN_TAG"
+
+ - name: Check if main tag already exists
+ id: check_main_tag
+ run: |
+ git fetch --tags
+ if git rev-parse -q --verify "refs/tags/${{ steps.generate_main_tag.outputs.main_tag }}" >/dev/null; then
+ echo "main_tag_exists=true" >> $GITHUB_OUTPUT
+ else
+ echo "main_tag_exists=false" >> $GITHUB_OUTPUT
+ fi
+
+ - name: Generate base nightly tag
+ id: generate_base_tag
+ if: ${{ steps.check_main_tag.outputs.main_tag_exists == 'false' }}
+ run: |
+ # NOTE: This outputs the tag with the `v` prefix.
+ BASE_TAG="$(uv run ./scripts/ci/pypi_nightly_tag.py base)"
+ echo "base_tag=$BASE_TAG" >> $GITHUB_OUTPUT
+ echo "base_tag=$BASE_TAG"
+
+ - name: Commit tag
+ id: commit_tag
+ if: ${{ steps.check_main_tag.outputs.main_tag_exists == 'false' }}
+ run: |
+ # If the main tag does not exist in GH, we create the base tag from the existing codebase.
+
+ git config --global user.email "bot-nightly-builds@langflow.org"
+ git config --global user.name "Langflow Bot"
+
+ MAIN_TAG="${{ steps.generate_main_tag.outputs.main_tag }}"
+ BASE_TAG="${{ steps.generate_base_tag.outputs.base_tag }}"
+ echo "Updating base project version to $BASE_TAG and updating main project version to $MAIN_TAG"
+ uv run ./scripts/ci/update_pyproject_combined.py main $MAIN_TAG $BASE_TAG
+
+ uv lock
+ cd src/backend/base && uv lock && cd ../../..
+
+ git add pyproject.toml src/backend/base/pyproject.toml uv.lock src/backend/base/uv.lock
+ git commit -m "Update version and project name"
+
+ echo "Tagging main with $MAIN_TAG"
+ if ! git tag -a $MAIN_TAG -m "Langflow nightly $MAIN_TAG"; then
+ echo "Tag creation failed. Exiting the workflow."
+ exit 1
+ fi
+
+ echo "Pushing main tag $MAIN_TAG"
+ if ! git push origin $MAIN_TAG; then
+ echo "Tag push failed. Check if the tag already exists. Exiting the workflow."
+ exit 1
+ fi
+ # TODO: notify on failure
+
+ - name: Checkout main nightly tag
+ uses: actions/checkout@v4
+ if: ${{ steps.check_main_tag.outputs.main_tag_exists == 'true' }}
+ with:
+ ref: ${{ steps.generate_main_tag.outputs.main_tag }}
+
+ - name: Retrieve Base Tag
+ id: retrieve_base_tag
+ if: ${{ steps.check_main_tag.outputs.main_tag_exists == 'true' }}
+ working-directory: src/backend/base
+ run: |
+ # If the main tag already exists, we need to retrieve the base version from the main tag codebase.
+ version=$(uv tree | grep 'langflow-base' | awk '{print $3}')
+ echo "base_tag=$version" >> $GITHUB_OUTPUT
+ echo "base_tag=$version"
+
+ - name: Set Base Tag
+ id: set_base_tag
+ run: |
+ if [ "${{ steps.retrieve_base_tag.conclusion }}" != "skipped" ] && [ "${{ steps.retrieve_base_tag.outputs.base_tag }}" ]; then
+ BASE_TAG="${{ steps.retrieve_base_tag.outputs.base_tag }}"
+ echo "base_tag=$BASE_TAG" >> $GITHUB_OUTPUT
+ echo "base_tag=$BASE_TAG"
+ elif [ "${{ steps.commit_tag.conclusion }}" != "skipped" ] && [ "${{ steps.generate_base_tag.outputs.base_tag }}" ]; then
+ BASE_TAG="${{ steps.generate_base_tag.outputs.base_tag }}"
+ echo "base_tag=$BASE_TAG" >> $GITHUB_OUTPUT
+ echo "base_tag=$BASE_TAG"
+ else
+ echo "No base tag found. Exiting the workflow."
+ exit 1
+ fi
+
+ frontend-tests:
+ if: github.repository == 'langflow-ai/langflow'
+ name: Run Frontend Tests
+ needs: create-nightly-tag
+ uses: ./.github/workflows/typescript_test.yml
+ with:
+ tests_folder: "tests"
+ release: true
+ secrets:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ STORE_API_KEY: ${{ secrets.STORE_API_KEY }}
+ ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+ TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }}
+
+ backend-unit-tests:
+ if: github.repository == 'langflow-ai/langflow'
+ name: Run Backend Unit Tests
+ needs: create-nightly-tag
+ uses: ./.github/workflows/python_test.yml
+ with:
+ python-versions: '["3.10", "3.11", "3.12"]'
+
+ # Not making nightly builds dependent on integration test success
+ # due to inherent flakiness of 3rd party integrations
+ # Revisit when https://github.com/langflow-ai/langflow/pull/3607 is merged.
+ # backend-integration-tests:
+ # name: Run Backend Integration Tests
+ # needs: create-nightly-tag
+ # uses: ./.github/workflows/integration_tests.yml
+ # with:
+ # python-versions: '["3.10", "3.11", "3.12"]'
+ # ref: ${{ needs.create-nightly-tag.outputs.tag }}
+
+ release-nightly-build:
+ if: github.repository == 'langflow-ai/langflow'
+ name: Run Nightly Langflow Build
+ needs: [frontend-tests, backend-unit-tests, create-nightly-tag]
+ uses: ./.github/workflows/release_nightly.yml
+ with:
+ build_docker_base: true
+ build_docker_main: true
+ nightly_tag_main: ${{ needs.create-nightly-tag.outputs.main_tag }}
+ nightly_tag_base: ${{ needs.create-nightly-tag.outputs.base_tag }}
+ secrets: inherit
+
+ # slack-notification:
+ # name: Send Slack Notification
+ # needs: run-nightly-build
+ # runs-on: ubuntu-latest
+ # steps:
+ # - name: Send success notification to Slack
+ # if: success()
+ # uses: slackapi/slack-github-action@v1.26.0
+ # with:
+ # payload: |
+ # {
+ # "channel": "#langflow-nightly-builds",
+ # "username": "GitHub Actions",
+ # "text": "Nightly Build Successful :white_check_mark:",
+ # "icon_emoji": ":rocket:"
+ # }
+ # env:
+ # SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+
+ # - name: Send failure notification to Slack
+ # if: failure()
+ # uses: slackapi/slack-github-action@v1.26.0
+ # with:
+ # payload: |
+ # {
+ # "channel": "#langflow-nightly-builds",
+ # "username": "GitHub Actions",
+ # "text": "Nightly Build Failed :x:",
+ # "icon_emoji": ":warning:"
+ # }
+ # env:
+ # SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
diff --git a/.github/workflows/py_autofix.yml b/.github/workflows/py_autofix.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7fe391c973052845dd85c928d6ffd500b477289d
--- /dev/null
+++ b/.github/workflows/py_autofix.yml
@@ -0,0 +1,21 @@
+name: autofix.ci
+on:
+ pull_request:
+ paths:
+ - "**/*.py"
+env:
+ POETRY_VERSION: "1.8.2"
+
+jobs:
+ lint:
+ name: Run Ruff Check and Format
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: "Setup Environment"
+ uses: ./.github/actions/setup-uv
+ - run: uv run ruff check --fix-only .
+ - run: uv run ruff format .
+ - uses: autofix-ci/action@ff86a557419858bb967097bfc916833f5647fa8c
+ - name: Minimize uv cache
+ run: uv cache prune --ci
diff --git a/.github/workflows/python_test.yml b/.github/workflows/python_test.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b6796e6a22bd584fd05ee51efea617cc00eaee8c
--- /dev/null
+++ b/.github/workflows/python_test.yml
@@ -0,0 +1,175 @@
+name: Python tests
+
+on:
+ workflow_call:
+ inputs:
+ python-versions:
+ description: "(Optional) Python versions to test"
+ required: true
+ type: string
+ default: "['3.10', '3.11', '3.12']"
+ ref:
+ description: "(Optional) ref to checkout"
+ required: false
+ type: string
+ nightly:
+ description: "Whether run is from the nightly build"
+ required: false
+ type: boolean
+ default: false
+ workflow_dispatch:
+ inputs:
+ python-versions:
+ description: "(Optional) Python versions to test"
+ required: true
+ type: string
+ default: "['3.10', '3.11', '3.12']"
+env:
+ POETRY_VERSION: "1.8.2"
+ NODE_VERSION: "21"
+ PYTEST_RUN_PATH: "src/backend/tests"
+
+jobs:
+ build:
+ name: Unit Tests - Python ${{ matrix.python-version }} - Group ${{ matrix.group }}
+ runs-on: ubuntu-latest
+ env:
+ UV_CACHE_DIR: /tmp/.uv-cache
+ strategy:
+ matrix:
+ python-version: ${{ fromJson(inputs.python-versions || '["3.10", "3.11", "3.12"]' ) }}
+ splitCount: [5]
+ group: [1, 2, 3, 4, 5]
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.ref || github.ref }}
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ id: setup-node
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+ - name: Install uv
+ uses: astral-sh/setup-uv@v4
+ with:
+ enable-cache: true
+ cache-dependency-glob: "uv.lock"
+ - name: "Set up Python"
+ uses: actions/setup-python@v5
+ with:
+ python-version-file: "pyproject.toml"
+ - name: Restore uv cache
+ uses: actions/cache@v4
+ with:
+ path: /tmp/.uv-cache
+ key: uv-${{ runner.os }}-${{ hashFiles('uv.lock') }}
+ restore-keys: |
+ uv-${{ runner.os }}-${{ hashFiles('uv.lock') }}
+ uv-${{ runner.os }}
+ - name: Install the project
+ run: uv sync --dev
+ - name: Run unit tests
+ uses: nick-fields/retry@v3
+ with:
+ timeout_minutes: 12
+ max_attempts: 2
+ command: make unit_tests async=false args="-x --splits ${{ matrix.splitCount }} --group ${{ matrix.group }}"
+ - name: Minimize uv cache
+ run: uv cache prune --ci
+ integration-tests:
+ name: Integration Tests - Python ${{ matrix.python-version }}
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ${{ fromJson(inputs.python-versions || '["3.10", "3.11", "3.12"]' ) }}
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.ref || github.ref }}
+ - name: Install uv
+ uses: astral-sh/setup-uv@v4
+ with:
+ enable-cache: true
+ cache-dependency-glob: "uv.lock"
+ - name: "Set up Python"
+ uses: actions/setup-python@v5
+ with:
+ python-version-file: "pyproject.toml"
+ - name: Restore uv cache
+ uses: actions/cache@v4
+ with:
+ path: /tmp/.uv-cache
+ key: uv-${{ runner.os }}-${{ hashFiles('uv.lock') }}
+ restore-keys: |
+ uv-${{ runner.os }}-${{ hashFiles('uv.lock') }}
+ uv-${{ runner.os }}
+ - name: Install the project
+ run: uv sync --dev
+ - name: Run integration tests
+ run: make integration_tests_no_api_keys
+ - name: Minimize uv cache
+ run: uv cache prune --ci
+ test-cli:
+ name: Test CLI - Python ${{ matrix.python-version }}
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ${{ fromJson(inputs.python-versions || '["3.10", "3.11", "3.12"]') }}
+ steps:
+ - name: Check out the code at a specific ref
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.ref || github.ref }}
+ - name: "Setup Environment"
+ uses: ./.github/actions/setup-uv
+
+ - name: Check Version
+ id: check-version
+ # We need to print $3 because langflow-base is a dependency of langflow
+ # For langflow we'd use print $2
+ run: |
+ version=$(uv tree | grep 'langflow-base' | awk '{print $3}' | sed 's/^v//')
+ url="https://pypi.org/pypi/langflow-base/json"
+ if [ "${{ inputs.nightly }}" == true ]; then
+ url="https://pypi.org/pypi/langflow-base-nightly/json"
+ fi
+
+ last_released_version=$(curl -s $url | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
+ if [ "$version" != "$last_released_version" ]; then
+ echo "Version $version has not been released yet. Skipping the rest of the job."
+ echo skipped=true >> $GITHUB_OUTPUT
+ exit 0
+ else
+ echo version=$version >> $GITHUB_OUTPUT
+ echo skipped=false >> $GITHUB_OUTPUT
+ fi
+ - name: Build wheel
+ if: steps.check-version.outputs.skipped == 'false'
+ run: |
+ make build main=true
+ - name: Install wheel and Test CLI
+ if: steps.check-version.outputs.skipped == 'false'
+ run: |
+ uv venv new-venv
+ source new-venv/bin/activate
+ uv pip install dist/*.whl
+ - name: Test CLI
+ if: steps.check-version.outputs.skipped == 'false'
+ run: |
+ source new-venv/bin/activate
+ python -m langflow run --host 127.0.0.1 --port 7860 --backend-only &
+ SERVER_PID=$!
+ # Wait for the server to start
+ timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/api/v1/auto_login; do sleep 5; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1)
+ # Terminate the server
+ kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1)
+ sleep 20 # give the server some time to terminate
+ # Check if the server is still running
+ if kill -0 $SERVER_PID 2>/dev/null; then
+ echo "Failed to terminate the server"
+ exit 1
+ else
+ echo "Server terminated successfully"
+ fi
+ - name: Minimize uv cache
+ run: uv cache prune --ci
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a25578adc89ff904e4ce9099521c16dc08155631
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,243 @@
+name: Langflow Release
+run-name: Langflow Release by @${{ github.actor }}
+
+on:
+ workflow_dispatch:
+ inputs:
+ release_package_base:
+ description: "Release Langflow Base"
+ required: true
+ type: boolean
+ default: false
+ release_package_main:
+ description: "Release Langflow"
+ required: true
+ type: boolean
+ default: false
+ build_docker_base:
+ description: "Build Docker Image for Langflow Base"
+ required: true
+ type: boolean
+ default: false
+ build_docker_main:
+ description: "Build Docker Image for Langflow"
+ required: true
+ type: boolean
+ default: false
+ build_docker_ep:
+ description: "Build Docker Image for Langflow with Entrypoint"
+ required: false
+ type: boolean
+ default: false
+ pre_release:
+ description: "Pre-release"
+ required: false
+ type: boolean
+ default: false
+ create_release:
+ description: "Whether to create a gh release"
+ required: false
+ type: boolean
+ default: true
+
+
+jobs:
+ ci:
+ if: ${{ github.event.inputs.release_package_base == 'true' || github.event.inputs.release_package_main == 'true' }}
+ name: CI
+ uses: ./.github/workflows/ci.yml
+ with:
+ python-versions: "['3.10', '3.11', '3.12']"
+ frontend-tests-folder: "tests"
+ release: true
+
+ release-base:
+ name: Release Langflow Base
+ needs: [ci]
+ if: inputs.release_package_base == true
+ runs-on: ubuntu-latest
+ outputs:
+ version: ${{ steps.check-version.outputs.version }}
+ skipped: ${{ steps.check-version.outputs.skipped }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ - name: Setup Environment
+ uses: ./.github/actions/setup-uv
+ - name: Install the project
+ run: uv sync --dev
+ - name: Check Version
+ id: check-version
+ run: |
+ version=$(uv tree | grep 'langflow-base' | awk '{print $3}' | sed 's/^v//')
+ last_released_version=$(curl -s "https://pypi.org/pypi/langflow-base/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
+ if [ "$version" = "$last_released_version" ]; then
+ echo "Version $version is already released. Skipping release."
+ echo skipped=true >> $GITHUB_OUTPUT
+ exit 0
+ else
+ echo version=$version >> $GITHUB_OUTPUT
+ echo skipped=false >> $GITHUB_OUTPUT
+ fi
+ - name: Build project for distribution
+ if: steps.check-version.outputs.skipped == 'false'
+ run: make build base=true args="--wheel"
+ - name: Test CLI
+ if: steps.check-version.outputs.skipped == 'false'
+ run: |
+ # TODO: Unsure why the whl is not built in src/backend/base/dist
+ mkdir src/backend/base/dist
+ mv dist/*.whl src/backend/base/dist
+ uv pip install src/backend/base/dist/*.whl
+ uv run python -m langflow run --host 127.0.0.1 --port 7860 --backend-only &
+ SERVER_PID=$!
+ # Wait for the server to start
+ timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/api/v1/auto_login; do sleep 2; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1)
+ # Terminate the server
+ kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1)
+ sleep 20 # give the server some time to terminate
+ # Check if the server is still running
+ if kill -0 $SERVER_PID 2>/dev/null; then
+ echo "Failed to terminate the server"
+ exit 1
+ else
+ echo "Server terminated successfully"
+ fi
+ - name: Publish to PyPI
+ if: steps.check-version.outputs.skipped == 'false'
+ env:
+ UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
+ run: |
+ make publish base=true
+ - name: Upload Artifact
+ if: steps.check-version.outputs.skipped == 'false'
+ uses: actions/upload-artifact@v4
+ with:
+ name: dist-base
+ path: src/backend/base/dist
+
+ release-main:
+ name: Release Langflow Main
+ if: inputs.release_package_main == true
+ needs: [release-base]
+ runs-on: ubuntu-latest
+ outputs:
+ version: ${{ steps.check-version.outputs.version }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ - name: Setup Environment
+ uses: ./.github/actions/setup-uv
+ - name: Install the project
+ run: uv sync --dev
+
+ # If pre-release is true, we need to check if ["a", "b", "rc", "dev", "post"] is in the version string
+ # if the version string is incorrect, we need to exit the workflow
+ - name: Check if pre-release
+ if: inputs.pre_release == true
+ run: |
+ version=$(uv tree | grep 'langflow' | grep -v 'langflow-base' | awk '{print $2}' | sed 's/^v//')
+ if [[ "${version}" =~ ^([0-9]+\.)?([0-9]+\.)?[0-9]+(((a|b|rc)[0-9]+)|(\.(dev|post)[0-9]+))$ ]]; then
+ echo "Pre-release version detected. Continuing with the release."
+ else
+ echo "Invalid pre-release version detected. Exiting the workflow."
+ exit 1
+ fi
+ - name: Check Version
+ id: check-version
+ run: |
+ version=$(uv tree | grep 'langflow' | grep -v 'langflow-base' | awk '{print $2}' | sed 's/^v//')
+ last_released_version=$(curl -s "https://pypi.org/pypi/langflow/json" | jq -r '.releases | keys | .[]' | sort -V | tail -n 1)
+ if [ "$version" = "$last_released_version" ]; then
+ echo "Version $version is already released. Skipping release."
+ exit 1
+ else
+ echo version=$version >> $GITHUB_OUTPUT
+ fi
+ - name: Wait for PyPI Propagation
+ if: needs.release-base.outputs.skipped == 'false'
+ run: sleep 300 # wait for 5 minutes to ensure PyPI propagation
+
+ - name: Build project for distribution
+ run: make build main=true args="--no-sources --wheel"
+ - name: Test CLI
+ run: |
+ uv pip install dist/*.whl
+ uv run python -m langflow run --host 127.0.0.1 --port 7860 --backend-only &
+ SERVER_PID=$!
+ # Wait for the server to start
+ timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/health_check; do sleep 2; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1)
+ # Terminate the server
+ kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1)
+ sleep 20 # give the server some time to terminate
+ # Check if the server is still running
+ if kill -0 $SERVER_PID 2>/dev/null; then
+ echo "Failed to terminate the server"
+ exit 1
+ else
+ echo "Server terminated successfully"
+ fi
+ - name: Publish to PyPI
+ env:
+ UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
+ run: |
+ make publish main=true
+ - name: Upload Artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: dist-main
+ path: dist
+
+ call_docker_build_base:
+ name: Call Docker Build Workflow for Langflow Base
+ if: inputs.build_docker_base == true
+ needs: [release-base, release-main]
+ uses: ./.github/workflows/docker-build.yml
+ with:
+ base_version: ${{ needs.release-base.outputs.version }}
+ main_version: ${{ needs.release-main.outputs.version }}
+ release_type: base
+ pre_release: ${{ inputs.pre_release }}
+ secrets: inherit
+
+ call_docker_build_main:
+ name: Call Docker Build Workflow for Langflow
+ if: inputs.build_docker_main == true
+ needs: [release-main]
+ uses: ./.github/workflows/docker-build.yml
+ with:
+ main_version: ${{ needs.release-main.outputs.version }}
+ release_type: main
+ pre_release: ${{ inputs.pre_release }}
+ secrets: inherit
+
+ call_docker_build_main_ep:
+ name: Call Docker Build Workflow for Langflow with Entrypoint
+ if: inputs.build_docker_ep == true
+ needs: [release-main]
+ uses: ./.github/workflows/docker-build.yml
+ with:
+ main_version: ${{ needs.release-main.outputs.version }}
+ release_type: main-ep
+ pre_release: False
+ secrets: inherit
+
+ create_release:
+ name: Create Release
+ runs-on: ubuntu-latest
+ needs: release-main
+ steps:
+ - uses: actions/download-artifact@v4
+ with:
+ name: dist-main
+ path: dist
+ - name: Create Release
+ uses: ncipollo/release-action@v1
+ with:
+ artifacts: "dist/*"
+ token: ${{ secrets.GITHUB_TOKEN }}
+ draft: false
+ generateReleaseNotes: true
+ prerelease: ${{ inputs.pre_release }}
+ tag: ${{ needs.release-main.outputs.version }}
+ commit: ${{ github.ref }}
diff --git a/.github/workflows/release_nightly.yml b/.github/workflows/release_nightly.yml
new file mode 100644
index 0000000000000000000000000000000000000000..88c1e0420cd33010419c0318bb50c75cadfdb1d2
--- /dev/null
+++ b/.github/workflows/release_nightly.yml
@@ -0,0 +1,233 @@
+name: Langflow Nightly Build
+run-name: Langflow Nightly Release by @${{ github.actor }}
+
+on:
+ workflow_dispatch:
+ inputs:
+ build_docker_base:
+ description: "Build Docker Image for Langflow Nightly Base"
+ required: true
+ type: boolean
+ default: false
+ build_docker_main:
+ description: "Build Docker Image for Langflow Nightly"
+ required: true
+ type: boolean
+ default: false
+ build_docker_ep:
+ description: "Build Docker Image for Langflow Nightly with Entrypoint"
+ required: false
+ type: boolean
+ default: false
+ nightly_tag_main:
+ description: "Tag for the nightly main build"
+ required: true
+ type: string
+ nightly_tag_base:
+ description: "Tag for the nightly base build"
+ required: true
+ type: string
+ workflow_call:
+ inputs:
+ build_docker_base:
+ description: "Build Docker Image for Langflow Nightly Base"
+ required: true
+ type: boolean
+ default: false
+ build_docker_main:
+ description: "Build Docker Image for Langflow Nightly"
+ required: true
+ type: boolean
+ default: false
+ build_docker_ep:
+ description: "Build Docker Image for Langflow Nightly with Entrypoint"
+ required: false
+ type: boolean
+ default: false
+ nightly_tag_main:
+ description: "Tag for the nightly main build"
+ required: true
+ type: string
+ nightly_tag_base:
+ description: "Tag for the nightly base build"
+ required: true
+ type: string
+
+env:
+ POETRY_VERSION: "1.8.3"
+ PYTHON_VERSION: "3.12"
+
+jobs:
+ release-nightly-base:
+ name: Release Langflow Nightly Base
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ shell: bash
+ outputs:
+ version: ${{ steps.verify.outputs.version }}
+ steps:
+ - name: Check out the code at a specific ref
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.nightly_tag_main }}
+ persist-credentials: true
+ - name: "Setup Environment"
+ uses: ./.github/actions/setup-uv
+ - name: Install the project
+ run: uv sync --dev
+
+ - name: Verify Nightly Name and Version
+ id: verify
+ run: |
+ name=$(uv tree | grep 'langflow-base' | awk '{print $2}')
+ version=$(uv tree | grep 'langflow-base' | awk '{print $3}')
+ if [ "$name" != "langflow-base-nightly" ]; then
+ echo "Name $name does not match langflow-base-nightly. Exiting the workflow."
+ exit 1
+ fi
+ if [ "$version" != "${{ inputs.nightly_tag_base }}" ]; then
+ echo "Version $version does not match nightly tag ${{ inputs.nightly_tag_base }}. Exiting the workflow."
+ exit 1
+ fi
+ # Strip the leading `v` from the version
+ version=$(echo $version | sed 's/^v//')
+ echo "version=$version" >> $GITHUB_OUTPUT
+
+ - name: Build project for distribution
+ run: make build base=true args="--wheel"
+
+ - name: Test CLI
+ run: |
+ # TODO: Unsure why the whl is not built in src/backend/base/dist
+ mkdir src/backend/base/dist
+ mv dist/*.whl src/backend/base/dist/
+ uv pip install src/backend/base/dist/*.whl
+ uv run python -m langflow run --host 127.0.0.1 --port 7860 --backend-only &
+ SERVER_PID=$!
+ # Wait for the server to start
+ timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/api/v1/auto_login; do sleep 2; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1)
+ # Terminate the server
+ kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1)
+ sleep 20 # give the server some time to terminate
+ # Check if the server is still running
+ if kill -0 $SERVER_PID 2>/dev/null; then
+ echo "Failed to terminate the server"
+ exit 1
+ else
+ echo "Server terminated successfully"
+ fi
+
+ - name: Publish to PyPI
+ env:
+ POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
+ UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
+ run: |
+ make publish base=true
+
+ - name: Upload Artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: dist-base
+ path: src/backend/base/dist
+
+ release-nightly-main:
+ name: Release Langflow Nightly Main
+ needs: [release-nightly-base]
+ runs-on: ubuntu-latest
+ outputs:
+ version: ${{ steps.verify.outputs.version }}
+ defaults:
+ run:
+ shell: bash
+ steps:
+ - name: Check out the code at a specific ref
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.nightly_tag_main}}
+ persist-credentials: true
+ - name: "Setup Environment"
+ uses: ./.github/actions/setup-uv
+ - name: Install the project
+ run: uv sync --dev
+
+ - name: Verify Nightly Name and Version
+ id: verify
+ run: |
+ name=$(uv tree | grep 'langflow' | grep -v 'langflow-base' | awk '{print $1}')
+ version=$(uv tree | grep 'langflow' | grep -v 'langflow-base' | awk '{print $2}')
+ if [ "$name" != "langflow-nightly" ]; then
+ echo "Name $name does not match langflow-nightly. Exiting the workflow."
+ exit 1
+ fi
+ if [ "$version" != "${{ inputs.nightly_tag_main }}" ]; then
+ echo "Version $version does not match nightly tag ${{ inputs.nightly_tag_main }}. Exiting the workflow."
+ exit 1
+ fi
+ # Strip the leading `v` from the version
+ version=$(echo $version | sed 's/^v//')
+ echo "version=$version" >> $GITHUB_OUTPUT
+ - name: Wait for PyPI Propagation
+ run: sleep 300 # wait for 5 minutes to ensure PyPI propagation of base
+
+ - name: Build project for distribution
+ run: make build main=true args="--no-sources --wheel"
+ - name: Test CLI
+ run: |
+ uv pip install dist/*.whl
+ uv run python -m langflow run --host 127.0.0.1 --port 7860 --backend-only &
+ SERVER_PID=$!
+ # Wait for the server to start
+ timeout 120 bash -c 'until curl -f http://127.0.0.1:7860/health_check; do sleep 2; done' || (echo "Server did not start in time" && kill $SERVER_PID && exit 1)
+ # Terminate the server
+ kill $SERVER_PID || (echo "Failed to terminate the server" && exit 1)
+ sleep 20 # give the server some time to terminate
+ # Check if the server is still running
+ if kill -0 $SERVER_PID 2>/dev/null; then
+ echo "Failed to terminate the server"
+ exit 1
+ else
+ echo "Server terminated successfully"
+ fi
+ - name: Publish to PyPI
+ env:
+ POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
+ UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
+ run: |
+ make publish main=true
+ - name: Upload Artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: dist-main
+ path: dist
+
+ call_docker_build_base:
+ name: Call Docker Build Workflow for Langflow Base
+ if: always() && inputs.build_docker_base == true
+ needs: [release-nightly-base, release-nightly-main]
+ uses: ./.github/workflows/docker-build.yml
+ with:
+ release_type: nightly-base
+ base_version: ${{ inputs.nightly_tag_base }}
+ main_version: ${{ inputs.nightly_tag_main }}
+ secrets: inherit
+
+ call_docker_build_main:
+ name: Call Docker Build Workflow for Langflow
+ if: always() && inputs.build_docker_main == true
+ needs: [release-nightly-main]
+ uses: ./.github/workflows/docker-build.yml
+ with:
+ release_type: nightly-main
+ main_version: ${{ inputs.nightly_tag_main }}
+ secrets: inherit
+
+ call_docker_build_main_ep:
+ name: Call Docker Build Workflow for Langflow with Entrypoint
+ if: always() && inputs.build_docker_ep == true
+ needs: [release-nightly-main]
+ uses: ./.github/workflows/docker-build.yml
+ with:
+ release_type: main-ep
+ main_version: ${{ inputs.nightly_tag_main }}
+ secrets: inherit
diff --git a/.github/workflows/store_pytest_durations.yml b/.github/workflows/store_pytest_durations.yml
new file mode 100644
index 0000000000000000000000000000000000000000..89c28e44c4c406c393f618f426c7c932b76c5994
--- /dev/null
+++ b/.github/workflows/store_pytest_durations.yml
@@ -0,0 +1,65 @@
+name: Store pytest durations
+
+on:
+ workflow_dispatch:
+ schedule:
+ # Run job at 6:30 UTC, 10.30pm PST, or 11.30pm PDT
+ - cron: "30 6 * * *"
+
+env:
+ PYTEST_RUN_PATH: "src/backend/tests"
+
+jobs:
+ build:
+ name: Run pytest and store durations
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ pull-requests: write
+ env:
+ UV_CACHE_DIR: /tmp/.uv-cache
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install uv
+ uses: astral-sh/setup-uv@v4
+ with:
+ enable-cache: true
+ cache-dependency-glob: "uv.lock"
+ - name: "Set up Python"
+ uses: actions/setup-python@v5
+ with:
+ python-version-file: "pyproject.toml"
+ - name: Restore uv cache
+ uses: actions/cache@v4
+ with:
+ path: /tmp/.uv-cache
+ key: uv-${{ runner.os }}-${{ hashFiles('uv.lock') }}
+ restore-keys: |
+ uv-${{ runner.os }}-${{ hashFiles('uv.lock') }}
+ uv-${{ runner.os }}
+ - name: Install the project
+ run: uv sync --dev
+ - name: Run unit tests
+ uses: nick-fields/retry@v3
+ with:
+ timeout_minutes: 12
+ max_attempts: 2
+ command: uv run pytest src/backend/tests --durations-path src/backend/tests/.test_durations --splitting-algorithm least_duration --store-durations
+ - name: Minimize uv cache
+ run: uv cache prune --ci
+
+ - name: Create Pull Request
+ uses: peter-evans/create-pull-request@v7
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ branch-token: ${{ secrets.GITHUB_TOKEN }}
+ commit-message: "chore: update test durations"
+ title: "chore: update test durations"
+ body: |
+ Automated PR to update test durations file.
+
+ This PR was automatically created by the store_pytest_durations workflow.
+ branch: update-test-durations
+ branch-suffix: timestamp
+ delete-branch: true
+ maintainer-can-modify: true
diff --git a/.github/workflows/style-check-py.yml b/.github/workflows/style-check-py.yml
new file mode 100644
index 0000000000000000000000000000000000000000..25abc5e4319d3c29fe0fa1511c3892490d5d6ccb
--- /dev/null
+++ b/.github/workflows/style-check-py.yml
@@ -0,0 +1,31 @@
+name: Ruff Style Check
+
+on:
+ pull_request:
+ types: [opened, synchronize, reopened, auto_merge_enabled]
+ paths:
+ - "**/*.py"
+
+
+
+
+
+jobs:
+ lint:
+ name: Ruff Style Check
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version:
+ - "3.12"
+ steps:
+ - name: Check out the code at a specific ref
+ uses: actions/checkout@v4
+ - name: "Setup Environment"
+ uses: ./.github/actions/setup-uv
+ - name: Register problem matcher
+ run: echo "::add-matcher::.github/workflows/matchers/ruff.json"
+ - name: Run Ruff Check
+ run: uv run --only-dev ruff check --output-format=github .
+ - name: Minimize uv cache
+ run: uv cache prune --ci
diff --git a/.github/workflows/typescript_test.yml b/.github/workflows/typescript_test.yml
new file mode 100644
index 0000000000000000000000000000000000000000..db242b337909f9c14d5913891e8020e6844cd40d
--- /dev/null
+++ b/.github/workflows/typescript_test.yml
@@ -0,0 +1,348 @@
+name: Run Frontend Tests
+
+on:
+ workflow_call:
+ secrets:
+ OPENAI_API_KEY:
+ required: true
+ STORE_API_KEY:
+ required: true
+ ANTHROPIC_API_KEY:
+ required: true
+ TAVILY_API_KEY:
+ required: true
+ inputs:
+ suites:
+ description: "Test suites to run (JSON array)"
+ required: false
+ type: string
+ default: '[]'
+ release:
+ description: "Whether this is a release build"
+ required: false
+ type: boolean
+ default: false
+ tests_folder:
+ description: "(Optional) Tests to run"
+ required: false
+ type: string
+ default: "tests"
+ ref:
+ description: "(Optional) ref to checkout"
+ required: false
+ type: string
+ workflow_dispatch:
+ inputs:
+ suites:
+ description: "Test suites to run (JSON array)"
+ required: false
+ type: string
+ default: '[]'
+ release:
+ description: "Whether this is a release build"
+ required: false
+ type: boolean
+ default: false
+ tests_folder:
+ description: "(Optional) Tests to run"
+ required: false
+ type: string
+ default: "tests"
+
+env:
+ NODE_VERSION: "21"
+ PYTHON_VERSION: "3.12"
+ # Define the directory where Playwright browsers will be installed.
+ # Adjust if your project uses a different path.
+ PLAYWRIGHT_BROWSERS_PATH: "ms-playwright"
+
+jobs:
+ determine-test-suite:
+ name: Determine Test Suites and Shard Distribution
+ runs-on: ubuntu-latest
+ outputs:
+ matrix: ${{ steps.setup-matrix.outputs.matrix }}
+ test_grep: ${{ steps.set-matrix.outputs.test_grep }}
+ suites: ${{ steps.set-matrix.outputs.suites }}
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.ref || github.ref }}
+ fetch-depth: 0
+
+ - name: Paths Filter
+ id: filter
+ uses: dorny/paths-filter@v3
+ with:
+ filters: .github/changes-filter.yaml
+
+ - name: Determine Test Suites from Changes
+ id: set-matrix
+ run: |
+ # Start with input suites if provided, otherwise empty array
+ echo 'Changes filter output: ${{ toJSON(steps.filter.outputs) }}'
+ SUITES='${{ inputs.suites }}'
+ echo "Initial suites: $SUITES"
+ TEST_GREP=""
+
+ RELEASE="${{ inputs.release || 'false' }}"
+ echo "Release build: $RELEASE"
+
+ # Only set to release if it's explicitly a release build
+ if [[ "$RELEASE" == "true" ]]; then
+ SUITES='["release"]'
+ echo "Release build detected - setting suites to: $SUITES"
+ # grep pattern for release is the @release tag - run all tests
+ TEST_GREP="--grep=\"@release\""
+ else
+ # If input suites were not provided, determine based on changes
+ if [[ "$SUITES" == "[]" ]]; then
+ echo "No input suites provided - determining from changes"
+ TAGS=()
+ # Add suites and tags based on changed files
+ if [[ "${{ steps.filter.outputs.components }}" == "true" ]]; then
+ SUITES=$(echo $SUITES | jq -c '. += ["components"]')
+ TAGS+=("@components")
+ echo "Added components suite"
+ fi
+ if [[ "${{ steps.filter.outputs.starter-projects }}" == "true" ]]; then
+ SUITES=$(echo $SUITES | jq -c '. += ["starter-projects"]')
+ TAGS+=("@starter-projects")
+ echo "Added starter-projects suite"
+ fi
+ if [[ "${{ steps.filter.outputs.workspace }}" == "true" ]]; then
+ SUITES=$(echo $SUITES | jq -c '. += ["workspace"]')
+ TAGS+=("@workspace")
+ echo "Added workspace suite"
+ fi
+ if [[ "${{ steps.filter.outputs.api }}" == "true" ]]; then
+ SUITES=$(echo $SUITES | jq -c '. += ["api"]')
+ TAGS+=("@api")
+ echo "Added api suite"
+ fi
+ if [[ "${{ steps.filter.outputs.database }}" == "true" ]]; then
+ SUITES=$(echo $SUITES | jq -c '. += ["database"]')
+ TAGS+=("@database")
+ echo "Added database suite"
+ fi
+
+ # Create grep pattern if we have tags
+ if [ ${#TAGS[@]} -gt 0 ]; then
+ # Join tags with | for OR logic
+ REGEX_PATTERN=$(IFS='|'; echo "${TAGS[*]}")
+ TEST_GREP="--grep=\"${REGEX_PATTERN}\""
+ fi
+ else
+ # Process input suites to tags
+ TAGS=()
+ if echo "$SUITES" | jq -e 'contains(["components"])' > /dev/null; then
+ TAGS+=("@components")
+ fi
+ if echo "$SUITES" | jq -e 'contains(["starter-projects"])' > /dev/null; then
+ TAGS+=("@starter-projects")
+ fi
+ if echo "$SUITES" | jq -e 'contains(["workspace"])' > /dev/null; then
+ TAGS+=("@workspace")
+ fi
+ if echo "$SUITES" | jq -e 'contains(["api"])' > /dev/null; then
+ TAGS+=("@api")
+ fi
+ if echo "$SUITES" | jq -e 'contains(["database"])' > /dev/null; then
+ TAGS+=("@database")
+ fi
+
+ if [ ${#TAGS[@]} -gt 0 ]; then
+ # Join tags with | for OR logic
+ REGEX_PATTERN=$(IFS='|'; echo "${TAGS[*]}")
+ TEST_GREP="--grep=\"${REGEX_PATTERN}\""
+ fi
+ fi
+ fi
+
+ # Ensure compact JSON output
+ SUITES=$(echo "$SUITES" | jq -c '.')
+
+ echo "Final test suites to run: $SUITES"
+ echo "Test grep pattern: $TEST_GREP"
+ echo "suites=$SUITES" >> $GITHUB_OUTPUT
+ echo "matrix=$(echo $SUITES | jq -c .)" >> $GITHUB_OUTPUT
+ echo "test_grep=$TEST_GREP" >> $GITHUB_OUTPUT
+
+ - name: Setup Node ${{ env.NODE_VERSION }}
+ uses: actions/setup-node@v4
+ id: setup-node
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+ cache: "npm"
+ cache-dependency-path: ./src/frontend/package-lock.json
+
+ - name: Install Frontend Dependencies
+ run: npm ci
+ working-directory: ./src/frontend
+
+ - name: Calculate Test Shards Distribution
+ id: setup-matrix
+ run: |
+ cd src/frontend
+
+ # Get the test count using playwright's built-in grep
+ if [ -n "${{ steps.set-matrix.outputs.test_grep }}" ]; then
+ TEST_COUNT=$(npx playwright test ${{ inputs.tests_folder }} ${{ steps.set-matrix.outputs.test_grep }} --list | wc -l)
+ else
+ TEST_COUNT=$(npx playwright test ${{ inputs.tests_folder }} --list | wc -l)
+ fi
+
+ echo "Total tests to run: $TEST_COUNT"
+
+ # Calculate optimal shard count - 1 shard per 5 tests, min 1, max 10
+ SHARD_COUNT=$(( (TEST_COUNT + 4) / 5 ))
+ if [ $SHARD_COUNT -lt 1 ]; then
+ SHARD_COUNT=1
+ elif [ $SHARD_COUNT -gt 10 ]; then
+ SHARD_COUNT=10
+ fi
+
+ # Create the matrix combinations string
+ MATRIX_COMBINATIONS=""
+ for i in $(seq 1 $SHARD_COUNT); do
+ if [ $i -gt 1 ]; then
+ MATRIX_COMBINATIONS="$MATRIX_COMBINATIONS,"
+ fi
+ MATRIX_COMBINATIONS="$MATRIX_COMBINATIONS{\"shardIndex\": $i, \"shardTotal\": $SHARD_COUNT}"
+ done
+
+ echo "matrix={\"include\":[$MATRIX_COMBINATIONS]}" >> "$GITHUB_OUTPUT"
+
+ setup-and-test:
+ name: Playwright Tests - Shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }}
+ runs-on: ubuntu-latest
+ if: ${{ needs.determine-test-suite.outputs.test_grep != '' }}
+ needs: determine-test-suite
+ strategy:
+ fail-fast: false
+ matrix: ${{ fromJson(needs.determine-test-suite.outputs.matrix) }}
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ STORE_API_KEY: ${{ secrets.STORE_API_KEY }}
+ SEARCH_API_KEY: "${{ secrets.SEARCH_API_KEY }}"
+ ASTRA_DB_APPLICATION_TOKEN: "${{ secrets.ASTRA_DB_APPLICATION_TOKEN }}"
+ ASTRA_DB_API_ENDPOINT: "${{ secrets.ASTRA_DB_API_ENDPOINT }}"
+ ANTHROPIC_API_KEY: "${{ secrets.ANTHROPIC_API_KEY }}"
+ TAVILY_API_KEY: "${{ secrets.TAVILY_API_KEY }}"
+ UV_CACHE_DIR: /tmp/.uv-cache
+ outputs:
+ failed: ${{ steps.check-failure.outputs.failed }}
+ steps:
+ - name: Checkout Repository
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.ref || github.ref }}
+
+ - name: Setup Node.js Environment
+ uses: actions/setup-node@v4
+ id: setup-node
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+ cache: "npm"
+ cache-dependency-path: ./src/frontend/package-lock.json
+
+ - name: Install Frontend Dependencies
+ run: npm ci
+ working-directory: ./src/frontend
+
+ - name: Install Playwright Browser Dependencies
+ id: install-playwright
+ uses: ./.github/actions/install-playwright
+ with:
+ working-directory: ./src/frontend
+ browsers: chromium
+
+ - name: Setup Python Environment with UV
+ uses: ./.github/actions/setup-uv
+
+ - name: Install Python Dependencies
+ run: uv sync --dev
+
+ - name: Configure Environment Variables
+ run: |
+ touch .env
+ echo "${{ secrets.ENV_VARS }}" > .env
+
+ - name: Execute Playwright Tests
+ uses: nick-fields/retry@v3
+ with:
+ timeout_minutes: 12
+ max_attempts: 2
+ command: |
+ cd src/frontend
+ echo 'Running tests with pattern: ${{ needs.determine-test-suite.outputs.test_grep }}'
+ npx playwright test ${{ inputs.tests_folder }} ${{ needs.determine-test-suite.outputs.test_grep }} --shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }} --list
+ # echo command before running
+ echo "npx playwright test ${{ inputs.tests_folder }} ${{ needs.determine-test-suite.outputs.test_grep }} --trace on --shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }} --workers 2"
+
+ npx playwright test ${{ inputs.tests_folder }} ${{ needs.determine-test-suite.outputs.test_grep }} --trace on --shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }} --workers 2
+
+ - name: Upload Test Results
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: blob-report-${{ matrix.shardIndex }}
+ path: src/frontend/blob-report
+ retention-days: 1
+
+ - name: Cleanup UV Cache
+ run: uv cache prune --ci
+
+ merge-reports:
+ # We need to repeat the condition at every step
+ # https://github.com/actions/runner/issues/662
+ needs: setup-and-test
+ runs-on: ubuntu-latest
+ if: always()
+ env:
+ EXIT_CODE: ${{!contains(needs.setup-and-test.result, 'failure') && !contains(needs.setup-and-test.result, 'cancelled') && '0' || '1'}}
+ steps:
+ - name: "Should Merge Reports"
+ # If the CI was successful, we don't need to merge the reports
+ # so we can skip all the steps below
+ id: should_merge_reports
+ run: |
+ if [ "$EXIT_CODE" == "0" ]; then
+ echo "should_merge_reports=false" >> $GITHUB_OUTPUT
+ else
+ echo "should_merge_reports=true" >> $GITHUB_OUTPUT
+ fi
+ - name: Checkout code
+ if: ${{ steps.should_merge_reports.outputs.should_merge_reports == 'true' }}
+ uses: actions/checkout@v4
+
+ - name: Setup Node.js
+
+ if: ${{ steps.should_merge_reports.outputs.should_merge_reports == 'true' }}
+ uses: actions/setup-node@v4
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+
+ - name: Download blob reports from GitHub Actions Artifacts
+
+ if: ${{ steps.should_merge_reports.outputs.should_merge_reports == 'true' }}
+ uses: actions/download-artifact@v4
+ with:
+ path: all-blob-reports
+ pattern: blob-report-*
+ merge-multiple: true
+
+ - name: Merge into HTML Report
+
+ if: ${{ steps.should_merge_reports.outputs.should_merge_reports == 'true' }}
+ run: |
+ npx playwright merge-reports --reporter html ./all-blob-reports
+
+ - name: Upload HTML report
+
+ if: ${{ steps.should_merge_reports.outputs.should_merge_reports == 'true' }}
+ uses: actions/upload-artifact@v4
+ with:
+ name: html-report--attempt-${{ github.run_attempt }}
+ path: playwright-report
+ retention-days: 14
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..9be9589dce31acd89f0936bab2afd82631d25b08
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,279 @@
+# This is to avoid Opencommit hook from getting pushed
+prepare-commit-msg
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+qdrant_storage
+
+.dspy_cache
+# Mac
+.DS_Store
+
+# VSCode
+.vscode/settings.json
+.chroma
+.ruff_cache
+
+# PyCharm
+.idea/
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+bower_components
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules/
+jspm_packages/
+
+# TypeScript v1 declaration files
+typings/
+
+# TypeScript cache
+*.tsbuildinfo
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Microbundle cache
+.rpt2_cache/
+.rts2_cache_cjs/
+.rts2_cache_es/
+.rts2_cache_umd/
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variables file
+.env
+.env.test
+
+# parcel-bundler cache (https://parceljs.org/)
+.cache
+
+# Next.js build output
+.next
+
+# Nuxt.js build / generate output
+.nuxt
+dist
+
+# Gatsby files
+.cache/
+# Comment in the public line in if your project uses Gatsby and *not* Next.js
+# https://nextjs.org/blog/next-9-1#public-directory-support
+# public
+
+# vuepress build output
+.vuepress/dist
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# TernJS port file
+.tern-port
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+notebooks
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+.testmondata*
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+*.db-shm
+*.db-wal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Poetry
+.testenv/*
+langflow.db
+
+
+.githooks/prepare-commit-msg
+.langchain.db
+
+# docusaurus
+.docusaurus/
+
+/tmp/*
+src/backend/langflow/frontend/
+src/backend/base/langflow/frontend/
+.docker
+scratchpad*
+chroma*/*
+stuff/*
+src/frontend/playwright-report/index.html
+*.bak
+prof/*
+
+src/frontend/temp
+*-shm
+*-wal
+.history
+
+.dspy_cache/
+*.db
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..42b3ae39bba55357d4e8f9e7c797543509d2b000
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,25 @@
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.1.0
+ hooks:
+ - id: check-case-conflict
+ - id: end-of-file-fixer
+ # python, js and ts only
+ files: \.(py|js|ts)$
+ - id: mixed-line-ending
+ files: \.(py|js|ts)$
+ args:
+ - --fix=lf
+ - id: trailing-whitespace
+ - repo: local
+ hooks:
+ - id: ruff-check
+ name: ruff check
+ language: system
+ entry: bash -c 'uv run ruff check "$@"' --
+ types: [file, python]
+ - id: ruff-format
+ name: ruff format
+ language: system
+ entry: bash -c 'uv run ruff format "$@"' --
+ types: [file, python]
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 0000000000000000000000000000000000000000..1bc163fb5815f89dc46e64bc0f3bda7d32539948
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,80 @@
+{
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Debug Backend",
+ "type": "debugpy",
+ "request": "launch",
+ "module": "uvicorn",
+ "preLaunchTask": "Install Backend",
+
+ "args": [
+ "--factory",
+ "langflow.main:create_app",
+ "--port",
+ "7860",
+ "--reload",
+ "--log-level",
+ "debug",
+ "--loop",
+ "asyncio",
+ "--reload-include",
+ "./src/backend/*"
+ ],
+ "jinja": true,
+ "justMyCode": false,
+ "env": {
+ "LANGFLOW_LOG_LEVEL": "debug",
+ "LANGFLOW_PROMETHEUS_PORT": "9090"
+ },
+ "envFile": "${workspaceFolder}/.env"
+ },
+ {
+ "name": "Debug CLI",
+ "type": "debugpy",
+ "request": "launch",
+ "module": "langflow",
+ "args": [
+ "run",
+ "--path",
+ "${workspaceFolder}/src/backend/base/langflow/frontend",
+ "--env-file",
+ "${workspaceFolder}/.env"
+ ],
+ // "python": "/path/to/your/python_env/python", // Replace with the path to your Python executable
+ "jinja": true,
+ "justMyCode": false
+ },
+ {
+ "name": "Python: Remote Attach",
+ "type": "debugpy",
+ "request": "attach",
+ "justMyCode": true,
+ "connect": {
+ "port": 5678
+ },
+ "pathMappings": [
+ {
+ "localRoot": "${workspaceFolder}",
+ "remoteRoot": "."
+ }
+ ]
+ },
+ {
+ "name": "Debug Frontend",
+ "type": "chrome",
+ "request": "launch",
+ "url": "http://localhost:3000/",
+ "webRoot": "${workspaceFolder}/src/frontend"
+ },
+ {
+ "name": "Python: Debug Tests",
+ "type": "debugpy",
+ "request": "launch",
+ "program": "${file}",
+ "purpose": ["debug-test"],
+ "console": "integratedTerminal",
+ "justMyCode": false
+ }
+ ]
+}
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
new file mode 100644
index 0000000000000000000000000000000000000000..5142610af1f15ddbdde606b3e9bba4b3e03af0f0
--- /dev/null
+++ b/.vscode/tasks.json
@@ -0,0 +1,54 @@
+{
+ // See https://go.microsoft.com/fwlink/?LinkId=733558
+ // for the documentation about the tasks.json format
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": "Init",
+ "type": "shell",
+ "command": "make init"
+ },
+ // make backend
+ {
+ "label": "Backend",
+ "type": "shell",
+ "command": "make backend"
+ },
+ // make frontend
+ {
+ "label": "Frontend",
+ "type": "shell",
+ "command": "make frontend"
+ },
+ // make test
+ {
+ "label": "Test",
+ "type": "shell",
+ "command": "make unit_tests"
+ },
+ // make lint
+ {
+ "label": "Lint",
+ "type": "shell",
+ "command": "make lint"
+ },
+ // make format
+ {
+ "label": "Format",
+ "type": "shell",
+ "command": "make format"
+ },
+ // make install
+ {
+ "label": "Install",
+ "type": "shell",
+ "command": "make install_backend && make install_frontend"
+ },
+ // install backend
+ {
+ "label": "Install Backend",
+ "type": "shell",
+ "command": "make install_backend"
+ }
+ ]
+}
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000000000000000000000000000000000..a4f49057f74f0ac69d503e85656bd667aa19eabc
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socioeconomic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+- Demonstrating empathy and kindness toward other people
+- Being respectful of differing opinions, viewpoints, and experiences
+- Giving and gracefully accepting constructive feedback
+- Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+- Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+- The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+- Trolling, insulting or derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+- Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+contact@langflow.org.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..960f6522afccd62988d937975da10a15780d36fb
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,102 @@
+# Contributing to Langflow
+
+This guide is intended to help you get started contributing to Langflow.
+As an open-source project in a rapidly developing field, we are extremely open
+to contributions, whether it be in the form of a new feature, improved infra, or better documentation.
+
+To contribute to this project, please follow the [fork and pull request](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
+
+## Reporting bugs or suggesting improvements
+
+Our [GitHub issues](https://github.com/langflow-ai/langflow/issues) page is kept up to date
+with bugs, improvements, and feature requests. There is a taxonomy of labels to help
+with sorting and discovery of issues of interest. [See this page](https://github.com/langflow-ai/langflow/labels) for an overview of
+the system we use to tag our issues and pull requests.
+
+If you're looking for help with your code, consider posting a question on the
+[GitHub Discussions board](https://github.com/langflow-ai/langflow/discussions). Please
+understand that we won't be able to provide individual support via email. We
+also believe that help is much more valuable if it's **shared publicly**,
+so that more people can benefit from it.
+
+- **Describing your issue:** Try to provide as many details as possible. What
+ exactly goes wrong? _How_ is it failing? Is there an error?
+ "XY doesn't work" usually isn't that helpful for tracking down problems. Always
+ remember to include the code you ran and if possible, extract only the relevant
+ parts and don't just dump your entire script. This will make it easier for us to
+ reproduce the error.
+
+- **Sharing long blocks of code or logs:** If you need to include long code,
+ logs or tracebacks, you can wrap them in `<details>` and `</details>`. This
+ [collapses the content](https://developer.mozilla.org/en/docs/Web/HTML/Element/details)
+ so it only becomes visible on click, making the issue easier to read and follow.
+
+## Contributing code and documentation
+
+You can develop Langflow locally via uv + NodeJS.
+
+### Clone the Langflow Repository
+
+Navigate to the [Langflow GitHub repository](https://github.com/langflow-ai/langflow) and press "Fork" in the upper right-hand corner.
+
+Add the new remote to your local repository on your local machine:
+
+```bash
+git remote add fork https://github.com/<your-username>/langflow.git
+```
+
+We also provide a .vscode/launch.json file for debugging the backend in VSCode, which is a lot faster than using docker compose.
+
+### Prepare the environment
+
+Setting up hooks:
+
+```bash
+make init
+```
+
+This will set up the development environment by installing backend and frontend dependencies, building the frontend static files, and initializing the project. It runs `make install_backend`, `make install_frontend`, `make build_frontend`, and finally `uv run langflow run` to start the application.
+
+It is advised to run `make lint`, `make format`, and `make unit_tests` before pushing to the repository.
+
+### Run locally (uv and Node.js)
+
+Langflow can run locally by cloning the repository and installing the dependencies. We recommend using a virtual environment to isolate the dependencies from your system.
+
+Before you start, make sure you have the following installed:
+
+- uv (>=0.4)
+- Node.js
+
+Then, in the root folder, install the dependencies and start the development server for the backend:
+
+```bash
+make backend
+```
+
+And the frontend:
+
+```bash
+make frontend
+```
+
+### Run documentation
+
+The documentation is built using [Docusaurus](https://docusaurus.io/). To run the documentation locally, run the following commands:
+
+```bash
+cd docs
+npm install
+npm run start
+```
+
+The documentation will be available at `localhost:3000` and all the files are located in the `docs/docs` folder.
+
+## Opening a pull request
+
+Once you wrote and manually tested your change, you can start sending the patch to the main repository.
+
+- Open a new GitHub pull request with the patch against the `main` branch.
+- Ensure the PR title follows semantic commits conventions.
+ - For example, `feat: add new feature`, `fix: correct issue with X`.
+- Ensure the PR description clearly describes the problem and solution. Include the relevant issue number if applicable.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..ee3d6be71bdd1b5e4cc5b85da4a4aabc7bfe3347
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Langflow
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..b928ccc0a44cb62e4fd9ba69589ddbd4b3e56468
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,446 @@
+.PHONY: all init format lint build build_frontend install_frontend run_frontend run_backend dev help tests coverage clean_python_cache clean_npm_cache clean_all
+
+# Configurations
+VERSION=$(shell grep "^version" pyproject.toml | sed 's/.*\"\(.*\)\"$$/\1/')
+DOCKERFILE=docker/build_and_push.Dockerfile
+DOCKERFILE_BACKEND=docker/build_and_push_backend.Dockerfile
+DOCKERFILE_FRONTEND=docker/frontend/build_and_push_frontend.Dockerfile
+DOCKER_COMPOSE=docker_example/docker-compose.yml
+PYTHON_REQUIRED=$(shell grep '^requires-python[[:space:]]*=' pyproject.toml | sed -n 's/.*"\([^"]*\)".*/\1/p')
+RED=\033[0;31m
+NC=\033[0m # No Color
+GREEN=\033[0;32m
+
+log_level ?= debug
+host ?= 0.0.0.0
+port ?= 7860
+env ?= .env
+open_browser ?= true
+path = src/backend/base/langflow/frontend
+workers ?= 1
+async ?= true
+lf ?= false
+ff ?= true
+all: help
+
+######################
+# UTILITIES
+######################
+
+# increment the patch version of the current package
+patch: ## bump the version in langflow and langflow-base
+ @echo 'Patching the version'
+ @poetry version patch
+ @echo 'Patching the version in langflow-base'
+ @cd src/backend/base && poetry version patch
+ @make lock
+
+# check for required tools
+check_tools:
+ @command -v uv >/dev/null 2>&1 || { echo >&2 "$(RED)uv is not installed. Aborting.$(NC)"; exit 1; }
+ @command -v npm >/dev/null 2>&1 || { echo >&2 "$(RED)NPM is not installed. Aborting.$(NC)"; exit 1; }
+ @echo "$(GREEN)All required tools are installed.$(NC)"
+
+
+help: ## show this help message
+ @echo '----'
+ @grep -hE '^\S+:.*##' $(MAKEFILE_LIST) | \
+ awk -F ':.*##' '{printf "\033[36mmake %s\033[0m: %s\n", $$1, $$2}' | \
+ column -c2 -t -s :
+ @echo '----'
+
+######################
+# INSTALL PROJECT
+######################
+
+reinstall_backend: ## forces reinstall all dependencies (no caching)
+ @echo 'Installing backend dependencies'
+ @uv sync -n --reinstall --frozen
+
+install_backend: ## install the backend dependencies
+ @echo 'Installing backend dependencies'
+ @uv sync --frozen
+
+install_frontend: ## install the frontend dependencies
+ @echo 'Installing frontend dependencies'
+ @cd src/frontend && npm install > /dev/null 2>&1
+
+build_frontend: ## build the frontend static files
+ @echo 'Building frontend static files'
+ @cd src/frontend && CI='' npm run build > /dev/null 2>&1
+ @rm -rf src/backend/base/langflow/frontend
+ @cp -r src/frontend/build src/backend/base/langflow/frontend
+
+init: check_tools clean_python_cache clean_npm_cache ## initialize the project
+ @make install_backend
+ @make install_frontend
+ @make build_frontend
+ @echo "$(GREEN)All requirements are installed.$(NC)"
+ @uv run langflow run
+
+######################
+# CLEAN PROJECT
+######################
+
+clean_python_cache:
+ @echo "Cleaning Python cache..."
+ find . -type d -name '__pycache__' -exec rm -r {} +
+ find . -type f -name '*.py[cod]' -exec rm -f {} +
+ find . -type f -name '*~' -exec rm -f {} +
+ find . -type f -name '.*~' -exec rm -f {} +
+ find . -type d -empty -delete
+ @echo "$(GREEN)Python cache cleaned.$(NC)"
+
+clean_npm_cache:
+ @echo "Cleaning npm cache..."
+ cd src/frontend && npm cache clean --force
+ rm -rf src/frontend/node_modules src/frontend/build src/backend/base/langflow/frontend src/frontend/package-lock.json
+ @echo "$(GREEN)NPM cache and frontend directories cleaned.$(NC)"
+
+clean_all: clean_python_cache clean_npm_cache ## clean all caches and temporary directories
+ @echo "$(GREEN)All caches and temporary directories cleaned.$(NC)"
+
+setup_uv: ## install uv using pipx
+ pipx install uv
+
+add:
+ @echo 'Adding dependencies'
+ifdef devel
+ @cd src/backend/base && uv add --group dev $(devel)
+endif
+
+ifdef main
+ @uv add $(main)
+endif
+
+ifdef base
+ @cd src/backend/base && uv add $(base)
+endif
+
+
+
+######################
+# CODE TESTS
+######################
+
+coverage: ## run the tests and generate a coverage report
+ @uv run coverage run
+ @uv run coverage report
+
+unit_tests: ## run unit tests
+ @uv sync --extra dev --frozen
+ @EXTRA_ARGS=""; \
+ if [ "$(async)" = "true" ]; then \
+ EXTRA_ARGS="$$EXTRA_ARGS -n auto"; \
+ fi; \
+ if [ "$(lf)" = "true" ]; then \
+ EXTRA_ARGS="$$EXTRA_ARGS --lf"; \
+ fi; \
+ if [ "$(ff)" = "true" ]; then \
+ EXTRA_ARGS="$$EXTRA_ARGS --ff"; \
+ fi; \
+ uv run pytest src/backend/tests --ignore=src/backend/tests/integration $$EXTRA_ARGS --instafail -ra -m 'not api_key_required' --durations-path src/backend/tests/.test_durations --splitting-algorithm least_duration $(args)
+
+unit_tests_looponfail:
+ @make unit_tests args="-f"
+
+integration_tests:
+ uv run pytest src/backend/tests/integration \
+ --instafail -ra \
+ $(args)
+
+integration_tests_no_api_keys:
+ uv run pytest src/backend/tests/integration \
+ --instafail -ra -m "not api_key_required" \
+ $(args)
+
+integration_tests_api_keys:
+ uv run pytest src/backend/tests/integration \
+ --instafail -ra -m "api_key_required" \
+ $(args)
+
+tests: ## run unit, integration, coverage tests
+ @echo 'Running Unit Tests...'
+ make unit_tests
+ @echo 'Running Integration Tests...'
+ make integration_tests
+ @echo 'Running Coverage Tests...'
+ make coverage
+
+######################
+# CODE QUALITY
+######################
+
+codespell: ## run codespell to check spelling
+ @poetry install --with spelling
+ poetry run codespell --toml pyproject.toml
+
+fix_codespell: ## run codespell to fix spelling errors
+ @poetry install --with spelling
+ poetry run codespell --toml pyproject.toml --write
+
+format: ## run code formatters
+ @uv run ruff check . --fix
+ @uv run ruff format . --config pyproject.toml
+ @cd src/frontend && npm run format
+
+unsafe_fix:
+ @uv run ruff check . --fix --unsafe-fixes
+
+lint: install_backend ## run linters
+ @uv run mypy --namespace-packages -p "langflow"
+
+install_frontendci:
+ @cd src/frontend && npm ci > /dev/null 2>&1
+
+install_frontendc:
+ @cd src/frontend && rm -rf node_modules package-lock.json && npm install > /dev/null 2>&1
+
+run_frontend: ## run the frontend
+ @-kill -9 `lsof -t -i:3000`
+ @cd src/frontend && npm start
+
+tests_frontend: ## run frontend tests
+ifeq ($(UI), true)
+ @cd src/frontend && npx playwright test --ui --project=chromium
+else
+ @cd src/frontend && npx playwright test --project=chromium
+endif
+
+run_cli: install_frontend install_backend build_frontend ## run the CLI
+ @echo 'Running the CLI'
+ @uv run langflow run \
+ --frontend-path $(path) \
+ --log-level $(log_level) \
+ --host $(host) \
+ --port $(port) \
+ $(if $(env),--env-file $(env),) \
+ $(if $(filter false,$(open_browser)),--no-open-browser)
+
+run_cli_debug:
+ @echo 'Running the CLI in debug mode'
+ @make install_frontend > /dev/null
+ @echo 'Building the frontend'
+ @make build_frontend > /dev/null
+ @echo 'Install backend dependencies'
+ @make install_backend > /dev/null
+ifdef env
+ @make start env=$(env) host=$(host) port=$(port) log_level=debug
+else
+ @make start host=$(host) port=$(port) log_level=debug
+endif
+
+
+setup_devcontainer: ## set up the development container
+ make install_backend
+ make install_frontend
+ make build_frontend
+ uv run langflow --frontend-path src/frontend/build
+
+setup_env: ## set up the environment
+ @sh ./scripts/setup/setup_env.sh
+
+frontend: install_frontend ## run the frontend in development mode
+ make run_frontend
+
+frontendc: install_frontendc
+ make run_frontend
+
+
+backend: setup_env install_backend ## run the backend in development mode
+ @-kill -9 $$(lsof -t -i:$(port)) || true
+ifdef login
+ @echo "Running backend autologin is $(login)";
+ LANGFLOW_AUTO_LOGIN=$(login) uv run uvicorn \
+ --factory langflow.main:create_app \
+ --host 0.0.0.0 \
+ --port $(port) \
+ $(if $(filter-out 1,$(workers)),, --reload) \
+ --env-file $(env) \
+ --loop asyncio \
+ $(if $(workers),--workers $(workers),)
+else
+ @echo "Running backend respecting the $(env) file";
+ uv run uvicorn \
+ --factory langflow.main:create_app \
+ --host 0.0.0.0 \
+ --port $(port) \
+ $(if $(filter-out 1,$(workers)),, --reload) \
+ --env-file $(env) \
+ --loop asyncio \
+ $(if $(workers),--workers $(workers),)
+endif
+
+build_and_run: setup_env ## build the project and run it
+ rm -rf dist
+ rm -rf src/backend/base/dist
+ make build
+ uv run pip install dist/*.tar.gz
+ uv run langflow run
+
+build_and_install: ## build the project and install it
+ @echo 'Removing dist folder'
+ rm -rf dist
+ rm -rf src/backend/base/dist
+ make build && uv run pip install dist/*.whl && pip install src/backend/base/dist/*.whl --force-reinstall
+
+build: setup_env ## build the frontend static files and package the project
+ifdef base
+ make install_frontendci
+ make build_frontend
+ make build_langflow_base args="$(args)"
+endif
+
+ifdef main
+ make install_frontendci
+ make build_frontend
+ make build_langflow_base args="$(args)"
+ make build_langflow args="$(args)"
+endif
+
+build_langflow_base:
+ cd src/backend/base && uv build $(args)
+ rm -rf src/backend/base/langflow/frontend
+
+build_langflow_backup:
+ uv lock && uv build
+
+build_langflow:
+ uv lock --no-upgrade
+ uv build $(args)
+ifdef restore
+ mv pyproject.toml.bak pyproject.toml
+ mv uv.lock.bak uv.lock
+endif
+
+
+docker_build: dockerfile_build clear_dockerimage ## build DockerFile
+
+docker_build_backend: dockerfile_build_be clear_dockerimage ## build Backend DockerFile
+
+docker_build_frontend: dockerfile_build_fe clear_dockerimage ## build Frontend Dockerfile
+
+dockerfile_build:
+ @echo 'BUILDING DOCKER IMAGE: ${DOCKERFILE}'
+ @docker build --rm \
+ -f ${DOCKERFILE} \
+ -t langflow:${VERSION} .
+
+dockerfile_build_be: dockerfile_build
+ @echo 'BUILDING DOCKER IMAGE BACKEND: ${DOCKERFILE_BACKEND}'
+ @docker build --rm \
+ --build-arg LANGFLOW_IMAGE=langflow:${VERSION} \
+ -f ${DOCKERFILE_BACKEND} \
+ -t langflow_backend:${VERSION} .
+
+dockerfile_build_fe: dockerfile_build
+ @echo 'BUILDING DOCKER IMAGE FRONTEND: ${DOCKERFILE_FRONTEND}'
+ @docker build --rm \
+ --build-arg LANGFLOW_IMAGE=langflow:${VERSION} \
+ -f ${DOCKERFILE_FRONTEND} \
+ -t langflow_frontend:${VERSION} .
+
+clear_dockerimage:
+ @echo 'Clearing the docker build'
+ @if docker images -f "dangling=true" -q | grep -q '.*'; then \
+ docker rmi $$(docker images -f "dangling=true" -q); \
+ fi
+
+docker_compose_up: docker_build docker_compose_down
+ @echo 'Running docker compose up'
+ docker compose -f $(DOCKER_COMPOSE) up --remove-orphans
+
+docker_compose_down:
+ @echo 'Running docker compose down'
+ docker compose -f $(DOCKER_COMPOSE) down || true
+
+dcdev_up:
+ @echo 'Running docker compose up'
+ docker compose -f docker/dev.docker-compose.yml down || true
+ docker compose -f docker/dev.docker-compose.yml up --remove-orphans
+
+lock_base:
+ cd src/backend/base && uv lock
+
+lock_langflow:
+ uv lock
+
+lock: ## lock dependencies
+ @echo 'Locking dependencies'
+ cd src/backend/base && uv lock
+ uv lock
+
+update: ## update dependencies
+ @echo 'Updating dependencies'
+ cd src/backend/base && uv sync --upgrade
+ uv sync --upgrade
+
+publish_base:
+ cd src/backend/base && uv publish
+
+publish_langflow:
+ uv publish
+
+publish_base_testpypi:
+ # TODO: update this to use the test-pypi repository
+ cd src/backend/base && uv publish -r test-pypi
+
+publish_langflow_testpypi:
+ # TODO: update this to use the test-pypi repository
+ uv publish -r test-pypi
+
+publish: ## build the frontend static files and package the project and publish it to PyPI
+ @echo 'Publishing the project'
+ifdef base
+ make publish_base
+endif
+
+ifdef main
+ make publish_langflow
+endif
+
+publish_testpypi: ## build the frontend static files and package the project and publish it to PyPI
+ @echo 'Publishing the project'
+
+ifdef base
+ #TODO: replace with uvx twine upload dist/*
+ poetry config repositories.test-pypi https://test.pypi.org/legacy/
+ make publish_base_testpypi
+endif
+
+ifdef main
+ #TODO: replace with uvx twine upload dist/*
+ poetry config repositories.test-pypi https://test.pypi.org/legacy/
+ make publish_langflow_testpypi
+endif
+
+
+# example make alembic-revision message="Add user table"
+alembic-revision: ## generate a new migration
+ @echo 'Generating a new Alembic revision'
+ cd src/backend/base/langflow/ && uv run alembic revision --autogenerate -m "$(message)"
+
+
+alembic-upgrade: ## upgrade database to the latest version
+ @echo 'Upgrading database to the latest version'
+ cd src/backend/base/langflow/ && uv run alembic upgrade head
+
+alembic-downgrade: ## downgrade database by one version
+ @echo 'Downgrading database by one version'
+ cd src/backend/base/langflow/ && uv run alembic downgrade -1
+
+alembic-current: ## show current revision
+ @echo 'Showing current Alembic revision'
+ cd src/backend/base/langflow/ && uv run alembic current
+
+alembic-history: ## show migration history
+ @echo 'Showing Alembic migration history'
+ cd src/backend/base/langflow/ && uv run alembic history --verbose
+
+alembic-check: ## check migration status
+ @echo 'Running alembic check'
+ cd src/backend/base/langflow/ && uv run alembic check
+
+alembic-stamp: ## stamp the database with a specific revision
+ @echo 'Stamping the database with revision $(revision)'
+ cd src/backend/base/langflow/ && uv run alembic stamp $(revision)
diff --git a/README.md b/README.md
index 42f63fc3f92b9b2d8afd6d3c94c133d58e8c8d74..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +0,0 @@
----
-title: Agent Flow
-emoji: đ
-colorFrom: indigo
-colorTo: purple
-sdk: static
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/deploy/.env.example b/deploy/.env.example
new file mode 100644
index 0000000000000000000000000000000000000000..a0b80bd561f4c8cf777ffb4178dac06189905cdf
--- /dev/null
+++ b/deploy/.env.example
@@ -0,0 +1,52 @@
+DOMAIN=localhost
+STACK_NAME=langflow-stack
+
+TRAEFIK_PUBLIC_NETWORK=traefik-public
+TRAEFIK_TAG=langflow-traefik
+TRAEFIK_PUBLIC_TAG=traefik-public
+
+
+# Langflow backend configuration
+LANGFLOW_LOG_LEVEL=debug
+LANGFLOW_SUPERUSER=superuser
+LANGFLOW_SUPERUSER_PASSWORD=superuser
+LANGFLOW_NEW_USER_IS_ACTIVE=False
+
+
+# Langflow frontend configuration
+BACKEND_URL=http://backend:7860
+
+# RabbitMQ configuration
+RABBITMQ_DEFAULT_USER=langflow
+RABBITMQ_DEFAULT_PASS=langflow
+
+# Database configuration
+DB_USER=langflow
+DB_PASSWORD=langflow
+DB_HOST=db
+DB_PORT=5432
+DB_NAME=langflow
+
+# DB configuration
+POSTGRES_USER=langflow
+POSTGRES_PASSWORD=langflow
+POSTGRES_DB=langflow
+POSTGRES_PORT=5432
+
+# Flower configuration
+# Disable until https://github.com/langflow-ai/langflow/pull/2655 gets released
+#LANGFLOW_CACHE_TYPE=redis
+LANGFLOW_REDIS_HOST=result_backend
+LANGFLOW_REDIS_PORT=6379
+LANGFLOW_REDIS_DB=0
+LANGFLOW_REDIS_EXPIRE=3600
+LANGFLOW_REDIS_PASSWORD=
+FLOWER_UNAUTHENTICATED_API=True
+BROKER_URL=amqp://langflow:langflow@broker:5672
+RESULT_BACKEND=redis://result_backend:6379/0
+C_FORCE_ROOT="true"
+
+
+# PGAdmin configuration
+PGADMIN_DEFAULT_EMAIL=admin@admin.com
+PGADMIN_DEFAULT_PASSWORD=admin
diff --git a/deploy/.gitignore b/deploy/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..fdbcefec6e35a4173781a9b60f337a762bfe11af
--- /dev/null
+++ b/deploy/.gitignore
@@ -0,0 +1 @@
+pgadmin
\ No newline at end of file
diff --git a/deploy/README.md b/deploy/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5456713c673c502181ce08609248489403e23d26
--- /dev/null
+++ b/deploy/README.md
@@ -0,0 +1,21 @@
+# Run Langflow
+
+
+## Docker compose
+To run Langflow with Docker compose, you need to have Docker and Docker compose installed on your machine. You can install Docker and Docker compose by following the instructions on the [official Docker documentation](https://docs.docker.com/get-docker/).
+
+The docker-compose file uses the `latest` tag; it's recommended to pull the latest version of the images before running the docker-compose file.
+
+```bash
+docker compose pull
+```
+
+To start the Langflow services, run the following command:
+
+```bash
+docker compose up
+```
+
+After running the command, you can access the Langflow services at the following URL: http://localhost:80.
+
+Edit the `.env` file to change the port or other configurations.
\ No newline at end of file
diff --git a/deploy/docker-compose.override.yml b/deploy/docker-compose.override.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0bff922993fc8b59bc60ecde58f34828011b3869
--- /dev/null
+++ b/deploy/docker-compose.override.yml
@@ -0,0 +1,65 @@
+services:
+ proxy:
+ ports:
+ - "80:80"
+ - "8090:8080"
+ command:
+ # Enable Docker in Traefik, so that it reads labels from Docker services
+ - --providers.docker
+ # Add a constraint to only use services with the label for this stack
+ # from the env var TRAEFIK_TAG
+ - --providers.docker.constraints=Label(`traefik.constraint-label-stack`, `${TRAEFIK_TAG?Variable not set}`)
+ # Do not expose all Docker services, only the ones explicitly exposed
+ - --providers.docker.exposedbydefault=false
+ # Disable Docker Swarm mode for local development
+ # - --providers.docker.swarmmode
+ # Enable the access log, with HTTP requests
+ - --accesslog
+ # Enable the Traefik log, for configurations and errors
+ - --log
+ # Enable the Dashboard and API
+ - --api
+ # Enable the Dashboard and API in insecure mode for local development
+ - --api.insecure=true
+ labels:
+ - traefik.enable=true
+ - traefik.http.routers.${STACK_NAME?Variable not set}-traefik-public-http.rule=Host(`${DOMAIN?Variable not set}`)
+ - traefik.http.services.${STACK_NAME?Variable not set}-traefik-public.loadbalancer.server.port=80
+
+ result_backend:
+ ports:
+ - "6379:6379"
+
+ pgadmin:
+ ports:
+ - "5050:5050"
+
+ flower:
+ ports:
+ - "5555:5555"
+
+ backend:
+ labels:
+ - traefik.enable=true
+ - traefik.constraint-label-stack=${TRAEFIK_TAG?Variable not set}
+ - traefik.http.routers.${STACK_NAME?Variable not set}-backend-http.rule=PathPrefix(`/api/v1`) || PathPrefix(`/docs`) || PathPrefix(`/health`)
+ - traefik.http.services.${STACK_NAME?Variable not set}-backend.loadbalancer.server.port=7860
+
+ frontend:
+ labels:
+ - traefik.enable=true
+ - traefik.constraint-label-stack=${TRAEFIK_TAG?Variable not set}
+ - traefik.http.routers.${STACK_NAME?Variable not set}-frontend-http.rule=PathPrefix(`/`)
+ - traefik.http.services.${STACK_NAME?Variable not set}-frontend.loadbalancer.server.port=80
+
+ celeryworker:
+ labels:
+ - traefik.enable=true
+ - traefik.constraint-label-stack=${TRAEFIK_TAG?Variable not set}
+ - traefik.http.routers.${STACK_NAME?Variable not set}-celeryworker-http.rule=PathPrefix(`/api/v1`) || PathPrefix(`/docs`) || PathPrefix(`/health`)
+ - traefik.http.services.${STACK_NAME?Variable not set}-celeryworker.loadbalancer.server.port=7860
+
+networks:
+ traefik-public:
+ # For local dev, don't expect an external Traefik network
+ external: false
diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d4c00fd9587e77262e4a417db847d21faff53688
--- /dev/null
+++ b/deploy/docker-compose.yml
@@ -0,0 +1,237 @@
+services:
+ proxy:
+ image: traefik:v3.0
+ env_file:
+ - .env
+ networks:
+ - ${TRAEFIK_PUBLIC_NETWORK?Variable not set}
+ - default
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ command:
+ # Enable Docker in Traefik, so that it reads labels from Docker services
+ - --providers.docker
+ # Add a constraint to only use services with the label for this stack
+ # from the env var TRAEFIK_TAG
+ - --providers.docker.constraints=Label(`traefik.constraint-label-stack`, `${TRAEFIK_TAG?Variable not set}`)
+ # Do not expose all Docker services, only the ones explicitly exposed
+ - --providers.docker.exposedbydefault=false
+ # Enable the access log, with HTTP requests
+ - --accesslog
+ # Enable the Traefik log, for configurations and errors
+ - --log
+ # Enable the Dashboard and API
+ - --api
+ deploy:
+ placement:
+ constraints:
+ - node.role == manager
+ labels:
+ # Enable Traefik for this service, to make it available in the public network
+ - traefik.enable=true
+ # Use the traefik-public network (declared below)
+ - traefik.docker.network=${TRAEFIK_PUBLIC_NETWORK?Variable not set}
+ # Use the custom label "traefik.constraint-label=traefik-public"
+ # This public Traefik will only use services with this label
+ - traefik.constraint-label=${TRAEFIK_PUBLIC_TAG?Variable not set}
+ # traefik-http set up only to use the middleware to redirect to https
+ - traefik.http.middlewares.${STACK_NAME?Variable not set}-https-redirect.redirectscheme.scheme=https
+ - traefik.http.middlewares.${STACK_NAME?Variable not set}-https-redirect.redirectscheme.permanent=true
+ # Handle host with and without "www" to redirect to only one of them
+ # Uses environment variable DOMAIN
+ # To disable www redirection remove the Host() you want to discard, here and
+ # below for HTTPS
+ - traefik.http.routers.${STACK_NAME?Variable not set}-proxy-http.rule=Host(`${DOMAIN?Variable not set}`) || Host(`www.${DOMAIN?Variable not set}`)
+ - traefik.http.routers.${STACK_NAME?Variable not set}-proxy-http.entrypoints=http
+ # traefik-https the actual router using HTTPS
+ - traefik.http.routers.${STACK_NAME?Variable not set}-proxy-https.rule=Host(`${DOMAIN?Variable not set}`) || Host(`www.${DOMAIN?Variable not set}`)
+ - traefik.http.routers.${STACK_NAME?Variable not set}-proxy-https.entrypoints=https
+ - traefik.http.routers.${STACK_NAME?Variable not set}-proxy-https.tls=true
+ # Use the "le" (Let's Encrypt) resolver created below
+ - traefik.http.routers.${STACK_NAME?Variable not set}-proxy-https.tls.certresolver=le
+ # Define the port inside of the Docker service to use
+ - traefik.http.services.${STACK_NAME?Variable not set}-proxy.loadbalancer.server.port=80
+ # Handle domain with and without "www" to redirect to only one
+ # To disable www redirection remove the next line
+ - traefik.http.middlewares.${STACK_NAME?Variable not set}-www-redirect.redirectregex.regex=^https?://(www.)?(${DOMAIN?Variable not set})/(.*)
+ # Redirect a domain with www to non-www
+ # To disable it remove the next line
+ - traefik.http.middlewares.${STACK_NAME?Variable not set}-www-redirect.redirectregex.replacement=https://${DOMAIN?Variable not set}/$${3}
+ # Redirect a domain without www to www
+ # To enable it remove the previous line and uncomment the next
+ # - traefik.http.middlewares.${STACK_NAME}-www-redirect.redirectregex.replacement=https://www.${DOMAIN}/$${3}
+ # Middleware to redirect www, to disable it remove the next line
+ - traefik.http.routers.${STACK_NAME?Variable not set}-proxy-https.middlewares=${STACK_NAME?Variable not set}-www-redirect
+ # Middleware to redirect www, and redirect HTTP to HTTPS
+ # to disable www redirection remove the section: ${STACK_NAME?Variable not set}-www-redirect,
+ - traefik.http.routers.${STACK_NAME?Variable not set}-proxy-http.middlewares=${STACK_NAME?Variable not set}-www-redirect,${STACK_NAME?Variable not set}-https-redirect
+
+ backend: &backend
+ image: "langflowai/langflow-backend:latest"
+ depends_on:
+ - db
+ - broker
+ - result_backend
+ env_file:
+ - .env
+ healthcheck:
+ test: "exit 0"
+ deploy:
+ labels:
+ - traefik.enable=true
+ - traefik.constraint-label-stack=${TRAEFIK_TAG?Variable not set}
+ - traefik.http.routers.${STACK_NAME?Variable not set}-backend-http.rule=PathPrefix(`/api/v1`) || PathPrefix(`/docs`) || PathPrefix(`/health`)
+ - traefik.http.services.${STACK_NAME?Variable not set}-backend.loadbalancer.server.port=7860
+
+ db:
+ image: postgres:15.4
+ volumes:
+ - app-db-data:/var/lib/postgresql/data/pgdata
+ environment:
+ - PGDATA=/var/lib/postgresql/data/pgdata
+ deploy:
+ placement:
+ constraints:
+ - node.labels.app-db-data == true
+ healthcheck:
+ test: "exit 0"
+ env_file:
+ - .env
+
+ pgadmin:
+ image: dpage/pgadmin4
+ networks:
+ - ${TRAEFIK_PUBLIC_NETWORK?Variable not set}
+ - default
+ volumes:
+ - pgadmin-data:/var/lib/pgadmin
+ env_file:
+ - .env
+ deploy:
+ labels:
+ - traefik.enable=true
+ - traefik.docker.network=${TRAEFIK_PUBLIC_NETWORK?Variable not set}
+ - traefik.constraint-label=${TRAEFIK_PUBLIC_TAG?Variable not set}
+ - traefik.http.routers.${STACK_NAME?Variable not set}-pgadmin-http.rule=Host(`pgadmin.${DOMAIN?Variable not set}`)
+ - traefik.http.routers.${STACK_NAME?Variable not set}-pgadmin-http.entrypoints=http
+ - traefik.http.routers.${STACK_NAME?Variable not set}-pgadmin-http.middlewares=${STACK_NAME?Variable not set}-https-redirect
+ - traefik.http.routers.${STACK_NAME?Variable not set}-pgadmin-https.rule=Host(`pgadmin.${DOMAIN?Variable not set}`)
+ - traefik.http.routers.${STACK_NAME?Variable not set}-pgadmin-https.entrypoints=https
+ - traefik.http.routers.${STACK_NAME?Variable not set}-pgadmin-https.tls=true
+ - traefik.http.routers.${STACK_NAME?Variable not set}-pgadmin-https.tls.certresolver=le
+ - traefik.http.services.${STACK_NAME?Variable not set}-pgadmin.loadbalancer.server.port=5050
+
+ result_backend:
+ image: redis:6.2.5
+ env_file:
+ - .env
+ ports:
+ - 6379:6379
+ healthcheck:
+ test: "exit 0"
+
+ celeryworker:
+ <<: *backend
+ env_file:
+ - .env
+ command: /bin/sh -c "python -m celery -A langflow.worker.celery_app worker --loglevel=INFO --concurrency=1 -n lf-worker@%h -P eventlet"
+ healthcheck:
+ test: "exit 0"
+ deploy:
+ replicas: 1
+
+ flower:
+ <<: *backend
+ env_file:
+ - .env
+ networks:
+ - default
+ environment:
+ - FLOWER_PORT=5555
+
+ command: /bin/sh -c "python -m celery -A langflow.worker.celery_app --broker=${BROKER_URL?Variable not set} flower --port=5555"
+ deploy:
+ labels:
+ - traefik.enable=true
+ - traefik.docker.network=${TRAEFIK_PUBLIC_NETWORK?Variable not set}
+ - traefik.constraint-label=${TRAEFIK_PUBLIC_TAG?Variable not set}
+ - traefik.http.routers.${STACK_NAME?Variable not set}-flower-http.rule=Host(`flower.${DOMAIN?Variable not set}`)
+ - traefik.http.routers.${STACK_NAME?Variable not set}-flower-http.entrypoints=http
+ - traefik.http.routers.${STACK_NAME?Variable not set}-flower-http.middlewares=${STACK_NAME?Variable not set}-https-redirect
+ - traefik.http.routers.${STACK_NAME?Variable not set}-flower-https.rule=Host(`flower.${DOMAIN?Variable not set}`)
+ - traefik.http.routers.${STACK_NAME?Variable not set}-flower-https.entrypoints=https
+ - traefik.http.routers.${STACK_NAME?Variable not set}-flower-https.tls=true
+ - traefik.http.routers.${STACK_NAME?Variable not set}-flower-https.tls.certresolver=le
+ - traefik.http.services.${STACK_NAME?Variable not set}-flower.loadbalancer.server.port=5555
+
+ frontend:
+ image: "langflowai/langflow-frontend:latest"
+ env_file:
+ - .env
+ restart: on-failure
+ deploy:
+ labels:
+ - traefik.enable=true
+ - traefik.constraint-label-stack=${TRAEFIK_TAG?Variable not set}
+ - traefik.http.routers.${STACK_NAME?Variable not set}-frontend-http.rule=PathPrefix(`/`)
+ - traefik.http.services.${STACK_NAME?Variable not set}-frontend.loadbalancer.server.port=80
+
+ broker:
+ # RabbitMQ management console
+ image: rabbitmq:3-management
+ environment:
+ - RABBITMQ_DEFAULT_USER=${RABBITMQ_DEFAULT_USER:-admin}
+ - RABBITMQ_DEFAULT_PASS=${RABBITMQ_DEFAULT_PASS:-admin}
+ volumes:
+ - rabbitmq_data:/etc/rabbitmq/
+ - rabbitmq_data:/var/lib/rabbitmq/
+ - rabbitmq_log:/var/log/rabbitmq/
+ ports:
+ - 5672:5672
+ - 15672:15672
+
+ prometheus:
+ image: prom/prometheus:v2.37.9
+ env_file:
+ - .env
+ volumes:
+ - ./prometheus.yml:/etc/prometheus/prometheus.yml
+ command:
+ - "--config.file=/etc/prometheus/prometheus.yml"
+ # ports:
+ # - 9090:9090
+ healthcheck:
+ test: "exit 0"
+ deploy:
+ labels:
+ - traefik.enable=true
+ - traefik.constraint-label-stack=${TRAEFIK_TAG?Variable not set}
+ - traefik.http.routers.${STACK_NAME?Variable not set}-prometheus-http.rule=PathPrefix(`/metrics`)
+ - traefik.http.services.${STACK_NAME?Variable not set}-prometheus.loadbalancer.server.port=9090
+
+ grafana:
+ image: grafana/grafana:8.2.6
+ env_file:
+ - .env
+ # ports:
+ # - 3000:3000
+ volumes:
+ - grafana_data:/var/lib/grafana
+ deploy:
+ labels:
+ - traefik.enable=true
+ - traefik.constraint-label-stack=${TRAEFIK_TAG?Variable not set}
+ - traefik.http.routers.${STACK_NAME?Variable not set}-grafana-http.rule=PathPrefix(`/grafana`)
+ - traefik.http.services.${STACK_NAME?Variable not set}-grafana.loadbalancer.server.port=3000
+
+volumes:
+ grafana_data:
+ app-db-data:
+ rabbitmq_data:
+ rabbitmq_log:
+ pgadmin-data:
+
+networks:
+ traefik-public:
+ # Allow setting it to false for testing
+ external: false # ${TRAEFIK_PUBLIC_NETWORK_IS_EXTERNAL-true}
diff --git a/deploy/prometheus.yml b/deploy/prometheus.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1d53d5b5fdb8e468828a98ade604897d2f8afb0f
--- /dev/null
+++ b/deploy/prometheus.yml
@@ -0,0 +1,11 @@
+global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+
+scrape_configs:
+ - job_name: prometheus
+ static_configs:
+ - targets: ["prometheus:9090"]
+ - job_name: flower
+ static_configs:
+ - targets: ["flower:5555"]
diff --git a/docker/.dockerignore b/docker/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..737244fba615a5bd9175be07bf232d68fd179c2c
--- /dev/null
+++ b/docker/.dockerignore
@@ -0,0 +1,9 @@
+.venv/
+**/aws
+node_modules
+**/node_modules/
+dist/
+**/build/
+src/backend/langflow/frontend
+**/langflow-pre.db
+**/langflow.db
\ No newline at end of file
diff --git a/docker/build_and_push.Dockerfile b/docker/build_and_push.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..dcebb0f7e7a5d1f2c16321b71a8b46792781826a
--- /dev/null
+++ b/docker/build_and_push.Dockerfile
@@ -0,0 +1,86 @@
+# syntax=docker/dockerfile:1
+# Keep this syntax directive! It's used to enable Docker BuildKit
+
+################################
+# BUILDER-BASE
+# Used to build deps + create our virtual environment
+################################
+
+# 1. use python:3.12.3-slim as the base image until https://github.com/pydantic/pydantic-core/issues/1292 gets resolved
+# 2. do not add --platform=$BUILDPLATFORM because the pydantic binaries must be resolved for the final architecture
+# Use a Python image with uv pre-installed
+FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS builder
+
+# Install the project into `/app`
+WORKDIR /app
+
+# Enable bytecode compilation
+ENV UV_COMPILE_BYTECODE=1
+
+# Copy from the cache instead of linking since it's a mounted volume
+ENV UV_LINK_MODE=copy
+
+RUN apt-get update \
+ && apt-get install --no-install-recommends -y \
+ # deps for building python deps
+ build-essential \
+ git \
+ # npm
+ npm \
+ # gcc
+ gcc \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN --mount=type=cache,target=/root/.cache/uv \
+ --mount=type=bind,source=uv.lock,target=uv.lock \
+ --mount=type=bind,source=README.md,target=README.md \
+ --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
+ --mount=type=bind,source=src/backend/base/README.md,target=src/backend/base/README.md \
+ --mount=type=bind,source=src/backend/base/uv.lock,target=src/backend/base/uv.lock \
+ --mount=type=bind,source=src/backend/base/pyproject.toml,target=src/backend/base/pyproject.toml \
+ uv sync --frozen --no-install-project --no-editable
+
+ADD ./src /app/src
+
+COPY src/frontend /tmp/src/frontend
+WORKDIR /tmp/src/frontend
+RUN --mount=type=cache,target=/root/.npm \
+ npm ci \
+ && npm run build \
+ && cp -r build /app/src/backend/langflow/frontend \
+ && rm -rf /tmp/src/frontend
+
+WORKDIR /app
+ADD ./pyproject.toml /app/pyproject.toml
+ADD ./uv.lock /app/uv.lock
+ADD ./README.md /app/README.md
+
+RUN --mount=type=cache,target=/root/.cache/uv \
+ uv sync --frozen --no-editable
+
+################################
+# RUNTIME
+# Setup user, utilities and copy the virtual environment only
+################################
+FROM python:3.12.3-slim AS runtime
+
+RUN useradd user -u 1000 -g 0 --no-create-home --home-dir /app/data
+COPY --from=builder --chown=1000 /app/.venv /app/.venv
+
+# Place executables in the environment at the front of the path
+ENV PATH="/app/.venv/bin:$PATH"
+
+LABEL org.opencontainers.image.title=langflow
+LABEL org.opencontainers.image.authors=['Langflow']
+LABEL org.opencontainers.image.licenses=MIT
+LABEL org.opencontainers.image.url=https://github.com/langflow-ai/langflow
+LABEL org.opencontainers.image.source=https://github.com/langflow-ai/langflow
+
+USER user
+WORKDIR /app
+
+ENV LANGFLOW_HOST=0.0.0.0
+ENV LANGFLOW_PORT=7860
+
+CMD ["langflow", "run"]
diff --git a/docker/build_and_push_backend.Dockerfile b/docker/build_and_push_backend.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..66cf652125488c053018674dfb934b3ea182543d
--- /dev/null
+++ b/docker/build_and_push_backend.Dockerfile
@@ -0,0 +1,9 @@
+# syntax=docker/dockerfile:1
+# Keep this syntax directive! It's used to enable Docker BuildKit
+
+ARG LANGFLOW_IMAGE
+FROM $LANGFLOW_IMAGE
+
+RUN rm -rf /app/.venv/langflow/frontend
+
+CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860", "--backend-only"]
diff --git a/docker/build_and_push_base.Dockerfile b/docker/build_and_push_base.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..1c46d2e4db0de7d89bfff0679c96451f76eb301e
--- /dev/null
+++ b/docker/build_and_push_base.Dockerfile
@@ -0,0 +1,92 @@
+# syntax=docker/dockerfile:1
+# Keep this syntax directive! It's used to enable Docker BuildKit
+
+
+################################
+# BUILDER-BASE
+# Used to build deps + create our virtual environment
+################################
+
+# 1. use python:3.12.3-slim as the base image until https://github.com/pydantic/pydantic-core/issues/1292 gets resolved
+# 2. do not add --platform=$BUILDPLATFORM because the pydantic binaries must be resolved for the final architecture
+# Use a Python image with uv pre-installed
+FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS builder
+
+# Install the project into `/app`
+WORKDIR /app
+
+# Enable bytecode compilation
+ENV UV_COMPILE_BYTECODE=1
+
+# Copy from the cache instead of linking since it's a mounted volume
+ENV UV_LINK_MODE=copy
+
+RUN apt-get update \
+ && apt-get install --no-install-recommends -y \
+ # deps for building python deps
+ build-essential \
+ git \
+ # npm
+ npm \
+ # gcc
+ gcc \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+# Install the project's dependencies using the lockfile and settings
+# We need to mount the root uv.lock and pyproject.toml to build the base with uv because we're still using uv workspaces
+RUN --mount=type=cache,target=/root/.cache/uv \
+ --mount=type=bind,source=src/backend/base/README.md,target=src/backend/base/README.md \
+ --mount=type=bind,source=src/backend/base/uv.lock,target=src/backend/base/uv.lock \
+ --mount=type=bind,source=src/backend/base/pyproject.toml,target=src/backend/base/pyproject.toml \
+ --mount=type=bind,source=uv.lock,target=uv.lock \
+ --mount=type=bind,source=README.md,target=README.md \
+ --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
+ cd src/backend/base && uv sync --frozen --no-install-project --no-dev --no-editable
+
+ADD ./src /app/src
+
+COPY src/frontend /tmp/src/frontend
+WORKDIR /tmp/src/frontend
+RUN npm install \
+ && npm run build \
+ && cp -r build /app/src/backend/base/langflow/frontend \
+ && rm -rf /tmp/src/frontend
+
+ADD ./src/backend/base /app/src/backend/base
+WORKDIR /app/src/backend/base
+# again we need these because of workspaces
+ADD ./pyproject.toml /app/pyproject.toml
+ADD ./uv.lock /app/uv.lock
+ADD ./src/backend/base/pyproject.toml /app/src/backend/base/pyproject.toml
+ADD ./src/backend/base/uv.lock /app/src/backend/base/uv.lock
+ADD ./src/backend/base/README.md /app/src/backend/base/README.md
+RUN --mount=type=cache,target=/root/.cache/uv \
+ uv sync --frozen --no-dev --no-editable
+
+################################
+# RUNTIME
+# Setup user, utilities and copy the virtual environment only
+################################
+FROM python:3.12.3-slim AS runtime
+
+RUN useradd user -u 1000 -g 0 --no-create-home --home-dir /app/data
+# and we use the venv at the root because of uv workspaces
+COPY --from=builder --chown=1000 /app/.venv /app/.venv
+
+# Place executables in the environment at the front of the path
+ENV PATH="/app/.venv/bin:$PATH"
+
+LABEL org.opencontainers.image.title=langflow
+LABEL org.opencontainers.image.authors=['Langflow']
+LABEL org.opencontainers.image.licenses=MIT
+LABEL org.opencontainers.image.url=https://github.com/langflow-ai/langflow
+LABEL org.opencontainers.image.source=https://github.com/langflow-ai/langflow
+
+USER user
+WORKDIR /app
+
+ENV LANGFLOW_HOST=0.0.0.0
+ENV LANGFLOW_PORT=7860
+
+CMD ["langflow-base", "run"]
diff --git a/docker/build_and_push_ep.Dockerfile b/docker/build_and_push_ep.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..1e19dada0c981b5d7786394619ab8023a39c6349
--- /dev/null
+++ b/docker/build_and_push_ep.Dockerfile
@@ -0,0 +1,91 @@
+# syntax=docker/dockerfile:1
+# Keep this syntax directive! It's used to enable Docker BuildKit
+
+################################
+# BUILDER-BASE
+# Used to build deps + create our virtual environment
+################################
+
+# 1. use python:3.12.3-slim as the base image until https://github.com/pydantic/pydantic-core/issues/1292 gets resolved
+# 2. do not add --platform=$BUILDPLATFORM because the pydantic binaries must be resolved for the final architecture
+# Use a Python image with uv pre-installed
+FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS builder
+
+# Install the project into `/app`
+WORKDIR /app
+
+# Enable bytecode compilation
+ENV UV_COMPILE_BYTECODE=1
+
+# Copy from the cache instead of linking since it's a mounted volume
+ENV UV_LINK_MODE=copy
+
+RUN apt-get update \
+ && apt-get install --no-install-recommends -y \
+ # deps for building python deps
+ build-essential \
+ git \
+ # npm
+ npm \
+ # gcc
+ gcc \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN --mount=type=cache,target=/root/.cache/uv \
+ --mount=type=bind,source=uv.lock,target=uv.lock \
+ --mount=type=bind,source=README.md,target=README.md \
+ --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
+ --mount=type=bind,source=src/backend/base/README.md,target=src/backend/base/README.md \
+ --mount=type=bind,source=src/backend/base/uv.lock,target=src/backend/base/uv.lock \
+ --mount=type=bind,source=src/backend/base/pyproject.toml,target=src/backend/base/pyproject.toml \
+ uv sync --frozen --no-install-project --no-editable
+
+ADD ./src /app/src
+
+COPY src/frontend /tmp/src/frontend
+WORKDIR /tmp/src/frontend
+RUN --mount=type=cache,target=/root/.npm \
+ npm ci \
+ && npm run build \
+ && cp -r build /app/src/backend/langflow/frontend \
+ && rm -rf /tmp/src/frontend
+
+WORKDIR /app
+ADD ./pyproject.toml /app/pyproject.toml
+ADD ./uv.lock /app/uv.lock
+ADD ./README.md /app/README.md
+
+RUN --mount=type=cache,target=/root/.cache/uv \
+ uv sync --frozen --no-editable
+
+################################
+# RUNTIME
+# Setup user, utilities and copy the virtual environment only
+################################
+FROM python:3.12.3-slim AS runtime
+
+RUN useradd user -u 1000 -g 0 --no-create-home --home-dir /app/data && \
+ mkdir /data && chown -R 1000:0 /data
+
+COPY --from=builder --chown=1000 /app/.venv /app/.venv
+
+# curl is required for langflow health checks
+RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
+
+# Place executables in the environment at the front of the path
+ENV PATH="/app/.venv/bin:$PATH"
+
+LABEL org.opencontainers.image.title=langflow
+LABEL org.opencontainers.image.authors=['Langflow']
+LABEL org.opencontainers.image.licenses=MIT
+LABEL org.opencontainers.image.url=https://github.com/langflow-ai/langflow
+LABEL org.opencontainers.image.source=https://github.com/langflow-ai/langflow
+
+WORKDIR /app
+
+ENV LANGFLOW_HOST=0.0.0.0
+ENV LANGFLOW_PORT=7860
+
+USER 1000
+ENTRYPOINT ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--backend-only"]
diff --git a/docker/cdk-docker-compose.yml b/docker/cdk-docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..987d198aa5d8586d82b31aa32e85971525985fc3
--- /dev/null
+++ b/docker/cdk-docker-compose.yml
@@ -0,0 +1,37 @@
+version: "3"
+networks:
+ langflow:
+
+services:
+ backend:
+ build:
+ context: ./
+ dockerfile: ./dev.Dockerfile
+ env_file:
+ - .env
+ ports:
+ - "7860:7860"
+ volumes:
+ - ./:/app
+ command: bash -c "uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --loop asyncio"
+ networks:
+ - langflow
+ frontend:
+ build:
+ context: ./src/frontend
+ dockerfile: ./cdk.Dockerfile
+ args:
+ - BACKEND_URL=http://backend:7860
+ depends_on:
+ - backend
+ environment:
+ - VITE_PROXY_TARGET=http://backend:7860
+ ports:
+ - "8080:3000"
+ volumes:
+ - ./src/frontend/public:/home/node/app/public
+ - ./src/frontend/src:/home/node/app/src
+ - ./src/frontend/package.json:/home/node/app/package.json
+ restart: on-failure
+ networks:
+ - langflow
diff --git a/docker/cdk.Dockerfile b/docker/cdk.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..5370c992893bc075a6b928167d3e1f84e084d57b
--- /dev/null
+++ b/docker/cdk.Dockerfile
@@ -0,0 +1,21 @@
+FROM --platform=linux/amd64 python:3.10-slim
+
+WORKDIR /app
+
+# Install Poetry
+RUN apt-get update && apt-get install gcc g++ curl build-essential postgresql-server-dev-all -y
+RUN curl -sSL https://install.python-poetry.org | python3 -
+# Add Poetry to PATH
+ENV PATH="${PATH}:/root/.local/bin"
+# Copy the pyproject.toml and poetry.lock files
+COPY poetry.lock pyproject.toml ./
+# Copy the rest of the application code
+COPY ./ ./
+
+# Install dependencies
+RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi
+
+RUN poetry add botocore
+RUN poetry add pymysql
+
+CMD ["sh", "./container-cmd-cdk.sh"]
diff --git a/docker/container-cmd-cdk.sh b/docker/container-cmd-cdk.sh
new file mode 100644
index 0000000000000000000000000000000000000000..dbab8f42fbec1a7070d19c1bd40a0ae80a2b0590
--- /dev/null
+++ b/docker/container-cmd-cdk.sh
@@ -0,0 +1,5 @@
+export LANGFLOW_DATABASE_URL="mysql+pymysql://${username}:${password}@${host}:3306/${dbname}"
+# echo $LANGFLOW_DATABASE_URL
+uvicorn --factory langflow.main:create_app --host 0.0.0.0 --port 7860 --reload --log-level debug --loop asyncio
+
+# python -m langflow run --host 0.0.0.0 --port 7860
\ No newline at end of file
diff --git a/docker/dev.Dockerfile b/docker/dev.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..ad1497ef384ffbd32b978372e43e5e264f2ffe44
--- /dev/null
+++ b/docker/dev.Dockerfile
@@ -0,0 +1,28 @@
+FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim
+ENV TZ=UTC
+
+WORKDIR /app
+
+RUN apt-get update && apt-get install -y \
+ build-essential \
+ curl \
+ npm \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+COPY . /app
+
+# Install dependencies using uv
+RUN --mount=type=cache,target=/root/.cache/uv \
+ --mount=type=bind,source=uv.lock,target=uv.lock \
+ --mount=type=bind,source=README.md,target=README.md \
+ --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
+ --mount=type=bind,source=src/backend/base/README.md,target=src/backend/base/README.md \
+ --mount=type=bind,source=src/backend/base/uv.lock,target=src/backend/base/uv.lock \
+ --mount=type=bind,source=src/backend/base/pyproject.toml,target=src/backend/base/pyproject.toml \
+ uv sync --frozen --no-install-project --no-dev
+
+EXPOSE 7860
+EXPOSE 3000
+
+CMD ["./docker/dev.start.sh"]
diff --git a/docker/dev.docker-compose.yml b/docker/dev.docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5cad3f5d2a9ddf32e3cf54dc54f7644f139d52dd
--- /dev/null
+++ b/docker/dev.docker-compose.yml
@@ -0,0 +1,42 @@
+networks:
+ dev-langflow:
+
+services:
+ langflow:
+ build:
+ context: ..
+ dockerfile: docker/dev.Dockerfile
+ image: dev-langflow
+ container_name: dev-langflow
+ restart: always
+ ports:
+ - "7860:7860"
+ - "3000:3000"
+ environment:
+ - PYTHONDONTWRITEBYTECODE=1
+ - LANGFLOW_DATABASE_URL=postgresql://langflow:langflow@postgres:5432/langflow
+ - LANGFLOW_SUPERUSER=langflow
+ - LANGFLOW_SUPERUSER_PASSWORD=langflow
+ - LANGFLOW_CONFIG_DIR=/var/lib/langflow
+ env_file:
+ - ../.env
+ volumes:
+ - ../:/app
+ depends_on:
+ - postgres
+ networks:
+ - dev-langflow
+
+
+ postgres:
+ container_name: postgres
+ image: pgvector/pgvector:pg16
+ environment:
+ POSTGRES_USER: langflow
+ POSTGRES_PASSWORD: langflow
+ POSTGRES_DB: langflow
+ ports:
+ - "5432:5432"
+ networks:
+ - dev-langflow
+
diff --git a/docker/dev.start.sh b/docker/dev.start.sh
new file mode 100755
index 0000000000000000000000000000000000000000..defa6c513555e5a1c8ae4561b5dbef2a1dfc789a
--- /dev/null
+++ b/docker/dev.start.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+cd src/frontend \
+ && rm -rf node_modules \
+ && npm install \
+ && npm run dev:docker &
+make backend
diff --git a/docker/frontend/build_and_push_frontend.Dockerfile b/docker/frontend/build_and_push_frontend.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..55f5701873871e41ffd27e9ed0b18afd40a75f79
--- /dev/null
+++ b/docker/frontend/build_and_push_frontend.Dockerfile
@@ -0,0 +1,29 @@
+# syntax=docker/dockerfile:1
+# Keep this syntax directive! It's used to enable Docker BuildKit
+
+################################
+# BUILDER-BASE
+################################
+
+# 1. force the build platform to the builder's native architecture to speed up multi-platform builds
+FROM --platform=$BUILDPLATFORM node:lts-bookworm-slim as builder-base
+COPY src/frontend /frontend
+
+RUN cd /frontend && npm install && npm run build
+
+################################
+# RUNTIME
+################################
+FROM nginxinc/nginx-unprivileged:stable-bookworm-perl as runtime
+
+LABEL org.opencontainers.image.title=langflow-frontend
+LABEL org.opencontainers.image.authors=['Langflow']
+LABEL org.opencontainers.image.licenses=MIT
+LABEL org.opencontainers.image.url=https://github.com/langflow-ai/langflow
+LABEL org.opencontainers.image.source=https://github.com/langflow-ai/langflow
+
+COPY --from=builder-base --chown=nginx /frontend/build /usr/share/nginx/html
+COPY --chown=nginx ./docker/frontend/nginx.conf /etc/nginx/conf.d/default.conf
+COPY --chown=nginx ./docker/frontend/start-nginx.sh /start-nginx.sh
+RUN chmod +x /start-nginx.sh
+ENTRYPOINT ["/start-nginx.sh"]
\ No newline at end of file
diff --git a/docker/frontend/nginx.conf b/docker/frontend/nginx.conf
new file mode 100644
index 0000000000000000000000000000000000000000..b064a5a790547467c118dde3e29fb46a50ec904c
--- /dev/null
+++ b/docker/frontend/nginx.conf
@@ -0,0 +1,28 @@
+server {
+ gzip on;
+ gzip_comp_level 2;
+ gzip_min_length 1000;
+ gzip_types text/xml text/css;
+ gzip_http_version 1.1;
+ gzip_vary on;
+ gzip_disable "MSIE [4-6] \.";
+
+ listen __FRONTEND_PORT__;
+
+ location / {
+ root /usr/share/nginx/html;
+ index index.html index.htm;
+ try_files $uri $uri/ /index.html =404;
+ }
+ location /api {
+ proxy_pass __BACKEND_URL__;
+ }
+ location /health_check {
+ proxy_pass __BACKEND_URL__;
+ }
+ location /health {
+ proxy_pass __BACKEND_URL__;
+ }
+
+ include /etc/nginx/extra-conf.d/*.conf;
+}
diff --git a/docker/frontend/start-nginx.sh b/docker/frontend/start-nginx.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6ef09745c59f95fb7add8103cc3db5750d270533
--- /dev/null
+++ b/docker/frontend/start-nginx.sh
@@ -0,0 +1,25 @@
+#!/bin/sh
+set -e
+trap 'kill -TERM $PID' TERM INT
+if [ -z "$BACKEND_URL" ]; then
+ BACKEND_URL="$1"
+fi
+if [ -z "$FRONTEND_PORT" ]; then
+ FRONTEND_PORT="$2"
+fi
+if [ -z "$FRONTEND_PORT" ]; then
+ FRONTEND_PORT="80"
+fi
+if [ -z "$BACKEND_URL" ]; then
+ echo "BACKEND_URL must be set as an environment variable or as first parameter. (e.g. http://localhost:7860)"
+ exit 1
+fi
+echo "BACKEND_URL: $BACKEND_URL"
+echo "FRONTEND_PORT: $FRONTEND_PORT"
+sed -i "s|__BACKEND_URL__|$BACKEND_URL|g" /etc/nginx/conf.d/default.conf
+sed -i "s|__FRONTEND_PORT__|$FRONTEND_PORT|g" /etc/nginx/conf.d/default.conf
+cat /etc/nginx/conf.d/default.conf
+
+
+# Start nginx
+exec nginx -g 'daemon off;'
diff --git a/docker/render.Dockerfile b/docker/render.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..e8462ab193c60858d3ae6bb6ebeeca17e5eebc5a
--- /dev/null
+++ b/docker/render.Dockerfile
@@ -0,0 +1,3 @@
+FROM langflowai/langflow:latest
+
+ENTRYPOINT ["python", "-m", "langflow", "run"]
diff --git a/docker/render.pre-release.Dockerfile b/docker/render.pre-release.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..d3aa9cbdee46221dc02154a0a4286270d084efda
--- /dev/null
+++ b/docker/render.pre-release.Dockerfile
@@ -0,0 +1 @@
+FROM langflowai/langflow:1.0-alpha
diff --git a/docker_example/Dockerfile b/docker_example/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..e7f0b422aa200a63cadb46507f8d7ac5b1e08b87
--- /dev/null
+++ b/docker_example/Dockerfile
@@ -0,0 +1,3 @@
+FROM langflowai/langflow:latest
+
+CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]
diff --git a/docker_example/README.md b/docker_example/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c5d2adb26834814e15dcf851220c0b83a332a199
--- /dev/null
+++ b/docker_example/README.md
@@ -0,0 +1,65 @@
+# Running LangFlow with Docker
+
+This guide will help you get LangFlow up and running using Docker and Docker Compose.
+
+## Prerequisites
+
+- Docker
+- Docker Compose
+
+## Steps
+
+1. Clone the LangFlow repository:
+
+ ```sh
+ git clone https://github.com/langflow-ai/langflow.git
+ ```
+
+2. Navigate to the `docker_example` directory:
+
+ ```sh
+ cd langflow/docker_example
+ ```
+
+3. Run the Docker Compose file:
+
+ ```sh
+ docker compose up
+ ```
+
+LangFlow will now be accessible at [http://localhost:7860/](http://localhost:7860/).
+
+## Docker Compose Configuration
+
+The Docker Compose configuration spins up two services: `langflow` and `postgres`.
+
+### LangFlow Service
+
+The `langflow` service uses the `langflowai/langflow:latest` Docker image and exposes port 7860. It depends on the `postgres` service.
+
+Environment variables:
+
+- `LANGFLOW_DATABASE_URL`: The connection string for the PostgreSQL database.
+- `LANGFLOW_CONFIG_DIR`: The directory where LangFlow stores logs, file storage, monitor data, and secret keys.
+
+Volumes:
+
+- `langflow-data`: This volume is mapped to `/var/lib/langflow` in the container.
+
+### PostgreSQL Service
+
+The `postgres` service uses the `postgres:16` Docker image and exposes port 5432.
+
+Environment variables:
+
+- `POSTGRES_USER`: The username for the PostgreSQL database.
+- `POSTGRES_PASSWORD`: The password for the PostgreSQL database.
+- `POSTGRES_DB`: The name of the PostgreSQL database.
+
+Volumes:
+
+- `langflow-postgres`: This volume is mapped to `/var/lib/postgresql/data` in the container.
+
+## Switching to a Specific LangFlow Version
+
+If you want to use a specific version of LangFlow, you can modify the `image` field under the `langflow` service in the Docker Compose file. For example, to use version 1.0-alpha, change `langflowai/langflow:latest` to `langflowai/langflow:1.0-alpha`.
diff --git a/docker_example/docker-compose.yml b/docker_example/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4dda514422534654e63d5dcb19d903b6c0fa3cd0
--- /dev/null
+++ b/docker_example/docker-compose.yml
@@ -0,0 +1,30 @@
+version: "3.8"
+
+services:
+ langflow:
+ image: langflowai/langflow:latest # or another version tag on https://hub.docker.com/r/langflowai/langflow
+ pull_policy: always # set to 'always' when using 'latest' image
+ ports:
+ - "7860:7860"
+ depends_on:
+ - postgres
+ environment:
+ - LANGFLOW_DATABASE_URL=postgresql://langflow:langflow@postgres:5432/langflow
+ # NOTE(review): LANGFLOW_CONFIG_DIR (where logs, file storage, monitor data and secret keys are stored) is not set here, unlike pre.docker-compose.yml — confirm the default matches the langflow-data volume mount below.
+ volumes:
+ - langflow-data:/app/langflow
+
+ postgres:
+ image: postgres:16
+ environment:
+ POSTGRES_USER: langflow
+ POSTGRES_PASSWORD: langflow
+ POSTGRES_DB: langflow
+ ports:
+ - "5432:5432"
+ volumes:
+ - langflow-postgres:/var/lib/postgresql/data
+
+volumes:
+ langflow-postgres:
+ langflow-data:
diff --git a/docker_example/pre.Dockerfile b/docker_example/pre.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..a72b72595174d731534505c6eb8b93b4b1330669
--- /dev/null
+++ b/docker_example/pre.Dockerfile
@@ -0,0 +1,3 @@
+FROM langflowai/langflow:1.0-alpha
+
+CMD ["python", "-m", "langflow", "run", "--host", "0.0.0.0", "--port", "7860"]
diff --git a/docker_example/pre.docker-compose.yml b/docker_example/pre.docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3df573df533e7ce3f258da408b75feb2e299b0c2
--- /dev/null
+++ b/docker_example/pre.docker-compose.yml
@@ -0,0 +1,30 @@
+version: "3.8"
+
+services:
+ langflow:
+ image: langflowai/langflow:1.0-alpha
+ ports:
+ - "7860:7860"
+ depends_on:
+ - postgres
+ environment:
+ - LANGFLOW_DATABASE_URL=postgresql://langflow:langflow@postgres:5432/langflow
+ # This variable defines where the logs, file storage, monitor data and secret keys are stored.
+ - LANGFLOW_CONFIG_DIR=/app/langflow
+ volumes:
+ - langflow-data:/app/langflow
+
+ postgres:
+ image: postgres:16
+ environment:
+ POSTGRES_USER: langflow
+ POSTGRES_PASSWORD: langflow
+ POSTGRES_DB: langflow
+ ports:
+ - "5432:5432"
+ volumes:
+ - langflow-postgres:/var/lib/postgresql/data
+
+volumes:
+ langflow-postgres:
+ langflow-data:
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..192bd9612032977c787eb6b1ad882cdfcbfc2b19
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1,22 @@
+# Dependencies
+/node_modules
+
+# Production
+/build
+
+# Generated files
+.docusaurus
+.cache-loader
+
+# Misc
+.DS_Store
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+*.orig
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..aaba2fa1e16eebb0ff68df9127e1afc6395c74d8
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,41 @@
+# Website
+
+This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
+
+### Installation
+
+```
+$ yarn
+```
+
+### Local Development
+
+```
+$ yarn start
+```
+
+This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
+
+### Build
+
+```
+$ yarn build
+```
+
+This command generates static content into the `build` directory and can be served using any static contents hosting service.
+
+### Deployment
+
+Using SSH:
+
+```
+$ USE_SSH=true yarn deploy
+```
+
+Not using SSH:
+
+```
+$ GIT_USER=<Your GitHub username> yarn deploy
+```
+
+If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
diff --git a/docs/babel.config.js b/docs/babel.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..e00595dae7d69190e2a9d07202616c2ea932e487
--- /dev/null
+++ b/docs/babel.config.js
@@ -0,0 +1,3 @@
+module.exports = {
+ presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
+};
diff --git a/docs/css/custom.css b/docs/css/custom.css
new file mode 100644
index 0000000000000000000000000000000000000000..5ab062952b2e791aac36d5efc0346c2bc9091aa1
--- /dev/null
+++ b/docs/css/custom.css
@@ -0,0 +1,247 @@
+/**
+ * Any CSS included here will be global. The classic template
+ * bundles Infima by default. Infima is a CSS framework designed to
+ * work well for content-centric websites.
+ */
+:root {
+ --ifm-background-color: var(--token-primary-bg-c);
+ --ifm-color-primary: hsla(330, 81%, 60%, 1);
+ --ifm-navbar-link-hover-color: initial;
+ --ifm-navbar-padding-vertical: 0;
+ --ifm-navbar-item-padding-vertical: 0;
+ --ifm-font-family-base: Inter, -apple-system, BlinkMacSystemFont, Helvetica,
+ Arial, sans-serif, "Apple Color Emoji", "Segoe UI emoji";
+ --ifm-font-family-monospace: "SFMono-Regular", "Roboto Mono", Consolas,
+ "Liberation Mono", Menlo, Courier, monospace;
+}
+
+.theme-doc-sidebar-item-category.menu__list-item:not(:first-child) {
+ margin-top: 1.5rem !important;
+}
+
+.docusaurus-highlight-code-line {
+ background-color: rgba(0, 0, 0, 0.1);
+ display: block;
+ margin: 0 calc(-1 * var(--ifm-pre-padding));
+ padding: 0 var(--ifm-pre-padding);
+}
+
+.diagonal-box {
+ transform: skewY(-6deg);
+}
+
+.diagonal-content {
+ transform: skewY(6deg);
+}
+
+[class^="announcementBar"] {
+ z-index: 10;
+}
+
+.showcase {
+ background-color: #fff;
+}
+
+.showcase-border {
+ border-color: rgba(243, 244, 246, 1);
+}
+
+.text-description {
+ color: rgba(107, 114, 128, 1);
+}
+
+p {
+ text-align: justify;
+}
+
+/* apply */
+#hero-apply {
+ z-index: -1;
+ background-image: linear-gradient(
+ var(--ifm-footer-background-color),
+ var(--ifm-navbar-background-color)
+ );
+}
+
+.apply-form {
+ background-image: linear-gradient(#fff, #f5f5fa);
+ max-width: 600px;
+}
+
+.apply-text {
+ color: #36395a;
+}
+
+/* index */
+#hero {
+ background-image: linear-gradient(
+ var(--ifm-footer-background-color),
+ var(--ifm-navbar-background-color)
+ );
+}
+
+/**
+ * Hero component title overrides to match other heading styles
+ */
+.hero-title {
+ color: rgb(28, 30, 33);
+ font-family: var(--ifm-heading-font-family);
+}
+h1 {
+ font-size: 26px;
+}
+h2 {
+ font-size: 22px;
+}
+h3 {
+ font-size: 18px;
+}
+
+body {
+ font-size: 16px;
+}
+
+.docsearch-logo {
+ color: #21243d;
+}
+
+.apply-button:hover {
+ color: #fff;
+}
+
+/* GitHub */
+.header-github-link:hover {
+ opacity: 0.6;
+}
+
+.header-github-link:before {
+ content: "";
+ width: 24px;
+ height: 24px;
+ display: flex;
+ background: url("/logos/gitLight.svg") no-repeat;
+}
+
+[data-theme="dark"] .header-github-link:before {
+ content: "";
+ width: 24px;
+ height: 24px;
+ display: flex;
+ background: url("/logos/gitDark.svg") no-repeat;
+}
+
+/* Twitter */
+.header-twitter-link:hover {
+ opacity: 0.6;
+}
+
+.header-twitter-link::before {
+ content: "";
+ width: 24px;
+ height: 24px;
+ display: flex;
+ background: url("/logos/xLight.svg");
+ background-size: contain;
+}
+
+[data-theme="dark"] .header-twitter-link::before {
+ content: "";
+ width: 24px;
+ height: 24px;
+ display: flex;
+ background: url("/logos/xDark.svg");
+ background-size: contain;
+}
+
+/* Discord */
+
+.header-discord-link {
+ margin-right: 0.5rem;
+}
+
+.header-discord-link:hover {
+ opacity: 0.6;
+}
+
+[data-theme="dark"] .header-discord-link::before {
+ content: "";
+ width: 24px;
+ height: 24px;
+ display: flex;
+ background: url("/logos/discordDark.svg");
+ background-size: contain;
+}
+
+.header-discord-link::before {
+ content: "";
+ width: 24px;
+ height: 24px;
+ display: flex;
+ background: url("/logos/discordLight.svg");
+ background-size: contain;
+}
+
+/* Images */
+.image-rendering-crisp {
+ image-rendering: crisp-edges;
+
+ /* alias for google chrome */
+ image-rendering: -webkit-optimize-contrast;
+}
+
+.image-rendering-pixel {
+ image-rendering: pixelated;
+}
+
+.img-center {
+ display: flex;
+ justify-content: center;
+ width: 100%;
+}
+
+.resized-image {
+ width: 400px;
+}
+
+/* Reduce width on mobile for Mendable Search */
+@media (max-width: 767px) {
+ .mendable-search {
+ width: 200px;
+ }
+}
+
+@media (max-width: 500px) {
+ .mendable-search {
+ width: 150px;
+ }
+}
+
+@media (max-width: 380px) {
+ .mendable-search {
+ width: 140px;
+ }
+}
+/*
+.ch-scrollycoding {
+ gap: 10rem !important;
+} */
+
+.ch-scrollycoding-content {
+ max-width: 55% !important;
+ min-width: 40% !important;
+}
+
+.ch-scrollycoding-sticker {
+ max-width: 60% !important;
+ min-width: 45% !important;
+}
+
+.ch-scrollycoding-step-content {
+ min-height: 70px;
+}
+
+.theme-doc-sidebar-item-category.theme-doc-sidebar-item-category-level-2.menu__list-item:not(
+ :first-child
+ ) {
+ margin-top: 0.25rem !important;
+}
diff --git a/docs/css/docu-notion-styles.css b/docs/css/docu-notion-styles.css
new file mode 100644
index 0000000000000000000000000000000000000000..6332fd23655dbd23ac2d5a0380a5f1bb0c3ab454
--- /dev/null
+++ b/docs/css/docu-notion-styles.css
@@ -0,0 +1,60 @@
+/* This should be added to the docusaurus.config.js in order to show some notion things correctly.
+See the option: --css-output-directory
+See the docusaurus docs: https://docusaurus.io/docs/styling-layout
+See the use in the docu-notion-sample-site: https://github.com/sillsdev/docu-notion-sample-site/blob/main/docusaurus.config.js
+*/
+
+/* Copied from
+ https://github1s.com/NotionX/react-notion-x/blob/master/packages/react-notion-x/src/styles.css#L934
+ and
+ https://github1s.com/NotionX/react-notion-x/blob/master/packages/react-notion-x/src/styles.css#L1063
+*/
+.notion-column {
+ display: flex;
+ flex-direction: column;
+ padding-top: 12px;
+ padding-bottom: 12px;
+}
+
+.notion-column > *:first-child {
+ margin-top: 0;
+ margin-left: 0;
+ margin-right: 0;
+}
+
+.notion-column > *:last-child {
+ margin-left: 0;
+ margin-right: 0;
+ margin-bottom: 0;
+}
+
+.notion-row {
+ display: flex;
+ overflow: hidden;
+ width: 100%;
+ max-width: 100%;
+}
+
+@media (max-width: 640px) {
+ .notion-row {
+ flex-direction: column;
+ }
+
+ .notion-row .notion-column {
+ width: 100% !important;
+ }
+
+ .notion-row .notion-spacer {
+ display: none;
+ }
+}
+
+.notion-spacer {
+ /* This matches the value in ColumnTransformer.ts */
+ width: calc(min(32px, 4vw));
+}
+
+.notion-spacer:last-child {
+ display: none;
+}
+/* End copied from NotionX */
diff --git a/docs/css/gifplayer.css b/docs/css/gifplayer.css
new file mode 100644
index 0000000000000000000000000000000000000000..120b9b3899d375b4c58403b5af7787881547daf7
--- /dev/null
+++ b/docs/css/gifplayer.css
@@ -0,0 +1,36 @@
+.gif_player {
+ display: inline-block;
+ position: relative;
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+ -webkit-touch-callout: none;
+ -webkit-tap-highlight-color: transparent; }
+ .gif_player .play_button {
+ background-color: rgba(0, 0, 0, 0.5);
+ border: 2px dashed #fff;
+ border-radius: 50%;
+ box-shadow: 0 0 0 3px rgba(0, 0, 0, 0.5);
+ color: #fff;
+ cursor: pointer;
+ font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+ font-size: 24px;
+ left: 50%;
+ opacity: 1;
+ padding: 14px 10px;
+ position: absolute;
+ top: 50%;
+ transform: translate(-50%, -50%) scale(1) rotate(0deg);
+ transition: transform 0.4s, opacity 0.4s; }
+ .gif_player .play_button:hover {
+ background-color: rgba(0, 0, 0, 0.7);
+ box-shadow: 0 0 0 3px rgba(0, 0, 0, 0.7); }
+ .gif_player .play_button::after {
+ content: "GIF"; }
+ .gif_player.playing .play_button {
+ transform: translate(-50%, -50%) scale(0) rotate(180deg);
+ opacity: 0.5; }
+ .gif_player img {
+ max-width: 100%; }
+
diff --git a/docs/docs/Agents/_category_.json b/docs/docs/Agents/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..279ca75eea5eff574e3fbab8d7b49fa4d57f2a79
--- /dev/null
+++ b/docs/docs/Agents/_category_.json
@@ -0,0 +1 @@
+{"position":5, "label":"Agents"}
\ No newline at end of file
diff --git a/docs/docs/Agents/agent-tool-calling-agent-component.md b/docs/docs/Agents/agent-tool-calling-agent-component.md
new file mode 100644
index 0000000000000000000000000000000000000000..d0c3bdbd971a95dd17f6907b1334b51699520d24
--- /dev/null
+++ b/docs/docs/Agents/agent-tool-calling-agent-component.md
@@ -0,0 +1,188 @@
+---
+title: Create a problem-solving agent
+sidebar_position: 2
+slug: /agents-tool-calling-agent-component
+---
+
+Developing **agents** in LangChain is complex.
+
+The `AgentComponent` is a component for easily creating an AI agent capable of analyzing tasks using tools you provide.
+
+The component contains all of the elements you'll need for creating an agent. Instead of managing LLM models and providers, pick your model and enter your API key. Instead of connecting a **Prompt** component, enter instructions in the component's **Agent Instruction** fields.
+
+
+
+Learn how to build a flow starting with the **Tool calling agent** component, and see how it can help you solve problems.
+
+## Prerequisites
+
+- [An OpenAI API key](https://platform.openai.com/)
+- [A Search API key](https://www.searchapi.io/)
+
+## Create a problem-solving agent with AgentComponent
+
+Create a problem-solving agent in Langflow, starting with the **Tool calling agent**.
+
+1. Click **New Flow**, and then click **Blank Flow**.
+2. Click and drag an **Agent** component to your workspace.
+The default settings are acceptable for now, so this guide assumes you're using **Open AI** for the LLM.
+3. Add your **Open AI API Key** to the **Agent** component.
+4. Add **Chat input** and **Chat output** components to your flow, and connect them to the tool calling agent.
+
+
+
+This basic flow enables you to chat with the agent with the **Playground** after you've connected some **Tools**.
+
+5. Connect the **Search API** tool component to your agent.
+6. Add your **Search API key** to the component.
+Your agent can now query the Search API for information.
+7. Connect a **Calculator** tool for solving basic math problems.
+8. Connect an **API Request** component to the agent.
+This component is not in the **Tools** category, but the agent can still use it as a tool by enabling **Tool Mode**.
+**Tool Mode** makes a component into a tool by adding a **Toolset** port that can be connected to an agent's **Tools** port.
+To enable **Tool Mode** on the component, click **Tool Mode**.
+The component's fields change dynamically based on the mode it's in.
+
+
+
+## Solve problems with the agent
+
+Your agent now has tools for performing a web search, doing basic math, and performing API requests. You can solve many problems with just these capabilities.
+
+* Your tabletop game group cancelled, and you're stuck at home.
+Point **API Request** to an online rules document, tell your agent `You are a fun game organizer who uses the tools at your disposal`, and play a game.
+* You need to learn a new software language quickly.
+Point **API Request** to some docs, tell your agent `You are a knowledgeable software developer who uses the tools at your disposal`, and start learning.
+
+See what problems you can solve with this flow. As your problem becomes more specialized, add a tool. For example, the [simple agent starter project](/starter-projects-simple-agent) adds a Python REPL component to solve math problems that are too challenging for the calculator.
+
+## Use an agent as a tool
+
+The agent component itself also supports **Tool Mode** for creating multi-agent flows.
+
+Add an agent to your problem-solving flow that uses a different OpenAI model for more specialized problem solving.
+
+1. Click and drag an **Agent** component to your workspace.
+2. Add your **Open AI API Key** to the **Agent** component.
+3. In the **Model Name** field, select `gpt-4o`.
+4. Click **Tool Mode** to use this new agent as a tool.
+5. Connect the new agent's **Toolset** port to the previously created agent's **Tools** port.
+6. Connect **Search API** and **API Request** to the new agent.
+The new agent will use `gpt-4o` for the larger tasks of scraping and searching information that requires large context windows.
+The problem-solving agent will now use this agent as a tool, with its unique LLM and toolset.
+
+
+
+## Add custom components as tools {#components-as-tools}
+
+An agent can use custom components as tools.
+
+1. To add a custom component to the problem-solving agent flow, click **New Custom Component**.
+
+2. Add custom Python code to the custom component.
+Here's an example text analyzer for sentiment analysis.
+
+```python
+from langflow.custom import Component
+from langflow.io import MessageTextInput, Output
+from langflow.schema import Data
+import re
+
+class TextAnalyzerComponent(Component):
+ display_name = "Text Analyzer"
+ description = "Analyzes and transforms input text."
+ documentation: str = "http://docs.langflow.org/components/custom"
+ icon = "chart-bar"
+ name = "TextAnalyzerComponent"
+
+ inputs = [
+ MessageTextInput(
+ name="input_text",
+ display_name="Input Text",
+ info="Enter text to analyze",
+ value="Hello, World!",
+ tool_mode=True,
+ ),
+ ]
+
+ outputs = [
+ Output(display_name="Analysis Result", name="output", method="analyze_text"),
+ ]
+
+ def analyze_text(self) -> Data:
+ text = self.input_text
+
+ # Perform text analysis
+ word_count = len(text.split())
+ char_count = len(text)
+ sentence_count = len(re.findall(r'\w+[.!?]', text))
+
+ # Transform text
+ reversed_text = text[::-1]
+ uppercase_text = text.upper()
+
+ analysis_result = {
+ "original_text": text,
+ "word_count": word_count,
+ "character_count": char_count,
+ "sentence_count": sentence_count,
+ "reversed_text": reversed_text,
+ "uppercase_text": uppercase_text
+ }
+
+ data = Data(value=analysis_result)
+ self.status = data
+ return data
+```
+
+3. To enable the custom component as a tool, click **Tool Mode**.
+4. Connect the tool output to the agent's tools input.
+5. Ask the agent, `What tools are you using to answer my questions?`
+Your response will be similar to the following, and will include your custom component.
+```plain
+I have access to several tools that assist me in answering your questions, including:
+Search API: This allows me to search for recent information or results on the web.
+HTTP Requests: I can make HTTP requests to various URLs to retrieve data or interact with APIs.
+Calculator: I can evaluate basic arithmetic expressions.
+Text Analyzer: I can analyze and transform input text.
+Current Date and Time: I can retrieve the current date and time in various time zones.
+```
+
+## Make any component a tool
+
+These components support **Tool Mode**:
+
+* **URL**
+* **API request**
+* **Calculator**
+* **Current date**
+
+If the component you want to use as a tool doesn't have a **Tool Mode** button, add `tool_mode=True` to the component's code under `MessageTextInput`.
+
+For example, in the [components as tools](#components-as-tools) example above, `tool_mode=True,` is added so the custom component can be used as a tool.
+
+**Tool Mode** supports the `MessageTextInput` type.
+
+```python
+inputs = [
+ MessageTextInput(
+ name="input_text",
+ display_name="Input Text",
+ info="Enter text to analyze",
+ value="Hello, World!",
+ tool_mode=True,
+ ),
+]
+```
+
+## Add flows as tools
+
+An agent can use flows that are saved in your workspace as tools with the [Flow as Tool](/components-logic#flow-as-tool) component.
+
+1. To add a **Flow as Tool** component, click and drag a **Flow as Tool** component to your workspace.
+2. Select the flow you want the agent to use as a tool.
+3. Connect the tool output to the agent's tools input.
+4. Ask the agent, `What tools are you using to answer my questions?`
+Your **Flow as Tool** flow should be visible in the response.
+
+
diff --git a/docs/docs/Agents/agents-overview.md b/docs/docs/Agents/agents-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..d8901eb76ba059d1e6a9f255d02891dc818c95ec
--- /dev/null
+++ b/docs/docs/Agents/agents-overview.md
@@ -0,0 +1,15 @@
+---
+title: Agents overview
+sidebar_position: 1
+slug: /agents-overview
+---
+
+**Agents** are AI systems that use LLMs as a brain to analyze problems and select external tools.
+
+Instead of developers having to create logical statements to direct every possible path of a program, an agent can operate with autonomy. An agent can leverage external tools and APIs to gather information and take action, demonstrate chain-of-thought reasoning, and generate tailored text for specific purposes.
+
+To simplify the development of agents, Langflow created a custom [Tool calling agent](/components-agents#agent-component) component that simplifies configuration and lets developers focus on solving problems with agents.
+
+
+
+To get started, see [Create a problem-solving agent](/agents-tool-calling-agent-component).
\ No newline at end of file
diff --git a/docs/docs/Components/_category_.json b/docs/docs/Components/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..62531e49ed926298413598e9615ded1745de85c4
--- /dev/null
+++ b/docs/docs/Components/_category_.json
@@ -0,0 +1 @@
+{"position":6, "label":"Components"}
\ No newline at end of file
diff --git a/docs/docs/Components/components-agents.md b/docs/docs/Components/components-agents.md
new file mode 100644
index 0000000000000000000000000000000000000000..53b73768fb9f0dd7116af47353ad6439b81d06e8
--- /dev/null
+++ b/docs/docs/Components/components-agents.md
@@ -0,0 +1,309 @@
+---
+title: Agents
+sidebar_position: 12
+slug: /components-agents
+---
+
+# Agent components in Langflow
+
+Agent components are used to define the behavior and capabilities of AI agents in your flow. Agents can interact with APIs, databases, and other services and use LLMs as a reasoning engine to decide which course to take in your flow.
+
+## Agent component {#agent-component}
+
+This component creates an agent that can use tools to answer questions and perform tasks based on given instructions.
+
+For more information on this component, see the [tool calling agent documentation](/agents-tool-calling-agent-component).
+
+### Inputs
+
+| Name | Type | Description |
+|----------------------|----------|-------------------------------------------------------------------------------------------------|
+| agent_llm | Dropdown | The provider of the language model that the agent will use to generate responses. |
+| system_prompt | String | Initial instructions and context provided to guide the agent's behavior. |
+| tools | List | List of tools available for the agent to use. |
+| input_value | String | The input task or question for the agent to process. |
+| add_current_date_tool| Boolean | If true, adds a tool to the agent that returns the current date. |
+
+### Outputs
+
+| Name | Type | Description |
+|----------|---------|-------------------------------------------------|
+| response | Message | The agent's response to the given input task. |
+
+## CSV Agent
+
+This component creates a CSV agent from a CSV file and LLM.
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent |
+| path | File | Path to the CSV file |
+| agent_type | String | Type of agent to create (zero-shot-react-description, openai-functions, or openai-tools) |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | CSV agent instance |
+
+## CrewAI Agent
+
+This component represents an Agent of CrewAI, allowing for the creation of specialized AI agents with defined roles, goals, and capabilities within a crew.
+
+For more information, see the [CrewAI documentation](https://docs.crewai.com/core-concepts/Agents/).
+
+### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| role | Role | The role of the agent |
+| goal | Goal | The objective of the agent |
+| backstory | Backstory | The backstory of the agent |
+| tools | Tools | Tools at agent's disposal |
+| llm | Language Model | Language model that will run the agent |
+| memory | Memory | Whether the agent should have memory or not |
+| verbose | Verbose | Enables verbose output |
+| allow_delegation | Allow Delegation | Whether the agent is allowed to delegate tasks to other agents |
+| allow_code_execution | Allow Code Execution | Whether the agent is allowed to execute code |
+| kwargs | kwargs | Additional keyword arguments for the agent |
+
+### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| output | Agent | The constructed CrewAI Agent object |
+
+## Hierarchical Crew
+
+This component represents a group of agents, managing how they should collaborate and the tasks they should perform in a hierarchical structure. This component allows for the creation of a crew with a manager overseeing the task execution.
+
+For more information, see the [CrewAI documentation](https://docs.crewai.com/how-to/Hierarchical/).
+
+### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| agents | Agents | List of Agent objects representing the crew members |
+| tasks | Tasks | List of HierarchicalTask objects representing the tasks to be executed |
+| manager_llm | Manager LLM | Language model for the manager agent (optional) |
+| manager_agent | Manager Agent | Specific agent to act as the manager (optional) |
+| verbose | Verbose | Enables verbose output for detailed logging |
+| memory | Memory | Specifies the memory configuration for the crew |
+| use_cache | Use Cache | Enables caching of results |
+| max_rpm | Max RPM | Sets the maximum requests per minute |
+| share_crew | Share Crew | Determines if the crew information is shared among agents |
+| function_calling_llm | Function Calling LLM | Specifies the language model for function calling |
+
+### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| crew | Crew | The constructed Crew object with hierarchical task execution |
+
+## JSON Agent
+
+This component creates a JSON agent from a JSON or YAML file and an LLM.
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent |
+| path | File | Path to the JSON or YAML file |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | JSON agent instance |
+
+## OpenAI Tools Agent
+
+This component creates an OpenAI Tools Agent using LangChain.
+
+For more information, see the [LangChain documentation](https://python.langchain.com/v0.1/docs/modules/agents/agent_types/openai_tools/).
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent (must be tool-enabled) |
+| system_prompt | String | System prompt for the agent |
+| user_prompt | String | User prompt template (must contain 'input' key) |
+| chat_history | List[Data] | Optional chat history for the agent |
+| tools | List[Tool] | List of tools available to the agent |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | OpenAI Tools Agent instance |
+
+## OpenAPI Agent
+
+This component creates an OpenAPI Agent to interact with APIs defined by OpenAPI specifications.
+
+For more information, see the LangChain documentation on OpenAPI Agents.
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent |
+| path | File | Path to the OpenAPI specification file (JSON or YAML) |
+| allow_dangerous_requests | Boolean | Whether to allow potentially dangerous API requests |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | OpenAPI Agent instance |
+
+## SQL Agent
+
+This component creates a SQL Agent to interact with SQL databases.
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent |
+| database_uri | String | URI of the SQL database to connect to |
+| extra_tools | List[Tool] | Additional tools to provide to the agent (optional) |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | SQL Agent instance |
+
+## Sequential Crew
+
+This component represents a group of agents with tasks that are executed sequentially. This component allows for the creation of a crew that performs tasks in a specific order.
+
+For more information, see the [CrewAI documentation](https://docs.crewai.com/how-to/Sequential/).
+
+### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| tasks | Tasks | List of SequentialTask objects representing the tasks to be executed |
+| verbose | Verbose | Enables verbose output for detailed logging |
+| memory | Memory | Specifies the memory configuration for the crew |
+| use_cache | Use Cache | Enables caching of results |
+| max_rpm | Max RPM | Sets the maximum requests per minute |
+| share_crew | Share Crew | Determines if the crew information is shared among agents |
+| function_calling_llm | Function Calling LLM | Specifies the language model for function calling |
+
+### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| crew | Crew | The constructed Crew object with sequential task execution |
+
+## Sequential task agent
+
+This component creates a CrewAI Task and its associated Agent, allowing for the definition of sequential tasks with specific agent roles and capabilities.
+
+For more information, see the [CrewAI documentation](https://docs.crewai.com/how-to/Sequential/).
+
+### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| role | Role | The role of the agent |
+| goal | Goal | The objective of the agent |
+| backstory | Backstory | The backstory of the agent |
+| tools | Tools | Tools at agent's disposal |
+| llm | Language Model | Language model that will run the agent |
+| memory | Memory | Whether the agent should have memory or not |
+| verbose | Verbose | Enables verbose output |
+| allow_delegation | Allow Delegation | Whether the agent is allowed to delegate tasks to other agents |
+| allow_code_execution | Allow Code Execution | Whether the agent is allowed to execute code |
+| agent_kwargs | Agent kwargs | Additional kwargs for the agent |
+| task_description | Task Description | Descriptive text detailing task's purpose and execution |
+| expected_output | Expected Task Output | Clear definition of expected task outcome |
+| async_execution | Async Execution | Boolean flag indicating asynchronous task execution |
+| previous_task | Previous Task | The previous task in the sequence (for chaining) |
+
+### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| task_output | Sequential Task | List of SequentialTask objects representing the created task(s) |
+
+## Tool Calling Agent
+
+This component creates a Tool Calling Agent using LangChain.
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent |
+| system_prompt | String | System prompt for the agent |
+| user_prompt | String | User prompt template (must contain 'input' key) |
+| chat_history | List[Data] | Optional chat history for the agent |
+| tools | List[Tool] | List of tools available to the agent |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | Tool Calling Agent instance |
+
+## Vector Store Agent
+
+This component creates a Vector Store Agent using LangChain.
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent |
+| vectorstore | VectorStoreInfo | Vector store information for the agent to use |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | Vector Store Agent instance |
+
+## Vector Store Router Agent
+
+This component creates a Vector Store Router Agent using LangChain.
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent |
+| vectorstores | List[VectorStoreInfo] | List of vector store information for the agent to route between |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | Vector Store Router Agent instance |
+
+## XML Agent
+
+This component creates an XML Agent using LangChain.
+
+The agent uses XML formatting for tool instructions to the Language Model.
+
+### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| llm | LanguageModel | Language model to use for the agent |
+| user_prompt | String | Custom prompt template for the agent (includes XML formatting instructions) |
+| tools | List[Tool] | List of tools available to the agent |
+
+### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| agent | AgentExecutor | XML Agent instance |
\ No newline at end of file
diff --git a/docs/docs/Components/components-custom-components.md b/docs/docs/Components/components-custom-components.md
new file mode 100644
index 0000000000000000000000000000000000000000..8b5ef11a576130dd69f64dace027ff18c0b8e247
--- /dev/null
+++ b/docs/docs/Components/components-custom-components.md
@@ -0,0 +1,494 @@
+---
+title: Custom Components
+sidebar_position: 8
+slug: /components-custom-components
+---
+
+# Custom Components
+
+Custom components are created within Langflow and extend the platform's functionality with custom, reusable Python code.
+
+Since Langflow operates with Python behind the scenes, you can implement any Python function within a Custom Component. This means you can leverage the power of libraries such as Pandas, Scikit-learn, Numpy, and thousands of other packages to create components that handle data processing in unlimited ways. You can use any type as long as the type is properly annotated in the output methods (e.g., `-> list[int]`).
+
+Custom Components create reusable and configurable components to enhance the capabilities of Langflow, making it a powerful tool for developing complex processing between user and AI messages.
+
+## How to Create Custom Components
+
+Creating custom components in Langflow involves creating a Python class that defines the component's functionality, inputs, and outputs.
+The default code provides a working structure for your custom component.
+```python
+# from langflow.field_typing import Data
+from langflow.custom import Component
+from langflow.io import MessageTextInput, Output
+from langflow.schema import Data
+
+
+class CustomComponent(Component):
+ display_name = "Custom Component"
+ description = "Use as a template to create your own component."
+ documentation: str = "http://docs.langflow.org/components/custom"
+ icon = "custom_components"
+ name = "CustomComponent"
+
+ inputs = [
+ MessageTextInput(name="input_value", display_name="Input Value", value="Hello, World!"),
+ ]
+
+ outputs = [
+ Output(display_name="Output", name="output", method="build_output"),
+ ]
+
+ def build_output(self) -> Data:
+ data = Data(value=self.input_value)
+ self.status = data
+ return data
+
+```
+
+You can create your class in your favorite text editor outside of Langflow and paste it in later, or just follow along in the code pane.
+
+1. In Langflow, from under **Helpers**, drag a **Custom Component** into the workspace.
+2. Open the component's code pane.
+3. Import dependencies.
+Your custom component inherits from the langflow `Component` class so you need to include it.
+```python
+from langflow.custom import Component
+from langflow.io import MessageTextInput, Output
+from langflow.schema import Data
+```
+4. **Define the Class**: Start by defining a Python class that inherits from `Component`. This class will encapsulate the functionality of your custom component.
+
+```python
+class CustomComponent(Component):
+ display_name = "Custom Component"
+ description = "Use as a template to create your own component."
+ documentation: str = "http://docs.langflow.org/components/custom"
+ icon = "custom_components"
+ name = "CustomComponent"
+```
+5. **Specify Inputs and Outputs**: Use Langflow's input and output classes to define the inputs and outputs of your component. They should be declared as class attributes.
+```python
+ inputs = [
+ MessageTextInput(name="input_value", display_name="Input Value", value="Hello, World!"),
+ ]
+
+ outputs = [
+ Output(display_name="Output", name="output", method="build_output"),
+ ]
+```
+6. **Implement Output Methods**: Implement methods for each output, which contain the logic of your component. These methods can access input values with `self.`, return processed values, and define what is displayed in the component with the `self.status` attribute.
+```python
+ def build_output(self) -> Data:
+ data = Data(value=self.input_value)
+ self.status = data
+ return data
+```
+7. **Use Proper Annotations**: Ensure that output methods are properly annotated with their types. Langflow uses these annotations to validate and handle data correctly. For example, this method is annotated to output `Data`.
+```python
+ def build_output(self) -> Data:
+```
+8. Click **Check & Save** to confirm your component works.
+You now have an operational custom component.
+
+
+## Add inputs and modify output methods
+
+This code defines a custom component that accepts 5 inputs and outputs a Message.
+
+Copy and paste it into the Custom Component code pane and click **Check & Save.**
+
+```python
+from langflow.custom import Component
+from langflow.inputs import StrInput, MultilineInput, MessageTextInput, SecretStrInput, IntInput, DropdownInput
+from langflow.template import Output, Input
+from langflow.schema.message import Message
+
+class MyCustomComponent(Component):
+ display_name = "My Custom Component"
+ description = "An example of a custom component with various input types."
+
+ inputs = [
+ StrInput(
+ name="username",
+ display_name="Username",
+ info="Enter your username."
+ ),
+ SecretStrInput(
+ name="password",
+ display_name="Password",
+ info="Enter your password."
+ ),
+ MessageTextInput(
+ name="special_message",
+ display_name="special_message",
+ info="Enter a special message.",
+ ),
+ IntInput(
+ name="age",
+ display_name="Age",
+ info="Enter your age."
+ ),
+ DropdownInput(
+ name="gender",
+ display_name="Gender",
+ options=["Male", "Female", "Other"],
+ info="Select your gender."
+ )
+ ]
+
+ outputs = [
+ Output(display_name="Result", name="result", method="process_inputs"),
+ ]
+
+ def process_inputs(self) -> Message:
+ """
+ Process the user inputs and return a Message object.
+
+ Returns:
+ Message: A Message object containing the processed information.
+ """
+ try:
+ processed_text = f"User {self.username} (Age: {self.age}, Gender: {self.gender}) " \
+ f"sent the following special message: {self.special_message}"
+ return Message(text=processed_text)
+ except AttributeError as e:
+ return Message(text=f"Error processing inputs: {str(e)}")
+```
+
+Since the component outputs a `Message`, you can wire it into a chat and pass messages to yourself.
+
+Your Custom Component accepts the Chat Input message through `MessageTextInput`, fills in the variables with the `process_inputs` method, and finally passes the message `User Username (Age: 49, Gender: Male) sent the following special message: Hello!` to Chat Output.
+
+By defining inputs this way, Langflow can automatically handle the validation and display of these fields in the user interface, making it easier to create robust and user-friendly custom components.
+
+All of the types detailed above derive from a general class that can also be accessed through the generic `Input` class.
+
+:::tip
+Use `MessageInput` to get the entire Message object instead of just the text.
+:::
+
+## Input Types {#3815589831f24ab792328ed233c8b00d}
+
+---
+
+
+Langflow provides several higher-level input types to simplify the creation of custom components. These input types standardize how inputs are defined, validated, and used. Here's a guide on how to use these inputs and their primary purposes:
+
+
+### **HandleInput** {#fb06c48a326043ffa46badc1ab3ba467}
+
+
+Represents an input that has a handle to a specific type (e.g., `BaseLanguageModel`, `BaseRetriever`, etc.).
+
+- **Usage:** Useful for connecting to specific component types in a flow.
+
+### **DataInput** {#0e1dcb768e38487180d720b0884a90f5}
+
+
+Represents an input that receives a `Data` object.
+
+- **Usage:** Ideal for components that process or manipulate data objects.
+- **Input Types:** `["Data"]`
+
+### **StrInput** {#4ec6e68ad9ab4cd194e8e607bc5b3411}
+
+
+Represents a standard string input field.
+
+- **Usage:** Used for any text input where the user needs to provide a string.
+- **Input Types:** `["Text"]`
+
+### **MessageInput** {#9292ac0105e14177af5eff2131b9c71b}
+
+
+Represents an input field specifically for `Message` objects.
+
+- **Usage:** Used in components that handle or process messages.
+- **Input Types:** `["Message"]`
+
+### **MessageTextInput** {#5511f5e32b944b4e973379a6bd5405e4}
+
+
+Represents a text input for messages.
+
+- **Usage:** Suitable for components that need to extract text from message objects.
+- **Input Types:** `["Message"]`
+
+### **MultilineInput** {#e6d8315b0fb44a2fb8c62c3f3184bbe9}
+
+
+Represents a text field that supports multiple lines.
+
+- **Usage:** Ideal for longer text inputs where the user might need to write extended text.
+- **Input Types:** `["Text"]`
+- **Attributes:** `multiline=True`
+
+### **SecretStrInput** {#2283c13aa5f745b8b0009f7d40e59419}
+
+
+Represents a password input field.
+
+- **Usage:** Used for sensitive text inputs where the input should be hidden (e.g., passwords, API keys).
+- **Attributes:** `password=True`
+- **Input Types:** Does not accept input types, meaning it has no input handles for previous nodes/components to connect to it.
+
+### **IntInput** {#612680db6578451daef695bd19827a56}
+
+
+Represents an integer input field.
+
+- **Usage:** Used for numeric inputs where the value should be an integer.
+- **Input Types:** `["Integer"]`
+
+### **FloatInput** {#a15e1fdae15b49fc9bfbf38f8bd7b203}
+
+
+Represents a float input field.
+
+- **Usage:** Used for numeric inputs where the value should be a floating-point number.
+- **Input Types:** `["Float"]`
+
+### **BoolInput** {#3083671e0e7f4390a03396485114be66}
+
+
+Represents a boolean input field.
+
+- **Usage:** Used for true/false or yes/no type inputs.
+- **Input Types:** `["Boolean"]`
+
+### **NestedDictInput** {#2866fc4018e743d8a45afde53f1e57be}
+
+
+Represents an input field for nested dictionaries.
+
+- **Usage:** Used for more complex data structures where the input needs to be a dictionary.
+- **Input Types:** `["NestedDict"]`
+
+### **DictInput** {#daa2c2398f694ec199b425e2ed4bcf93}
+
+
+Represents an input field for dictionaries.
+
+- **Usage:** Suitable for inputs that require a dictionary format.
+- **Input Types:** `["Dict"]`
+
+### **DropdownInput** {#14dcdef11bab4d3f8127eaf2e36a77b9}
+
+
+Represents a dropdown input field.
+
+- **Usage:** Used where the user needs to select from a predefined list of options.
+- **Attributes:** `options` to define the list of selectable options.
+- **Input Types:** `["Text"]`
+
+### **FileInput** {#73e6377dc5f446f39517a558a1291410}
+
+
+Represents a file input field.
+
+- **Usage:** Used to upload files.
+- **Attributes:** `file_types` to specify the types of files that can be uploaded.
+- **Input Types:** `["File"]`
+
+
+### Generic Input {#278e2027493e45b68746af0a5b6c06f6}
+
+
+---
+
+
+Langflow offers native input types, but you can use any type as long as they are properly annotated in the output methods (e.g., `-> list[int]`).
+
+
+The `Input` class is highly customizable, allowing you to specify a wide range of attributes for each input field. It has several attributes that can be customized:
+
+- `field_type`: Specifies the type of field (e.g., `str`, `int`). Default is `str`.
+- `required`: Boolean indicating if the field is required. Default is `False`.
+- `placeholder`: Placeholder text for the input field. Default is an empty string.
+- `is_list`: Boolean indicating if the field should accept a list of values. Default is `False`.
+- `show`: Boolean indicating if the field should be shown. Default is `True`.
+- `multiline`: Boolean indicating if the field should allow multi-line input. Default is `False`.
+- `value`: Default value for the input field. Default is `None`.
+- `file_types`: List of accepted file types (for file inputs). Default is an empty list.
+- `file_path`: File path if the field is a file input. Default is `None`.
+- `password`: Boolean indicating if the field is a password. Default is `False`.
+- `options`: List of options for the field (for dropdowns). Default is `None`.
+- `name`: Name of the input field. Default is `None`.
+- `display_name`: Display name for the input field. Default is `None`.
+- `advanced`: Boolean indicating if the field is an advanced parameter. Default is `False`.
+- `input_types`: List of accepted input types. Default is `None`.
+- `dynamic`: Boolean indicating if the field is dynamic. Default is `False`.
+- `info`: Additional information or tooltip for the input field. Default is an empty string.
+- `real_time_refresh`: Boolean indicating if the field should refresh in real-time. Default is `None`.
+- `refresh_button`: Boolean indicating if the field should have a refresh button. Default is `None`.
+- `refresh_button_text`: Text for the refresh button. Default is `None`.
+- `range_spec`: Range specification for numeric fields. Default is `None`.
+- `load_from_db`: Boolean indicating if the field should load from the database. Default is `False`.
+- `title_case`: Boolean indicating if the display name should be in title case. Default is `True`.
+
+## Create a Custom Component with Generic Input
+
+Here is an example of how to define inputs for a component using the `Input` class.
+
+Copy and paste it into the Custom Component code pane and click **Check & Save.**
+
+```python
+from langflow.template import Input, Output
+from langflow.custom import Component
+from langflow.field_typing import Text
+from langflow.schema.message import Message
+from typing import Dict, Any
+
+class TextAnalyzerComponent(Component):
+ display_name = "Text Analyzer"
+ description = "Analyzes input text and provides basic statistics."
+
+ inputs = [
+ Input(
+ name="input_text",
+ display_name="Input Text",
+ field_type="Message",
+ required=True,
+ placeholder="Enter text to analyze",
+ multiline=True,
+ info="The text you want to analyze.",
+ input_types=["Text"]
+ ),
+ Input(
+ name="include_word_count",
+ display_name="Include Word Count",
+ field_type="bool",
+ required=False,
+ info="Whether to include word count in the analysis.",
+ ),
+ Input(
+ name="perform_sentiment_analysis",
+ display_name="Perform Sentiment Analysis",
+ field_type="bool",
+ required=False,
+ info="Whether to perform basic sentiment analysis.",
+ ),
+ ]
+
+ outputs = [
+ Output(display_name="Analysis Results", name="results", method="analyze_text"),
+ ]
+
+ def analyze_text(self) -> Message:
+ # Extract text from the Message object
+ if isinstance(self.input_text, Message):
+ text = self.input_text.text
+ else:
+ text = str(self.input_text)
+
+ results = {
+ "character_count": len(text),
+ "sentence_count": text.count('.') + text.count('!') + text.count('?')
+ }
+
+ if self.include_word_count:
+ results["word_count"] = len(text.split())
+
+ if self.perform_sentiment_analysis:
+ # Basic sentiment analysis
+ text_lower = text.lower()
+ if "happy" in text_lower or "good" in text_lower:
+ sentiment = "positive"
+ elif "sad" in text_lower or "bad" in text_lower:
+ sentiment = "negative"
+ else:
+ sentiment = "neutral"
+
+ results["sentiment"] = sentiment
+
+ # Convert the results dictionary to a formatted string
+ formatted_results = "\n".join([f"{key}: {value}" for key, value in results.items()])
+
+ # Return a Message object
+ return Message(text=formatted_results)
+
+# Define how to use the inputs and outputs
+component = TextAnalyzerComponent()
+```
+
+In this custom component:
+
+- The `input_text` input is a required multi-line text field that accepts a Message object or a string. It's used to provide the text for analysis.
+
+- The `include_word_count` input is an optional boolean field. When set to True, it adds a word count to the analysis results.
+
+- The `perform_sentiment_analysis` input is an optional boolean field. When set to True, it triggers a basic sentiment analysis of the input text.
+
+The component performs basic text analysis, including character count and sentence count (based on punctuation marks). If word count is enabled, it splits the text and counts the words. If sentiment analysis is enabled, it performs a simple keyword-based sentiment classification (positive, negative, or neutral).
+
+Since the component inputs and outputs a `Message`, you can wire the component into a chat and see how the basic custom component logic interacts with your input.
+
+## Create a Custom Component with Multiple Outputs {#6f225be8a142450aa19ee8e46a3b3c8c}
+
+---
+
+
+In Langflow, custom components can have multiple outputs. Each output can be associated with a specific method in the component, allowing you to define distinct behaviors for each output path. This feature is particularly useful when you want to route data based on certain conditions or process it in multiple ways.
+
+1. **Definition of Outputs**: Each output is defined in the `outputs` list of the component. Each output is associated with a display name, an internal name, and a method that gets called to generate the output.
+2. **Output Methods**: The methods associated with outputs are responsible for generating the data for that particular output. These methods are called when the component is executed, and each method can independently produce its result.
+
+This example component has two outputs:
+
+- `process_data`: Processes the input text (e.g., converts it to uppercase) and returns it.
+- `get_processing_function`: Returns the `process_data` method itself to be reused in composition.
+
+```python
+from typing import Callable
+from langflow.custom import Component
+from langflow.inputs import StrInput
+from langflow.template import Output
+from langflow.field_typing import Text
+
+class DualOutputComponent(Component):
+ display_name = "Dual Output"
+ description = "Processes input text and returns both the result and the processing function."
+ icon = "double-arrow"
+
+ inputs = [
+ StrInput(
+ name="input_text",
+ display_name="Input Text",
+ info="The text input to be processed.",
+ ),
+ ]
+
+ outputs = [
+ Output(display_name="Processed Data", name="processed_data", method="process_data"),
+ Output(display_name="Processing Function", name="processing_function", method="get_processing_function"),
+ ]
+
+ def process_data(self) -> Text:
+ # Process the input text (e.g., convert to uppercase)
+ processed = self.input_text.upper()
+ self.status = processed
+ return processed
+
+ def get_processing_function(self) -> Callable[[], Text]:
+ # Return the processing function itself
+ return self.process_data
+```
+
+This example shows how to define multiple outputs in a custom component. The first output returns the processed data, while the second output returns the processing function itself.
+
+The `processing_function` output can be used in scenarios where the function itself is needed for further processing or dynamic flow control. Notice how both outputs are properly annotated with their respective types, ensuring clarity and type safety.
+
+
+## Special Operations
+
+Advanced methods and attributes offer additional control and functionality. Understanding how to leverage these can enhance your custom components' capabilities.
+
+- `self.inputs`: Access all defined inputs. Useful when an output method needs to interact with multiple inputs.
+- `self.outputs`: Access all defined outputs. This is particularly useful if an output function needs to trigger another output function.
+- `self.status`: Use this to update the component's status or intermediate results. It helps track the component's internal state or store temporary data.
+- `self.graph.flow_id`: Retrieve the flow ID, useful for maintaining context or debugging.
+- `self.stop("output_name")`: Use this method within an output function to prevent data from being sent through other components. It stops the execution of downstream components and is particularly useful when a component should halt based on specific conditions.
+
+## Contribute Custom Components to Langflow
+
+See [How to Contribute](/contributing-how-to-contribute#submitting-components) to contribute your custom component to Langflow.
\ No newline at end of file
diff --git a/docs/docs/Components/components-data.md b/docs/docs/Components/components-data.md
new file mode 100644
index 0000000000000000000000000000000000000000..87ea75ac73148f3f8ea315fb9928e8faff7fd586
--- /dev/null
+++ b/docs/docs/Components/components-data.md
@@ -0,0 +1,160 @@
+---
+title: Data
+sidebar_position: 3
+slug: /components-data
+---
+
+## API Request
+
+This component sends HTTP requests to the specified URLs.
+
+Use this component to interact with external APIs or services and retrieve data. Ensure that the URLs are valid and that you configure the method, headers, body, and timeout correctly.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+| ------- | ------------ | -------------------------------------------------------------------------- |
+| URLs | URLs | The URLs to target |
+| curl | curl | Paste a curl command to fill in the dictionary fields for headers and body |
+| Method | HTTP Method | The HTTP method to use, such as GET or POST |
+| Headers | Headers | The headers to include with the request |
+| Body | Request Body | The data to send with the request (for methods like POST, PATCH, PUT) |
+| Timeout | Timeout | The maximum time to wait for a response |
+
+## Directory
+
+This component recursively loads files from a directory, with options for file types, depth, and concurrency.
+
+### Parameters
+
+| Input | Type | Description |
+| ------------------ | ---------------- | -------------------------------------------------- |
+| path | MessageTextInput | Path to the directory to load files from |
+| types | MessageTextInput | File types to load (leave empty to load all types) |
+| depth | IntInput | Depth to search for files |
+| max_concurrency | IntInput | Maximum concurrency for loading files |
+| load_hidden | BoolInput | If true, hidden files will be loaded |
+| recursive | BoolInput | If true, the search will be recursive |
+| silent_errors | BoolInput | If true, errors will not raise an exception |
+| use_multithreading | BoolInput | If true, multithreading will be used |
+
+| Output | Type | Description |
+| ------ | ---------- | ----------------------------------- |
+| data | List[Data] | Loaded file data from the directory |
+
+## File
+
+The FileComponent is a class that loads and parses text files of various supported formats, converting the content into a Data object. It supports multiple file types and provides an option for silent error handling.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+| ------------- | ------------- | -------------------------------------------- |
+| path | Path | File path to load. |
+| silent_errors | Silent Errors | If true, errors will not raise an exception. |
+
+#### Outputs
+
+| Name | Display Name | Info |
+| ---- | ------------ | -------------------------------------------- |
+| data | Data | Parsed content of the file as a Data object. |
+
+## URL
+
+The URLComponent is a class that fetches content from one or more URLs, processes the content, and returns it as a list of Data objects. It ensures that the provided URLs are valid and uses WebBaseLoader to fetch the content.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+| ---- | ------------ | ---------------------- |
+| urls | URLs | Enter one or more URLs |
+
+#### Outputs
+
+| Name | Display Name | Info |
+| ---- | ------------ | ------------------------------------------------------------ |
+| data | Data | List of Data objects containing fetched content and metadata |
+
+## Gmail Loader
+
+This component loads emails from Gmail using provided credentials and filters.
+
+For more on creating a service account JSON, see [Service Account JSON](https://developers.google.com/identity/protocols/oauth2/service-account).
+
+### Parameters
+
+| Input | Type | Description |
+| ----------- | ---------------- | ------------------------------------------------------------------------------------ |
+| json_string | SecretStrInput | JSON string containing OAuth 2.0 access token information for service account access |
+| label_ids | MessageTextInput | Comma-separated list of label IDs to filter emails |
+| max_results | MessageTextInput | Maximum number of emails to load |
+
+| Output | Type | Description |
+| ------ | ---- | ----------------- |
+| data | Data | Loaded email data |
+
+## Google Drive Loader
+
+This component loads documents from Google Drive using provided credentials and a single document ID.
+
+For more on creating a service account JSON, see [Service Account JSON](https://developers.google.com/identity/protocols/oauth2/service-account).
+
+### Parameters
+
+| Input | Type | Description |
+| ----------- | ---------------- | ------------------------------------------------------------------------------------ |
+| json_string | SecretStrInput | JSON string containing OAuth 2.0 access token information for service account access |
+| document_id | MessageTextInput | Single Google Drive document ID |
+
+| Output | Type | Description |
+| ------ | ---- | -------------------- |
+| docs | Data | Loaded document data |
+
+## Google Drive Search
+
+This component searches Google Drive files using provided credentials and query parameters.
+
+For more on creating a service account JSON, see [Service Account JSON](https://developers.google.com/identity/protocols/oauth2/service-account).
+
+### Parameters
+
+| Input | Type | Description |
+| -------------- | ---------------- | ------------------------------------------------------------------------------------ |
+| token_string | SecretStrInput | JSON string containing OAuth 2.0 access token information for service account access |
+| query_item | DropdownInput | The field to query |
+| valid_operator | DropdownInput | Operator to use in the query |
+| search_term | MessageTextInput | The value to search for in the specified query item |
+| query_string | MessageTextInput | The query string used for searching (can be edited manually) |
+
+| Output | Type | Description |
+| ---------- | --------- | ----------------------------------------------- |
+| doc_urls | List[str] | URLs of the found documents |
+| doc_ids | List[str] | IDs of the found documents |
+| doc_titles | List[str] | Titles of the found documents |
+| Data | Data | Document titles and URLs in a structured format |
+
+## Webhook
+
+This component defines a webhook input for the flow. The flow can be triggered by an external HTTP POST request (webhook) sending a JSON payload.
+
+If the input is not valid JSON, the component will wrap it in a "payload" field. The component's status will reflect any errors or the processed data.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+| ---- | ------ | ---------------------------------------------- |
+| data | String | JSON payload for testing the webhook component |
+
+#### Outputs
+
+| Name | Type | Description |
+| ----------- | ---- | ------------------------------------- |
+| output_data | Data | Processed data from the webhook input |
diff --git a/docs/docs/Components/components-embedding-models.md b/docs/docs/Components/components-embedding-models.md
new file mode 100644
index 0000000000000000000000000000000000000000..b44394ced6bde15765803be74fdb674c237c90be
--- /dev/null
+++ b/docs/docs/Components/components-embedding-models.md
@@ -0,0 +1,348 @@
+---
+title: Embedding Models
+sidebar_position: 6
+slug: /components-embedding-models
+---
+
+# Embedding Models
+
+Embedding models are used to convert text into numerical vectors. These vectors can be used for various tasks such as similarity search, clustering, and classification.
+
+## AI/ML
+
+This component generates embeddings using the [AI/ML API](https://docs.aimlapi.com/api-overview/embeddings).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| model_name | String | The name of the AI/ML embedding model to use |
+| aiml_api_key | SecretString | API key for authenticating with the AI/ML service |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| embeddings | Embeddings | An instance of AIMLEmbeddingsImpl for generating embeddings |
+
+## Amazon Bedrock Embeddings
+
+This component is used to load embedding models from [Amazon Bedrock](https://aws.amazon.com/bedrock/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| credentials_profile_name | String | Name of the AWS credentials profile in ~/.aws/credentials or ~/.aws/config, which has access keys or role information |
+| model_id | String | ID of the model to call, e.g., `amazon.titan-embed-text-v1`. This is equivalent to the `modelId` property in the `list-foundation-models` API |
+| endpoint_url | String | URL to set a specific service endpoint other than the default AWS endpoint |
+| region_name | String | AWS region to use, e.g., `us-west-2`. Falls back to `AWS_DEFAULT_REGION` environment variable or region specified in ~/.aws/config if not provided |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| embeddings | Embeddings | An instance for generating embeddings using Amazon Bedrock |
+
+## Astra DB vectorize
+
+Connect this component to the **Embeddings** port of the [Astra DB vector store component](components-vector-stores#astra-db-serverless) to generate embeddings.
+
+This component requires that your Astra DB database has a collection that uses a vectorize embedding provider integration.
+For more information and instructions, see [Embedding Generation](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html).
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| provider | Embedding Provider | The embedding provider to use |
+| model_name | Model Name | The embedding model to use |
+| authentication | Authentication | The name of the API key in Astra that stores your [vectorize embedding provider credentials](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html#embedding-provider-authentication). (Not required if using an [Astra-hosted embedding provider](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html#supported-embedding-providers).) |
+| provider_api_key | Provider API Key | As an alternative to `authentication`, directly provide your embedding provider credentials. |
+| model_parameters | Model Parameters | Additional model parameters |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| embeddings | Embeddings | An instance for generating embeddings using Astra vectorize |
+
+## Azure OpenAI Embeddings
+
+This component generates embeddings using Azure OpenAI models.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| Model | String | Name of the model to use (default: `text-embedding-3-small`) |
+| Azure Endpoint | String | Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/` |
+| Deployment Name | String | The name of the deployment |
+| API Version | String | The API version to use, options include various dates |
+| API Key | String | The API key to access the Azure OpenAI service |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| embeddings | Embeddings | An instance for generating embeddings using Azure OpenAI |
+
+## Cohere Embeddings
+
+This component is used to load embedding models from [Cohere](https://cohere.com/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| cohere_api_key | String | API key required to authenticate with the Cohere service |
+| model | String | Language model used for embedding text documents and performing queries (default: `embed-english-v2.0`) |
+| truncate | Boolean | Whether to truncate the input text to fit within the model's constraints (default: `False`) |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| embeddings | Embeddings | An instance for generating embeddings using Cohere |
+
+## Embedding similarity
+
+This component computes selected forms of similarity between two embedding vectors.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| embedding_vectors | Embedding Vectors | A list containing exactly two data objects with embedding vectors to compare. |
+| similarity_metric | Similarity Metric | Select the similarity metric to use. Options: "Cosine Similarity", "Euclidean Distance", "Manhattan Distance". |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| similarity_data | Similarity Data | Data object containing the computed similarity score and additional information. |
+
+## Google generative AI embeddings
+
+This component connects to Google's generative AI embedding service using the GoogleGenerativeAIEmbeddings class from the `langchain-google-genai` package.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| api_key | API Key | Secret API key for accessing Google's generative AI service (required) |
+| model_name | Model Name | Name of the embedding model to use (default: "models/text-embedding-004") |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| embeddings | Embeddings | Built GoogleGenerativeAIEmbeddings object |
+
+## Hugging Face Embeddings
+
+:::note
+This component is deprecated as of Langflow version 1.0.18.
+Instead, use the [Hugging Face API Embeddings component](#hugging-face-embeddings-inference-api).
+:::
+
+This component loads embedding models from HuggingFace.
+
+Use this component to generate embeddings using locally downloaded Hugging Face models. Ensure you have sufficient computational resources to run the models.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| Cache Folder | Cache Folder | Folder path to cache HuggingFace models |
+| Encode Kwargs | Encoding Arguments | Additional arguments for the encoding process |
+| Model Kwargs | Model Arguments | Additional arguments for the model |
+| Model Name | Model Name | Name of the HuggingFace model to use |
+| Multi Process | Multi-Process | Whether to use multiple processes |
+
+## Hugging Face embeddings Inference API
+
+This component generates embeddings using Hugging Face Inference API models.
+
+Use this component to create embeddings with Hugging Face's hosted models. Ensure you have a valid Hugging Face API key.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| API Key | API Key | API key for accessing the Hugging Face Inference API |
+| API URL | API URL | URL of the Hugging Face Inference API |
+| Model Name | Model Name | Name of the model to use for embeddings |
+| Cache Folder | Cache Folder | Folder path to cache Hugging Face models |
+| Encode Kwargs | Encoding Arguments | Additional arguments for the encoding process |
+| Model Kwargs | Model Arguments | Additional arguments for the model |
+| Multi Process | Multi-Process | Whether to use multiple processes |
+
+## MistralAI
+
+This component generates embeddings using MistralAI models.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| model | String | The MistralAI model to use (default: "mistral-embed") |
+| mistral_api_key | SecretString | API key for authenticating with MistralAI |
+| max_concurrent_requests | Integer | Maximum number of concurrent API requests (default: 64) |
+| max_retries | Integer | Maximum number of retry attempts for failed requests (default: 5) |
+| timeout | Integer | Request timeout in seconds (default: 120) |
+| endpoint | String | Custom API endpoint URL (default: "https://api.mistral.ai/v1/") |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| embeddings | Embeddings | MistralAIEmbeddings instance for generating embeddings |
+
+## NVIDIA
+
+This component generates embeddings using NVIDIA models.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| model | String | The NVIDIA model to use for embeddings (e.g., nvidia/nv-embed-v1) |
+| base_url | String | Base URL for the NVIDIA API (default: https://integrate.api.nvidia.com/v1) |
+| nvidia_api_key | SecretString | API key for authenticating with NVIDIA's service |
+| temperature | Float | Model temperature for embedding generation (default: 0.1) |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| embeddings | Embeddings | NVIDIAEmbeddings instance for generating embeddings |
+
+## Ollama Embeddings
+
+This component generates embeddings using Ollama models.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| Ollama Model | String | Name of the Ollama model to use (default: `llama2`) |
+| Ollama Base URL | String | Base URL of the Ollama API (default: `http://localhost:11434`) |
+| Model Temperature | Float | Temperature parameter for the model. Adjusts the randomness in the generated embeddings |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| embeddings | Embeddings | An instance for generating embeddings using Ollama |
+
+## OpenAI Embeddings
+
+This component is used to load embedding models from [OpenAI](https://openai.com/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| OpenAI API Key | String | The API key to use for accessing the OpenAI API |
+| Default Headers | Dict | Default headers for the HTTP requests |
+| Default Query | NestedDict | Default query parameters for the HTTP requests |
+| Allowed Special | List | Special tokens allowed for processing (default: `[]`) |
+| Disallowed Special | List | Special tokens disallowed for processing (default: `["all"]`) |
+| Chunk Size | Integer | Chunk size for processing (default: `1000`) |
+| Client | Any | HTTP client for making requests |
+| Deployment | String | Deployment name for the model (default: `text-embedding-3-small`) |
+| Embedding Context Length | Integer | Length of embedding context (default: `8191`) |
+| Max Retries | Integer | Maximum number of retries for failed requests (default: `6`) |
+| Model | String | Name of the model to use (default: `text-embedding-3-small`) |
+| Model Kwargs | NestedDict | Additional keyword arguments for the model |
+| OpenAI API Base | String | Base URL of the OpenAI API |
+| OpenAI API Type | String | Type of the OpenAI API |
+| OpenAI API Version | String | Version of the OpenAI API |
+| OpenAI Organization | String | Organization associated with the API key |
+| OpenAI Proxy | String | Proxy server for the requests |
+| Request Timeout | Float | Timeout for the HTTP requests |
+| Show Progress Bar | Boolean | Whether to show a progress bar for processing (default: `False`) |
+| Skip Empty | Boolean | Whether to skip empty inputs (default: `False`) |
+| TikToken Enable | Boolean | Whether to enable TikToken (default: `True`) |
+| TikToken Model Name | String | Name of the TikToken model |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| embeddings | Embeddings | An instance for generating embeddings using OpenAI |
+
+## Text embedder
+
+This component generates embeddings for a given message using a specified embedding model.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| embedding_model | Embedding Model | The embedding model to use for generating embeddings. |
+| message | Message | The message for which to generate embeddings. |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| embeddings | Embedding Data | Data object containing the original text and its embedding vector. |
+
+## VertexAI Embeddings
+
+This component is a wrapper around [Google Vertex AI](https://cloud.google.com/vertex-ai) [Embeddings API](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| credentials | Credentials | The default custom credentials to use |
+| location | String | The default location to use when making API calls (default: `us-central1`) |
+| max_output_tokens | Integer | Token limit determines the maximum amount of text output from one prompt (default: `128`) |
+| model_name | String | The name of the Vertex AI large language model (default: `text-bison`) |
+| project | String | The default GCP project to use when making Vertex API calls |
+| request_parallelism | Integer | The amount of parallelism allowed for requests issued to VertexAI models (default: `5`) |
+| temperature | Float | Tunes the degree of randomness in text generations. Should be a non-negative value (default: `0`) |
+| top_k | Integer | How the model selects tokens for output, the next token is selected from the top `k` tokens (default: `40`) |
+| top_p | Float | Tokens are selected from the most probable to least until the sum of their probabilities exceeds the top `p` value (default: `0.95`) |
+| tuned_model_name | String | The name of a tuned model. If provided, `model_name` is ignored |
+| verbose | Boolean | This parameter controls the level of detail in the output. When set to `True`, it prints internal states of the chain to help debug (default: `False`) |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| embeddings | Embeddings | An instance for generating embeddings using VertexAI |
+
diff --git a/docs/docs/Components/components-helpers.md b/docs/docs/Components/components-helpers.md
new file mode 100644
index 0000000000000000000000000000000000000000..77234609a2d538f8df9c6ffba5d858d6eee9a8ce
--- /dev/null
+++ b/docs/docs/Components/components-helpers.md
@@ -0,0 +1,247 @@
+---
+title: Helpers
+sidebar_position: 4
+slug: /components-helpers
+---
+
+# Helpers
+
+Helper components provide utility functions to help manage data, tasks, and other components in your flow.
+
+## Chat Memory
+
+This component retrieves and manages chat messages from Langflow tables or an external memory.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| memory | External Memory | Retrieve messages from an external memory. If empty, it will use the Langflow tables. |
+| sender | Sender Type | Filter by sender type. |
+| sender_name | Sender Name | Filter by sender name. |
+| n_messages | Number of Messages | Number of messages to retrieve. |
+| session_id | Session ID | The session ID of the chat. If empty, the current session ID parameter will be used. |
+| order | Order | Order of the messages. |
+| template | Template | The template to use for formatting the data. It can contain the keys `{text}`, `{sender}` or any other key in the message data. |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| messages | Messages (Data) | Retrieved messages as Data objects. |
+| messages_text | Messages (Text) | Retrieved messages formatted as text. |
+| lc_memory | Memory | Built LangChain memory object. |
+
+## Combine Text
+
+This component concatenates two text sources into a single text chunk using a specified delimiter.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| first_text | First Text | The first text input to concatenate. |
+| second_text | Second Text | The second text input to concatenate. |
+| delimiter | Delimiter | A string used to separate the two text inputs. Defaults to a space. |
+
+## Create List
+
+This component dynamically creates a record with a specified number of fields.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| n_fields | Number of Fields | Number of fields to be added to the record. |
+| text_key | Text Key | Key used as text. |
+
+## Custom Component
+
+Use this component as a template to create your custom component.
+
+For more, see [Custom Components](components-custom-components).
+
+## Filter Data
+
+This component filters a Data object based on a list of keys.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| data | Data | Data object to filter. |
+| filter_criteria | Filter Criteria | List of keys to filter by. |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| filtered_data | Filtered Data | A new Data object containing only the key-value pairs that match the filter criteria. |
+
+## Hierarchical Task
+
+This component creates and manages hierarchical tasks for CrewAI agents in a Playground environment.
+
+For more information, see the [CrewAI documentation](https://docs.crewai.com/how-to/Hierarchical/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| task_description | Description | Descriptive text detailing task's purpose and execution. |
+| expected_output | Expected Output | Clear definition of expected task outcome. |
+| tools | Tools | List of tools/resources limited for task execution. Uses the Agent tools by default. |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| task_output | Task | The built hierarchical task. |
+
+## ID Generator
+
+This component generates a unique ID.
+
+### Parameters
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| value | Value | Unique ID generated. |
+
+## Parse JSON
+
+This component converts and extracts JSON fields using JQ queries.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| input_value | Input | Data object to filter. Can be a Message or Data object. |
+| query | JQ Query | JQ Query to filter the data. The input is always a JSON list. |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| filtered_data | Filtered Data | Filtered data as a list of Data objects. |
+
+## Merge Data
+
+This component combines multiple data sources into a single unified Data object.
+
+The component iterates through the input list of Data objects, merging them into a single Data object. If the input list is empty, it returns an empty Data object. If there's only one input Data object, it returns that object unchanged. The merging process uses the addition operator to combine Data objects.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| data | Data | A list of Data objects to be merged |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| merged_data | Merged Data | A single Data object containing the combined information from all input Data objects |
+
+
+## Parse Data
+
+The ParseData component converts Data objects into plain text using a specified template.
+This component transforms structured data into human-readable text formats, allowing for customizable output through the use of templates.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| data | Data | The data to convert to text |
+| template | Template | The template to use for formatting the data. It can contain the keys `{text}`, `{data}` or any other key in the Data |
+| sep | Separator | The separator to use between multiple data items |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| text | Text | The resulting formatted text string as a Message object |
+
+## Sequential Task
+
+This component creates and manages sequential tasks for CrewAI agents. It builds a SequentialTask object with the provided description, expected output, and agent, allowing for the specification of tools and asynchronous execution.
+
+For more information, see the [CrewAI documentation](https://docs.crewai.com/how-to/Sequential/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| task_description | Description | Descriptive text detailing task's purpose and execution. |
+| expected_output | Expected Output | Clear definition of expected task outcome. |
+| tools | Tools | List of tools/resources limited for task execution. Uses the Agent tools by default. |
+| agent | Agent | CrewAI Agent that will perform the task. |
+| task | Task | CrewAI Task that will perform the task. |
+| async_execution | Async Execution | Boolean flag indicating asynchronous task execution. |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| task_output | Task | The built sequential task or list of tasks. |
+
+## Split Text
+
+This component splits text into chunks of a specified length.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| texts | Texts | Texts to split. |
+| separators | Separators | Characters to split on. Defaults to a space. |
+| max_chunk_size | Max Chunk Size | The maximum length (in characters) of each chunk. |
+| chunk_overlap | Chunk Overlap | The amount of character overlap between chunks. |
+| recursive | Recursive | Whether to split recursively. |
+
+## Store Message
+
+This component stores chat messages or text into Langflow tables or an external memory.
+
+It provides flexibility in managing message storage and retrieval within a chat system.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| message | Message | The chat message to be stored. (Required) |
+| memory | External Memory | The external memory to store the message. If empty, it will use the Langflow tables. |
+| sender | Sender | The sender of the message. Can be Machine or User. If empty, the current sender parameter will be used. |
+| sender_name | Sender Name | The name of the sender. Can be AI or User. If empty, the current sender parameter will be used. |
+| session_id | Session ID | The session ID of the chat. If empty, the current session ID parameter will be used. |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| stored_messages | Stored Messages | The list of stored messages after the current message has been added. |
diff --git a/docs/docs/Components/components-io.md b/docs/docs/Components/components-io.md
new file mode 100644
index 0000000000000000000000000000000000000000..2b8f22c8d50720de7d29c0b68a65c70f2edc2d5e
--- /dev/null
+++ b/docs/docs/Components/components-io.md
@@ -0,0 +1,89 @@
+---
+title: Inputs & Outputs
+sidebar_position: 1
+slug: /components-io
+---
+
+# Inputs & Outputs
+
+This category of components defines where data enters and exits your flow. They dynamically alter the Playground and can be renamed to facilitate building and maintaining your flows.
+
+## Inputs
+
+Inputs are components used to define where data enters your flow. They can receive data from various sources, such as users, databases, or any other source that can be converted to Text or Data.
+
+### Chat Input
+
+This component collects user input from the chat.
+
+The difference between Chat Input and other Input components is the output format, the number of configurable fields, and the way they are displayed in the Playground.
+
+Chat Input components can output Text or Data. When you want to pass the sender name or sender to the next component, use the Data output. To pass only the message, use the Text output. Passing only the message is useful when saving the message to a database or a memory system like Zep.
+
+#### Parameters
+
+| Name | Display Name | Info |
+|--------------|--------------|---------------------------------------------------------------------|
+| Sender Type | Sender Type | Specifies the sender type (User or Machine). Defaults to User |
+| Sender Name | Sender Name | Specifies the name of the sender. Defaults to User |
+| Message | Message | Specifies the message text. Multiline text input |
+| Session ID | Session ID | Specifies the session ID of the chat history |
+
+:::note
+If "As Data" is true and the "Message" is a Data, the data will be updated with the Sender, Sender Name, and Session ID.
+:::
+
+### Text Input
+
+This component adds an Input field on the Playground, allowing parameter definition while running and testing your flow.
+
+The Data Template field specifies how a Data should be converted into Text. This is particularly useful when you want to extract specific information from a Data and pass it as text to the next component in the sequence.
+
+For example, if you have a Data with the following structure:
+
+```json
+{ "name": "John Doe", "age": 30, "email": "johndoe@email.com"}
+```
+
+A template with `Name: {name}, Age: {age}` will convert the Data into a text string of `Name: John Doe, Age: 30`.
+
+If you pass more than one Data, the text will be concatenated with a new line separator.
+
+#### Parameters
+
+| Name | Display Name | Info |
+|---------------|---------------|--------------------------------------------------------------------|
+| Value | Value | Specifies the text input value. Defaults to an empty string |
+| Data Template | Data Template | Specifies how a Data should be converted into Text |
+
+## Outputs
+
+Outputs define where data exits your flow. They can send data to the user, the Playground, or define how data will be displayed in the Playground.
+
+### Chat Output
+
+This component sends a message to the chat.
+
+#### Parameters
+
+| Name | Display Name | Info |
+|--------------|--------------|---------------------------------------------------------------------|
+| Sender Type | Sender Type | Specifies the sender type (Machine or User). Defaults to Machine |
+| Sender Name | Sender Name | Specifies the sender's name. Defaults to AI |
+| Session ID | Session ID | Specifies the session ID of the chat history |
+| Message | Message | Specifies the text of the message |
+
+:::note
+If "As Data" is true and the "Message" is a Data, the data will be updated with the Sender, Sender Name, and Session ID.
+:::
+
+### Text Output
+
+This component displays text data to the user without sending it to the chat. The displayed value defaults to an empty string.
+
+#### Parameters
+
+| Name | Display Name | Info |
+|-------|--------------|----------------------------------------------------------|
+| Value | Value | Specifies the text data to be displayed |
+
diff --git a/docs/docs/Components/components-loaders.md b/docs/docs/Components/components-loaders.md
new file mode 100644
index 0000000000000000000000000000000000000000..44fad826cb463fd60e5638cacf36cca5f6c262bf
--- /dev/null
+++ b/docs/docs/Components/components-loaders.md
@@ -0,0 +1,74 @@
+---
+title: Loaders
+sidebar_position: 10
+slug: /components-loaders
+---
+
+# Loaders
+
+Loaders are components used to load documents from various sources, such as databases, websites, and local files. They can be used to fetch data from external sources and convert it into a format that can be processed by other components.
+
+## Confluence
+
+The Confluence component integrates with the Confluence wiki collaboration platform to load and process documents. It utilizes the ConfluenceLoader from LangChain to fetch content from a specified Confluence space.
+
+### Parameters
+
+#### Inputs:
+
+| Name | Display Name | Info |
+| --- | --- | --- |
+| url | Site URL | The base URL of the Confluence Space (e.g., https://company.atlassian.net/wiki) |
+| username | Username | Atlassian User E-mail (e.g., email@example.com) |
+| api_key | API Key | Atlassian API Key (Create at: https://id.atlassian.com/manage-profile/security/api-tokens) |
+| space_key | Space Key | The key of the Confluence space to access |
+| cloud | Use Cloud? | Whether to use Confluence Cloud (default: true) |
+| content_format | Content Format | Specify content format (default: STORAGE) |
+| max_pages | Max Pages | Maximum number of pages to retrieve (default: 1000) |
+
+#### Outputs:
+
+| Name | Display Name | Info |
+| --- | --- | --- |
+| data | Data | List of Data objects containing the loaded Confluence documents |
+
+## GitLoader
+
+The GitLoader component uses the GitLoader from LangChain to fetch and load documents from a specified Git repository.
+
+### Parameters
+
+#### Inputs:
+
+| Name | Display Name | Info |
+| --- | --- | --- |
+| repo_path | Repository Path | The local path to the Git repository |
+| clone_url | Clone URL | The URL to clone the Git repository from (optional) |
+| branch | Branch | The branch to load files from (default: 'main') |
+| file_filter | File Filter | Patterns to filter files (e.g., '.py' to include only .py files, '!.py' to exclude .py files) |
+| content_filter | Content Filter | A regex pattern to filter files based on their content |
+
+#### Outputs:
+
+| Name | Display Name | Info |
+| --- | --- | --- |
+| data | Data | List of Data objects containing the loaded Git repository documents |
+
+## Unstructured
+
+This component uses the [Unstructured.io](https://unstructured.io/) Serverless API to load and parse files into structured data.
+
+### Parameters
+
+#### Inputs:
+
+| Name | Display Name | Info |
+| --- | --- | --- |
+| file | File | The path to the file to be parsed (supported types are listed [here](https://docs.unstructured.io/api-reference/api-services/overview#supported-file-types)) |
+| api_key | API Key | Unstructured.io Serverless API Key |
+
+#### Outputs:
+
+| Name | Display Name | Info |
+| --- | --- | --- |
+| data | Data | List of Data objects containing the parsed content from the input file |
diff --git a/docs/docs/Components/components-logic.md b/docs/docs/Components/components-logic.md
new file mode 100644
index 0000000000000000000000000000000000000000..3ab9002ad6f1c341fc37cf34c0d95eb005a4f2e7
--- /dev/null
+++ b/docs/docs/Components/components-logic.md
@@ -0,0 +1,152 @@
+---
+title: Logic
+sidebar_position: 13
+slug: /components-logic
+---
+# Logic components in Langflow
+
+Logic components provide functionalities for routing, conditional processing, and flow management.
+
+## Conditional router
+
+This component routes an input message to a corresponding output based on text comparison.
+
+The ConditionalRouterComponent routes messages based on text comparison. It evaluates a condition by comparing two text inputs using a specified operator and routes the message accordingly.
+
+### Inputs
+
+| Name | Type | Description |
+|----------------|----------|-------------------------------------------------------------------|
+| input_text | String | The primary text input for the operation. |
+| match_text | String | The text input to compare against. |
+| operator | Dropdown | The operator to apply for comparing the texts. |
+| case_sensitive | Boolean | If true, the comparison will be case sensitive. |
+| message | Message | The message to pass through either route. |
+| max_iterations | Integer | The maximum number of iterations for the conditional router. |
+| default_route | Dropdown | The default route to take when max iterations are reached. |
+
+### Outputs
+
+| Name | Type | Description |
+|--------------|---------|--------------------------------------------|
+| true_result | Message | The output when the condition is true. |
+| false_result | Message | The output when the condition is false. |
+
+## Data conditional router
+
+This component routes `Data` objects based on a condition applied to a specified key, including boolean validation.
+
+This component is particularly useful in workflows that require conditional routing of complex data structures, enabling dynamic decision-making based on data content.
+
+### Inputs
+
+| Name | Type | Description |
+|---------------|----------|-----------------------------------------------------------------------------------|
+| data_input | Data | The data object or list of data objects to process. |
+| key_name | String | The name of the key in the data object to check. |
+| operator | Dropdown | The operator to apply for comparing the values. |
+| compare_value | String | The value to compare against (not used for boolean validator). |
+
+### Outputs
+
+| Name | Type | Description |
+|--------------|-------------|------------------------------------------------------|
+| true_output | Data/List | Output when the condition is met. |
+| false_output | Data/List | Output when the condition is not met. |
+
+
+## Flow as Tool {#flow-as-tool}
+
+This component constructs a tool from a function that runs a loaded flow.
+
+### Inputs
+
+| Name | Type | Description |
+|------------------|----------|------------------------------------------------------------|
+| flow_name | Dropdown | The name of the flow to run. |
+| tool_name | String | The name of the tool. |
+| tool_description | String | The description of the tool. |
+| return_direct | Boolean | If true, returns the result directly from the tool. |
+
+### Outputs
+
+| Name | Type | Description |
+|----------------|------|----------------------------------------|
+| api_build_tool | Tool | The constructed tool from the flow. |
+
+## Listen
+
+This component listens for a notification and retrieves its associated state.
+
+### Inputs
+
+| Name | Type | Description |
+|------|--------|------------------------------------------------|
+| name | String | The name of the notification to listen for. |
+
+### Outputs
+
+| Name | Type | Description |
+|--------|------|--------------------------------------------|
+| output | Data | The state associated with the notification. |
+
+## Notify
+
+This component generates a notification for the Listen component to use.
+
+### Inputs
+
+| Name | Type | Description |
+|--------|---------|-------------------------------------------------------------------|
+| name | String | The name of the notification. |
+| data | Data | The data to store in the notification. |
+| append | Boolean | If true, the record will be appended to the existing notification. |
+
+### Outputs
+
+| Name | Type | Description |
+|--------|------|-----------------------------------------|
+| output | Data | The data stored in the notification. |
+
+## Run flow
+
+This component allows you to run a specified flow with given inputs and tweaks.
+
+The RunFlowComponent executes a specified flow within a larger workflow. It provides the ability to run a flow with custom inputs and apply tweaks to modify its behavior.
+
+### Inputs
+
+| Name | Type | Description |
+|-------------|--------------|-------------------------------------------------------|
+| input_value | String | The input value for the flow to process. |
+| flow_name | Dropdown | The name of the flow to run. |
+| tweaks | Nested Dict | Tweaks to apply to the flow. |
+
+### Outputs
+
+| Name | Type | Description |
+|-------------|-------------|------------------------------------------------|
+| run_outputs | List[Data] | The results generated from running the flow. |
+
+## Sub Flow
+
+This `SubFlowComponent` generates a component from a flow with all of its inputs and outputs.
+
+This component can integrate entire flows as components within a larger workflow. It dynamically generates inputs based on the selected flow and executes the flow with provided parameters.
+
+### Inputs
+
+| Name | Type | Description |
+|-----------|----------|------------------------------------|
+| flow_name | Dropdown | The name of the flow to run. |
+
+### Outputs
+
+| Name | Type | Description |
+|--------------|-------------|---------------------------------------|
+| flow_outputs | List[Data] | The outputs generated from the flow. |
+
+
+
+
+
diff --git a/docs/docs/Components/components-memories.md b/docs/docs/Components/components-memories.md
new file mode 100644
index 0000000000000000000000000000000000000000..5dfd45d0a436a2c2c22f4062b05cf68dce15a5d3
--- /dev/null
+++ b/docs/docs/Components/components-memories.md
@@ -0,0 +1,70 @@
+# Memories
+
+Chat memory components store and retrieve chat messages by `session_id`.
+
+## AstraDBChatMemory Component
+
+This component creates an `AstraDBChatMessageHistory` instance, which allows for storing and retrieving chat messages using Astra DB, a cloud-native database service.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------------------|---------------|-----------------------------------------------------------------------|
+| collection_name | String | Name of the Astra DB collection for storing messages. Required. |
+| token | SecretString | Authentication token for Astra DB access. Required. |
+| api_endpoint | SecretString | API endpoint URL for the Astra DB service. Required. |
+| namespace | String | Optional namespace within Astra DB for the collection. |
+| session_id | MessageText | Chat session ID. Uses current session ID if not provided. |
+
+#### Outputs
+
+| Name | Type | Description |
+|-----------------|-------------------------|-----------------------------------------------------------|
+| message_history | BaseChatMessageHistory | An instance of AstraDBChatMessageHistory for the session. |
+
+## CassandraChatMemory Component
+
+This component creates a `CassandraChatMessageHistory` instance, enabling storage and retrieval of chat messages using Apache Cassandra or DataStax Astra DB.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|----------------|---------------|-------------------------------------------------------------------------------|
+| database_ref | MessageText | Contact points for Cassandra or Astra DB database ID. Required. |
+| username | MessageText | Username for Cassandra (leave empty for Astra DB). |
+| token | SecretString | Password for Cassandra or token for Astra DB. Required. |
+| keyspace | MessageText | Keyspace in Cassandra or namespace in Astra DB. Required. |
+| table_name | MessageText | Name of the table or collection for storing messages. Required. |
+| session_id | MessageText | Unique identifier for the chat session. Optional. |
+| cluster_kwargs | Dictionary | Additional keyword arguments for Cassandra cluster configuration. Optional. |
+
+#### Outputs
+
+| Name | Type | Description |
+|-----------------|-------------------------|--------------------------------------------------------------|
+| message_history | BaseChatMessageHistory | An instance of CassandraChatMessageHistory for the session. |
+
+## ZepChatMemory Component
+
+This component creates a `ZepChatMessageHistory` instance, enabling storage and retrieval of chat messages using Zep, a memory server for Large Language Models (LLMs).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|---------------|---------------|-----------------------------------------------------------|
+| url | MessageText | URL of the Zep instance. Required. |
+| api_key | SecretString | API Key for authentication with the Zep instance. |
+| api_base_path | Dropdown | API version to use. Options: "api/v1" or "api/v2". |
+| session_id | MessageText | Unique identifier for the chat session. Optional. |
+
+#### Outputs
+
+| Name | Type | Description |
+|-----------------|-------------------------|-------------------------------------------------------|
+| message_history | BaseChatMessageHistory | An instance of ZepChatMessageHistory for the session. |
\ No newline at end of file
diff --git a/docs/docs/Components/components-models.md b/docs/docs/Components/components-models.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ae3997e07b300b646209e595b405fa8d7f6057d
--- /dev/null
+++ b/docs/docs/Components/components-models.md
@@ -0,0 +1,385 @@
+---
+title: Models
+sidebar_position: 5
+slug: /components-models
+---
+
+# Models
+
+Model components are used to generate text using language models. These components can be used to generate text for various tasks such as chatbots, content generation, and more.
+
+## AI/ML API
+
+This component creates a ChatOpenAI model instance using the AIML API.
+
+For more information, see [AIML documentation](https://docs.aimlapi.com/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|--------------|-------------|---------------------------------------------------------------------------------------------|
+| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. Range: 0-128000. |
+| model_kwargs | Dictionary | Additional keyword arguments for the model. |
+| model_name | String | The name of the AIML model to use. Options are predefined in AIML_CHAT_MODELS. |
+| aiml_api_base| String | The base URL of the AIML API. Defaults to https://api.aimlapi.com. |
+| api_key | SecretString| The AIML API Key to use for the model. |
+| temperature | Float | Controls randomness in the output. Default: 0.1. |
+| seed | Integer | Controls reproducibility of the job. |
+
+#### Outputs
+
+| Name | Type | Description |
+|-------|---------------|------------------------------------------------------------------|
+| model | LanguageModel | An instance of ChatOpenAI configured with the specified parameters. |
+
+## Amazon Bedrock
+
+This component generates text using Amazon Bedrock LLMs.
+
+For more information, see [Amazon Bedrock documentation](https://docs.aws.amazon.com/bedrock).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------------------------|--------------|-------------------------------------------------------------------------------------|
+| model_id | String | The ID of the Amazon Bedrock model to use. Options include various models. |
+| aws_access_key | SecretString | AWS Access Key for authentication. |
+| aws_secret_key | SecretString | AWS Secret Key for authentication. |
+| credentials_profile_name | String | Name of the AWS credentials profile to use (advanced). |
+| region_name | String | AWS region name. Default: "us-east-1". |
+| model_kwargs | Dictionary | Additional keyword arguments for the model (advanced). |
+| endpoint_url | String | Custom endpoint URL for the Bedrock service (advanced). |
+
+#### Outputs
+
+| Name | Type | Description |
+|-------|---------------|-------------------------------------------------------------------|
+| model | LanguageModel | An instance of ChatBedrock configured with the specified parameters. |
+
+## Anthropic
+
+This component allows the generation of text using Anthropic Chat and Language models.
+
+For more information, see the [Anthropic documentation](https://docs.anthropic.com/en/docs/welcome).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|---------------------|-------------|----------------------------------------------------------------------------------------|
+| max_tokens          | Integer     | The maximum number of tokens to generate. Set to 0 for unlimited tokens. Default: 4096. |
+| model | String | The name of the Anthropic model to use. Options include various Claude 3 models. |
+| anthropic_api_key | SecretString| Your Anthropic API key for authentication. |
+| temperature | Float | Controls randomness in the output. Default: 0.1. |
+| anthropic_api_url | String | Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified (advanced). |
+| prefill | String | Prefill text to guide the model's response (advanced). |
+
+#### Outputs
+
+| Name | Type | Description |
+|-------|---------------|------------------------------------------------------------------|
+| model | LanguageModel | An instance of ChatAnthropic configured with the specified parameters. |
+
+## Azure OpenAI
+
+This component generates text using Azure OpenAI LLM.
+
+For more information, see the [Azure OpenAI documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|---------------------|---------------------|---------------------------------------------------------------------------------|
+| Model Name | Model Name | Specifies the name of the Azure OpenAI model to be used for text generation. |
+| Azure Endpoint | Azure Endpoint | Your Azure endpoint, including the resource. |
+| Deployment Name | Deployment Name | Specifies the name of the deployment. |
+| API Version | API Version | Specifies the version of the Azure OpenAI API to be used. |
+| API Key | API Key | Your Azure OpenAI API key. |
+| Temperature | Temperature | Specifies the sampling temperature. Defaults to `0.7`. |
+| Max Tokens | Max Tokens | Specifies the maximum number of tokens to generate. Defaults to `1000`. |
+| Input Value | Input Value | Specifies the input text for text generation. |
+| Stream | Stream | Specifies whether to stream the response from the model. Defaults to `False`. |
+
+## Cohere
+
+This component generates text using Cohere's language models.
+
+For more information, see the [Cohere documentation](https://cohere.ai/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|---------------------|--------------------|----------------------------------------------------------|
+| Cohere API Key | Cohere API Key | Your Cohere API key. |
+| Max Tokens | Max Tokens | Specifies the maximum number of tokens to generate. Defaults to `256`. |
+| Temperature | Temperature | Specifies the sampling temperature. Defaults to `0.75`. |
+| Input Value | Input Value | Specifies the input text for text generation. |
+
+## Google Generative AI
+
+This component generates text using Google's Generative AI models.
+
+For more information, see the [Google Generative AI documentation](https://ai.google.dev/docs).
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|---------------------|--------------------|-----------------------------------------------------------------------|
+| Google API Key | Google API Key | Your Google API key to use for the Google Generative AI. |
+| Model | Model | The name of the model to use, such as `"gemini-pro"`. |
+| Max Output Tokens | Max Output Tokens | The maximum number of tokens to generate. |
+| Temperature | Temperature | Run inference with this temperature. |
+| Top K | Top K | Consider the set of top K most probable tokens. |
+| Top P | Top P | The maximum cumulative probability of tokens to consider when sampling. |
+| N | N | Number of chat completions to generate for each prompt. |
+
+## Groq
+
+This component generates text using Groq's language models.
+
+For more information, see the [Groq documentation](https://groq.com/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|----------------|---------------|-----------------------------------------------------------------|
+| groq_api_key | SecretString | API key for the Groq API. |
+| groq_api_base | String | Base URL path for API requests. Default: "https://api.groq.com" (advanced). |
+| max_tokens | Integer | The maximum number of tokens to generate (advanced). |
+| temperature | Float | Controls randomness in the output. Range: [0.0, 1.0]. Default: 0.1. |
+| n | Integer | Number of chat completions to generate for each prompt (advanced). |
+| model_name | String | The name of the Groq model to use. Options are dynamically fetched from the Groq API. |
+
+#### Outputs
+
+| Name | Type | Description |
+|-------|---------------|------------------------------------------------------------------|
+| model | LanguageModel | An instance of ChatGroq configured with the specified parameters. |
+
+## Hugging Face API
+
+This component generates text using Hugging Face's language models.
+
+For more information, see the [Hugging Face documentation](https://huggingface.co/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|---------------------|-------------------|-------------------------------------------|
+| Endpoint URL | Endpoint URL | The URL of the Hugging Face Inference API endpoint. |
+| Task | Task | Specifies the task for text generation. |
+| API Token | API Token | The API token required for authentication.|
+| Model Kwargs | Model Kwargs | Additional keyword arguments for the model.|
+| Input Value | Input Value | The input text for text generation. |
+
+## Maritalk
+
+This component generates text using Maritalk LLMs.
+
+For more information, see [Maritalk documentation](https://www.maritalk.com/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|----------------|---------------|-----------------------------------------------------------------|
+| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. Default: 512. |
+| model_name | String | The name of the Maritalk model to use. Options: "sabia-2-small", "sabia-2-medium". Default: "sabia-2-small". |
+| api_key | SecretString | The Maritalk API Key to use for authentication. |
+| temperature | Float | Controls randomness in the output. Range: [0.0, 1.0]. Default: 0.5. |
+| endpoint_url | String | The Maritalk API endpoint. Default: https://api.maritalk.com. |
+
+#### Outputs
+
+| Name | Type | Description |
+|-------|---------------|------------------------------------------------------------------|
+| model | LanguageModel | An instance of ChatMaritalk configured with the specified parameters. |
+
+## Mistral
+
+This component generates text using MistralAI LLMs.
+
+For more information, see [Mistral AI documentation](https://docs.mistral.ai/).
+
+### Parameters
+
+#### Inputs
+| Name | Type | Description |
+|---------------------|--------------|-----------------------------------------------------------------------------------------------|
+| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens (advanced). |
+| model_name | String | The name of the Mistral AI model to use. Options include "open-mixtral-8x7b", "open-mixtral-8x22b", "mistral-small-latest", "mistral-medium-latest", "mistral-large-latest", and "codestral-latest". Default: "codestral-latest". |
+| mistral_api_base | String | The base URL of the Mistral API. Defaults to https://api.mistral.ai/v1 (advanced). |
+| api_key | SecretString | The Mistral API Key to use for authentication. |
+| temperature | Float | Controls randomness in the output. Default: 0.5. |
+| max_retries | Integer | Maximum number of retries for API calls. Default: 5 (advanced). |
+| timeout | Integer | Timeout for API calls in seconds. Default: 60 (advanced). |
+| max_concurrent_requests | Integer | Maximum number of concurrent API requests. Default: 3 (advanced). |
+| top_p | Float | Nucleus sampling parameter. Default: 1 (advanced). |
+| random_seed | Integer | Seed for random number generation. Default: 1 (advanced). |
+| safe_mode | Boolean | Enables safe mode for content generation (advanced). |
+
+#### Outputs
+| Name | Type | Description |
+|--------|---------------|-----------------------------------------------------|
+| model | LanguageModel | An instance of ChatMistralAI configured with the specified parameters. |
+
+## NVIDIA
+
+This component generates text using NVIDIA LLMs.
+
+For more information, see [NVIDIA AI Foundation Models documentation](https://developer.nvidia.com/ai-foundation-models).
+
+### Parameters
+
+#### Inputs
+| Name | Type | Description |
+|---------------------|--------------|-----------------------------------------------------------------------------------------------|
+| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens (advanced). |
+| model_name | String | The name of the NVIDIA model to use. Default: "mistralai/mixtral-8x7b-instruct-v0.1". |
+| base_url | String | The base URL of the NVIDIA API. Default: "https://integrate.api.nvidia.com/v1". |
+| nvidia_api_key | SecretString | The NVIDIA API Key for authentication. |
+| temperature | Float | Controls randomness in the output. Default: 0.1. |
+| seed | Integer | The seed controls the reproducibility of the job (advanced). Default: 1. |
+
+#### Outputs
+| Name | Type | Description |
+|--------|---------------|-----------------------------------------------------|
+| model | LanguageModel | An instance of ChatNVIDIA configured with the specified parameters. |
+
+## Ollama
+
+This component generates text using Ollama's language models.
+
+For more information, see [Ollama documentation](https://ollama.com/).
+
+### Parameters
+
+#### Inputs
+| Name | Display Name | Info |
+|---------------------|---------------|---------------------------------------------|
+| Base URL | Base URL | Endpoint of the Ollama API. |
+| Model Name | Model Name | The model name to use. |
+| Temperature | Temperature | Controls the creativity of model responses. |
+
+## OpenAI
+
+This component generates text using OpenAI's language models.
+
+For more information, see [OpenAI documentation](https://beta.openai.com/docs/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|---------------------|---------------|------------------------------------------------------------------|
+| api_key | SecretString | Your OpenAI API Key. |
+| model | String | The name of the OpenAI model to use. Options include "gpt-3.5-turbo" and "gpt-4". |
+| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. |
+| temperature | Float | Controls randomness in the output. Range: [0.0, 1.0]. Default: 0.7. |
+| top_p | Float | Controls the nucleus sampling. Range: [0.0, 1.0]. Default: 1.0. |
+| frequency_penalty | Float | Controls the frequency penalty. Range: [0.0, 2.0]. Default: 0.0. |
+| presence_penalty | Float | Controls the presence penalty. Range: [0.0, 2.0]. Default: 0.0. |
+
+#### Outputs
+
+| Name | Type | Description |
+|-------|---------------|------------------------------------------------------------------|
+| model | LanguageModel | An instance of OpenAI model configured with the specified parameters. |
+
+## Qianfan
+
+This component generates text using Qianfan's language models.
+
+For more information, see [Qianfan documentation](https://github.com/baidubce/bce-qianfan-sdk).
+
+## Perplexity
+
+This component generates text using Perplexity's language models.
+
+For more information, see [Perplexity documentation](https://perplexity.ai/).
+
+### Parameters
+
+#### Inputs
+| Name | Type | Description |
+|---------------------|--------------|-----------------------------------------------------------------------------------------------|
+| model_name | String | The name of the Perplexity model to use. Options include various Llama 3.1 models. |
+| max_output_tokens | Integer | The maximum number of tokens to generate. |
+| api_key | SecretString | The Perplexity API Key for authentication. |
+| temperature | Float | Controls randomness in the output. Default: 0.75. |
+| top_p | Float | The maximum cumulative probability of tokens to consider when sampling (advanced). |
+| n | Integer | Number of chat completions to generate for each prompt (advanced). |
+| top_k | Integer | Number of top tokens to consider for top-k sampling. Must be positive (advanced). |
+
+#### Outputs
+| Name | Type | Description |
+|--------|---------------|-----------------------------------------------------|
+| model | LanguageModel | An instance of ChatPerplexity configured with the specified parameters. |
+
+## SambaNova
+
+This component generates text using SambaNova LLMs.
+
+For more information, see [Sambanova Cloud documentation](https://cloud.sambanova.ai/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|---------------------|---------------|------------------------------------------------------------------|
+| sambanova_url | String | Base URL path for API requests. Default: "https://api.sambanova.ai/v1/chat/completions". |
+| sambanova_api_key | SecretString | Your SambaNova API Key. |
+| model_name | String | The name of the Sambanova model to use. Options include various Llama models. |
+| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. |
+| temperature | Float | Controls randomness in the output. Range: [0.0, 1.0]. Default: 0.07. |
+
+#### Outputs
+
+| Name | Type | Description |
+|-------|---------------|------------------------------------------------------------------|
+| model | LanguageModel | An instance of SambaNova model configured with the specified parameters. |
+
+## VertexAI
+
+This component generates text using Vertex AI LLMs.
+
+For more information, see [Google Vertex AI documentation](https://cloud.google.com/vertex-ai).
+
+### Parameters
+
+#### Inputs
+| Name | Type | Description |
+|---------------------|--------------|-----------------------------------------------------------------------------------------------|
+| credentials | File | JSON credentials file. Leave empty to fallback to environment variables. File type: JSON. |
+| model_name | String | The name of the Vertex AI model to use. Default: "gemini-1.5-pro". |
+| project | String | The project ID (advanced). |
+| location | String | The location for the Vertex AI API. Default: "us-central1" (advanced). |
+| max_output_tokens | Integer | The maximum number of tokens to generate (advanced). |
+| max_retries | Integer | Maximum number of retries for API calls. Default: 1 (advanced). |
+| temperature | Float | Controls randomness in the output. Default: 0.0. |
+| top_k | Integer | The number of highest probability vocabulary tokens to keep for top-k-filtering (advanced). |
+| top_p | Float | The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Default: 0.95 (advanced). |
+| verbose | Boolean | Whether to print verbose output. Default: False (advanced). |
+
+#### Outputs
+| Name | Type | Description |
+|--------|---------------|-----------------------------------------------------|
+| model | LanguageModel | An instance of ChatVertexAI configured with the specified parameters. |
+
diff --git a/docs/docs/Components/components-overview.md b/docs/docs/Components/components-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..53e89bdce330b1a9776fd12a61d550c6272d1cf7
--- /dev/null
+++ b/docs/docs/Components/components-overview.md
@@ -0,0 +1,71 @@
+---
+title: How to build flows with components
+sidebar_position: 0
+slug: /components-overview
+---
+
+A component is a single building block within a flow. It consists of inputs, outputs, and parameters that define its functionality. These elements provide a convenient and straightforward way to compose LLM-based applications. Learn more about components and how they work below.
+
+
+During the flow creation process, you will notice handles (colored circles) attached to one or both sides of a component. These handles use distinct colors to indicate the types of inputs and outputs that can be interconnected. Hover over a handle to see connection details.
+
+
+
+
+
+On the top right corner of the component, you'll find a play button to run a component. Once it runs, a status icon appears and you can hover over that to visualize success or error messages. Start interacting with your AI by clicking the **Playground** at the bottom right of the workspace.
+
+
+## Component menu {#7e3f2f8ff5074b2fb3eee97c9cfaabe7}
+
+
+Each component is unique, but they all have a menu bar at the top that looks something like this.
+
+
+
+
+
+It consists of options such as:
+
+- **Code** — Modify the component's Python code and save it.
+- **Controls** — Adjust all parameters of a component.
+- **Freeze Path** — After a component runs, lock its previous output state to prevent it from re-running.
+
+Click **All** (the "..." button) to see all options.
+
+
+## Output preview {#ed7b3c34e0774b8a916b0e68821c9a7a}
+
+
+Langflow includes an output visualizer for components that opens a pop-up screen. This allows you to easily inspect and monitor transmissions between components, providing instant feedback on your workflows.
+
+
+## Advanced settings {#b6430d4903df44f0ba4618a558c83d7b}
+
+
+Langflow components can be edited by clicking the **Advanced Settings** button.
+
+
+Hide parameters with the **Show** button to reduce complexity and keep the workspace clean and intuitive for experimentation.
+
+
+You can also double-click a component's name and description to modify those. Component descriptions accept markdown syntax.
+
+
+## Group components {#c3f5ed818e3b40ceb6534dc358e1a5f2}
+
+
+Multiple components can be grouped into a single component for reuse. This is useful when combining large flows into single components (like RAG with a vector database, for example) and saving space.
+
+1. Hold **Shift** and drag to select components.
+2. Select **Group**.
+3. The components merge into a single component.
+4. Double-click the name and description to change them.
+5. Save your grouped component in the sidebar for later use!
+
+## Component version {#887fd587589448dc8c27336d1c235b9b}
+
+A component's state is stored in a database, while sidebar components are like starter templates. As soon as you drag a component from the sidebar to the workspace, the two components are no longer in parity.
+
+
+The component will keep the version it had when it was added to the workspace. Click the **Update Component** icon (exclamation mark) to bring the component up to the `latest` version. This will change the code of the component in place so you can validate that the component was updated by checking its Python code before and after updating it.
diff --git a/docs/docs/Components/components-prompts.md b/docs/docs/Components/components-prompts.md
new file mode 100644
index 0000000000000000000000000000000000000000..55ff7d7473623be9d00e280a298b58bcab0a5160
--- /dev/null
+++ b/docs/docs/Components/components-prompts.md
@@ -0,0 +1,46 @@
+---
+title: Prompts
+sidebar_position: 2
+slug: /components-prompts
+---
+
+# Prompts
+
+A prompt serves as the input to a language model, comprising multiple components that can be parameterized using prompt templates.
+
+Prompt templates provide a systematic approach for generating prompts, allowing for reproducible customization through defined input variables.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|----------|--------------|-------------------------------------------------------------------|
+| template | Template | Create a prompt template with dynamic variables. |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|--------|----------------|--------------------------------------------------------|
+| prompt | Prompt Message | The built prompt message returned by the `build_prompt` method. |
+
+## Langchain Hub Prompt Template
+
+This component fetches prompts from the [Langchain Hub](https://docs.smith.langchain.com/old/category/prompt-hub).
+
+When a prompt is loaded, the component generates input fields for custom variables. For example, the default prompt "efriis/my-first-prompt" generates fields for `profession` and `question`.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|--------------------|---------------------------|------------------------------------------|
+| langchain_api_key | Your LangChain API Key | The LangChain API Key to use. |
+| langchain_hub_prompt| LangChain Hub Prompt | The LangChain Hub prompt to use. |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|--------|--------------|-------------------------------------------------------------------|
+| prompt | Build Prompt | The built prompt message returned by the `build_prompt` method. |
diff --git a/docs/docs/Components/components-rag.md b/docs/docs/Components/components-rag.md
new file mode 100644
index 0000000000000000000000000000000000000000..9af9468fbfa4d27dbdcf6f7049949496e93fef59
--- /dev/null
+++ b/docs/docs/Components/components-rag.md
@@ -0,0 +1,36 @@
+---
+title: RAG
+sidebar_position: 9
+slug: /components-rag
+---
+
+RAG (Retrieval-Augmented Generation) components process a user query by retrieving relevant documents and generating a concise summary that addresses the user's question.
+
+## Vectara RAG
+
+This component leverages Vectara's Retrieval Augmented Generation (RAG) capabilities to search and summarize documents based on the provided input. For more information, see the [Vectara documentation](https://docs.vectara.com/docs/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|-----------------------|--------------|------------------------------------------------------------|
+| vectara_customer_id | String | Vectara customer ID |
+| vectara_corpus_id | String | Vectara corpus ID |
+| vectara_api_key | SecretString | Vectara API key |
+| search_query | String | The query to receive an answer on |
+| lexical_interpolation | Float | Hybrid search factor (0.005 to 0.1) |
+| filter | String | Metadata filters to narrow the search |
+| reranker | String | Reranker type (mmr, rerank_multilingual_v1, none) |
+| reranker_k | Integer | Number of results to rerank (1 to 100) |
+| diversity_bias | Float | Diversity bias for MMR reranker (0 to 1) |
+| max_results | Integer | Maximum number of search results to summarize (1 to 100) |
+| response_lang | String | Language code for the response (e.g., "eng", "auto") |
+| prompt | String | Prompt name for summarization |
+
+#### Outputs
+
+| Name | Type | Description |
+|--------|---------|-----------------------|
+| answer | Message | Generated RAG response|
\ No newline at end of file
diff --git a/docs/docs/Components/components-tools.md b/docs/docs/Components/components-tools.md
new file mode 100644
index 0000000000000000000000000000000000000000..0a6d95a5108b48248e59c0fe6f1a90f179943fcd
--- /dev/null
+++ b/docs/docs/Components/components-tools.md
@@ -0,0 +1,366 @@
+# Tools
+
+Tool components are used to interact with external services, APIs, and tools. They can be used to search the web, query databases, and perform other tasks.
+
+## Bing Search API
+
+This component allows you to call the Bing Search API.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------------------------|--------------|---------------------------------------|
+| bing_subscription_key | SecretString | Bing API subscription key |
+| input_value | String | Search query input |
+| bing_search_url | String | Custom Bing Search URL (optional) |
+| k | Integer | Number of search results to return |
+
+#### Outputs
+
+| Name | Type | Description |
+|---------|-----------|--------------------------------------|
+| results | List[Data]| List of search results |
+| tool | Tool | Bing Search tool for use in LangChain|
+
+## Calculator Tool
+
+This component creates a tool for performing basic arithmetic operations on a given expression.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------------|--------|--------------------------------------------------------------------|
+| expression | String | The arithmetic expression to evaluate (e.g., `4*4*(33/22)+12-20`). |
+
+#### Outputs
+
+| Name | Type | Description |
+|--------|------|-------------------------------------------------|
+| result | Tool | Calculator tool for use in LangChain |
+
+This component allows you to evaluate basic arithmetic expressions. It supports addition, subtraction, multiplication, division, and exponentiation. The tool uses a secure evaluation method that prevents the execution of arbitrary Python code.
+
+## Combinatorial Reasoner
+
+This component runs Icosa's Combinatorial Reasoning (CR) pipeline on an input to create an optimized prompt with embedded reasons. Sign up for access here: https://forms.gle/oWNv2NKjBNaqqvCx6
+
+### Parameters
+
+#### Inputs
+| Name | Display Name | Description |
+|------------------------|--------------|---------------------------------------|
+| prompt | Prompt | Input to run CR on |
+| openai_api_key | OpenAI API Key | OpenAI API key for authentication |
+| username | Username | Username for Icosa API authentication |
+| password | Password | Password for Icosa API authentication |
+| model_name | Model Name | OpenAI LLM to use for reason generation|
+
+#### Outputs
+
+| Name | Display Name | Description |
+|---------|-----------|--------------------------------------|
+| optimized_prompt | Optimized Prompt| A message object containing the optimized prompt |
+| reasons | Selected Reasons| A list of the selected reasons that are embedded in the optimized prompt|
+
+## Glean Search API
+
+This component allows you to call the Glean Search API.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------------------------|--------------|---------------------------------------|
+| glean_api_url | String | URL of the Glean API |
+| glean_access_token | SecretString | Access token for Glean API authentication |
+| query | String | Search query input |
+| page_size | Integer | Number of results per page (default: 10) |
+| request_options | Dict | Additional options for the API request (optional) |
+
+#### Outputs
+
+| Name | Type | Description |
+|---------|-----------|--------------------------------------|
+| results | List[Data]| List of search results |
+| tool | Tool | Glean Search tool for use in LangChain|
+
+## Google Search API
+
+This component allows you to call the Google Search API.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------------------------|--------------|---------------------------------------|
+| google_api_key | SecretString | Google API key for authentication |
+| google_cse_id | SecretString | Google Custom Search Engine ID |
+| input_value | String | Search query input |
+| k | Integer | Number of search results to return |
+
+#### Outputs
+
+| Name | Type | Description |
+|---------|-----------|--------------------------------------|
+| results | List[Data]| List of search results |
+| tool | Tool | Google Search tool for use in LangChain|
+
+## Google Serper API
+
+This component allows you to call the Serper.dev Google Search API.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------------------------|--------------|---------------------------------------|
+| serper_api_key | SecretString | API key for Serper.dev authentication |
+| input_value | String | Search query input |
+| k | Integer | Number of search results to return |
+
+#### Outputs
+
+| Name | Type | Description |
+|---------|-----------|--------------------------------------|
+| results | List[Data]| List of search results |
+| tool | Tool | Google Serper search tool for use in LangChain|
+
+## Python Code Structured Tool
+
+This component creates a structured tool from Python code using a dataclass.
+
+The component dynamically updates its configuration based on the provided Python code, allowing for custom function arguments and descriptions.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------------------------|--------------|---------------------------------------|
+| tool_code | String | Python code for the tool's dataclass |
+| tool_name | String | Name of the tool |
+| tool_description | String | Description of the tool |
+| return_direct | Boolean | Whether to return the function output directly |
+| tool_function | String | Selected function for the tool |
+| global_variables | Dict | Global variables or data for the tool |
+
+#### Outputs
+
+| Name | Type | Description |
+|-------------|-------|-----------------------------------------|
+| result_tool | Tool | Structured tool created from the Python code |
+
+## Python REPL Tool
+
+This component creates a Python REPL (Read-Eval-Print Loop) tool for executing Python code.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|-----------------|--------------|--------------------------------------------------------|
+| name | String | The name of the tool (default: "python_repl") |
+| description | String | A description of the tool's functionality |
+| global_imports | List[String] | List of modules to import globally (default: ["math"]) |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|--------------------------------------------|
+| tool | Tool | Python REPL tool for use in LangChain |
+
+## Retriever Tool
+
+This component creates a tool for interacting with a retriever in LangChain.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|-------------|---------------|---------------------------------------------|
+| retriever | BaseRetriever | The retriever to interact with |
+| name | String | The name of the tool |
+| description | String | A description of the tool's functionality |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|--------------------------------------------|
+| tool | Tool | Retriever tool for use in LangChain |
+
+## SearXNG Search Tool
+
+This component creates a tool for searching using SearXNG, a metasearch engine.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|-------------|--------------|---------------------------------------|
+| url | String | The URL of the SearXNG instance |
+| max_results | Integer | Maximum number of results to return |
+| categories | List[String] | Categories to search in |
+| language | String | Language for the search results |
+
+#### Outputs
+
+| Name | Type | Description |
+|-------------|------|--------------------------------------------|
+| result_tool | Tool | SearXNG search tool for use in LangChain |
+
+## Search API
+
+This component calls the `searchapi.io` API. It can be used to search the web for information.
+
+For more information, see the [SearchAPI documentation](https://www.searchapi.io/docs/google).
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|----------------|---------------------|-----------------------------------------------------|
+| engine | Engine | The search engine to use (default: "google") |
+| api_key | SearchAPI API Key | The API key for authenticating with SearchAPI |
+| input_value | Input | The search query or input for the API call |
+| search_params | Search parameters | Additional parameters for customizing the search |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|-----------------|------------------------------------------------------|
+| data | Search Results | List of Data objects containing search results |
+| tool | Search API Tool | A Tool object for use in LangChain workflows |
+
+## Serp Search API
+
+This component creates a tool for searching using the Serp API.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------------------|--------------|---------------------------------------------|
+| serpapi_api_key | SecretString | API key for Serp API authentication |
+| input_value | String | Search query input |
+| search_params | Dict | Additional search parameters (optional) |
+
+#### Outputs
+
+| Name | Type | Description |
+|---------|-----------|---------------------------------------------|
+| results | List[Data]| List of search results |
+| tool | Tool | Serp API search tool for use in LangChain |
+
+## Wikipedia API
+
+This component creates a tool for searching and retrieving information from Wikipedia.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|-------------------------|---------|-----------------------------------------------------------|
+| input_value | String | Search query input |
+| lang | String | Language code for Wikipedia (default: "en") |
+| k | Integer | Number of results to return |
+| load_all_available_meta | Boolean | Whether to load all available metadata (advanced) |
+| doc_content_chars_max | Integer | Maximum number of characters for document content (advanced)|
+
+#### Outputs
+
+| Name | Type | Description |
+|---------|-----------|---------------------------------------|
+| results | List[Data]| List of Wikipedia search results |
+| tool | Tool | Wikipedia search tool for use in LangChain |
+
+## Wolfram Alpha API
+
+This component creates a tool for querying the Wolfram Alpha API.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|-------------|--------------|--------------------------------|
+| input_value | String | Query input for Wolfram Alpha |
+| app_id | SecretString | Wolfram Alpha API App ID |
+
+#### Outputs
+
+| Name | Type | Description |
+|---------|-----------|------------------------------------------------|
+| results | List[Data]| List containing the Wolfram Alpha API response |
+| tool | Tool | Wolfram Alpha API tool for use in LangChain |
+
+## Yahoo Finance News Tool
+
+This component creates a tool for retrieving news from Yahoo Finance.
+
+### Parameters
+
+This component does not have any input parameters.
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|----------------------------------------------|
+| tool | Tool | Yahoo Finance News tool for use in LangChain |
+
+
+## Astra DB Tool
+
+The `Astra DB Tool` allows agents to connect to and query data from Astra DB Collections.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|-------------------|--------|----------------------------------------------------------------------------------------------------------------------------------|
+| Tool Name | String | The name used to reference the tool in the agent's prompt. |
+| Tool Description | String | A brief description of the tool. This helps the model decide when to use it. |
+| Collection Name | String | The name of the Astra DB collection to query. |
+| Token | SecretString | The authentication token for accessing Astra DB. |
+| API Endpoint | String | The Astra DB API endpoint. |
+| Projection Fields | String | The attributes to return, separated by commas. Default: "*". |
+| Tool Parameters | Dict | Parameters the model needs to fill to execute the tool. For required parameters, use an exclamation mark (e.g., "!customer_id"). |
+| Static Filters | Dict | Attribute-value pairs used to filter query results. |
+| Limit | String | The number of documents to return. |
+
+
+
+## Astra DB CQL Tool
+
+The `Astra DB CQL Tool` allows agents to query data from CQL Tables in Astra DB.
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|-------------------|--------|----------------------------------------------------------------------------------------------------------------------------------------------------|
+| Tool Name | String | The name used to reference the tool in the agent's prompt. |
+| Tool Description | String | A brief description of the tool to guide the model in using it. |
+| Keyspace | String | The name of the keyspace. |
+| Table Name | String | The name of the Astra DB CQL table to query. |
+| Token | SecretString | The authentication token for Astra DB. |
+| API Endpoint | String | The Astra DB API endpoint. |
+| Projection Fields | String | The attributes to return, separated by commas. Default: "*". |
+| Partition Keys | Dict | Required parameters that the model must fill to query the tool. |
+| Clustering Keys | Dict | Optional parameters the model can fill to refine the query. Required parameters should be marked with an exclamation mark (e.g., "!customer_id"). |
+| Static Filters | Dict | Attribute-value pairs used to filter query results. |
+| Limit | String | The number of records to return. |
diff --git a/docs/docs/Components/components-vector-stores.md b/docs/docs/Components/components-vector-stores.md
new file mode 100644
index 0000000000000000000000000000000000000000..da8c181d1ee785eec24e5388019d1ce34274238e
--- /dev/null
+++ b/docs/docs/Components/components-vector-stores.md
@@ -0,0 +1,621 @@
+---
+title: Vector Stores
+sidebar_position: 7
+slug: /components-vector-stores
+---
+# Vector Stores
+
+Vector databases are used to store and search for vectors. They can be used to store embeddings, search for similar vectors, and perform other vector operations.
+
+## Astra DB Vector Store
+
+This component implements a Vector Store using Astra DB with search capabilities.
+
+For more information, see the [DataStax documentation](https://docs.datastax.com/en/astra-db-serverless/databases/create-database.html).
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| collection_name | Collection Name | The name of the collection within Astra DB where the vectors will be stored (required) |
+| token | Astra DB Application Token | Authentication token for accessing Astra DB (required) |
+| api_endpoint | API Endpoint | API endpoint URL for the Astra DB service (required) |
+| search_input | Search Input | Query string for similarity search |
+| ingest_data | Ingest Data | Data to be ingested into the vector store |
+| namespace | Namespace | Optional namespace within Astra DB to use for the collection |
+| embedding_choice | Embedding Model or Astra Vectorize | Determines whether to use an Embedding Model or Astra Vectorize for the collection |
+| embedding | Embedding Model | Allows an embedding model configuration (when using Embedding Model) |
+| provider | Vectorize Provider | Provider for Astra Vectorize (when using Astra Vectorize) |
+| metric | Metric | Optional distance metric for vector comparisons |
+| batch_size | Batch Size | Optional number of data to process in a single batch |
+| setup_mode | Setup Mode | Configuration mode for setting up the vector store (options: "Sync", "Async", "Off", default: "Sync") |
+| pre_delete_collection | Pre Delete Collection | Boolean flag to determine whether to delete the collection before creating a new one |
+| number_of_results | Number of Results | Number of results to return in similarity search (default: 4) |
+| search_type | Search Type | Search type to use (options: "Similarity", "Similarity with score threshold", "MMR (Max Marginal Relevance)") |
+| search_score_threshold | Search Score Threshold | Minimum similarity score threshold for search results |
+| search_filter | Search Metadata Filter | Optional dictionary of filters to apply to the search query |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| vector_store | Vector Store | Built Astra DB vector store |
+| search_results | Search Results | Results of the similarity search as a list of Data objects |
+
+## Cassandra
+
+This component creates a Cassandra Vector Store with search capabilities.
+For more information, see the [Cassandra documentation](https://cassandra.apache.org/doc/latest/cassandra/vector-search/overview.html).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| database_ref | String | Contact points for the database or AstraDB database ID |
+| username | String | Username for the database (leave empty for AstraDB) |
+| token | SecretString | User password for the database or AstraDB token |
+| keyspace | String | Table Keyspace or AstraDB namespace |
+| table_name | String | Name of the table or AstraDB collection |
+| ttl_seconds | Integer | Time-to-live for added texts |
+| batch_size | Integer | Number of data to process in a single batch |
+| setup_mode | String | Configuration mode for setting up the Cassandra table |
+| cluster_kwargs | Dict | Additional keyword arguments for the Cassandra cluster |
+| search_query | String | Query for similarity search |
+| ingest_data | Data | Data to be ingested into the vector store |
+| embedding | Embeddings | Embedding function to use |
+| number_of_results | Integer | Number of results to return in search |
+| search_type | String | Type of search to perform |
+| search_score_threshold | Float | Minimum similarity score for search results |
+| search_filter | Dict | Metadata filters for search query |
+| body_search | String | Document textual search terms |
+| enable_body_search | Boolean | Flag to enable body search |
+
+#### Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| vector_store | Cassandra | Cassandra vector store instance |
+| search_results | List[Data] | Results of similarity search |
+
+## Cassandra Graph Vector Store
+
+This component implements a Cassandra Graph Vector Store with search capabilities.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| database_ref | Contact Points / Astra Database ID | Contact points for the database or AstraDB database ID (required) |
+| username | Username | Username for the database (leave empty for AstraDB) |
+| token | Password / AstraDB Token | User password for the database or AstraDB token (required) |
+| keyspace | Keyspace | Table Keyspace or AstraDB namespace (required) |
+| table_name | Table Name | The name of the table or AstraDB collection where vectors will be stored (required) |
+| setup_mode | Setup Mode | Configuration mode for setting up the Cassandra table (options: "Sync", "Off", default: "Sync") |
+| cluster_kwargs | Cluster arguments | Optional dictionary of additional keyword arguments for the Cassandra cluster |
+| search_query | Search Query | Query string for similarity search |
+| ingest_data | Ingest Data | Data to be ingested into the vector store (list of Data objects) |
+| embedding | Embedding | Embedding model to use |
+| number_of_results | Number of Results | Number of results to return in similarity search (default: 4) |
+| search_type | Search Type | Search type to use (options: "Traversal", "MMR traversal", "Similarity", "Similarity with score threshold", "MMR (Max Marginal Relevance)", default: "Traversal") |
+| depth | Depth of traversal | The maximum depth of edges to traverse (for "Traversal" or "MMR traversal" search types, default: 1) |
+| search_score_threshold | Search Score Threshold | Minimum similarity score threshold for search results (for "Similarity with score threshold" search type) |
+| search_filter | Search Metadata Filter | Optional dictionary of filters to apply to the search query |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| vector_store | Vector Store | Built Cassandra Graph vector store |
+| search_results | Search Results | Results of the similarity search as a list of Data objects |
+
+## Chroma DB
+
+This component creates a Chroma Vector Store with search capabilities.
+For more information, see the [Chroma documentation](https://docs.trychroma.com/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|------------------------------|---------------|--------------------------------------------------|
+| collection_name | String | The name of the Chroma collection. Default: "langflow". |
+| persist_directory | String | The directory to persist the Chroma database. |
+| search_query | String | The query to search for in the vector store. |
+| ingest_data | Data | The data to ingest into the vector store (list of Data objects). |
+| embedding | Embeddings | The embedding function to use for the vector store. |
+| chroma_server_cors_allow_origins | String | CORS allow origins for the Chroma server. |
+| chroma_server_host | String | Host for the Chroma server. |
+| chroma_server_http_port | Integer | HTTP port for the Chroma server. |
+| chroma_server_grpc_port | Integer | gRPC port for the Chroma server. |
+| chroma_server_ssl_enabled | Boolean | Enable SSL for the Chroma server. |
+| allow_duplicates | Boolean | Allow duplicate documents in the vector store. |
+| search_type | String | Type of search to perform: "Similarity" or "MMR". |
+| number_of_results | Integer | Number of results to return from the search. Default: 10. |
+| limit | Integer | Limit the number of records to compare when Allow Duplicates is False. |
+
+#### Outputs
+
+| Name | Type | Description |
+|----------------|---------------|--------------------------------|
+| vector_store | Chroma | Chroma vector store instance |
+| search_results | List[Data] | Results of similarity search |
+
+## Clickhouse
+
+This component implements a Clickhouse Vector Store with search capabilities.
+For more information, see the [Clickhouse documentation](https://clickhouse.com/docs/en/intro).
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| host | hostname | Clickhouse server hostname (required, default: "localhost") |
+| port | port | Clickhouse server port (required, default: 8123) |
+| database | database | Clickhouse database name (required) |
+| table | Table name | Clickhouse table name (required) |
+| username | The ClickHouse user name. | Username for authentication (required) |
+| password | The password for username. | Password for authentication (required) |
+| index_type | index_type | Type of the index (options: "annoy", "vector_similarity", default: "annoy") |
+| metric | metric | Metric to compute distance (options: "angular", "euclidean", "manhattan", "hamming", "dot", default: "angular") |
+| secure | Use https/TLS | Overrides inferred values from the interface or port arguments (default: false) |
+| index_param | Param of the index | Index parameters (default: "'L2Distance',100") |
+| index_query_params | index query params | Additional index query parameters |
+| search_query | Search Query | Query string for similarity search |
+| ingest_data | Ingest Data | Data to be ingested into the vector store |
+| embedding | Embedding | Embedding model to use |
+| number_of_results | Number of Results | Number of results to return in similarity search (default: 4) |
+| score_threshold | Score threshold | Threshold for similarity scores |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| vector_store | Vector Store | Built Clickhouse vector store |
+| search_results | Search Results | Results of the similarity search as a list of Data objects |
+
+## Couchbase
+
+This component creates a Couchbase Vector Store with search capabilities.
+For more information, see the [Couchbase documentation](https://docs.couchbase.com/home/index.html).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|-------------------------|---------------|--------------------------------------------------|
+| couchbase_connection_string | SecretString | Couchbase Cluster connection string (required). |
+| couchbase_username | String | Couchbase username (required). |
+| couchbase_password | SecretString | Couchbase password (required). |
+| bucket_name | String | Name of the Couchbase bucket (required). |
+| scope_name | String | Name of the Couchbase scope (required). |
+| collection_name | String | Name of the Couchbase collection (required). |
+| index_name | String | Name of the Couchbase index (required). |
+| search_query | String | The query to search for in the vector store. |
+| ingest_data | Data | The data to ingest into the vector store (list of Data objects). |
+| embedding | Embeddings | The embedding function to use for the vector store. |
+| number_of_results | Integer | Number of results to return from the search. Default: 4 (advanced). |
+
+#### Outputs
+
+| Name | Type | Description |
+|----------------|------------------------|--------------------------------|
+| vector_store | CouchbaseVectorStore | A Couchbase vector store instance configured with the specified parameters. |
+
+## FAISS
+
+This component creates a FAISS Vector Store with search capabilities.
+For more information, see the [FAISS documentation](https://faiss.ai/index.html).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|---------------------------|---------------|--------------------------------------------------|
+| index_name | String | The name of the FAISS index. Default: "langflow_index". |
+| persist_directory | String | Path to save the FAISS index. It will be relative to where Langflow is running. |
+| search_query | String | The query to search for in the vector store. |
+| ingest_data | Data | The data to ingest into the vector store (list of Data objects or documents). |
+| allow_dangerous_deserialization | Boolean | Set to True to allow loading pickle files from untrusted sources. Default: True (advanced). |
+| embedding | Embeddings | The embedding function to use for the vector store. |
+| number_of_results | Integer | Number of results to return from the search. Default: 4 (advanced). |
+
+#### Outputs
+
+| Name | Type | Description |
+|----------------|------------------------|--------------------------------|
+| vector_store | FAISS | A FAISS vector store instance configured with the specified parameters. |
+
+## Hyper-Converged Database (HCD) Vector Store
+
+This component implements a Vector Store using HCD.
+
+### Parameters
+
+#### Inputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| collection_name | Collection Name | The name of the collection within HCD where the vectors will be stored (required) |
+| username | HCD Username | Authentication username for accessing HCD (default: "hcd-superuser", required) |
+| password | HCD Password | Authentication password for accessing HCD (required) |
+| api_endpoint | HCD API Endpoint | API endpoint URL for the HCD service (required) |
+| search_input | Search Input | Query string for similarity search |
+| ingest_data | Ingest Data | Data to be ingested into the vector store |
+| namespace | Namespace | Optional namespace within HCD to use for the collection (default: "default_namespace") |
+| ca_certificate | CA Certificate | Optional CA certificate for TLS connections to HCD |
+| metric | Metric | Optional distance metric for vector comparisons (options: "cosine", "dot_product", "euclidean") |
+| batch_size | Batch Size | Optional number of data to process in a single batch |
+| bulk_insert_batch_concurrency | Bulk Insert Batch Concurrency | Optional concurrency level for bulk insert operations |
+| bulk_insert_overwrite_concurrency | Bulk Insert Overwrite Concurrency | Optional concurrency level for bulk insert operations that overwrite existing data |
+| bulk_delete_concurrency | Bulk Delete Concurrency | Optional concurrency level for bulk delete operations |
+| setup_mode | Setup Mode | Configuration mode for setting up the vector store (options: "Sync", "Async", "Off", default: "Sync") |
+| pre_delete_collection | Pre Delete Collection | Boolean flag to determine whether to delete the collection before creating a new one |
+| metadata_indexing_include | Metadata Indexing Include | Optional list of metadata fields to include in the indexing |
+| embedding | Embedding or Astra Vectorize | Allows either an embedding model or an Astra Vectorize configuration |
+| metadata_indexing_exclude | Metadata Indexing Exclude | Optional list of metadata fields to exclude from the indexing |
+| collection_indexing_policy | Collection Indexing Policy | Optional dictionary defining the indexing policy for the collection |
+| number_of_results | Number of Results | Number of results to return in similarity search (default: 4) |
+| search_type | Search Type | Search type to use (options: "Similarity", "Similarity with score threshold", "MMR (Max Marginal Relevance)", default: "Similarity") |
+| search_score_threshold | Search Score Threshold | Minimum similarity score threshold for search results (default: 0) |
+| search_filter | Search Metadata Filter | Optional dictionary of filters to apply to the search query |
+
+#### Outputs
+
+| Name | Display Name | Info |
+|------|--------------|------|
+| vector_store | Vector Store | Built HCD vector store instance |
+| search_results | Search Results | Results of similarity search as a list of Data objects |
+
+## Milvus
+
+This component creates a Milvus Vector Store with search capabilities.
+For more information, see the [Milvus documentation](https://milvus.io/docs).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|-------------------------|---------------|--------------------------------------------------|
+| collection_name | String | Name of the Milvus collection |
+| collection_description | String | Description of the Milvus collection |
+| uri | String | Connection URI for Milvus |
+| password | SecretString | Password for Milvus |
+| username | SecretString | Username for Milvus |
+| batch_size | Integer | Number of data to process in a single batch |
+| search_query | String | Query for similarity search |
+| ingest_data | Data | Data to be ingested into the vector store |
+| embedding | Embeddings | Embedding function to use |
+| number_of_results | Integer | Number of results to return in search |
+| search_type | String | Type of search to perform |
+| search_score_threshold | Float | Minimum similarity score for search results |
+| search_filter | Dict | Metadata filters for search query |
+| setup_mode | String | Configuration mode for setting up the vector store |
+| vector_dimensions | Integer | Number of dimensions of the vectors |
+| pre_delete_collection | Boolean | Whether to delete the collection before creating a new one |
+
+#### Outputs
+
+| Name | Type | Description |
+|----------------|------------------------|--------------------------------|
+| vector_store | Milvus | A Milvus vector store instance configured with the specified parameters. |
+
+## MongoDB Atlas
+
+This component creates a MongoDB Atlas Vector Store with search capabilities.
+For more information, see the [MongoDB Atlas documentation](https://www.mongodb.com/docs/atlas/atlas-vector-search/tutorials/vector-search-quick-start/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+| ------------------------ | ------------ | ----------------------------------------- |
+| mongodb_atlas_cluster_uri | SecretString | MongoDB Atlas Cluster URI |
+| db_name | String | Database name |
+| collection_name | String | Collection name |
+| index_name | String | Index name |
+| search_query | String | Query for similarity search |
+| ingest_data | Data | Data to be ingested into the vector store |
+| embedding | Embeddings | Embedding function to use |
+| number_of_results | Integer | Number of results to return in search |
+
+#### Outputs
+
+| Name | Type | Description |
+| ------------- | ---------------------- | ----------------------------------------- |
+| vector_store | MongoDBAtlasVectorSearch| MongoDB Atlas vector store instance |
+| search_results| List[Data] | Results of similarity search |
+
+
+## PGVector
+
+This component creates a PGVector Vector Store with search capabilities.
+For more information, see the [PGVector documentation](https://github.com/pgvector/pgvector).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+| --------------- | ------------ | ----------------------------------------- |
+| pg_server_url | SecretString | PostgreSQL server connection string |
+| collection_name | String | Table name for the vector store |
+| search_query | String | Query for similarity search |
+| ingest_data | Data | Data to be ingested into the vector store |
+| embedding | Embeddings | Embedding function to use |
+| number_of_results | Integer | Number of results to return in search |
+
+#### Outputs
+
+| Name | Type | Description |
+| ------------- | ----------- | ----------------------------------------- |
+| vector_store | PGVector | PGVector vector store instance |
+| search_results| List[Data] | Results of similarity search |
+
+
+## Pinecone
+
+This component creates a Pinecone Vector Store with search capabilities.
+For more information, see the [Pinecone documentation](https://docs.pinecone.io/home).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+| ----------------- | ------------ | ----------------------------------------- |
+| index_name | String | Name of the Pinecone index |
+| namespace | String | Namespace for the index |
+| distance_strategy | String | Strategy for calculating distance between vectors |
+| pinecone_api_key | SecretString | API key for Pinecone |
+| text_key | String | Key in the record to use as text |
+| search_query | String | Query for similarity search |
+| ingest_data | Data | Data to be ingested into the vector store |
+| embedding | Embeddings | Embedding function to use |
+| number_of_results | Integer | Number of results to return in search |
+
+#### Outputs
+
+| Name | Type | Description |
+| ------------- | ---------- | ----------------------------------------- |
+| vector_store | Pinecone | Pinecone vector store instance |
+| search_results| List[Data] | Results of similarity search |
+
+
+## Qdrant
+
+This component creates a Qdrant Vector Store with search capabilities.
+For more information, see the [Qdrant documentation](https://qdrant.tech/documentation/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+| -------------------- | ------------ | ----------------------------------------- |
+| collection_name | String | Name of the Qdrant collection |
+| host | String | Qdrant server host |
+| port | Integer | Qdrant server port |
+| grpc_port | Integer | Qdrant gRPC port |
+| api_key | SecretString | API key for Qdrant |
+| prefix | String | Prefix for Qdrant |
+| timeout | Integer | Timeout for Qdrant operations |
+| path | String | Path for Qdrant |
+| url | String | URL for Qdrant |
+| distance_func | String | Distance function for vector similarity |
+| content_payload_key | String | Key for content payload |
+| metadata_payload_key | String | Key for metadata payload |
+| search_query | String | Query for similarity search |
+| ingest_data | Data | Data to be ingested into the vector store |
+| embedding | Embeddings | Embedding function to use |
+| number_of_results | Integer | Number of results to return in search |
+
+#### Outputs
+
+| Name | Type | Description |
+| ------------- | -------- | ----------------------------------------- |
+| vector_store | Qdrant | Qdrant vector store instance |
+| search_results| List[Data] | Results of similarity search |
+
+
+## Redis
+
+This component creates a Redis Vector Store with search capabilities.
+For more information, see the [Redis documentation](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/vectors/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+| ----------------- | ------------ | ----------------------------------------- |
+| redis_server_url | SecretString | Redis server connection string |
+| redis_index_name | String | Name of the Redis index |
+| code | String | Custom code for Redis (advanced) |
+| schema | String | Schema for Redis index |
+| search_query | String | Query for similarity search |
+| ingest_data | Data | Data to be ingested into the vector store |
+| number_of_results | Integer | Number of results to return in search |
+| embedding | Embeddings | Embedding function to use |
+
+#### Outputs
+
+| Name | Type | Description |
+| ------------- | -------- | ----------------------------------------- |
+| vector_store | Redis | Redis vector store instance |
+| search_results| List[Data]| Results of similarity search |
+
+
+## Supabase
+
+This component creates a connection to a Supabase Vector Store with search capabilities.
+For more information, see the [Supabase documentation](https://supabase.com/docs/guides/ai).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+| ------------------- | ------------ | ----------------------------------------- |
+| supabase_url | String | URL of the Supabase instance |
+| supabase_service_key| SecretString | Service key for Supabase authentication |
+| table_name | String | Name of the table in Supabase |
+| query_name | String | Name of the query to use |
+| search_query | String | Query for similarity search |
+| ingest_data | Data | Data to be ingested into the vector store |
+| embedding | Embeddings | Embedding function to use |
+| number_of_results | Integer | Number of results to return in search |
+
+#### Outputs
+
+| Name | Type | Description |
+| ------------- | ------------------ | ----------------------------------------- |
+| vector_store | SupabaseVectorStore | Supabase vector store instance |
+| search_results| List[Data] | Results of similarity search |
+
+
+## Upstash
+
+This component creates an Upstash Vector Store with search capabilities.
+For more information, see the [Upstash documentation](https://upstash.com/docs/introduction).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+| --------------- | ------------ | ----------------------------------------- |
+| index_url | String | The URL of the Upstash index |
+| index_token | SecretString | The token for the Upstash index |
+| text_key | String | The key in the record to use as text |
+| namespace | String | Namespace for the index |
+| search_query | String | Query for similarity search |
+| metadata_filter | String | Filters documents by metadata |
+| ingest_data | Data | Data to be ingested into the vector store |
+| embedding | Embeddings | Embedding function to use (optional) |
+| number_of_results | Integer | Number of results to return in search |
+
+#### Outputs
+
+| Name | Type | Description |
+| ------------- | ---------------- | ----------------------------------------- |
+| vector_store | UpstashVectorStore| Upstash vector store instance |
+| search_results| List[Data] | Results of similarity search |
+
+
+## Vectara
+
+This component creates a Vectara Vector Store with search capabilities.
+For more information, see the [Vectara documentation](https://docs.vectara.com/docs/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+| ---------------- | ------------ | ----------------------------------------- |
+| vectara_customer_id | String | Vectara customer ID |
+| vectara_corpus_id | String | Vectara corpus ID |
+| vectara_api_key | SecretString | Vectara API key |
+| embedding | Embeddings | Embedding function to use (optional) |
+| ingest_data | List[Document/Data] | Data to be ingested into the vector store |
+| search_query | String | Query for similarity search |
+| number_of_results | Integer | Number of results to return in search |
+
+#### Outputs
+
+| Name | Type | Description |
+| ------------- | ----------------- | ----------------------------------------- |
+| vector_store | VectaraVectorStore | Vectara vector store instance |
+| search_results| List[Data] | Results of similarity search |
+
+## Vectara Search
+
+This component searches a Vectara Vector Store for documents based on the provided input.
+For more information, see the [Vectara documentation](https://docs.vectara.com/docs/).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|---------------------|--------------|-------------------------------------------|
+| search_type | String | Type of search, such as "Similarity" or "MMR" |
+| input_value | String | Search query |
+| vectara_customer_id | String | Vectara customer ID |
+| vectara_corpus_id | String | Vectara corpus ID |
+| vectara_api_key | SecretString | Vectara API key |
+| files_url | List[String] | Optional URLs for file initialization |
+
+#### Outputs
+
+| Name | Type | Description |
+|----------------|------------|----------------------------|
+| search_results | List[Data] | Results of similarity search |
+
+## Weaviate
+
+This component facilitates a Weaviate Vector Store setup, optimizing text and document indexing and retrieval.
+For more information, see the [Weaviate Documentation](https://weaviate.io/developers/weaviate).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|---------------|--------------|-------------------------------------------|
+| weaviate_url | String | Default instance URL |
+| search_by_text| Boolean | Indicates whether to search by text |
+| api_key | SecretString | Optional API key for authentication |
+| index_name | String | Optional index name |
+| text_key | String | Default text extraction key |
+| input | Document | Document or record |
+| embedding | Embeddings | Model used |
+| attributes | List[String] | Optional additional attributes |
+
+#### Outputs
+
+| Name | Type | Description |
+|--------------|------------------|-------------------------------|
+| vector_store | WeaviateVectorStore | Weaviate vector store instance |
+
+**Note:** Ensure the Weaviate instance is running and accessible. Verify that the API key, index name, text key, and attributes are set correctly.
+
+## Weaviate Search
+
+This component searches a Weaviate Vector Store for documents similar to the input.
+For more information, see the [Weaviate Documentation](https://weaviate.io/developers/weaviate).
+
+### Parameters
+
+#### Inputs
+
+| Name | Type | Description |
+|---------------|--------------|-------------------------------------------|
+| search_type | String | Type of search, such as "Similarity" or "MMR" |
+| input_value | String | Search query |
+| weaviate_url | String | Default instance URL |
+| search_by_text| Boolean | Indicates whether to search by text |
+| api_key | SecretString | Optional API key for authentication |
+| index_name | String | Optional index name |
+| text_key | String | Default text extraction key |
+| embedding | Embeddings | Model used |
+| attributes | List[String] | Optional additional attributes |
+
+#### Outputs
+
+| Name | Type | Description |
+|----------------|------------|----------------------------|
+| search_results | List[Data] | Results of similarity search |
diff --git a/docs/docs/Configuration/_category_.json b/docs/docs/Configuration/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..27519db0b01cf2a7c6120110c8358c5a7dae15e4
--- /dev/null
+++ b/docs/docs/Configuration/_category_.json
@@ -0,0 +1 @@
+{"position":8, "label":"Configuration"}
\ No newline at end of file
diff --git a/docs/docs/Configuration/configuration-api-keys.md b/docs/docs/Configuration/configuration-api-keys.md
new file mode 100644
index 0000000000000000000000000000000000000000..68ce60752158102f6b8e280119494708ca82e9e9
--- /dev/null
+++ b/docs/docs/Configuration/configuration-api-keys.md
@@ -0,0 +1,193 @@
+---
+title: API keys
+sidebar_position: 1
+slug: /configuration-api-keys
+---
+
+Langflow provides an API key functionality that allows users to access their individual components and flows without traditional login authentication. The API key is a user-specific token that can be included in the request header, query parameter, or as a command line argument to authenticate API calls. This documentation outlines how to generate, use, and manage API keys in Langflow.
+
+:::info
+
+The default user and password are set using the LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD environment variables. The default values are `langflow` and `langflow`, respectively.
+
+:::
+
+## Generate an API key
+
+Generate a user-specific token to use with Langflow.
+
+### Generate an API key with the Langflow UI
+
+1. Click your user icon and select **Settings**.
+2. Click **Langflow API**, and then click **Add New**.
+3. Name your key, and then click **Create Secret Key**.
+4. Copy the API key and store it in a secure location.
+
+### Generate an API key with the Langflow CLI
+
+```shell
+langflow api-key
+# or
+python -m langflow api-key
+╭─────────────────────────────────────────────────────────────────────╮
+│ API Key Created Successfully:                                       │
+│                                                                     │
+│ sk-O0elzoWID1izAH8RUKrnnvyyMwIzHi2Wk-uXWoNJ2Ro                      │
+│                                                                     │
+│ This is the only time the API key will be displayed.                │
+│ Make sure to store it in a secure location.                         │
+│                                                                     │
+│ The API key has been copied to your clipboard. Cmd + V to paste it. │
+╰─────────────────────────────────────────────────────────────────────╯
+
+```
+
+## Authenticate requests with the Langflow API key
+
+Include your API key in API requests to authenticate requests to Langflow.
+
+### Include the API key in the HTTP header
+
+To use the API key when making API requests with cURL, include the API key in the HTTP header.
+
+```shell
+curl -X POST \
+ "http://127.0.0.1:7860/api/v1/run/YOUR_FLOW_ID?stream=false" \
+ -H 'Content-Type: application/json' \
+ -H 'x-api-key: YOUR_API_KEY' \
+ -d '{"inputs": {"text":""}, "tweaks": {}}'
+```
+
+To instead pass the API key as a query parameter, do the following:
+
+```shell
+curl -X POST \
+ "http://127.0.0.1:7860/api/v1/run/YOUR_FLOW_ID?x-api-key=YOUR_API_KEY&stream=false" \
+ -H 'Content-Type: application/json' \
+ -d '{"inputs": {"text":""}, "tweaks": {}}'
+```
+
+To use the API key when making API requests with the Python `requests` library, include the API key as a variable string.
+
+```python
+import argparse
+import json
+from argparse import RawTextHelpFormatter
+import requests
+from typing import Optional
+import warnings
+try:
+ from langflow.load import upload_file
+except ImportError:
+ warnings.warn("Langflow provides a function to help you upload files to the flow. Please install langflow to use it.")
+ upload_file = None
+
+BASE_API_URL = "http://127.0.0.1:7860"
+FLOW_ID = "YOUR_FLOW_ID"
+ENDPOINT = "" # You can set a specific endpoint name in the flow settings
+
+# You can tweak the flow by adding a tweaks dictionary
+# e.g {"OpenAI-XXXXX": {"model_name": "gpt-4"}}
+TWEAKS = {
+ "ChatInput-8a86T": {},
+ "Prompt-pKfl9": {},
+ "ChatOutput-WcGpD": {},
+ "OpenAIModel-5UyvQ": {}
+}
+
+def run_flow(message: str,
+ endpoint: str,
+ output_type: str = "chat",
+ input_type: str = "chat",
+ tweaks: Optional[dict] = None,
+ api_key: Optional[str] = None) -> dict:
+ """
+ Run a flow with a given message and optional tweaks.
+
+ :param message: The message to send to the flow
+ :param endpoint: The ID or the endpoint name of the flow
+ :param tweaks: Optional tweaks to customize the flow
+ :return: The JSON response from the flow
+ """
+ api_url = f"{BASE_API_URL}/api/v1/run/{endpoint}"
+
+ payload = {
+ "input_value": message,
+ "output_type": output_type,
+ "input_type": input_type,
+ }
+ headers = None
+ if tweaks:
+ payload["tweaks"] = tweaks
+ if api_key:
+ headers = {"x-api-key": api_key}
+ response = requests.post(api_url, json=payload, headers=headers)
+ return response.json()
+
+def main():
+ parser = argparse.ArgumentParser(description="""Run a flow with a given message and optional tweaks.
+Run it like: python .py "your message here" --endpoint "your_endpoint" --tweaks '{"key": "value"}'""",
+ formatter_class=RawTextHelpFormatter)
+ parser.add_argument("message", type=str, help="The message to send to the flow")
+ parser.add_argument("--endpoint", type=str, default=ENDPOINT or FLOW_ID, help="The ID or the endpoint name of the flow")
+ parser.add_argument("--tweaks", type=str, help="JSON string representing the tweaks to customize the flow", default=json.dumps(TWEAKS))
+ parser.add_argument("--api_key", type=str, help="API key for authentication", default=None)
+ parser.add_argument("--output_type", type=str, default="chat", help="The output type")
+ parser.add_argument("--input_type", type=str, default="chat", help="The input type")
+ parser.add_argument("--upload_file", type=str, help="Path to the file to upload", default=None)
+ parser.add_argument("--components", type=str, help="Components to upload the file to", default=None)
+
+ args = parser.parse_args()
+ try:
+ tweaks = json.loads(args.tweaks)
+ except json.JSONDecodeError:
+ raise ValueError("Invalid tweaks JSON string")
+
+ if args.upload_file:
+ if not upload_file:
+ raise ImportError("Langflow is not installed. Please install it to use the upload_file function.")
+ elif not args.components:
+ raise ValueError("You need to provide the components to upload the file to.")
+ tweaks = upload_file(file_path=args.upload_file, host=BASE_API_URL, flow_id=args.endpoint, components=[args.components], tweaks=tweaks)
+
+ response = run_flow(
+ message=args.message,
+ endpoint=args.endpoint,
+ output_type=args.output_type,
+ input_type=args.input_type,
+ tweaks=tweaks,
+ api_key=args.api_key
+ )
+
+ print(json.dumps(response, indent=2))
+
+if __name__ == "__main__":
+ main()
+
+```
+
+To pass the API key to your script with a command line argument, do the following:
+
+```shell
+python your_script.py "YOUR_INPUT_MESSAGE" --api_key "YOUR_API_KEY"
+```
+
+## Security considerations
+
+- **Visibility**: For security reasons, the API key cannot be retrieved again through the UI.
+- **Scope**: The key allows access only to the flows and components of the specific user to whom it was issued.
+
+## Custom API endpoint
+
+To choose a custom name for your API endpoint, select **Project Settings** > **Endpoint Name** and name your endpoint.
+
+## Revoke an API key
+
+To revoke an API key, delete it from the list of keys in the **Settings** menu.
+
+1. Click your user icon and select **Settings**.
+2. Click **Langflow API**.
+3. Select the keys you want to delete and click the trash can icon.
+
+This action immediately invalidates the key and prevents it from being used again.
+
diff --git a/docs/docs/Configuration/configuration-authentication.md b/docs/docs/Configuration/configuration-authentication.md
new file mode 100644
index 0000000000000000000000000000000000000000..f697cd11dda4b1f3a83a7b921cafa59c0a7346fb
--- /dev/null
+++ b/docs/docs/Configuration/configuration-authentication.md
@@ -0,0 +1,119 @@
+---
+title: Authentication
+sidebar_position: 0
+slug: /configuration-authentication
+---
+
+The login functionality in Langflow serves to authenticate users and protect sensitive routes in the application.
+
+Starting from version 0.5, Langflow introduces an enhanced login mechanism that is governed by a few environment variables, enabling new security features.
+
+## Create a superuser and new users in Langflow
+
+Learn how to create a new superuser, log in to Langflow, and add new users.
+
+1. Create a `.env` file and open it in your preferred editor.
+
+2. Add the following environment variables to your file.
+
+```bash
+LANGFLOW_AUTO_LOGIN=False
+LANGFLOW_SUPERUSER=admin
+LANGFLOW_SUPERUSER_PASSWORD=securepassword
+LANGFLOW_SECRET_KEY=randomly_generated_secure_key
+LANGFLOW_NEW_USER_IS_ACTIVE=False
+```
+
+For more information, see [Authentication configuration values](#values).
+
+:::tip
+The Langflow project includes a [`.env.example`](https://github.com/langflow-ai/langflow/blob/main/.env.example) file to help you get started.
+You can copy the contents of this file into your own `.env` file and replace the example values with your own preferred settings.
+:::
+
+3. Save your `.env` file.
+4. Run Langflow with the configured environment variables.
+
+```bash
+python -m langflow run --env-file .env
+```
+
+5. Sign in with your username `admin` and password `securepassword`.
+6. To open the **Admin Page**, click your user profile image, and then select **Admin Page**.
+ You can also go to `http://127.0.0.1:7861/admin`.
+7. To add a new user, click **New User**, and then add the **Username** and **Password**.
+8. To activate the new user, select **Active**.
+ The user can only sign in if you select them as **Active**.
+9. To give the user `superuser` privileges, click **Superuser**.
+10. Click **Save**.
+11. To confirm your new user has been created, sign out of Langflow, and then sign back in using your new **Username** and **Password**.
+
+## Manage Superuser with the Langflow CLI
+
+Langflow provides a command-line utility for interactively creating superusers:
+
+1. Enter the CLI command:
+
+```bash
+langflow superuser
+```
+
+2. Langflow prompts you for a **Username** and **Password**:
+
+```
+langflow superuser
+Username: new_superuser_1
+Password:
+Default folder created successfully.
+Superuser created successfully.
+```
+
+3. To confirm your new superuser was created successfully, go to the **Admin Page** at `http://127.0.0.1:7861/admin`.
+
+## Authentication configuration values {#values}
+
+The following table lists the available authentication configuration variables, their descriptions, and default values:
+
+| Variable | Description | Default |
+| ----------------------------- | ------------------------------------- | ------- |
+| `LANGFLOW_AUTO_LOGIN` | Enables automatic login | `True` |
+| `LANGFLOW_SUPERUSER` | Superuser username | - |
+| `LANGFLOW_SUPERUSER_PASSWORD` | Superuser password | - |
+| `LANGFLOW_SECRET_KEY` | Key for encrypting superuser password | - |
+| `LANGFLOW_NEW_USER_IS_ACTIVE` | Automatically activates new users | `False` |
+
+### LANGFLOW_AUTO_LOGIN
+
+By default, this variable is set to `True`. When enabled, Langflow operates as it did in versions prior to 0.5, including automatic login without requiring explicit user authentication.
+
+To disable automatic login and enforce user authentication:
+
+```shell
+LANGFLOW_AUTO_LOGIN=False
+```
+
+### LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD
+
+These environment variables are only relevant when LANGFLOW_AUTO_LOGIN is set to False. They specify the username and password for the superuser, which is essential for administrative tasks.
+To create a superuser manually:
+
+```bash
+LANGFLOW_SUPERUSER=admin
+LANGFLOW_SUPERUSER_PASSWORD=securepassword
+```
+
+### LANGFLOW_SECRET_KEY
+
+This environment variable holds a secret key used for encrypting the superuser's password. Make sure to set this to a secure, randomly generated string.
+
+```bash
+LANGFLOW_SECRET_KEY=randomly_generated_secure_key
+```
+
+### LANGFLOW_NEW_USER_IS_ACTIVE
+
+By default, this variable is set to `False`. When enabled, new users are automatically activated and can log in without requiring explicit activation by the superuser.
+
+```bash
+LANGFLOW_NEW_USER_IS_ACTIVE=False
+```
\ No newline at end of file
diff --git a/docs/docs/Configuration/configuration-auto-saving.md b/docs/docs/Configuration/configuration-auto-saving.md
new file mode 100644
index 0000000000000000000000000000000000000000..b7283641a30778892c8416e35cbac07cf8622b64
--- /dev/null
+++ b/docs/docs/Configuration/configuration-auto-saving.md
@@ -0,0 +1,48 @@
+---
+title: Auto-saving
+sidebar_position: 6
+slug: /configuration-auto-save
+---
+
+Langflow supports both manual and auto-saving functionality.
+
+## Auto-saving {#auto-saving}
+
+When Langflow is in auto-saving mode, all changes are saved automatically. Auto-save progress is indicated in the left side of the top bar.
+
+* When a flow is being saved, a loading icon indicates that the flow is being saved in the database.
+
+* If you try to exit the flow page before auto-save completes, you are prompted to confirm you want to exit before the flow has saved.
+
+* When the flow has successfully saved, click **Exit**.
+
+## Disable auto-saving {#environment}
+
+To disable auto-saving,
+
+1. Set an environment variable in your `.env` file.
+
+```env
+LANGFLOW_AUTO_SAVING=false
+```
+
+2. Start Langflow with the values from your `.env` file.
+
+```shell
+python -m langflow run --env-file .env
+```
+
+Alternatively, disable auto-saving by passing the `--no-auto-saving` flag at startup.
+
+```shell
+python -m langflow --no-auto-saving
+```
+
+## Save a flow manually {#manual-saving}
+
+When auto-saving is disabled, you will need to manually save your flow when making changes.
+
+To manually save your flow, click the **Save** button or enter Ctrl+S or Command+S.
+
+If you try to exit after making changes and not saving, a confirmation dialog appears.
+
diff --git a/docs/docs/Configuration/configuration-backend-only.md b/docs/docs/Configuration/configuration-backend-only.md
new file mode 100644
index 0000000000000000000000000000000000000000..1abac1cf23a85683081afc8e5e1788b866f12dce
--- /dev/null
+++ b/docs/docs/Configuration/configuration-backend-only.md
@@ -0,0 +1,123 @@
+---
+title: Run Langflow in backend-only mode
+sidebar_position: 4
+slug: /configuration-backend-only
+---
+
+Langflow can run in `--backend-only` mode to expose a Langflow app as an API endpoint, without running the frontend UI.
+This is also known as "headless" mode. Running Langflow without the frontend is useful for automation, testing, and situations where you just need to serve a flow as a workload without creating a new flow in the UI.
+
+To run Langflow in backend-only mode, pass the `--backend-only` flag at startup.
+
+```shell
+python3 -m langflow run --backend-only
+```
+
+The terminal prints `Welcome to ⛓ Langflow`, and Langflow will now serve requests to its API without the frontend running.
+
+## Set up a basic prompting flow in backend-only mode
+
+This example shows you how to set up a [Basic Prompting flow](/starter-projects-basic-prompting) as an endpoint in backend-only mode.
+However, you can use these same instructions as guidelines for using any type of flow in backend-only mode.
+
+### Prerequisites
+
+- [Langflow is installed](/get-started-installation)
+- [You have an OpenAI API key](https://platform.openai.com/)
+- [You have a Langflow Basic Prompting flow](/starter-projects-basic-prompting)
+
+### Get your flow's ID
+
+This guide assumes you have created a [Basic Prompting flow](/starter-projects-basic-prompting) or have another working flow available.
+
+1. In the Langflow UI, click **API**.
+2. Click **curl** > **Copy code** to copy the curl command.
+This command will POST input to your flow's endpoint.
+It will look something like this:
+
+```text
+curl -X POST \
+ "http://127.0.0.1:7861/api/v1/run/fff8dcaa-f0f6-4136-9df0-b7cb38de42e0?stream=false" \
+ -H 'Content-Type: application/json'\
+ -d '{"input_value": "message",
+ "output_type": "chat",
+ "input_type": "chat",
+ "tweaks": {
+ "ChatInput-8a86T": {},
+ "Prompt-pKfl9": {},
+ "ChatOutput-WcGpD": {},
+ "OpenAIModel-5UyvQ": {}
+}}'
+```
+
+The flow ID in this example is `fff8dcaa-f0f6-4136-9df0-b7cb38de42e0`, a UUID generated by Langflow and used in the endpoint URL.
+See [API](/configuration-api-keys) to change the endpoint.
+
+3. To stop Langflow, press **Ctrl+C**.
+
+### Start Langflow in backend-only mode
+
+1. Start Langflow in backend-only mode.
+
+```shell
+python3 -m langflow run --backend-only
+```
+
+The terminal prints `Welcome to ⛓ Langflow`.
+Langflow is now serving requests to its API.
+
+2. Run the curl code you copied from the UI.
+You should get a result like this:
+
+```shell
+{"session_id":"ef7e0554-69e5-4e3e-ab29-ee83bcd8d9ef:bf81d898868ac87e1b4edbd96c131c5dee801ea2971122cc91352d144a45b880","outputs":[{"inputs":{"input_value":"hi, are you there?"},"outputs":[{"results":{"result":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?"},"artifacts":{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI"},"messages":[{"message":"Arrr, ahoy matey! Aye, I be here. What be ye needin', me hearty?","sender":"Machine","sender_name":"AI","component_id":"ChatOutput-ktwdw"}],"component_display_name":"Chat Output","component_id":"ChatOutput-ktwdw","used_frozen_result":false}]}]}%
+```
+
+This confirms Langflow is receiving your POST request, running the flow, and returning the result without running the frontend.
+
+You can interact with this endpoint using the other options in the **API** menu, including the Python and Javascript APIs.
+
+### Query the Langflow endpoint with a Python script
+
+Using the same flow ID, run a Python sample script to send a query and get a prettified JSON response back.
+
+1. Create a Python file and name it `langflow_api_demo.py`.
+
+```python
+import requests
+import json
+
+def query_langflow(message):
+ url = "http://127.0.0.1:7861/api/v1/run/fff8dcaa-f0f6-4136-9df0-b7cb38de42e0"
+ headers = {"Content-Type": "application/json"}
+ data = {"input_value": message}
+
+ response = requests.post(url, headers=headers, json=data)
+ return response.json()
+
+user_input = input("Enter your message: ")
+result = query_langflow(user_input)
+
+print(json.dumps(result, indent=2))
+```
+2. Run the script.
+
+```shell
+python langflow_api_demo.py
+```
+
+3. Enter your message when prompted.
+You will get a prettified JSON response back containing a response to your message.
+
+### Configure host and ports in backend-only mode
+
+To change the host and port, pass the values as additional flags.
+
+```shell
+python -m langflow run --host 127.0.0.1 --port 7860 --backend-only
+```
+
+
+
+
+
diff --git a/docs/docs/Configuration/configuration-cli.md b/docs/docs/Configuration/configuration-cli.md
new file mode 100644
index 0000000000000000000000000000000000000000..205604103d04c913ad705b53073ae3a6d50925d4
--- /dev/null
+++ b/docs/docs/Configuration/configuration-cli.md
@@ -0,0 +1,170 @@
+---
+title: Langflow CLI
+sidebar_position: 2
+slug: /configuration-cli
+---
+
+# Langflow CLI
+
+The Langflow command line interface (Langflow CLI) is the main interface for managing and running the Langflow server.
+
+## CLI commands
+
+The following sections describe the available CLI commands and their options, as well as their corresponding [environment variables](./environment-variables.md).
+
+### langflow
+
+Running the CLI without any arguments displays a list of available options and commands.
+
+```bash
+langflow [OPTIONS]
+# or
+python -m langflow [OPTIONS]
+```
+
+#### Options
+
+| Option | Default | Values | Description |
+|--------|------|-----------|-------------|
+| `--install-completion` | *Not applicable* | *Not applicable* | Install auto-completion for the current shell. |
+| `--show-completion` | *Not applicable* | *Not applicable* | Show the location of the auto-completion config file (if installed). |
+| `--help` | *Not applicable* | *Not applicable* | Display information about the command usage and its options and arguments. |
+
+### langflow api-key
+
+Create an API key for the default superuser if the [`LANGFLOW_AUTO_LOGIN` environment variable](./environment-variables.md#LANGFLOW_AUTO_LOGIN) is set to `true`.
+
+```bash
+langflow api-key [OPTIONS]
+# or
+python -m langflow api-key [OPTIONS]
+```
+
+#### Options
+
+| Option | Default | Values | Description |
+|--------|---------|--------|-------------|
+| `--log-level` | `critical` | `debug` `info` `warning` `error` `critical` | Set the logging level. |
+| `--help` | *Not applicable* | *Not applicable* | Display information about the command usage and its options and arguments. |
+
+### langflow copy-db
+
+Copy the database files to the current directory.
+Copy the Langflow database files, `langflow.db` and `langflow-pre.db` (if they exist), from the cache directory to the current directory.
+
+:::note
+The current directory is the directory containing `__main__.py`.
+You can find this directory by running `which langflow`.
+:::
+
+```bash
+langflow copy-db
+# or
+python -m langflow copy-db
+```
+
+#### Options
+
+| Option | Default | Values | Description |
+|--------|---------|--------|-------------|
+| `--help` | *Not applicable* | *Not applicable* | Display information about the command usage and its options and arguments. |
+
+### langflow migration
+
+Run or test database migrations.
+
+```bash
+langflow migration [OPTIONS]
+# or
+python -m langflow migration [OPTIONS]
+```
+
+#### Options
+
+| Option | Default | Values | Description |
+|--------|---------|--------|-------------|
+| `--test` | `true` | [Boolean](#boolean) | Run migrations in test mode. Use `--no-test` to disable test mode. |
+| `--fix` | `false` (`--no-fix`) | [Boolean](#boolean) | Fix migrations. This is a destructive operation, and all affected data will be deleted. Only use this option if you know what you are doing. |
+| `--help` | *Not applicable* | *Not applicable* | Display information about the command usage and its options and arguments. |
+
+
+### langflow run
+
+Start the Langflow server.
+
+```bash
+langflow run [OPTIONS]
+# or
+python -m langflow run [OPTIONS]
+```
+
+#### Options
+
+| Option | Default | Values | Description |
+|--------|---------|--------|-------------|
+| `--host` | `127.0.0.1` | String | The host on which the Langflow server will run. See [`LANGFLOW_HOST` variable](./environment-variables.md#LANGFLOW_HOST). |
+| `--workers` | `1` | Integer | Number of worker processes. See [`LANGFLOW_WORKERS` variable](./environment-variables.md#LANGFLOW_WORKERS). |
+| `--worker-timeout` | `300` | Integer | Worker timeout in seconds. See [`LANGFLOW_WORKER_TIMEOUT` variable](./environment-variables.md#LANGFLOW_WORKER_TIMEOUT). |
+| `--port` | `7860` | Integer | The port on which the Langflow server will run. The server automatically selects a free port if the specified port is in use. See [`LANGFLOW_PORT` variable](./environment-variables.md#LANGFLOW_PORT). |
+| `--components-path` | `langflow/components` | String | Path to the directory containing custom components. See [`LANGFLOW_COMPONENTS_PATH` variable](./environment-variables.md#LANGFLOW_COMPONENTS_PATH). |
+| `--env-file` | Not set | String | Path to the `.env` file containing environment variables. See [Import environment variables from a .env file](./environment-variables.md#configure-variables-env-file). |
+| `--log-level` | `critical` | `debug` `info` `warning` `error` `critical` | Set the logging level. See [`LANGFLOW_LOG_LEVEL` variable](./environment-variables.md#LANGFLOW_LOG_LEVEL). |
+| `--log-file` | `logs/langflow.log` | String | Set the path to the log file for Langflow. See [`LANGFLOW_LOG_FILE` variable](./environment-variables.md#LANGFLOW_LOG_FILE). |
+| `--cache` | `InMemoryCache` | `InMemoryCache` `SQLiteCache` | Type of cache to use. See [`LANGFLOW_LANGCHAIN_CACHE` variable](./environment-variables.md#LANGFLOW_LANGCHAIN_CACHE). |
+| `--dev` | `false` (`--no-dev`) | [Boolean](#boolean) | Run Langflow in development mode (may contain bugs). See [`LANGFLOW_DEV` variable](./environment-variables.md#LANGFLOW_DEV). |
+| `--frontend-path` | `./frontend` | String | Path to the frontend directory containing build files. This is for development purposes only. See [`LANGFLOW_FRONTEND_PATH` variable](./environment-variables.md#LANGFLOW_FRONTEND_PATH). |
+| `--open-browser` | `true` | [Boolean](#boolean) | Open the system web browser on startup. Use `--no-open-browser` to disable opening the system web browser on startup. See [`LANGFLOW_OPEN_BROWSER` variable](./environment-variables.md#LANGFLOW_OPEN_BROWSER). |
+| `--remove-api-keys` | `false` (`--no-remove-api-keys`) | [Boolean](#boolean) | Remove API keys from the projects saved in the database. See [`LANGFLOW_REMOVE_API_KEYS` variable](./environment-variables.md#LANGFLOW_REMOVE_API_KEYS). |
+| `--backend-only` | `false` (`--no-backend-only`) | [Boolean](#boolean) | Only run Langflow's backend server (no frontend). See [`LANGFLOW_BACKEND_ONLY` variable](./environment-variables.md#LANGFLOW_BACKEND_ONLY). |
+| `--store` | `true` | [Boolean](#boolean) | Enable the Langflow Store features. Use `--no-store` to disable the Langflow Store features. See [`LANGFLOW_STORE` variable](./environment-variables.md#LANGFLOW_STORE). |
+| `--auto-saving` | `true` | [Boolean](#boolean) | Enable flow auto-saving. Use `--no-auto-saving` to disable flow auto-saving. See [`LANGFLOW_AUTO_SAVING` variable](./environment-variables.md#LANGFLOW_AUTO_SAVING). |
+| `--auto-saving-interval` | `1000` | Integer | Set the interval for flow auto-saving in milliseconds. See [`LANGFLOW_AUTO_SAVING_INTERVAL` variable](./environment-variables.md#LANGFLOW_AUTO_SAVING_INTERVAL). |
+| `--health-check-max-retries` | `5` | Integer | Set the maximum number of retries for the health check. Use `--no-health-check-max-retries` to disable the maximum number of retries for the health check. See [`LANGFLOW_HEALTH_CHECK_MAX_RETRIES` variable](./environment-variables.md#LANGFLOW_HEALTH_CHECK_MAX_RETRIES). |
+| `--max-file-size-upload` | `100` | Integer | Set the maximum file size for the upload in megabytes. See [`LANGFLOW_MAX_FILE_SIZE_UPLOAD` variable](./environment-variables.md#LANGFLOW_MAX_FILE_SIZE_UPLOAD). |
+| `--help` | *Not applicable* | *Not applicable* | Display information about the command usage and its options and arguments. |
+
+### langflow superuser
+
+Create a superuser account.
+
+```bash
+langflow superuser [OPTIONS]
+# or
+python -m langflow superuser [OPTIONS]
+```
+
+#### Options
+
+| Option | Default | Values | Description |
+|--------|---------|--------|-------------|
+| `--username` | Required | String | Specify the name for the superuser. See [`LANGFLOW_SUPERUSER` variable](./environment-variables.md#LANGFLOW_SUPERUSER). |
+| `--password` | Required | String | Specify the password for the superuser. See [`LANGFLOW_SUPERUSER_PASSWORD` variable](./environment-variables.md#LANGFLOW_SUPERUSER_PASSWORD). |
+| `--log-level` | `critical` | `debug` `info` `warning` `error` `critical` | Set the logging level. |
+
+## Precedence
+
+Langflow CLI options override the values of corresponding [environment variables](./environment-variables.md).
+
+For example, if you have `LANGFLOW_PORT=7860` defined as an environment variable, but you run the CLI with `--port 7880`, then Langflow will set the port to **`7880`** (the value passed with the CLI).
+
+## Assign values
+
+There are two ways you can assign a value to a CLI option.
+You can write the option flag and its value with a single space between them: `--option value`.
+Or, you can write them using an equals sign (`=`) between the option flag and the value: `--option=value`.
+
+Values that contain spaces must be surrounded by quotation marks: `--option 'Value with Spaces'` or `--option='Value with Spaces'`.
+
+### Boolean values {#boolean}
+
+Boolean options turn a behavior on or off, and therefore accept no arguments.
+To activate a boolean option, type it on the command line.
+For example:
+
+```bash
+langflow run --remove-api-keys
+```
+
+All boolean options have a corresponding option that negates it.
+For example, the negating option for `--remove-api-keys` is `--no-remove-api-keys`.
+These options let you negate boolean options that you may have set using [environment variables](./environment-variables.md).
diff --git a/docs/docs/Configuration/configuration-custom-database.md b/docs/docs/Configuration/configuration-custom-database.md
new file mode 100644
index 0000000000000000000000000000000000000000..a81a7cf116e8c0a59db18c8013e9e7933cc01643
--- /dev/null
+++ b/docs/docs/Configuration/configuration-custom-database.md
@@ -0,0 +1,76 @@
+---
+title: Configure an external PostgreSQL database
+sidebar_position: 8
+---
+Langflow's default database is [SQLite](https://www.sqlite.org/docs.html), but you can configure Langflow to use PostgreSQL instead.
+
+This guide will walk you through the process of setting up an external database for Langflow by replacing the default SQLite connection string `sqlite:///./langflow.db` with PostgreSQL.
+
+## Prerequisite
+
+* A [PostgreSQL](https://www.pgadmin.org/download/) database
+
+## Connect Langflow to PostgreSQL
+
+To connect Langflow to PostgreSQL, follow these steps.
+
+1. Find your PostgreSQL database's connection string.
+It looks like `postgresql://user:password@host:port/dbname`.
+For example, if you started PostgreSQL with this Docker command:
+
+```
+docker run --name some-postgres -e POSTGRES_PASSWORD=mysecretpassword -d -p 5432:5432 postgres
+```
+
+Your connection string would be `postgresql://postgres:mysecretpassword@localhost:5432/postgres`, where `postgres` is the default user of the PostgreSQL image and `some-postgres` is only the container's name.
+
+2. Create a `.env` file for configuring Langflow.
+```
+touch .env
+```
+
+3. To set the database URL environment variable, add it to your `.env` file:
+```plaintext
+LANGFLOW_DATABASE_URL="postgresql://user:password@localhost:5432/dbname"
+```
+
+:::tip
+The Langflow project includes a [`.env.example`](https://github.com/langflow-ai/langflow/blob/main/.env.example) file to help you get started.
+You can copy the contents of this file into your own `.env` file and replace the example values with your own preferred settings.
+Replace the value for `LANGFLOW_DATABASE_URL` with your PostgreSQL connection string.
+:::
+
+4. Run Langflow with the `.env` file:
+```bash
+langflow run --env-file .env
+```
+
+5. In Langflow, create traffic by running a flow.
+6. Inspect your PostgreSQL deployment's tables and activity.
+You will see new tables and traffic created.
+
+## Example Langflow and PostgreSQL docker-compose.yml
+
+The Langflow project includes a [`docker-compose.yml`](https://github.com/langflow-ai/langflow/blob/main/docker_example/docker-compose.yml) file for quick deployment with PostgreSQL.
+
+This configuration launches Langflow and PostgreSQL containers, with Langflow pre-configured to use the PostgreSQL database. Customize the database credentials as needed.
+
+To start the services, navigate to the `/docker_example` directory, and then run `docker-compose up`.
+
+```yaml
+services:
+ langflow:
+ image: langflow-ai/langflow:latest
+ environment:
+ - LANGFLOW_DATABASE_URL=postgresql://user:password@postgres:5432/langflow
+ depends_on:
+ - postgres
+
+ postgres:
+ image: postgres:15
+ environment:
+ - POSTGRES_USER=user
+ - POSTGRES_PASSWORD=password
+ - POSTGRES_DB=langflow
+```
+
diff --git a/docs/docs/Configuration/configuration-global-variables.md b/docs/docs/Configuration/configuration-global-variables.md
new file mode 100644
index 0000000000000000000000000000000000000000..2900ab786ee164b7a7cc08e894b4109b8ef549b1
--- /dev/null
+++ b/docs/docs/Configuration/configuration-global-variables.md
@@ -0,0 +1,198 @@
+---
+title: Global variables
+sidebar_position: 5
+slug: /configuration-global-variables
+---
+
+import ReactPlayer from "react-player";
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+Global variables let you store and reuse generic input values and credentials across your projects.
+You can use a global variable in any text input field that displays the 🌐 (globe) icon.
+
+Langflow stores global variables in its internal database, and encrypts the values using a secret key.
+
+## Create a global variable {#3543d5ef00eb453aa459b97ba85501e5}
+
+1. In the Langflow UI, click your profile icon, and then select **Settings**.
+
+2. Click **Global Variables**.
+
+3. Click **Add New**.
+
+4. In the **Create Variable** dialog, enter a name for your variable in the **Variable Name** field.
+
+5. Optional: Select a **Type** for your global variable. The available types are **Generic** (default) and **Credential**.
+
+ No matter which **Type** you select, Langflow still encrypts the **Value** of the global variable.
+
+6. Enter the **Value** for your global variable.
+
+7. Optional: Use the **Apply To Fields** menu to select one or more fields that you want Langflow to automatically apply your global variable to. For example, if you select **OpenAI API Key**, Langflow will automatically apply the variable to any **OpenAI API Key** field.
+
+8. Click **Save Variable**.
+
+You can now select your global variable from any text input field that displays the 🌐 (globe) icon.
+
+:::info
+Because values are encrypted, you can't view the actual values of your global variables.
+In **Settings > Global Variables**, the **Value** column shows the encrypted hash for **Generic** type variables, and shows nothing for **Credential** type variables.
+:::
+
+## Edit a global variable
+
+1. In the Langflow UI, click your profile icon, and then select **Settings**.
+
+2. Click **Global Variables**.
+
+3. Click on the global variable you want to edit.
+
+4. In the **Update Variable** dialog, you can edit the following fields: **Variable Name**, **Value**, and **Apply To Fields**.
+
+5. Click **Update Variable**.
+
+## Delete a global variable
+
+:::warning
+Deleting a global variable permanently deletes any references to it from your existing projects.
+:::
+
+1. In the Langflow UI, click your profile icon, and then select **Settings**.
+
+2. Click **Global Variables**.
+
+3. Click the checkbox next to the global variable that you want to delete.
+
+4. Click the Trash icon.
+
+The global variable, and any existing references to it, are deleted.
+
+## Add global variables from the environment {#76844a93dbbc4d1ba551ea1a4a89ccdd}
+
+### Custom environment variables
+
+You can use the `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` environment variable to source global variables from your runtime environment.
+
+
+
+
+
+If you installed Langflow locally, you must define the `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` environment variable in a `.env` file.
+
+1. Create a `.env` file and open it in your preferred editor.
+
+2. Add the `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` environment variable as follows:
+
+ ```plaintext title=".env"
+ LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=VARIABLE1,VARIABLE2
+ ```
+
+ Replace `VARIABLE1,VARIABLE2` with a comma-separated list (no spaces) of variables that you want Langflow to source from the environment.
+ For example, `my_key,some_string`.
+
+3. Save and close the file.
+
+4. Start Langflow with the `.env` file:
+
+ ```bash
+ VARIABLE1="VALUE1" VARIABLE2="VALUE2" python -m langflow run --env-file .env
+ ```
+
+ :::note
+ In this example, the environment variables (`VARIABLE1="VALUE1"` and `VARIABLE2="VALUE2"`) are prefixed to the startup command.
+ This is a rudimentary method for exposing environment variables to Python on the command line, and is meant for illustrative purposes.
+ Make sure to expose your environment variables to Langflow in a manner that best suits your own environment.
+ :::
+
+5. Confirm that Langflow successfully sourced the global variables from the environment.
+
+ 1. In the Langflow UI, click your profile icon, and then select **Settings**.
+
+ 2. Click **Global Variables**.
+
+ The environment variables appear in the list of **Global Variables**.
+
+
+
+
+
+If you're using Docker, you can pass `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` directly from the command line or from a `.env` file.
+
+To pass `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` directly from the command line:
+
+```bash
+docker run -it --rm \
+ -p 7860:7860 \
+ -e LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT="VARIABLE1,VARIABLE2" \
+ -e VARIABLE1="VALUE1" \
+ -e VARIABLE2="VALUE2" \
+ langflowai/langflow:latest
+```
+
+To pass `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` from a `.env` file:
+
+```bash
+docker run -it --rm \
+ -p 7860:7860 \
+ --env-file .env \
+ -e VARIABLE1="VALUE1" \
+ -e VARIABLE2="VALUE2" \
+ langflowai/langflow:latest
+```
+
+
+
+
+
+:::info
+When adding global variables from the environment, the following limitations apply:
+
+- You can only source the **Name** and **Value** from the environment.
+ To add additional parameters, such as the **Apply To Fields** parameter, you must edit the global variables in the Langflow UI.
+
+- Global variables that you add from the environment always have the **Credential** type.
+ :::
+
+:::tip
+If you want to explicitly prevent Langflow from sourcing global variables from the environment, set `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` to `false` in your `.env` file:
+
+```plaintext title=".env"
+LANGFLOW_STORE_ENVIRONMENT_VARIABLES=false
+```
+
+:::
+
+### Default environment variables
+
+Langflow automatically detects and converts some environment variables into global variables of the type **Credential**, which are applied to the specific fields in components that require them. Currently, the following variables are supported:
+
+- `OPENAI_API_KEY`
+- `ANTHROPIC_API_KEY`
+- `GOOGLE_API_KEY`
+- `COHERE_API_KEY`
+- `GROQ_API_KEY`
+- `HUGGINGFACEHUB_API_TOKEN`
+- `SEARCHAPI_API_KEY`
+- `SERPAPI_API_KEY`
+- `AZURE_OPENAI_API_KEY`
+- `AZURE_OPENAI_API_VERSION`
+- `AZURE_OPENAI_API_INSTANCE_NAME`
+- `AZURE_OPENAI_API_DEPLOYMENT_NAME`
+- `AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME`
+- `PINECONE_API_KEY`
+- `ASTRA_DB_APPLICATION_TOKEN`
+- `ASTRA_DB_API_ENDPOINT`
+- `UPSTASH_VECTOR_REST_URL`
+- `UPSTASH_VECTOR_REST_TOKEN`
+- `VECTARA_CUSTOMER_ID`
+- `VECTARA_CORPUS_ID`
+- `VECTARA_API_KEY`
+- `AWS_ACCESS_KEY_ID`
+- `AWS_SECRET_ACCESS_KEY`
+
+For information about other environment variables and their usage, see [Environment Variables](/environment-variables).
+
+## Security best practices
+
+For information about securing your global variables and other sensitive data, see [Security best practices](/configuration-security-best-practices).
\ No newline at end of file
diff --git a/docs/docs/Configuration/configuration-security-best-practices.md b/docs/docs/Configuration/configuration-security-best-practices.md
new file mode 100644
index 0000000000000000000000000000000000000000..053ff5662fc00b99c3dc34caea3bc66a71c68084
--- /dev/null
+++ b/docs/docs/Configuration/configuration-security-best-practices.md
@@ -0,0 +1,48 @@
+---
+title: Security best practices
+sidebar_position: 1
+slug: /configuration-security-best-practices
+---
+
+This guide outlines security best practices for deploying and managing Langflow.
+
+## Secret key protection
+
+The secret key is critical for encrypting sensitive data in Langflow. Follow these guidelines:
+
+- Always use a custom secret key in production:
+
+ ```bash
+ LANGFLOW_SECRET_KEY=your-secure-secret-key
+ ```
+
+- Store the secret key securely:
+
+ - Use environment variables or secure secret management systems.
+ - Never commit the secret key to version control.
+ - Regularly rotate the secret key.
+
+- Use the default secret key locations:
+ - macOS: `~/Library/Caches/langflow/secret_key`
+ - Linux: `~/.cache/langflow/secret_key`
+ - Windows: `%USERPROFILE%\AppData\Local\langflow\secret_key`
+
+## API keys and credentials
+
+- Store API keys and credentials as encrypted global variables.
+- Use the Credential type for sensitive information.
+- Implement proper access controls for users who can view/edit credentials.
+- Regularly audit and rotate API keys.
+
+## Database file protection
+
+- Store the database in a secure location:
+
+ ```bash
+ LANGFLOW_SAVE_DB_IN_CONFIG_DIR=true
+ LANGFLOW_CONFIG_DIR=/secure/path/to/config
+ ```
+
+- Use the default database locations:
+ - macOS/Linux: `PYTHON_LOCATION/site-packages/langflow/langflow.db`
+ - Windows: `PYTHON_LOCATION\Lib\site-packages\langflow\langflow.db`
diff --git a/docs/docs/Configuration/environment-variables.md b/docs/docs/Configuration/environment-variables.md
new file mode 100644
index 0000000000000000000000000000000000000000..40215504ae79de89997bc04017869ece67e45efe
--- /dev/null
+++ b/docs/docs/Configuration/environment-variables.md
@@ -0,0 +1,147 @@
+---
+title: Environment variables
+sidebar_position: 7
+slug: /environment-variables
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+Langflow lets you configure a number of settings using environment variables.
+
+## Configure environment variables
+
+Langflow recognizes [supported environment variables](#supported-variables) from the following sources:
+
+- Environment variables that you've set in your terminal.
+- Environment variables that you've imported from a `.env` file using the `--env-file` option in the Langflow CLI.
+
+You can choose to use one source exclusively, or use both sources together.
+If you choose to use both sources together, be aware that environment variables imported from a `.env` file take [precedence](#precedence) over those set in your terminal.
+
+### Set environment variables in your terminal {#configure-variables-terminal}
+
+Run the following commands to set environment variables for your current terminal session:
+
+
+
+
+```bash
+export VARIABLE_NAME='VALUE'
+```
+
+
+
+```
+set VARIABLE_NAME='VALUE'
+```
+
+
+
+```bash
+docker run -it --rm \
+ -p 7860:7860 \
+ -e VARIABLE_NAME='VALUE' \
+ langflowai/langflow:latest
+```
+
+
+
+
+When you start Langflow, it looks for environment variables that you've set in your terminal.
+If it detects a supported environment variable, then it automatically adopts the specified value, subject to [precedence rules](#precedence).
+
+### Import environment variables from a .env file {#configure-variables-env-file}
+
+1. Create a `.env` file and open it in your preferred editor.
+
+2. Add your environment variables to the file:
+
+ ```plaintext title=".env"
+ VARIABLE_NAME='VALUE'
+ VARIABLE_NAME='VALUE'
+ ```
+
+ :::tip
+ The Langflow project includes a [`.env.example`](https://github.com/langflow-ai/langflow/blob/main/.env.example) file to help you get started.
+ You can copy the contents of this file into your own `.env` file and replace the example values with your own preferred settings.
+ :::
+
+3. Save and close the file.
+
+4. Start Langflow using the `--env-file` option to define the path to your `.env` file:
+
+
+
+
+ ```bash
+ python -m langflow run --env-file .env
+ ```
+
+
+
+ ```bash
+ docker run -it --rm \
+ -p 7860:7860 \
+ --env-file .env \
+ langflowai/langflow:latest
+ ```
+
+
+
+
+On startup, Langflow imports the environment variables from your `.env` file, as well as any that you [set in your terminal](#configure-variables-terminal), and adopts their specified values.
+
+## Precedence {#precedence}
+
+Environment variables [defined in the .env file](#configure-variables-env-file) take precedence over those [set in your terminal](#configure-variables-terminal).
+This means that if you set the same environment variable in both your terminal and your `.env` file, Langflow adopts the value from the `.env` file.
+
+:::info[CLI precedence]
+[Langflow CLI options](./configuration-cli.md) override the value of corresponding environment variables defined in the `.env` file as well as any environment variables set in your terminal.
+:::
+
+## Supported environment variables {#supported-variables}
+
+The following table lists the environment variables supported by Langflow.
+
+| Variable | Format / Values | Default | Description |
+|----------|---------------|---------|-------------|
+| `DO_NOT_TRACK` | Boolean | `false` | If enabled, Langflow will not track telemetry. |
+| `LANGFLOW_AUTO_LOGIN` | Boolean | `true` | Enable automatic login for Langflow. Set to `false` to disable automatic login and require the login form to log into the Langflow UI. Setting to `false` requires [`LANGFLOW_SUPERUSER`](#LANGFLOW_SUPERUSER) and [`LANGFLOW_SUPERUSER_PASSWORD`](#LANGFLOW_SUPERUSER_PASSWORD) to be set. |
+| `LANGFLOW_AUTO_SAVING` | Boolean | `true` | Enable flow auto-saving. See [`--auto-saving` option](./configuration-cli.md#run-auto-saving). |
+| `LANGFLOW_AUTO_SAVING_INTERVAL` | Integer | `1000` | Set the interval for flow auto-saving in milliseconds. See [`--auto-saving-interval` option](./configuration-cli.md#run-auto-saving-interval). |
+| `LANGFLOW_BACKEND_ONLY` | Boolean | `false` | Only run Langflow's backend server (no frontend). See [`--backend-only` option](./configuration-cli.md#run-backend-only). |
+| `LANGFLOW_CACHE_TYPE` | `async` `redis` `memory` `disk` `critical` | `async` | Set the cache type for Langflow. If you set the type to `redis`, then you must also set the following environment variables: [`LANGFLOW_REDIS_HOST`](#LANGFLOW_REDIS_HOST), [`LANGFLOW_REDIS_PORT`](#LANGFLOW_REDIS_PORT), [`LANGFLOW_REDIS_DB`](#LANGFLOW_REDIS_DB), and [`LANGFLOW_REDIS_CACHE_EXPIRE`](#LANGFLOW_REDIS_CACHE_EXPIRE). |
+| `LANGFLOW_COMPONENTS_PATH` | String | `langflow/components` | Path to the directory containing custom components. See [`--components-path` option](./configuration-cli.md#run-components-path). |
+| `LANGFLOW_CONFIG_DIR` | String | | Set the Langflow configuration directory where files, logs, and the Langflow database are stored. |
+| `LANGFLOW_DATABASE_URL` | String | | Set the database URL for Langflow. If you don't provide one, Langflow uses an SQLite database. |
+| `LANGFLOW_DEV` | Boolean | `false` | Run Langflow in development mode (may contain bugs). See [`--dev` option](./configuration-cli.md#run-dev). |
+| `LANGFLOW_FALLBACK_TO_ENV_VAR` | Boolean | `true` | If enabled, [global variables](../Configuration/configuration-global-variables.md) set in the Langflow UI fall back to an environment variable with the same name when Langflow fails to retrieve the variable value. |
+| `LANGFLOW_FRONTEND_PATH` | String | `./frontend` | Path to the frontend directory containing build files. This is for development purposes only. See [`--frontend-path` option](./configuration-cli.md#run-frontend-path). |
+| `LANGFLOW_HEALTH_CHECK_MAX_RETRIES` | Integer | `5` | Set the maximum number of retries for the health check. See [`--health-check-max-retries` option](./configuration-cli.md#run-health-check-max-retries). |
+| `LANGFLOW_HOST` | String | `127.0.0.1` | The host on which the Langflow server will run. See [`--host` option](./configuration-cli.md#run-host). |
+| `LANGFLOW_LANGCHAIN_CACHE` | `InMemoryCache` `SQLiteCache` | `InMemoryCache` | Type of cache to use. See [`--cache` option](./configuration-cli.md#run-cache). |
+| `LANGFLOW_LOG_ENV` | `container_json` `container_csv` | Not set | Set the log environment. If not set, logs are output as JSON with color, and a custom format string can be provided instead. See [`LANGFLOW_LOG_FORMAT`](#LANGFLOW_LOG_FORMAT). |
+| `LANGFLOW_LOG_FILE` | String | `logs/langflow.log` | Set the path to the log file for Langflow. See [`--log-file` option](./configuration-cli.md#run-log-file). |
+| `LANGFLOW_LOG_FORMAT` | String | `{time:YYYY-MM-DD HH:mm:ss} - {level: <8} - {module} - {message}` | Configure the log format. For example, without colors: `{time:YYYY-MM-DD HH:mm:ss.SSS} {level} {file} {line} {function} {message}`. If [`LANGFLOW_LOG_ENV`](#LANGFLOW_LOG_ENV) is set, this configuration is ignored. |
+| `LANGFLOW_LOG_LEVEL` | `debug` `info` `warning` `error` `critical` | `critical` | Set the logging level. See [`--log-level` option](./configuration-cli.md#run-log-level). |
+| `LANGFLOW_MAX_FILE_SIZE_UPLOAD` | Integer | `100` | Set the maximum file size for the upload in megabytes. See [`--max-file-size-upload` option](./configuration-cli.md#run-max-file-size-upload). |
+| `LANGFLOW_OPEN_BROWSER` | Boolean | `true` | Open the system web browser on startup. See [`--open-browser` option](./configuration-cli.md#run-open-browser). |
+| `LANGFLOW_PORT` | Integer | `7860` | The port on which the Langflow server will run. The server automatically selects a free port if the specified port is in use. See [`--port` option](./configuration-cli.md#run-port). |
+| `LANGFLOW_PROMETHEUS_ENABLED` | Boolean | `false` | Expose Prometheus metrics. |
+| `LANGFLOW_PROMETHEUS_PORT` | Integer | `9090` | Set the port on which Langflow exposes Prometheus metrics. |
+| `LANGFLOW_REDIS_CACHE_EXPIRE` | Integer | `3600` | See [`LANGFLOW_CACHE_TYPE`](#LANGFLOW_CACHE_TYPE). |
+| `LANGFLOW_REDIS_DB` | Integer | `0` | See [`LANGFLOW_CACHE_TYPE`](#LANGFLOW_CACHE_TYPE). |
+| `LANGFLOW_REDIS_HOST` | String | `localhost` | See [`LANGFLOW_CACHE_TYPE`](#LANGFLOW_CACHE_TYPE). |
+| `LANGFLOW_REDIS_PORT` | String | `6379` | See [`LANGFLOW_CACHE_TYPE`](#LANGFLOW_CACHE_TYPE). |
+| `LANGFLOW_REMOVE_API_KEYS` | Boolean | `false` | Remove API keys from the projects saved in the database. See [`--remove-api-keys` option](./configuration-cli.md#run-remove-api-keys). |
+| `LANGFLOW_SAVE_DB_IN_CONFIG_DIR` | Boolean | `false` | Save the Langflow database in [`LANGFLOW_CONFIG_DIR`](#LANGFLOW_CONFIG_DIR) instead of in the Langflow package directory. Note, when this variable is set to default (`false`), the database isn't shared between different virtual environments and the database is deleted when you uninstall Langflow. |
+| `LANGFLOW_STORE` | Boolean | `true` | Enable the Langflow Store. See [`--store` option](./configuration-cli.md#run-store). |
+| `LANGFLOW_STORE_ENVIRONMENT_VARIABLES` | Boolean | `true` | Store environment variables as [global variables](../Configuration/configuration-global-variables.md) in the database. |
+| `LANGFLOW_SUPERUSER` | String | Not set | Set the name for the superuser. Required if [`LANGFLOW_AUTO_LOGIN`](#LANGFLOW_AUTO_LOGIN) is set to `false`. See [`superuser --username` option](./configuration-cli.md#superuser-username). |
+| `LANGFLOW_SUPERUSER_PASSWORD` | String | Not set | Set the password for the superuser. Required if [`LANGFLOW_AUTO_LOGIN`](#LANGFLOW_AUTO_LOGIN) is set to `false`. See [`superuser --password` option](./configuration-cli.md#superuser-password).|
+| `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` | String | Not set | Comma-separated list of environment variables to get from the environment and store as [global variables](../Configuration/configuration-global-variables.md). |
+| `LANGFLOW_WORKER_TIMEOUT` | Integer | `300` | Worker timeout in seconds. See [`--worker-timeout` option](./configuration-cli.md#run-worker-timeout). |
+| `LANGFLOW_WORKERS` | Integer | `1` | Number of worker processes. See [`--workers` option](./configuration-cli.md#run-workers). |
diff --git a/docs/docs/Contributing/_category_.json b/docs/docs/Contributing/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..32da1de7cd2bc6ef6e76e7e0f003c5d23765592f
--- /dev/null
+++ b/docs/docs/Contributing/_category_.json
@@ -0,0 +1 @@
+{"position":10, "label":"Contributing"}
\ No newline at end of file
diff --git a/docs/docs/Contributing/contributing-community.md b/docs/docs/Contributing/contributing-community.md
new file mode 100644
index 0000000000000000000000000000000000000000..f37ae5d7e5255e2010819b57a8faef5364bc32af
--- /dev/null
+++ b/docs/docs/Contributing/contributing-community.md
@@ -0,0 +1,26 @@
+---
+title: Join the Langflow community
+sidebar_position: 5
+slug: /contributing-community
+---
+
+
+
+## Join the Langflow Discord server
+
+Join the [Langflow Discord Server](https://discord.gg/EqksyE2EX9) to ask questions and showcase your projects.
+
+## Follow Langflow on X
+
+Follow [@langflow_ai](https://twitter.com/langflow_ai) on X to get the latest news about Langflow.
+
+## Star Langflow on GitHub
+
+You can [star Langflow in GitHub](https://github.com/langflow-ai/langflow).
+
+By adding a star, other users will be able to find Langflow more easily, and see that it has been already useful for others.
+
+## Watch the GitHub repository for releases
+
+You can [watch Langflow in GitHub](https://github.com/langflow-ai/langflow). If you select **Watching** instead of **Releases only** you will receive notifications when someone creates a new issue or question. You can also specify that you want to be notified only about new issues, discussions, and PRs so you can try to help solve those issues.
+
diff --git a/docs/docs/Contributing/contributing-components.md b/docs/docs/Contributing/contributing-components.md
new file mode 100644
index 0000000000000000000000000000000000000000..0cac371e49b62d3eeda596fa42524aaa66a9e309
--- /dev/null
+++ b/docs/docs/Contributing/contributing-components.md
@@ -0,0 +1,24 @@
+---
+title: Contribute components
+sidebar_position: 4
+slug: /contributing-components
+---
+
+
+New components are added as objects of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/custom/custom_component/custom_component.py) class.
+
+Any dependencies are added to the [pyproject.toml](https://github.com/langflow-ai/langflow/blob/main/pyproject.toml#L148) file.
+
+### Contribute an example component to Langflow
+
+Anyone can contribute an example component. For example, if you created a new document loader called **MyCustomDocumentLoader**, you can follow these steps to contribute it to Langflow.
+
+1. Write your loader as an object of the [CustomComponent](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/custom/custom_component/custom_component.py) class. You'll create a new class, `MyCustomDocumentLoader`, that will inherit from `CustomComponent` and override the base class's methods.
+2. Define optional attributes like `display_name`, `description`, and `documentation` to provide information about your custom component.
+3. Implement the `build_config` method to define the configuration options for your custom component.
+4. Implement the `build` method to define the logic for taking input parameters specified in the `build_config` method and returning the desired output.
+5. Add the code to the [/components/documentloaders](https://github.com/langflow-ai/langflow/tree/dev/src/backend/base/langflow/components) folder.
+6. Add the dependency to [/documentloaders/__init__.py](https://github.com/langflow-ai/langflow/blob/dev/src/backend/base/langflow/components/documentloaders/__init__.py) as `from .MyCustomDocumentLoader import MyCustomDocumentLoader`.
+7. Add any new dependencies to the [pyproject.toml](https://github.com/langflow-ai/langflow/blob/main/pyproject.toml#L148) file.
+8. Submit documentation for your component. For this example, you'd submit documentation to the [loaders page](https://github.com/langflow-ai/langflow/blob/main/docs/docs/Components/components-loaders.md).
+9. Submit your changes as a pull request. The Langflow team will have a look, suggest changes, and add your component to Langflow.
\ No newline at end of file
diff --git a/docs/docs/Contributing/contributing-github-discussion-board.md b/docs/docs/Contributing/contributing-github-discussion-board.md
new file mode 100644
index 0000000000000000000000000000000000000000..e17d5366d768df35b11dddf10882886a3d134233
--- /dev/null
+++ b/docs/docs/Contributing/contributing-github-discussion-board.md
@@ -0,0 +1,13 @@
+---
+title: Ask for help on the Discussions board
+sidebar_position: 3
+slug: /contributing-github-discussions
+---
+
+If you're looking for help with your code, consider posting a question on the Langflow [GitHub Discussions board](https://github.com/langflow-ai/langflow/discussions). The Langflow team cannot provide individual support via email. The team also believes that help is much more valuable if it's shared publicly, so that more people can benefit from it.
+
+Since the Discussions board is public, please follow this guidance when posting your code questions.
+
+* When describing your issue, try to provide as many details as possible. What exactly goes wrong? _How_ is it failing? Is there an error? "XY doesn't work" usually isn't that helpful for tracking down problems. Always remember to include the code you ran and if possible, extract only the relevant parts and don't just dump your entire script. This will make it easier for us to reproduce the error.
+
+* When you include long code, logs, or tracebacks, wrap them in `<details>` and `</details>` tags. This [collapses the content](https://developer.mozilla.org/en/docs/Web/HTML/Element/details) so the contents only becomes visible on click, making the issue easier to read and follow.
\ No newline at end of file
diff --git a/docs/docs/Contributing/contributing-github-issues.md b/docs/docs/Contributing/contributing-github-issues.md
new file mode 100644
index 0000000000000000000000000000000000000000..11105a7e35a06206e9cc8117667802896773c7b8
--- /dev/null
+++ b/docs/docs/Contributing/contributing-github-issues.md
@@ -0,0 +1,7 @@
+---
+title: Request an enhancement or report a bug
+sidebar_position: 2
+slug: /contributing-github-issues
+---
+
+The [Issues page in the Langflow repo](https://github.com/langflow-ai/langflow/issues) is kept up to date with bugs, improvements, and feature requests. Labels are used to help with sorting and discovery of issues of interest. For an overview of the system Langflow uses to tag issues and pull requests, see the Langflow repo's [labels page](https://github.com/langflow-ai/langflow/labels).
\ No newline at end of file
diff --git a/docs/docs/Contributing/contributing-how-to-contribute.md b/docs/docs/Contributing/contributing-how-to-contribute.md
new file mode 100644
index 0000000000000000000000000000000000000000..8a87bcd5d6942ee3997730d5101454878cc5e558
--- /dev/null
+++ b/docs/docs/Contributing/contributing-how-to-contribute.md
@@ -0,0 +1,116 @@
+---
+title: Contribute to Langflow
+sidebar_position: 1
+slug: /contributing-how-to-contribute
+---
+
+This guide is intended to help you start contributing to Langflow.
+As an open-source project in a rapidly developing field, Langflow welcomes contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+To contribute code or documentation to this project, follow the [fork and pull request](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
+
+## Contribute code
+
+Develop Langflow locally with [uv](https://docs.astral.sh/uv/getting-started/installation/) and [Node.js](https://nodejs.org/en/download/package-manager).
+
+### Prerequisites
+
+* [uv(>=0.4)](https://docs.astral.sh/uv/getting-started/installation/)
+* [Node.js](https://nodejs.org/en/download/package-manager)
+
+### Clone the Langflow Repository
+
+1. Navigate to the [Langflow GitHub repository](https://github.com/langflow-ai/langflow), and then click **Fork**.
+
+2. Add the new remote to your local repository on your local machine:
+
+```bash
+git remote add fork https://github.com/<your_username>/langflow.git
+```
+
+### Prepare the development environment
+
+1. Set up the development environment:
+
+```bash
+make init
+```
+
+This command sets up the development environment by installing backend and frontend dependencies, building the frontend static files, and initializing the project. It runs `make install_backend`, `make install_frontend`, `make build_frontend`, and finally `uv run langflow run` to start the application.
+
+2. Run `make lint`, `make format`, and `make unit_tests` before pushing to the repository.
+
+### Debug
+
+The repo includes a `.vscode/launch.json` file for debugging the backend in VSCode, which is faster than debugging with Docker Compose. To debug Langflow with the `launch.json` file in VSCode:
+
+1. Open Langflow in VSCode.
+2. Press **Ctrl+Shift+D** (Windows/Linux) or **Cmd+Shift+D** (Mac) to open the Run and Debug view.
+3. From the **Run and Debug** dropdown, choose a debugging configuration.
+4. Click the green **Play** button or press F5 to start debugging.
+
+Use `launch.json` to quickly debug different parts of your application, like the backend, frontend, or CLI, directly from VSCode.
+
+### Run Langflow locally
+
+After setting up the environment with `make init`, you can run Langflow's backend and frontend separately for development.
+Langflow recommends using a virtual environment like [venv](https://docs.python.org/3/library/venv.html) or [conda](https://anaconda.org/anaconda/conda) to isolate dependencies.
+
+Before you begin, ensure you have [uv](https://docs.astral.sh/uv/getting-started/installation/) and [Node.js](https://nodejs.org/en/download/package-manager) installed.
+
+1. In the repository root, install the dependencies and start the development server for the backend:
+
+```bash
+make backend
+```
+
+2. Install dependencies and start the frontend:
+
+```bash
+make frontend
+```
+
+This approach allows you to work on the backend and frontend independently, with hot-reloading for faster development.
+
+## Contribute documentation
+
+The documentation is built using [Docusaurus](https://docusaurus.io/) and written in [Markdown](https://docusaurus.io/docs/markdown-features).
+
+### Prerequisites
+
+* [Node.js](https://nodejs.org/en/download/package-manager)
+
+### Clone the Langflow repository
+
+1. Navigate to the [Langflow GitHub repository](https://github.com/langflow-ai/langflow), and then click **Fork**.
+
+2. Add the new remote to your local repository on your local machine:
+
+```bash
+git remote add fork https://github.com/<your_username>/langflow.git
+```
+
+3. To run the documentation locally, run the following commands:
+
+```bash
+cd docs
+npm install
+npm run start
+```
+
+The documentation will be available at `localhost:3000` and all the files are located in the `docs/docs` folder.
+
+## Open a pull request
+
+Once you have written and manually tested your changes with `make lint` and `make unit_tests`, open a pull request to send your changes upstream to the main Langflow repository.
+
+1. Open a new GitHub pull request with your patch against the `main` branch.
+2. Ensure the PR title follows semantic commit conventions. For example, features are `feat: add new feature` and fixes are `fix: correct issue with X`.
+3. A Langflow maintainer will review your pull request. Thanks for your contribution!
+
+Some additional guidance on pull request titles:
+* Ensure the pull request description clearly describes the problem and solution. If the PR fixes an issue, include a link to the fixed issue in the PR description with `Fixes #1234`.
+* Pull request titles appear in Langflow's release notes, so they should explain what the PR does as explicitly as possible.
+* Pull requests should strive to fix one thing **only**, and should contain a good description of what is being fixed.
+
+For more information, see the [Python Developer's Guide](https://devguide.python.org/getting-started/pull-request-lifecycle/index.html#making-good-commits).
\ No newline at end of file
diff --git a/docs/docs/Contributing/contributing-telemetry.md b/docs/docs/Contributing/contributing-telemetry.md
new file mode 100644
index 0000000000000000000000000000000000000000..c374565caaced08592f0a9ccada412fae1f6003b
--- /dev/null
+++ b/docs/docs/Contributing/contributing-telemetry.md
@@ -0,0 +1,50 @@
+---
+title: Telemetry
+sidebar_position: 0
+slug: /contributing-telemetry
+---
+
+Langflow uses anonymous telemetry to collect essential usage statistics to enhance functionality and the user experience. This data helps us identify popular features and areas that need improvement, and ensures development efforts align with what you need.
+
+We respect your privacy and are committed to protecting your data. We do not collect any personal information or sensitive data. All telemetry data is anonymized and used solely for improving Langflow.
+
+## Opt out of telemetry
+
+To opt out of telemetry, set the `LANGFLOW_DO_NOT_TRACK` or `DO_NOT_TRACK` environment variable to `true` before running Langflow. This disables telemetry data collection.
+
+## Data that Langflow collects
+
+### Run {#2d427dca4f0148ae867997f6789e8bfb}
+
+- **IsWebhook**: Indicates whether the operation was triggered via a webhook.
+- **Seconds**: Duration in seconds for how long the operation lasted, providing insights into performance.
+- **Success**: Boolean value indicating whether the operation was successful, helping identify potential errors or issues.
+- **ErrorMessage**: Provides error message details if the operation was unsuccessful, aiding in troubleshooting and enhancements.
+
+### Shutdown {#081e4bd4faec430fb05b657026d1a69c}
+
+- **Time Running**: Total runtime before shutdown, useful for understanding application lifecycle and optimizing uptime.
+
+### Version {#dc09f6aba6c64c7b8dad3d86a7cba6d6}
+
+- **Version**: The specific version of Langflow used, which helps in tracking feature adoption and compatibility.
+- **Platform**: Operating system of the host machine, which aids in focusing our support for popular platforms like Windows, macOS, and Linux.
+- **Python**: The version of Python used, assisting in maintaining compatibility and support for various Python versions.
+- **Arch**: Architecture of the system (e.g., x86, ARM), which helps optimize our software for different hardware.
+- **AutoLogin**: Indicates whether the auto-login feature is enabled, reflecting user preference settings.
+- **CacheType**: Type of caching mechanism used, which impacts performance and efficiency.
+- **BackendOnly**: Boolean indicating whether you are running Langflow in a backend-only mode, useful for understanding deployment configurations.
+
+### Playground {#ae6c3859f612441db3c15a7155e9f920}
+
+- **Seconds**: Duration in seconds for playground execution, offering insights into performance during testing or experimental stages.
+- **ComponentCount**: Number of components used in the playground, which helps understand complexity and usage patterns.
+- **Success**: Success status of the playground operation, aiding in identifying the stability of experimental features.
+
+### Component {#630728d6654c40a6b8901459a4bc3a4e}
+
+- **Name**: Identifies the component, providing data on which components are most utilized or prone to issues.
+- **Seconds**: Time taken by the component to execute, offering performance metrics.
+- **Success**: Whether the component operated successfully, which helps in quality control.
+- **ErrorMessage**: Details of any errors encountered, crucial for debugging and improvement.
+
diff --git a/docs/docs/Deployment/_category_.json b/docs/docs/Deployment/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..41f76d47b9b7fdb5772788644282ffc04931b258
--- /dev/null
+++ b/docs/docs/Deployment/_category_.json
@@ -0,0 +1 @@
+{"position":7, "label":"Deployment"}
\ No newline at end of file
diff --git a/docs/docs/Deployment/deployment-docker.md b/docs/docs/Deployment/deployment-docker.md
new file mode 100644
index 0000000000000000000000000000000000000000..2f91fe8a4ecbbb4d9df033db390f5cfef0a621dd
--- /dev/null
+++ b/docs/docs/Deployment/deployment-docker.md
@@ -0,0 +1,85 @@
+---
+title: Docker
+sidebar_position: 2
+slug: /deployment-docker
+---
+
+
+
+This guide will help you get LangFlow up and running using Docker and Docker Compose.
+
+
+## Prerequisites {#856bb2d98156402bbd1980365b98110c}
+
+
+---
+
+- Docker
+- Docker Compose
+
+## Docker {#55b5d304f2294e47b0dcd3e069cf5e67}
+
+
+---
+
+
+### Clone repo and build Docker container {#ba89773aa8b8425b985bfe7ba91c35cc}
+
+1. Clone the LangFlow repository:
+
+ `git clone https://github.com/langflow-ai/langflow.git`
+
+2. Navigate to the `docker_example` directory:
+
+ `cd langflow/docker_example`
+
+3. Run the Docker Compose file:
+
+ `docker compose up`
+
+
+LangFlow will now be accessible at [http://localhost:7860/](http://localhost:7860/).
+
+
+### Docker Compose configuration {#02226209cad24185a6ec5b69bd820d0f}
+
+
+The Docker Compose configuration spins up two services: `langflow` and `postgres`.
+
+
+### LangFlow service {#d749848451ea43bd86f6f096dc77e6e6}
+
+
+The `langflow` service uses the `langflowai/langflow:latest` Docker image and exposes port 7860. It depends on the `postgres` service.
+
+
+Environment variables:
+
+- `LANGFLOW_DATABASE_URL`: The connection string for the PostgreSQL database.
+- `LANGFLOW_CONFIG_DIR`: The directory where LangFlow stores logs, file storage, monitor data, and secret keys.
+
+Volumes:
+
+- `langflow-data`: This volume is mapped to `/var/lib/langflow` in the container.
+
+### PostgreSQL service {#121140decbfe4997b12213bdd2c4da7e}
+
+
+The `postgres` service uses the `postgres:16` Docker image and exposes port 5432.
+
+
+Environment variables:
+
+- `POSTGRES_USER`: The username for the PostgreSQL database.
+- `POSTGRES_PASSWORD`: The password for the PostgreSQL database.
+- `POSTGRES_DB`: The name of the PostgreSQL database.
+
+Volumes:
+
+- `langflow-postgres`: This volume is mapped to `/var/lib/postgresql/data` in the container.
+
+### Switch to a specific LangFlow version {#2b3e191ea48f4feab89242433cf012d5}
+
+
+If you want to use a specific version of LangFlow, you can modify the `image` field under the `langflow` service in the Docker Compose file. For example, to use version 1.0-alpha, change `langflowai/langflow:latest` to `langflowai/langflow:1.0-alpha`.
+
diff --git a/docs/docs/Deployment/deployment-gcp.md b/docs/docs/Deployment/deployment-gcp.md
new file mode 100644
index 0000000000000000000000000000000000000000..bcae2a36c79211772f9cdf996c07d39e00409691
--- /dev/null
+++ b/docs/docs/Deployment/deployment-gcp.md
@@ -0,0 +1,43 @@
+---
+title: GCP
+sidebar_position: 3
+slug: /deployment-gcp
+---
+
+# Deploy on Google Cloud Platform
+
+To deploy Langflow on Google Cloud Platform using Cloud Shell, use the below script.
+The script will guide you through setting up a Debian-based VM with the Langflow package, Nginx, and the necessary configurations to run the Langflow dev environment in GCP.
+
+## Prerequisites
+
+* A GCP account with the necessary permissions to create resources
+* A project on GCP where you want to deploy Langflow
+
+## Deploy Langflow in GCP
+
+1. Click below to launch Cloud Shell.
+
+[Deploy to Google Cloud](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/langflow-ai/langflow&working_dir=scripts/gcp&shellonly=true&tutorial=walkthroughtutorial.md)
+
+2. Click **Trust repo**. Some gcloud commands might not run in an ephemeral Cloud Shell environment.
+3. Click **Start** and follow the tutorial to deploy Langflow.
+
+## Spot/Preemptible Instance
+
+When running a [spot (preemptible) instance](https://cloud.google.com/compute/docs/instances/preemptible), the code and VM will behave the same way as in a regular instance, executing the startup script to configure the environment, install necessary dependencies, and run the Langflow application. However, **due to the nature of spot instances, the VM may be terminated at any time if Google Cloud needs to reclaim the resources**. This makes spot instances suitable for fault-tolerant, stateless, or interruptible workloads that can handle unexpected terminations and restarts.
+
+## Pricing (approximate)
+
+:::info
+
+For more information, see the [GCP Pricing Calculator](https://cloud.google.com/products/calculator?hl=en).
+
+:::
+
+
+| Component | Regular Cost (Hourly) | Regular Cost (Monthly) | Spot/Preemptible Cost (Hourly) | Spot/Preemptible Cost (Monthly) | Notes |
+| ------------------ | --------------------- | ---------------------- | ------------------------------ | ------------------------------- | -------------------------------------------------------------------------- |
+| 100 GB Disk | - | $10/month | - | $10/month | Disk cost remains the same for both regular and Spot/Preemptible VMs |
+| VM (n1-standard-4) | $0.15/hr | ~$108/month | ~$0.04/hr | ~$29/month | The VM cost can be significantly reduced using a Spot/Preemptible instance |
+| **Total** | **$0.15/hr** | **~$118/month** | **~$0.04/hr** | **~$39/month** | Total costs for running the VM and disk 24/7 for an entire month |
diff --git a/docs/docs/Deployment/deployment-hugging-face-spaces.md b/docs/docs/Deployment/deployment-hugging-face-spaces.md
new file mode 100644
index 0000000000000000000000000000000000000000..c9bb552d6d07905b2cc253b47ee13cf4fb15488a
--- /dev/null
+++ b/docs/docs/Deployment/deployment-hugging-face-spaces.md
@@ -0,0 +1,33 @@
+---
+title: HuggingFace Spaces
+sidebar_position: 0
+slug: /deployment-hugging-face-spaces
+---
+
+
+
+## HuggingFace Spaces {#00f5b3a6818d496dbb18e1a6a910e57d}
+
+
+---
+
+
+HuggingFace provides a great alternative for running Langflow in their Spaces environment. This means you can run Langflow in the cloud without any local installation required. Here's how you can get Langflow up and running on HuggingFace Spaces:
+
+
+1. **Access Langflow Space**: Open a Chromium-based browser and navigate to the [Langflow Space](https://huggingface.co/spaces/Langflow/Langflow?duplicate=true). This link directs you to a pre-configured environment for Langflow.
+
+
+2. **Duplicate the Space**: You'll encounter an option to duplicate the Langflow space. This step involves a few simple decisions:
+
+ - **Naming Your Space**: Assign a unique name to your new Space.
+ - **Visibility Settings**: Choose between Public or Private visibility for your Space.
+ - After setting these parameters, click on **Duplicate Space** to initiate the setup.
+
+
+
+3. **Complete Installation**: The duplication and setup process begins immediately after you click **Duplicate Space**. Once completed, you will be automatically redirected to the main page of your new Space.
+
+
+4. **Start Exploring Langflow**: With the setup complete, Langflow is now ready for use in your Space and you can start exploring its features and capabilities right away!
+
diff --git a/docs/docs/Deployment/deployment-kubernetes.md b/docs/docs/Deployment/deployment-kubernetes.md
new file mode 100644
index 0000000000000000000000000000000000000000..98593b4f7cbc7d8db84512f67a883811882e2f2b
--- /dev/null
+++ b/docs/docs/Deployment/deployment-kubernetes.md
@@ -0,0 +1,371 @@
+---
+title: Kubernetes
+sidebar_position: 1
+slug: /deployment-kubernetes
+---
+
+
+
+This guide will help you get LangFlow up and running in a Kubernetes cluster, including the following steps:
+
+- Install [LangFlow as IDE](/deployment-kubernetes) in a Kubernetes cluster (for development)
+- Install [LangFlow as a standalone application](/deployment-kubernetes) in a Kubernetes cluster (for production runtime workloads)
+
+## LangFlow (IDE) {#cb60b2f34e70490faf231cb0fe1a4b42}
+
+
+---
+
+
+This solution is designed to provide a complete environment for developers to create, test, and debug their flows. It includes both the API and the UI.
+
+
+### Prerequisites {#3efd3c63ff8849228c136f9252e504fd}
+
+- Kubernetes server
+- kubectl
+- Helm
+
+### Step 0. Prepare a Kubernetes cluster {#290b9624770a4c1ba2c889d384b7ef4c}
+
+
+We use [Minikube](https://minikube.sigs.k8s.io/docs/start/) for this example, but you can use any Kubernetes cluster.
+
+1. Create a Kubernetes cluster on Minikube.
+
+ ```text
+ minikube start
+ ```
+
+2. Set `kubectl` to use Minikube.
+
+ ```text
+ kubectl config use-context minikube
+ ```
+
+
+### Step 1. Install the LangFlow Helm chart {#b5c2a35144634a05a392f7e650929efe}
+
+1. Add the repository to Helm.
+
+ ```text
+ helm repo add langflow https://langflow-ai.github.io/langflow-helm-charts
+ helm repo update
+ ```
+
+2. Install LangFlow with the default options in the `langflow` namespace.
+
+ ```text
+ helm install langflow-ide langflow/langflow-ide -n langflow --create-namespace
+ ```
+
+3. Check the status of the pods
+
+ ```text
+ kubectl get pods -n langflow
+ ```
+
+
+ ```text
+ NAME READY STATUS RESTARTS AGE
+ langflow-0 1/1 Running 0 33s
+ langflow-frontend-5d9c558dbb-g7tc9 1/1 Running 0 38s
+ ```
+
+
+### Step 2. Access LangFlow {#34c71d04351949deb6c8ed7ffe30eafb}
+
+
+Enable local port forwarding to access LangFlow from your local machine.
+
+
+```text
+kubectl port-forward -n langflow svc/langflow-langflow-runtime 7860:7860
+```
+
+
+Now you can access LangFlow at [http://localhost:7860/](http://localhost:7860/).
+
+
+### LangFlow version {#645c6ef7984d4da0bcc4170bab0ff415}
+
+
+To specify a different LangFlow version, you can set the `langflow.backend.image.tag` and `langflow.frontend.image.tag` values in the `values.yaml` file.
+
+
+```yaml
+langflow:
+ backend:
+ image:
+ tag: "1.0.0a59"
+ frontend:
+ image:
+ tag: "1.0.0a59"
+
+```
+
+
+### Storage {#6772c00af79147d293c821b4c6905d3b}
+
+
+By default, the chart will use a SQLite database stored in a local persistent disk.
+If you want to use an external PostgreSQL database, you can set the `langflow.database` values in the `values.yaml` file.
+
+
+```yaml
+# Deploy postgresql. You can skip this section if you have an existing postgresql database.
+postgresql:
+ enabled: true
+ fullnameOverride: "langflow-ide-postgresql-service"
+ auth:
+ username: "langflow"
+ password: "langflow-postgres"
+ database: "langflow-db"
+
+langflow:
+ backend:
+ externalDatabase:
+ enabled: true
+ driver:
+ value: "postgresql"
+ host:
+ value: "langflow-ide-postgresql-service"
+ port:
+ value: "5432"
+ database:
+ value: "langflow-db"
+ user:
+ value: "langflow"
+ password:
+ valueFrom:
+ secretKeyRef:
+ key: "password"
+ name: "langflow-ide-postgresql-service"
+ sqlite:
+ enabled: false
+
+```
+
+
+### Scaling {#e1d95ba6551742aa86958dc03b26129e}
+
+
+You can scale the number of replicas for the LangFlow backend and frontend services by changing the `replicaCount` value in the `values.yaml` file.
+
+
+```yaml
+langflow:
+ backend:
+ replicaCount: 3
+ frontend:
+ replicaCount: 3
+
+```
+
+
+You can scale frontend and backend services independently.
+
+
+To scale vertically (increase the resources for the pods), you can set the `resources` values in the `values.yaml` file.
+
+
+```yaml
+langflow:
+ backend:
+ resources:
+ requests:
+ memory: "2Gi"
+ cpu: "1000m"
+ frontend:
+ resources:
+ requests:
+ memory: "1Gi"
+ cpu: "1000m"
+
+```
+
+
+### Deploy on AWS EKS, Google GKE, or Azure AKS and other examples {#a8c3d4dc4e4f42f49b21189df5e2b851}
+
+
+Visit the [LangFlow Helm Charts repository](https://github.com/langflow-ai/langflow-helm-charts) for more information.
+
+
+## LangFlow (Runtime) {#49f2813ad2d3460081ad26a286a65e73}
+
+
+---
+
+
+The runtime chart is tailored for deploying applications in a production environment. It is focused on stability, performance, isolation, and security to ensure that applications run reliably and efficiently.
+
+
+Using a dedicated deployment for a set of flows is fundamental in production environments to have granular resource control.
+
+
+### Prerequisites {#3ad3a9389fff483ba8bd309189426a9d}
+
+- Kubernetes server
+- kubectl
+- Helm
+
+### Step 0. Prepare a Kubernetes cluster {#aaa764703ec44bd5ba64b5ef4599630b}
+
+
+Follow the same steps as for the LangFlow IDE.
+
+
+### Step 1. Install the LangFlow runtime Helm chart {#72a18aa8349c421186ba01d73a002531}
+
+1. Add the repository to Helm.
+
+ ```shell
+ helm repo add langflow https://langflow-ai.github.io/langflow-helm-charts
+ helm repo update
+ ```
+
+2. Install the LangFlow app with the default options in the `langflow` namespace.
+If you bundled the flow in a docker image, you can specify the image name in the `values.yaml` file or with the `--set` flag:
+If you want to download the flow from a remote location, you can specify the URL in the `values.yaml` file or with the `--set` flag:
+
+ ```shell
+ helm install my-langflow-app langflow/langflow-runtime -n langflow --create-namespace --set image.repository=myuser/langflow-just-chat --set image.tag=1.0.0
+
+ ```
+
+
+ ```shell
+ helm install my-langflow-app langflow/langflow-runtime -n langflow --create-namespace --set downloadFlows.flows[0].url=https://raw.githubusercontent.com/langflow-ai/langflow/dev/src/backend/base/langflow/initial_setup/starter_projects/Basic%20Prompting%20(Hello%2C%20world!).json
+
+ ```
+
+3. Check the status of the pods.
+
+ ```text
+ kubectl get pods -n langflow
+
+ ```
+
+
+### Step 2. Access the LangFlow app API {#e13326fc07734e4aa86dfb75ccfa31f8}
+
+
+Enable local port forwarding to access LangFlow from your local machine.
+
+
+```text
+kubectl port-forward -n langflow svc/langflow-my-langflow-app 7860:7860
+```
+
+
+Now you can access the API at [http://localhost:7860/api/v1/flows](http://localhost:7860/api/v1/flows) and execute the flow:
+
+
+```shell
+id=$(curl -s "http://localhost:7860/api/v1/flows" | jq -r '.flows[0].id')
+curl -X POST \
+  "http://localhost:7860/api/v1/run/$id" \
+  -H 'Content-Type: application/json' \
+  -d '{
+  "input_value": "Hello!",
+  "output_type": "chat",
+  "input_type": "chat"
+  }'
+
+```
+
+
+### Storage {#09514d2b59064d37b685c7c0acecb861}
+
+
+In this case, storage is not needed as our deployment is stateless.
+
+
+### Log level and LangFlow configurations {#ecd97f0be96d4d1cabcc5b77a2d00980}
+
+
+You can set the log level and other LangFlow configurations in the `values.yaml` file.
+
+
+```yaml
+env:
+ - name: LANGFLOW_LOG_LEVEL
+ value: "INFO"
+
+```
+
+
+### Configure secrets and variables {#b91929e92acf47c183ea4c9ba9d19514}
+
+
+To inject secrets and LangFlow global variables, you can use the `secrets` and `env` sections in the `values.yaml` file.
+
+
+Let's say your flow uses a global variable which is a secret; when you export the flow as JSON, it's recommended to not include it.
+When importing the flow in the LangFlow runtime, you can set the global variable using the `env` section in the `values.yaml` file.
+Assuming you have a global variable called `openai_key_var`, you can read it directly from a secret:
+
+
+```yaml
+env:
+ - name: openai_key_var
+ valueFrom:
+ secretKeyRef:
+ name: openai-key
+ key: openai-key
+
+```
+
+
+or directly from the values file (not recommended for secret values!):
+
+
+```yaml
+env:
+ - name: openai_key_var
+ value: "sk-...."
+
+```
+
+
+### Scaling {#359b9ea5302147ebbed3ab8aa49dae8d}
+
+
+You can scale the number of replicas for the LangFlow app by changing the `replicaCount` value in the `values.yaml` file.
+
+
+```yaml
+replicaCount: 3
+
+```
+
+
+To scale vertically (increase the resources for the pods), you can set the `resources` values in the `values.yaml` file.
+
+
+```yaml
+resources:
+ requests:
+ memory: "2Gi"
+ cpu: "1000m"
+
+```
+
+
+## Other Examples {#8522b4276b51448e9f8f0c6efc731a7c}
+
+
+---
+
+
+Visit the LangFlow Helm Charts repository for more examples and configurations. Use the default values file as reference for all the options available.
+
+
+:::note
+
+Visit the examples directory to learn more about different deployment options.
+
+:::
+
+
+
diff --git a/docs/docs/Deployment/deployment-railway.md b/docs/docs/Deployment/deployment-railway.md
new file mode 100644
index 0000000000000000000000000000000000000000..69cf2b30d5a18c08f4ccd4b577e95bff1131f94f
--- /dev/null
+++ b/docs/docs/Deployment/deployment-railway.md
@@ -0,0 +1,21 @@
+---
+title: Railway
+sidebar_position: 5
+slug: /deployment-railway
+---
+
+## Deploy on Railway {#a9a1ce4d39e74cc29aef4d30c6172d10}
+
+---
+
+Railway is a cloud infrastructure platform that enables developers to deploy and manage applications effortlessly. It provides an intuitive interface, seamless integrations, and powerful features like auto-deploy from GitHub, managed databases, and automatic scaling.
+
+Deploying Langflow to Railway involves a few simple steps:
+
+1. **Click the Button Below**: Start by clicking the deployment button provided below. This will redirect you to the Railway platform.
+
+ [Deploy on Railway](https://railway.app/template/JMXEWp?referralCode=MnPSdg)
+
+2. **Deploy**: Proceed to deploy your Langflow instance. Click Deploy Now to deploy the instance. Railway will handle the rest, including setting up the infrastructure, deploying the Langflow instance, and starting the application.
+
+By following these steps, your Langflow instance will be successfully deployed on Railway.
diff --git a/docs/docs/Deployment/deployment-render.md b/docs/docs/Deployment/deployment-render.md
new file mode 100644
index 0000000000000000000000000000000000000000..0e057efe55a2a2916d1f03ac0f42ec71d8bf25e9
--- /dev/null
+++ b/docs/docs/Deployment/deployment-render.md
@@ -0,0 +1,24 @@
+---
+title: Render
+sidebar_position: 4
+slug: /deployment-render
+---
+
+## Deploy on Render {#20a959b7047e44e490cc129fd21895c0}
+
+---
+
+[Render.com](http://render.com/) is a unified cloud platform designed to make deploying web applications, APIs, and static sites easy. It provides a streamlined experience with powerful features like automatic SSL, managed databases, and auto-deploy from Git, making it a popular choice for developers looking to simplify their deployment workflows.
+
+Deploying Langflow to Render is a straightforward process that can be completed in just a few steps:
+
+1. **Click the Button Below**: Start by clicking the deployment button provided below. This will redirect you to the Render platform.
+
+ [Deploy to Render](https://render.com/deploy?repo=https%3A%2F%2Fgithub.com%2Flangflow-ai%2Flangflow%2Ftree%2Fdev)
+
+2. **Select the Blueprint Configuration**: Once on the Render platform, you will be prompted to provide a blueprint name and to select the branch for your `render.yaml` file in Langflow. This configuration file includes all the necessary settings and resources to deploy Langflow in Render. The default is `main`.
+3. The `render.yaml` file specifies a `standard` Render instance, because Langflow requires at least 2 GB of RAM to run. This may require a credit card to sign up. Review the pricing details on the Render platform to understand any costs involved before proceeding. If you need to change your plan later, from the Render dashboard, go to **Settings** > **Instance Type**.
+
+4. Click **Deploy Blueprint** to deploy Langflow. Render will handle the rest, including setting up the database, deploying the Langflow instance, and starting the application.
+
+By following these steps, your Langflow instance will be successfully deployed on Render.
diff --git a/docs/docs/Get-Started/_category_.json b/docs/docs/Get-Started/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..dfddedbe3b70633b4f32374f5bfdc1ab6fd9c934
--- /dev/null
+++ b/docs/docs/Get-Started/_category_.json
@@ -0,0 +1 @@
+{"position":1, "label":"Get Started"}
\ No newline at end of file
diff --git a/docs/docs/Get-Started/get-started-installation.md b/docs/docs/Get-Started/get-started-installation.md
new file mode 100644
index 0000000000000000000000000000000000000000..35f5bc9c19aa66723dc106fafe9cfb1ca3482dbd
--- /dev/null
+++ b/docs/docs/Get-Started/get-started-installation.md
@@ -0,0 +1,133 @@
+---
+title: Install Langflow
+sidebar_position: 1
+slug: /get-started-installation
+---
+
+You can deploy Langflow either locally or as a hosted service with [**Datastax Langflow**](#datastax-langflow).
+
+## Install Langflow locally
+
+Install Langflow locally with [pip](https://pypi.org/project/pip/), [uv](https://docs.astral.sh/uv/getting-started/installation/), or [pipx](https://pipx.pypa.io/stable/installation/).
+
+### Prerequisites
+
+* [Python 3.10 to 3.12](https://www.python.org/downloads/release/python-3100/) installed
+* [pip](https://pypi.org/project/pip/), [uv](https://docs.astral.sh/uv/getting-started/installation/), or [pipx](https://pipx.pypa.io/stable/installation/) installed
+* Before installing Langflow, we recommend creating a virtual environment to isolate your Python dependencies with [venv](https://docs.python.org/3/library/venv.html), [uv](https://docs.astral.sh/uv/pip/environments), or [conda](https://anaconda.org/anaconda/conda)
+
+### Install Langflow with pip or pipx
+
+Install Langflow with pip:
+
+```bash
+python -m pip install langflow
+```
+
+Install Langflow with uv:
+
+```bash
+uv pip install langflow
+```
+
+Install Langflow with pipx using the Python 3.10 executable:
+
+```bash
+pipx install langflow --python python3.10
+```
+
+## Run Langflow
+
+1. To run Langflow, enter the following command.
+
+```bash
+python -m langflow run
+```
+
+To run Langflow with uv, enter the following command.
+
+```bash
+uv run langflow run
+```
+
+2. Confirm that a local Langflow instance starts by visiting `http://127.0.0.1:7860` in a Chromium-based browser.
+
+Now that Langflow is running, follow the [Quickstart](/get-started-quickstart) to create your first flow.
+
+## Manage Langflow versions
+
+To upgrade Langflow to the latest version, use the pip upgrade command.
+
+```bash
+python -m pip install langflow -U
+```
+
+To upgrade Langflow to the latest version with uv, use the uv pip upgrade command.
+
+```bash
+uv pip install langflow -U
+```
+
+To install a specific version of the Langflow package, add the required version to the command.
+
+```bash
+python -m pip install langflow==1.1
+```
+
+To reinstall Langflow and all of its dependencies, add the `--force-reinstall` flag to the command.
+
+```bash
+python -m pip install langflow --force-reinstall
+```
+
+## DataStax Langflow {#datastax-langflow}
+
+**DataStax Langflow** is a hosted version of Langflow integrated with [Astra DB](https://www.datastax.com/products/datastax-astra). Be up and running in minutes with no installation or setup required. [Sign up for free](https://astra.datastax.com/signup?type=langflow).
+
+## Common installation issues
+
+This is a list of possible issues that you may encounter when installing and running Langflow.
+
+### No `langflow.__main__` module
+
+When you try to run Langflow with the command `langflow run`, you encounter the following error:
+```bash
+> No module named 'langflow.__main__'
+```
+1. Run `python -m langflow run` instead of `langflow run`.
+2. If that doesn't work, reinstall the latest Langflow version with `python -m pip install langflow -U`.
+3. If that doesn't work, reinstall Langflow and its dependencies with `python -m pip install langflow --pre -U --force-reinstall`.
+
+### Langflow runTraceback
+
+When you try to run Langflow using the command `langflow run`, you encounter the following error:
+
+```bash
+> langflow runTraceback (most recent call last): File ".../langflow", line 5, in from langflow.__main__ import mainModuleNotFoundError: No module named 'langflow.__main__'
+```
+
+There are two possible reasons for this error:
+
+1. You've installed Langflow using `pip install langflow` but you already had a previous version of Langflow installed in your system. In this case, you might be running the wrong executable. To solve this issue, run the correct executable by running `python -m langflow run` instead of `langflow run`. If that doesn't work, try uninstalling and reinstalling Langflow with `python -m pip install langflow --pre -U`.
+2. Some version conflicts might have occurred during the installation process. Run `python -m pip install langflow --pre -U --force-reinstall` to reinstall Langflow and its dependencies.
+
+### Something went wrong running migrations
+
+```bash
+> Something went wrong running migrations. Please, run 'langflow migration --fix'
+```
+
+Clear the cache by deleting the contents of the cache folder.
+
+This folder can be found at:
+
+- **Linux or WSL2 on Windows**: `/home/<username>/.cache/langflow/`
+- **MacOS**: `/Users/<username>/Library/Caches/langflow/`
+
+This error can occur during Langflow upgrades when the new version can't override `langflow-pre.db` in `.cache/langflow/`. Clearing the cache removes this file but also erases your settings.
+
+If you wish to retain your files, back them up before clearing the folder.
+
+
+
+
diff --git a/docs/docs/Get-Started/get-started-quickstart.md b/docs/docs/Get-Started/get-started-quickstart.md
new file mode 100644
index 0000000000000000000000000000000000000000..08945bb372bd0d5a66786b340aac4c6963931995
--- /dev/null
+++ b/docs/docs/Get-Started/get-started-quickstart.md
@@ -0,0 +1,77 @@
+---
+title: Quickstart
+sidebar_position: 2
+slug: /get-started-quickstart
+---
+
+
+
+## Prerequisites {#b5f154a3a1d242c7bdf57acf0a552732}
+
+
+* [Python 3.10 to 3.12](https://www.python.org/downloads/release/python-3100/) installed
+* [pip](https://pypi.org/project/pip/), [uv](https://docs.astral.sh/uv/getting-started/installation/), or [pipx](https://pipx.pypa.io/stable/installation/) installed
+* Before installing Langflow, we recommend creating a virtual environment to isolate your Python dependencies with [venv](https://docs.python.org/3/library/venv.html), [uv](https://docs.astral.sh/uv/pip/environments), or [conda](https://anaconda.org/anaconda/conda)
+
+## Create the basic prompting flow
+
+
+1. From the Langflow dashboard, click **New Flow**.
+
+
+2. Select **Basic Prompting**.
+
+
+3. The **Basic Prompting** flow is created.
+
+
+
+
+
+This flow allows you to chat with the **OpenAI** component through the **Prompt** component.
+
+4. To examine the flow's **Prompt** component, click on the **Template** field of the **Prompt** component.
+
+```plain
+Answer the user as if you were a pirate.
+
+User: {user_input}
+
+Answer:
+```
+
+The **Template** instructs the LLM to accept `{user_input}` and `Answer the user as if you were a pirate.`.
+
+5. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**.
+
+ 1. In the **Variable Name** field, enter `openai_api_key`.
+ 2. In the **Value** field, paste your OpenAI API Key (`sk-...`).
+ 3. Click **Save Variable**.
+
+
+## Run the Basic Prompting flow {#ef0e8283bfb646f99bbb825462d8cbab}
+
+1. To open the **Playground** pane, click **Playground**.
+This is where you can interact with your AI.
+2. Type a message and press Enter. The bot should respond in a markedly piratical manner!
+
+## Modify the prompt for a different result {#dcea9df0cd51434db76717c78b1e9a94}
+
+1. To modify your prompt results, in the **Prompt** template, click the **Template** field. The **Edit Prompt** window opens.
+2. Change `Answer the user as if you were a pirate` to a different character, perhaps `Answer the user as if you were Hermione Granger.`
+3. Run the workflow again. The response will be very different.
+
+## Next steps {#63b6db6cb571489c86b3ae89051f1a4f}
+
+
+---
+
+
+Well done! You've built your first prompt in Langflow. 🎉
+
+
+By dragging Langflow components to your workspace, you can create all sorts of interesting behaviors. Here are a couple of examples:
+
+- [Memory Chatbot](/starter-projects-memory-chatbot)
+- [Blog Writer](/starter-projects-blog-writer)
+- [Document QA](/starter-projects-document-qa)
diff --git a/docs/docs/Get-Started/welcome-to-langflow.md b/docs/docs/Get-Started/welcome-to-langflow.md
new file mode 100644
index 0000000000000000000000000000000000000000..2a8a658dd63b0bad060f9dd16ebd50ea2edf9028
--- /dev/null
+++ b/docs/docs/Get-Started/welcome-to-langflow.md
@@ -0,0 +1,40 @@
+---
+title: Welcome to Langflow
+sidebar_position: 0
+slug: /
+---
+
+Langflow is a new, visual framework for building multi-agent and RAG applications. It is open-source, Python-powered, fully customizable, and LLM and vector store agnostic.
+
+Its intuitive interface allows for easy manipulation of AI building blocks, enabling developers to quickly prototype and turn their ideas into powerful, real-world solutions.
+
+Langflow empowers developers to rapidly prototype and build AI applications with its user-friendly interface and powerful features. Whether you're a seasoned AI developer or just starting out, Langflow provides the tools you need to bring your AI ideas to life.
+
+## Visual flow builder
+
+Langflow is an intuitive visual flow builder. This drag-and-drop interface allows developers to create complex AI workflows without writing extensive code. You can easily connect different components, such as prompts, language models, and data sources, to build sophisticated AI applications.
+
+
+
+## Use cases
+
+Langflow can be used for a wide range of AI applications, including:
+
+* [Craft intelligent chatbots](/starter-projects-memory-chatbot)
+* [Build document analysis systems](/starter-projects-document-qa)
+* [Generate compelling content](/starter-projects-blog-writer)
+* [Orchestrate multi-agent applications](/starter-projects-simple-agent)
+
+## Community and support
+
+Join Langflow's vibrant community of developers and AI enthusiasts. See the following resources to join discussions, share your projects, and get support:
+
+* [Contribute to Langflow](contributing-how-to-contribute)
+* [Langflow Discord Server](https://discord.gg/EqksyE2EX9)
+* [@langflow_ai](https://twitter.com/langflow_ai)
+
+## Get started with Langflow
+
+- [Install Langflow](/get-started-installation)
+- [Quickstart](/get-started-quickstart)
+
diff --git a/docs/docs/Guides/_category_.json b/docs/docs/Guides/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..4b98c2824e3f1e3d5dbeb37692dfd02bbf056f12
--- /dev/null
+++ b/docs/docs/Guides/_category_.json
@@ -0,0 +1 @@
+{"position":3, "label":"Guides"}
\ No newline at end of file
diff --git a/docs/docs/Guides/guides-chat-memory.md b/docs/docs/Guides/guides-chat-memory.md
new file mode 100644
index 0000000000000000000000000000000000000000..a9a63b43e00fd4537c752c5bcdb3da9df94e4e3f
--- /dev/null
+++ b/docs/docs/Guides/guides-chat-memory.md
@@ -0,0 +1,93 @@
+---
+title: Chat Memory
+sidebar_position: 1
+slug: /guides-chat-memory
+---
+
+
+
+Langflow allows every chat message to be stored, and a single flow can have multiple memory sessions. This enables you to create separate _memories_ for agents to store and recall information as needed.
+
+
+In any project, as long as there are [**Chat**](/components-io) components being used, memories are always being stored by default. These are messages from a user to the AI or vice-versa.
+
+
+To see and access this history of messages, Langflow features a component called **Chat Memory**. It retrieves previous messages and outputs them in a structured or parsed format.
+
+
+To learn the basics about memory in Langflow, check out the [Memory Chatbot](/starter-projects-memory-chatbot) starter example.
+
+
+Memories can be visualized and managed directly from the **Playground**. You can edit and remove previous messages to inspect and validate the AI's response behavior. You can remove or edit previous messages to get your models acting just right.
+
+
+
+
+
+Modifying these memories will influence the behavior of the chatbot responses, as long as an agent uses them. Here you have the ability to remove or edit previous messages, allowing you to manipulate and explore how these changes affect model responses.
+
+To modify chat memories, from the playground, click the **Options** menu of any session, and then select **Message Logs**.
+
+
+
+
+
+## Session ID {#4ee86e27d1004e8288a72c633c323703}
+
+
+---
+
+
+Chat conversations store messages categorized by a `Session ID`. A single flow can host multiple session IDs, and different flows can also share the same one.
+
+
+The **Chat Memory** component also retrieves message histories by `Session ID`, which users can change in the component's **Controls** pane.
+
+
+
+By default, if the `Session ID` value is empty, it is set to the same value as `Flow ID`.
+
+You can also display all messages stored across every flow and session by going to **Settings** > **Messages**.
+
+
+
+
+
+## Store chat memory in an external database
+
+Chat memory is retrieved from an external database or vector store using the [**Chat Memory**](/components-helpers#chat-memory) component.
+
+Chat memory is stored to an external database or vector store using the [Store Message](/components-helpers#store-message) component.
+
+The [**Chat Memories**](/Components/components-memories) components provide access to their respective external databases **as memory**. This allows AIs to access external memory for persistence and context retention. For example, connect the **Chat Memory** component to an **AstraDBChatMemory** component to store the message history in an external Astra DB database.
+
+This example stores and retrieves chat history from an [AstraDBChatMemory](/Components/components-memories#astradbchatmemory-component) component with **Store Message** and **Chat Memory** components.
+
+### Prerequisites
+
+* [An OpenAI API key](https://platform.openai.com/)
+* [An Astra DB vector database](https://docs.datastax.com/en/astra-db-serverless/get-started/quickstart.html) with:
+ * Application Token
+ * API Endpoint
+
+### Connect the chat memory component to an external database
+
+1. Load the [Memory Chatbot](/starter-projects-memory-chatbot) starter project.
+This starter project extends the basic prompting flow to include a chat memory component.
+2. Add the [Store Message](/components-helpers#store-message) component to the flow.
+The **Store Message** component stores messages in the external database.
+3. Add the [AstraDBChatMemory Component](/Components/components-memories#astradbchatmemory-component) to the flow.
+The **Astra DB Chat Memory** component stores and retrieves messages from **Astra DB**.
+4. Configure the **AstraDBChatMemory** component with your AstraDB instance details.
+ 1. In the **Astra DB Application Token** field, add your Astra token. (`AstraCS:...`)
+ 2. In the **API Endpoint** field, add your Astra database's endpoint. (for example, `https://12adb-bc-5378c845f05a6-e0a12-bd889b4-us-east-2.apps.astra.datastax.com`)
+5. Connect the **AstraDBChatMemory** component output to the external memory inputs of the [Chat Memory](/components-helpers#chat-memory) and [Store Message](/components-helpers#store-message) components.
+6. Link the [Chat Output](/components-io#chat-output) component to the input of the [Store Message](/components-helpers#store-message) component.
+
+Your completed flow should look like this:
+
+
+
+7. In Langflow, create message traffic by running a flow.
+8. Inspect your Astra database's tables and activity.
+You will see new tables and traffic created.
diff --git a/docs/docs/Guides/guides-data-message.md b/docs/docs/Guides/guides-data-message.md
new file mode 100644
index 0000000000000000000000000000000000000000..a8e48ec11c4b8c64662982ab387943ea8fdde95c
--- /dev/null
+++ b/docs/docs/Guides/guides-data-message.md
@@ -0,0 +1,105 @@
+---
+title: Data & Message
+sidebar_position: 2
+slug: /guides-data-message
+---
+
+
+
+In Langflow, the `Data` and `Message` objects serve as structured, functional representations of data that enhance the capabilities and reliability of the platform.
+
+
+## The Data Object {#e0d56e463d2f483bb1b5df09d88bf309}
+
+
+---
+
+
+The `Data` object is a Pydantic model that serves as a container for storing and manipulating data. It carries `data`âa dictionary that can be accessed as attributesâand uses `text_key` to specify which key in the dictionary should be considered the primary text content.
+
+
+- **Main Attributes:**
+ - `text_key`: Specifies the key to retrieve the primary text data.
+ - `data`: A dictionary to store additional data.
+ - `default_value`: default value when the `text_key` is not present in the `data` dictionary.
+
+### Creating a Data Object {#3540b7e651f74b558febebbe43380660}
+
+
+You can create a `Data` object by directly assigning key-value pairs to it. For example:
+
+
+```python
+from langflow.schema import Data
+
+# Creating a Data object with specified key-value pairs
+data = Data(text="my_string", bar=3, foo="another_string")
+
+# Outputs:
+print(data.text) # Outputs: "my_string"
+print(data.bar) # Outputs: 3
+print(data.foo) # Outputs: "another_string"
+```
+
+
+The `text_key` specifies which key in the `data` dictionary should be considered the primary text content. The `default_value` provides a fallback if the `text_key` is not present.
+
+
+```python
+# Creating a Data object with a specific text_key and default_value
+data = Data(data={"title": "Hello, World!"}, text_key="content", default_value="No content available")
+
+# Accessing the primary text using text_key and default_value
+print(data.get_text()) # Outputs: "No content available" because "content" key is not in the data dictionary
+
+# Accessing data keys by calling the attribute directly
+print(data.title) # Outputs: "Hello, World!" because "title" key is in the data dictionary
+```
+
+
+The `Data` object is also convenient for visualization of outputs, since the output preview has visual elements to inspect data as a table and its cells as pop ups for basic types. The idea is to create a unified way to work and visualize complex information in Langflow.
+
+
+To receive `Data` objects in a component input, you can use the `DataInput` input type.
+
+
+## Message object attributes {#f4f17cad02a545068f407d515cbc2902}
+
+
+---
+
+
+The `Message` object extends the functionality of `Data` and includes additional attributes and methods for chat interactions.
+
+
+- **Core message data:**
+ - `text`: The main text content of the message
+ - `sender`: Identifier for the sender (e.g., "User" or "AI")
+ - `sender_name`: Name of the sender
+ - `session_id`: Identifier for the chat session
+ - `timestamp`: Timestamp when the message was created (UTC)
+ - `flow_id`: Identifier for the flow
+ - `id`: Unique identifier for the message
+
+- **Content and files:**
+ - `files`: List of files or images associated with the message
+ - `content_blocks`: List of structured content blocks
+ - `properties`: Additional properties including visual styling and source information
+
+- **Message state:**
+ - `error`: Boolean indicating if there was an error
+ - `edit`: Boolean indicating if the message was edited
+ - `category`: Message category ("message", "error", "warning", "info")
+
+The `Message` object can be used to send, store, and manipulate chat messages within Langflow. You can create a `Message` object by directly assigning key-value pairs to it. For example:
+
+
+```python
+from langflow.schema.message import Message
+
+message = Message(text="Hello, AI!", sender="User", sender_name="John Doe")
+```
+
+
+To receive `Message` objects in a component input, you can use the `MessageInput` input type or `MessageTextInput` when the goal is to extract just the `text` field of the `Message` object.
+
diff --git a/docs/docs/Guides/guides-new-to-llms.md b/docs/docs/Guides/guides-new-to-llms.md
new file mode 100644
index 0000000000000000000000000000000000000000..b7117b7072f133945c7cdcf18430eb139e252b2c
--- /dev/null
+++ b/docs/docs/Guides/guides-new-to-llms.md
@@ -0,0 +1,19 @@
+---
+title: đ New to LLMs?
+sidebar_position: 0
+slug: /guides-new-to-llms
+---
+
+
+
+Large Language Models, or LLMs, are part of an exciting new world in computing.
+
+
+We made Langflow for anyone to create with LLMs, and hope you'll feel comfortable installing Langflow and [getting started](/get-started-quickstart).
+
+
+If you want to learn the basics of LLMs, prompt engineering, and AI models, Langflow recommends [promptingguide.ai](https://promptingguide.ai/), an open-source repository of prompt engineering content maintained by AI experts. PromptingGuide offers content for [beginners](https://www.promptingguide.ai/introduction/basics) and [experts](https://www.promptingguide.ai/techniques/cot), as well as the latest [research papers](https://www.promptingguide.ai/papers) and [test results](https://www.promptingguide.ai/research) fueling AI's progress.
+
+
+For in-depth reading, we recommend [Awesome LLM Books](https://github.com/Hannibal046/Awesome-LLM?tab=readme-ov-file#llm-books), a curated list of resources for learning about LLMs and their applications.
+
diff --git a/docs/docs/Integrations/505849097.png b/docs/docs/Integrations/505849097.png
new file mode 100644
index 0000000000000000000000000000000000000000..12975bbedf779016dddf32b49a8fbfec95bf53be
--- /dev/null
+++ b/docs/docs/Integrations/505849097.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf441779237dbbdf694db43679950daee76c469be80b0c93166de52fad685af4
+size 159322
diff --git a/docs/docs/Integrations/965098683.png b/docs/docs/Integrations/965098683.png
new file mode 100644
index 0000000000000000000000000000000000000000..b431315390adc092f3864abcc926c7914901cc30
--- /dev/null
+++ b/docs/docs/Integrations/965098683.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8efedfa18d06d69f1fee4637529f565f5aa303e2eba9e2e7d2ff17bebb425c88
+size 305642
diff --git a/docs/docs/Integrations/AssemblyAI_Flow.json b/docs/docs/Integrations/AssemblyAI_Flow.json
new file mode 100644
index 0000000000000000000000000000000000000000..195bb1906abf83b3c671eb1a2f28a4e123885142
--- /dev/null
+++ b/docs/docs/Integrations/AssemblyAI_Flow.json
@@ -0,0 +1,1431 @@
+{
+ "name": "AssemblyAI Transcription and Speech AI Flow",
+ "icon": null,
+ "is_component": false,
+ "endpoint_name": null,
+ "data": {
+ "nodes": [
+ {
+ "id": "Prompt-IO8Cq",
+ "type": "genericNode",
+ "position": {
+ "x": -1376.3296370680628,
+ "y": 928.8860970980681
+ },
+ "data": {
+ "type": "Prompt",
+ "node": {
+ "template": {
+ "_type": "Component",
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(\n self,\n ) -> Message:\n prompt = Message.from_template_and_variables(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"\n This function is called after the code validation is done.\n \"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node 
with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "template": {
+ "trace_as_input": true,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "template",
+ "value": "Provide a brief summary of the transcript.",
+ "display_name": "Template",
+ "advanced": false,
+ "dynamic": false,
+ "info": "",
+ "title_case": false,
+ "type": "prompt",
+ "_input_type": "PromptInput"
+ }
+ },
+ "description": "Create a prompt template with dynamic variables.",
+ "icon": "prompts",
+ "is_input": null,
+ "is_output": null,
+ "is_composition": null,
+ "base_classes": [
+ "Message"
+ ],
+ "name": "",
+ "display_name": "Prompt",
+ "documentation": "",
+ "custom_fields": {
+ "template": []
+ },
+ "output_types": [],
+ "full_path": null,
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": false,
+ "outputs": [
+ {
+ "types": [
+ "Message"
+ ],
+ "selected": "Message",
+ "name": "prompt",
+ "hidden": null,
+ "display_name": "Prompt Message",
+ "method": "build_prompt",
+ "value": "__UNDEFINED__",
+ "cache": true
+ }
+ ],
+ "field_order": [
+ "template"
+ ],
+ "beta": false,
+ "error": null,
+ "edited": false,
+ "lf_version": "1.0.18"
+ },
+ "id": "Prompt-IO8Cq"
+ },
+ "selected": false,
+ "width": 384,
+ "height": 324,
+ "positionAbsolute": {
+ "x": -1376.3296370680628,
+ "y": 928.8860970980681
+ },
+ "dragging": false
+ },
+ {
+ "id": "AssemblyAITranscriptionJobCreator-Idt7P",
+ "type": "genericNode",
+ "position": {
+ "x": -1957.7132501771657,
+ "y": 470.79685053457587
+ },
+ "data": {
+ "type": "AssemblyAITranscriptionJobCreator",
+ "node": {
+ "template": {
+ "_type": "Component",
+ "audio_file": {
+ "trace_as_metadata": true,
+ "file_path": "fa69381c-d1c4-4535-bc23-bc2fb4956e1e/2024-09-26_16-47-01_sports_injuries.mp3",
+ "fileTypes": [
+ "3ga",
+ "8svx",
+ "aac",
+ "ac3",
+ "aif",
+ "aiff",
+ "alac",
+ "amr",
+ "ape",
+ "au",
+ "dss",
+ "flac",
+ "flv",
+ "m4a",
+ "m4b",
+ "m4p",
+ "m4r",
+ "mp3",
+ "mpga",
+ "ogg",
+ "oga",
+ "mogg",
+ "opus",
+ "qcp",
+ "tta",
+ "voc",
+ "wav",
+ "wma",
+ "wv",
+ "webm",
+ "mts",
+ "m2ts",
+ "ts",
+ "mov",
+ "mp2",
+ "mp4",
+ "m4p",
+ "m4v",
+ "mxf"
+ ],
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "audio_file",
+ "value": "sports_injuries.mp3",
+ "display_name": "Audio File",
+ "advanced": false,
+ "dynamic": false,
+ "info": "The audio file to transcribe",
+ "title_case": false,
+ "type": "file",
+ "_input_type": "FileInput",
+ "load_from_db": false
+ },
+ "api_key": {
+ "load_from_db": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "api_key",
+ "value": null,
+ "display_name": "Assembly API Key",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Your AssemblyAI API key. You can get one from https://www.assemblyai.com/",
+ "title_case": false,
+ "password": true,
+ "type": "str",
+ "_input_type": "SecretStrInput"
+ },
+ "audio_file_url": {
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "audio_file_url",
+ "value": "",
+ "display_name": "Audio File URL",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "The URL of the audio file to transcribe (Can be used instead of a File)",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "import os\n\nimport assemblyai as aai\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, FileInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAITranscriptionJobCreator(Component):\n display_name = \"AssemblyAI Start Transcript\"\n description = \"Create a transcription job for an audio file using AssemblyAI with advanced options\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. You can get one from https://www.assemblyai.com/\",\n ),\n FileInput(\n name=\"audio_file\",\n display_name=\"Audio File\",\n file_types=[\n \"3ga\",\n \"8svx\",\n \"aac\",\n \"ac3\",\n \"aif\",\n \"aiff\",\n \"alac\",\n \"amr\",\n \"ape\",\n \"au\",\n \"dss\",\n \"flac\",\n \"flv\",\n \"m4a\",\n \"m4b\",\n \"m4p\",\n \"m4r\",\n \"mp3\",\n \"mpga\",\n \"ogg\",\n \"oga\",\n \"mogg\",\n \"opus\",\n \"qcp\",\n \"tta\",\n \"voc\",\n \"wav\",\n \"wma\",\n \"wv\",\n \"webm\",\n \"mts\",\n \"m2ts\",\n \"ts\",\n \"mov\",\n \"mp2\",\n \"mp4\",\n \"m4p\",\n \"m4v\",\n \"mxf\",\n ],\n info=\"The audio file to transcribe\",\n ),\n MessageTextInput(\n name=\"audio_file_url\",\n display_name=\"Audio File URL\",\n info=\"The URL of the audio file to transcribe (Can be used instead of a File)\",\n advanced=True,\n ),\n DropdownInput(\n name=\"speech_model\",\n display_name=\"Speech Model\",\n options=[\n \"best\",\n \"nano\",\n ],\n value=\"best\",\n info=\"The speech model to use for the transcription\",\n advanced=True,\n ),\n BoolInput(\n name=\"language_detection\",\n display_name=\"Automatic Language Detection\",\n info=\"Enable automatic language detection\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"language_code\",\n display_name=\"Language\",\n info=\"\"\"\n The language of the audio file. 
Can be set manually if automatic language detection is disabled. \n See https://www.assemblyai.com/docs/getting-started/supported-languages for a list of supported language codes.\n \"\"\",\n advanced=True,\n ),\n BoolInput(\n name=\"speaker_labels\",\n display_name=\"Enable Speaker Labels\",\n info=\"Enable speaker diarization\",\n ),\n MessageTextInput(\n name=\"speakers_expected\",\n display_name=\"Expected Number of Speakers\",\n info=\"Set the expected number of speakers (optional, enter a number)\",\n advanced=True,\n ),\n BoolInput(\n name=\"punctuate\",\n display_name=\"Punctuate\",\n info=\"Enable automatic punctuation\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"format_text\",\n display_name=\"Format Text\",\n info=\"Enable text formatting\",\n advanced=True,\n value=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Transcript ID\", name=\"transcript_id\", method=\"create_transcription_job\"),\n ]\n\n def create_transcription_job(self) -> Data:\n aai.settings.api_key = self.api_key\n\n # Convert speakers_expected to int if it's not empty\n speakers_expected = None\n if self.speakers_expected and self.speakers_expected.strip():\n try:\n speakers_expected = int(self.speakers_expected)\n except ValueError:\n self.status = \"Error: Expected Number of Speakers must be a valid integer\"\n return Data(data={\"error\": \"Error: Expected Number of Speakers must be a valid integer\"})\n\n language_code = self.language_code if self.language_code else None\n\n config = aai.TranscriptionConfig(\n speech_model=self.speech_model,\n language_detection=self.language_detection,\n language_code=language_code,\n speaker_labels=self.speaker_labels,\n speakers_expected=speakers_expected,\n punctuate=self.punctuate,\n format_text=self.format_text,\n )\n\n audio = None\n if self.audio_file:\n if self.audio_file_url:\n logger.warning(\"Both an audio file an audio URL were specified. 
The audio URL was ignored.\")\n\n # Check if the file exists\n if not os.path.exists(self.audio_file):\n self.status = \"Error: Audio file not found\"\n return Data(data={\"error\": \"Error: Audio file not found\"})\n audio = self.audio_file\n elif self.audio_file_url:\n audio = self.audio_file_url\n else:\n self.status = \"Error: Either an audio file or an audio URL must be specified\"\n return Data(data={\"error\": \"Error: Either an audio file or an audio URL must be specified\"})\n\n try:\n transcript = aai.Transcriber().submit(audio, config=config)\n\n if transcript.error:\n self.status = transcript.error\n return Data(data={\"error\": transcript.error})\n else:\n result = Data(data={\"transcript_id\": transcript.id})\n self.status = result\n return result\n except Exception as e:\n self.status = f\"An error occurred: {str(e)}\"\n return Data(data={\"error\": f\"An error occurred: {str(e)}\"})\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "format_text": {
+ "trace_as_metadata": true,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "format_text",
+ "value": true,
+ "display_name": "Format Text",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Enable text formatting",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ },
+ "language_code": {
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "language_code",
+ "value": "",
+ "display_name": "Language",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "\n The language of the audio file. Can be set manually if automatic language detection is disabled. \n See https://www.assemblyai.com/docs/getting-started/supported-languages for a list of supported language codes.\n ",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "language_detection": {
+ "trace_as_metadata": true,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "language_detection",
+ "value": false,
+ "display_name": "Automatic Language Detection",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Enable automatic language detection",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ },
+ "punctuate": {
+ "trace_as_metadata": true,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "punctuate",
+ "value": true,
+ "display_name": "Punctuate",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Enable automatic punctuation",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ },
+ "speaker_labels": {
+ "trace_as_metadata": true,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "speaker_labels",
+ "value": true,
+ "display_name": "Enable Speaker Labels",
+ "advanced": false,
+ "dynamic": false,
+ "info": "Enable speaker diarization",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput",
+ "load_from_db": false
+ },
+ "speakers_expected": {
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "speakers_expected",
+ "value": "",
+ "display_name": "Expected Number of Speakers",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Set the expected number of speakers (optional, enter a number)",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "speech_model": {
+ "trace_as_metadata": true,
+ "options": [
+ "best",
+ "nano"
+ ],
+ "combobox": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "speech_model",
+ "value": "best",
+ "display_name": "Speech Model",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The speech model to use for the transcription",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "DropdownInput"
+ }
+ },
+ "description": "Create a transcription job for an audio file using AssemblyAI with advanced options",
+ "icon": "AssemblyAI",
+ "base_classes": [
+ "Data"
+ ],
+ "display_name": "AssemblyAI Start Transcript",
+ "documentation": "https://www.assemblyai.com/docs",
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": true,
+ "outputs": [
+ {
+ "types": [
+ "Data"
+ ],
+ "selected": "Data",
+ "name": "transcript_id",
+ "display_name": "Transcript ID",
+ "method": "create_transcription_job",
+ "value": "__UNDEFINED__",
+ "cache": true
+ }
+ ],
+ "field_order": [
+ "api_key",
+ "audio_file",
+ "audio_file_url",
+ "speech_model",
+ "language_detection",
+ "language_code",
+ "speaker_labels",
+ "speakers_expected",
+ "punctuate",
+ "format_text"
+ ],
+ "beta": false,
+ "edited": false,
+ "lf_version": "1.0.18"
+ },
+ "id": "AssemblyAITranscriptionJobCreator-Idt7P",
+ "description": "Create a transcription job for an audio file using AssemblyAI with advanced options",
+ "display_name": "AssemblyAI Start Transcript"
+ },
+ "selected": false,
+ "width": 384,
+ "height": 482,
+ "positionAbsolute": {
+ "x": -1957.7132501771657,
+ "y": 470.79685053457587
+ },
+ "dragging": false
+ },
+ {
+ "id": "AssemblyAITranscriptionJobPoller-F46nf",
+ "type": "genericNode",
+ "position": {
+ "x": -1408.0967182254753,
+ "y": 461.5039554434261
+ },
+ "data": {
+ "type": "AssemblyAITranscriptionJobPoller",
+ "node": {
+ "template": {
+ "_type": "Component",
+ "transcript_id": {
+ "trace_as_metadata": true,
+ "list": false,
+ "trace_as_input": true,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "transcript_id",
+ "value": "",
+ "display_name": "Transcript ID",
+ "advanced": false,
+ "input_types": [
+ "Data"
+ ],
+ "dynamic": false,
+ "info": "The ID of the transcription job to poll",
+ "title_case": false,
+ "type": "other",
+ "_input_type": "DataInput"
+ },
+ "api_key": {
+ "load_from_db": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "api_key",
+ "value": null,
+ "display_name": "Assembly API Key",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Your AssemblyAI API key. You can get one from https://www.assemblyai.com/",
+ "title_case": false,
+ "password": true,
+ "type": "str",
+ "_input_type": "SecretStrInput"
+ },
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "import assemblyai as aai\n\nfrom langflow.custom import Component\nfrom langflow.io import DataInput, FloatInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAITranscriptionJobPoller(Component):\n display_name = \"AssemblyAI Poll Transcript\"\n description = \"Poll for the status of a transcription job using AssemblyAI\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. You can get one from https://www.assemblyai.com/\",\n ),\n DataInput(\n name=\"transcript_id\",\n display_name=\"Transcript ID\",\n info=\"The ID of the transcription job to poll\",\n ),\n FloatInput(\n name=\"polling_interval\",\n display_name=\"Polling Interval\",\n value=3.0,\n info=\"The polling interval in seconds\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Transcription Result\", name=\"transcription_result\", method=\"poll_transcription_job\"),\n ]\n\n def poll_transcription_job(self) -> Data:\n \"\"\"Polls the transcription status until completion and returns the Data.\"\"\"\n aai.settings.api_key = self.api_key\n aai.settings.polling_interval = self.polling_interval\n\n # check if it's an error message from the previous step\n if self.transcript_id.data.get(\"error\"):\n self.status = self.transcript_id.data[\"error\"]\n return self.transcript_id\n\n try:\n transcript = aai.Transcript.get_by_id(self.transcript_id.data[\"transcript_id\"])\n except Exception as e:\n error = f\"Getting transcription failed: {str(e)}\"\n self.status = error\n return Data(data={\"error\": error})\n\n if transcript.status == aai.TranscriptStatus.completed:\n json_response = transcript.json_response\n text = json_response.pop(\"text\", None)\n utterances = json_response.pop(\"utterances\", None)\n transcript_id = json_response.pop(\"id\", None)\n sorted_data = { \"text\": text, \"utterances\": 
utterances, \"id\": transcript_id}\n sorted_data.update(json_response)\n data = Data(data=sorted_data)\n self.status = data\n return data\n else:\n self.status = transcript.error\n return Data(data={\"error\": transcript.error})\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "polling_interval": {
+ "trace_as_metadata": true,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "polling_interval",
+ "value": 3,
+ "display_name": "Polling Interval",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The polling interval in seconds",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ }
+ },
+ "description": "Poll for the status of a transcription job using AssemblyAI",
+ "icon": "AssemblyAI",
+ "base_classes": [
+ "Data"
+ ],
+ "display_name": "AssemblyAI Poll Transcript",
+ "documentation": "https://www.assemblyai.com/docs",
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": false,
+ "outputs": [
+ {
+ "types": [
+ "Data"
+ ],
+ "selected": "Data",
+ "name": "transcription_result",
+ "display_name": "Transcription Result",
+ "method": "poll_transcription_job",
+ "value": "__UNDEFINED__",
+ "cache": true
+ }
+ ],
+ "field_order": [
+ "api_key",
+ "transcript_id",
+ "polling_interval"
+ ],
+ "beta": false,
+ "edited": false,
+ "lf_version": "1.0.18"
+ },
+ "id": "AssemblyAITranscriptionJobPoller-F46nf",
+ "description": "Poll for the status of a transcription job using AssemblyAI",
+ "display_name": "AssemblyAI Poll Transcript"
+ },
+ "selected": false,
+ "width": 384,
+ "height": 368,
+ "positionAbsolute": {
+ "x": -1408.0967182254753,
+ "y": 461.5039554434261
+ },
+ "dragging": false
+ },
+ {
+ "id": "AssemblyAIGetSubtitles-3sjU6",
+ "type": "genericNode",
+ "position": {
+ "x": -867.5862690424032,
+ "y": 368.91683022842676
+ },
+ "data": {
+ "type": "AssemblyAIGetSubtitles",
+ "node": {
+ "template": {
+ "_type": "Component",
+ "transcription_result": {
+ "trace_as_metadata": true,
+ "list": false,
+ "trace_as_input": true,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "transcription_result",
+ "value": "",
+ "display_name": "Transcription Result",
+ "advanced": false,
+ "input_types": [
+ "Data"
+ ],
+ "dynamic": false,
+ "info": "The transcription result from AssemblyAI",
+ "title_case": false,
+ "type": "other",
+ "_input_type": "DataInput"
+ },
+ "api_key": {
+ "load_from_db": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "api_key",
+ "value": null,
+ "display_name": "Assembly API Key",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Your AssemblyAI API key. You can get one from https://www.assemblyai.com/",
+ "title_case": false,
+ "password": true,
+ "type": "str",
+ "_input_type": "SecretStrInput"
+ },
+ "chars_per_caption": {
+ "trace_as_metadata": true,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "chars_per_caption",
+ "value": 0,
+ "display_name": "Characters per Caption",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The maximum number of characters per caption (0 for no limit)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "import assemblyai as aai\n\nfrom langflow.custom import Component\nfrom langflow.io import DataInput, DropdownInput, IntInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAIGetSubtitles(Component):\n display_name = \"AssemblyAI Get Subtitles\"\n description = \"Export your transcript in SRT or VTT format for subtitles and closed captions\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. You can get one from https://www.assemblyai.com/\",\n ),\n DataInput(\n name=\"transcription_result\",\n display_name=\"Transcription Result\",\n info=\"The transcription result from AssemblyAI\",\n ),\n DropdownInput(\n name=\"subtitle_format\",\n display_name=\"Subtitle Format\",\n options=[\"srt\", \"vtt\"],\n value=\"srt\",\n info=\"The format of the captions (SRT or VTT)\",\n ),\n IntInput(\n name=\"chars_per_caption\",\n display_name=\"Characters per Caption\",\n info=\"The maximum number of characters per caption (0 for no limit)\",\n value=0,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Subtitles\", name=\"subtitles\", method=\"get_subtitles\"),\n ]\n\n def get_subtitles(self) -> Data:\n aai.settings.api_key = self.api_key\n\n # check if it's an error message from the previous step\n if self.transcription_result.data.get(\"error\"):\n self.status = self.transcription_result.data[\"error\"]\n return self.transcription_result\n\n try:\n transcript_id = self.transcription_result.data[\"id\"]\n transcript = aai.Transcript.get_by_id(transcript_id)\n except Exception as e:\n error = f\"Getting transcription failed: {str(e)}\"\n self.status = error\n return Data(data={\"error\": error})\n\n if transcript.status == aai.TranscriptStatus.completed:\n subtitles = None\n chars_per_caption = self.chars_per_caption if self.chars_per_caption > 0 else None\n if 
self.subtitle_format == \"srt\":\n subtitles = transcript.export_subtitles_srt(chars_per_caption)\n else:\n subtitles = transcript.export_subtitles_vtt(chars_per_caption)\n\n result = Data(\n subtitles=subtitles,\n format=self.subtitle_format,\n transcript_id=transcript_id,\n chars_per_caption=chars_per_caption,\n )\n\n self.status = result\n return result\n else:\n self.status = transcript.error\n return Data(data={\"error\": transcript.error})\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "subtitle_format": {
+ "trace_as_metadata": true,
+ "options": [
+ "srt",
+ "vtt"
+ ],
+ "combobox": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "subtitle_format",
+ "value": "srt",
+ "display_name": "Subtitle Format",
+ "advanced": false,
+ "dynamic": false,
+ "info": "The format of the captions (SRT or VTT)",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "DropdownInput"
+ }
+ },
+ "description": "Export your transcript in SRT or VTT format for subtitles and closed captions",
+ "icon": "AssemblyAI",
+ "base_classes": [
+ "Data"
+ ],
+ "display_name": "AssemblyAI Get Subtitles",
+ "documentation": "https://www.assemblyai.com/docs",
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": false,
+ "outputs": [
+ {
+ "types": [
+ "Data"
+ ],
+ "selected": "Data",
+ "name": "subtitles",
+ "display_name": "Subtitles",
+ "method": "get_subtitles",
+ "value": "__UNDEFINED__",
+ "cache": true
+ }
+ ],
+ "field_order": [
+ "api_key",
+ "transcription_result",
+ "subtitle_format",
+ "chars_per_caption"
+ ],
+ "beta": false,
+ "edited": false,
+ "lf_version": "1.0.18"
+ },
+ "id": "AssemblyAIGetSubtitles-3sjU6",
+ "description": "Export your transcript in SRT or VTT format for subtitles and closed captions",
+ "display_name": "AssemblyAI Get Subtitles"
+ },
+ "selected": false,
+ "width": 384,
+ "height": 454,
+ "positionAbsolute": {
+ "x": -867.5862690424032,
+ "y": 368.91683022842676
+ },
+ "dragging": false
+ },
+ {
+ "id": "AssemblyAIListTranscripts-3prc4",
+ "type": "genericNode",
+ "position": {
+ "x": -380.99808133361984,
+ "y": 401.2674645310267
+ },
+ "data": {
+ "type": "AssemblyAIListTranscripts",
+ "node": {
+ "template": {
+ "_type": "Component",
+ "api_key": {
+ "load_from_db": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "api_key",
+ "value": null,
+ "display_name": "Assembly API Key",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Your AssemblyAI API key. You can get one from https://www.assemblyai.com/",
+ "title_case": false,
+ "password": true,
+ "type": "str",
+ "_input_type": "SecretStrInput"
+ },
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "import assemblyai as aai\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAIListTranscripts(Component):\n display_name = \"AssemblyAI List Transcripts\"\n description = \"Retrieve a list of transcripts from AssemblyAI with filtering options\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. You can get one from https://www.assemblyai.com/\",\n ),\n IntInput(\n name=\"limit\",\n display_name=\"Limit\",\n info=\"Maximum number of transcripts to retrieve (default: 20, use 0 for all)\",\n value=20,\n ),\n DropdownInput(\n name=\"status_filter\",\n display_name=\"Status Filter\",\n options=[\"all\", \"queued\", \"processing\", \"completed\", \"error\"],\n value=\"all\",\n info=\"Filter by transcript status\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"created_on\",\n display_name=\"Created On\",\n info=\"Only get transcripts created on this date (YYYY-MM-DD)\",\n advanced=True,\n ),\n BoolInput(\n name=\"throttled_only\",\n display_name=\"Throttled Only\",\n info=\"Only get throttled transcripts, overrides the status filter\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Transcript List\", name=\"transcript_list\", method=\"list_transcripts\"),\n ]\n\n def list_transcripts(self) -> list[Data]:\n aai.settings.api_key = self.api_key\n\n params = aai.ListTranscriptParameters()\n if self.limit:\n params.limit = self.limit\n if self.status_filter != \"all\":\n params.status = self.status_filter\n if self.created_on and self.created_on.text:\n params.created_on = self.created_on.text\n if self.throttled_only:\n params.throttled_only = True\n\n try:\n transcriber = aai.Transcriber()\n\n def convert_page_to_data_list(page):\n return 
[Data(**t.dict()) for t in page.transcripts]\n\n if self.limit == 0:\n # paginate over all pages\n params.limit = 100\n page = transcriber.list_transcripts(params)\n transcripts = convert_page_to_data_list(page)\n\n while page.page_details.before_id_of_prev_url is not None:\n params.before_id = page.page_details.before_id_of_prev_url\n page = transcriber.list_transcripts(params)\n transcripts.extend(convert_page_to_data_list(page))\n else:\n # just one page\n page = transcriber.list_transcripts(params)\n transcripts = convert_page_to_data_list(page)\n\n self.status = transcripts\n return transcripts\n except Exception as e:\n error_data = Data(data={\"error\": f\"An error occurred: {str(e)}\"})\n self.status = [error_data]\n return [error_data]\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "created_on": {
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "created_on",
+ "value": "",
+ "display_name": "Created On",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Only get transcripts created on this date (YYYY-MM-DD)",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "limit": {
+ "trace_as_metadata": true,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "limit",
+ "value": 20,
+ "display_name": "Limit",
+ "advanced": false,
+ "dynamic": false,
+ "info": "Maximum number of transcripts to retrieve (default: 20, use 0 for all)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "status_filter": {
+ "trace_as_metadata": true,
+ "options": [
+ "all",
+ "queued",
+ "processing",
+ "completed",
+ "error"
+ ],
+ "combobox": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "status_filter",
+ "value": "all",
+ "display_name": "Status Filter",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Filter by transcript status",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "DropdownInput"
+ },
+ "throttled_only": {
+ "trace_as_metadata": true,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "throttled_only",
+ "value": false,
+ "display_name": "Throttled Only",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Only get throttled transcripts, overrides the status filter",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ }
+ },
+ "description": "Retrieve a list of transcripts from AssemblyAI with filtering options",
+ "icon": "AssemblyAI",
+ "base_classes": [
+ "Data"
+ ],
+ "display_name": "AssemblyAI List Transcripts",
+ "documentation": "https://www.assemblyai.com/docs",
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": false,
+ "outputs": [
+ {
+ "types": [
+ "Data"
+ ],
+ "selected": "Data",
+ "name": "transcript_list",
+ "display_name": "Transcript List",
+ "method": "list_transcripts",
+ "value": "__UNDEFINED__",
+ "cache": true
+ }
+ ],
+ "field_order": [
+ "api_key",
+ "limit",
+ "status_filter",
+ "created_on",
+ "throttled_only"
+ ],
+ "beta": false,
+ "edited": false,
+ "lf_version": "1.0.18"
+ },
+ "id": "AssemblyAIListTranscripts-3prc4",
+ "description": "Retrieve a list of transcripts from AssemblyAI with filtering options",
+ "display_name": "AssemblyAI List Transcripts"
+ },
+ "selected": false,
+ "width": 384,
+ "height": 410,
+ "positionAbsolute": {
+ "x": -380.99808133361984,
+ "y": 401.2674645310267
+ },
+ "dragging": false
+ },
+ {
+ "id": "AssemblyAILeMUR-jzwHZ",
+ "type": "genericNode",
+ "position": {
+ "x": -875.6482330011189,
+ "y": 887.1705799007382
+ },
+ "data": {
+ "type": "AssemblyAILeMUR",
+ "node": {
+ "template": {
+ "_type": "Component",
+ "transcription_result": {
+ "trace_as_metadata": true,
+ "list": false,
+ "trace_as_input": true,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "transcription_result",
+ "value": "",
+ "display_name": "Transcription Result",
+ "advanced": false,
+ "input_types": [
+ "Data"
+ ],
+ "dynamic": false,
+ "info": "The transcription result from AssemblyAI",
+ "title_case": false,
+ "type": "other",
+ "_input_type": "DataInput"
+ },
+ "api_key": {
+ "load_from_db": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "api_key",
+ "value": null,
+ "display_name": "Assembly API Key",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Your AssemblyAI API key. You can get one from https://www.assemblyai.com/",
+ "title_case": false,
+ "password": true,
+ "type": "str",
+ "_input_type": "SecretStrInput"
+ },
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "import assemblyai as aai\n\nfrom langflow.custom import Component\nfrom langflow.io import DataInput, DropdownInput, FloatInput, IntInput, MultilineInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAILeMUR(Component):\n display_name = \"AssemblyAI LeMUR\"\n description = \"Apply Large Language Models to spoken data using the AssemblyAI LeMUR framework\"\n documentation = \"https://www.assemblyai.com/docs/lemur\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. You can get one from https://www.assemblyai.com/\",\n advanced=False,\n ),\n DataInput(\n name=\"transcription_result\",\n display_name=\"Transcription Result\",\n info=\"The transcription result from AssemblyAI\",\n ),\n MultilineInput(\n name=\"prompt\",\n display_name=\"Input Prompt\",\n info=\"The text to prompt the model\",\n ),\n DropdownInput(\n name=\"final_model\",\n display_name=\"Final Model\",\n options=[\"claude3_5_sonnet\", \"claude3_opus\", \"claude3_haiku\", \"claude3_sonnet\"],\n value=\"claude3_5_sonnet\",\n info=\"The model that is used for the final prompt after compression is performed\",\n advanced=True,\n ),\n FloatInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n advanced=True,\n value=0.0,\n info=\"The temperature to use for the model\",\n ),\n IntInput(\n name=\"max_output_size\",\n display_name=\" Max Output Size\",\n advanced=True,\n value=2000,\n info=\"Max output size in tokens, up to 4000\",\n ),\n DropdownInput(\n name=\"endpoint\",\n display_name=\"Endpoint\",\n options=[\"task\", \"summary\", \"question-answer\"],\n value=\"task\",\n info=\"The LeMUR endpoint to use. For 'summary' and 'question-answer', no prompt input is needed. 
See https://www.assemblyai.com/docs/api-reference/lemur/ for more info.\",\n advanced=True,\n ),\n MultilineInput(\n name=\"questions\",\n display_name=\"Questions\",\n info=\"Comma-separated list of your questions. Only used if Endpoint is 'question-answer'\",\n advanced=True,\n ),\n MultilineInput(\n name=\"transcript_ids\",\n display_name=\"Transcript IDs\",\n info=\"Comma-separated list of transcript IDs. LeMUR can perform actions over multiple transcripts. If provided, the Transcription Result is ignored.\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"LeMUR Response\", name=\"lemur_response\", method=\"run_lemur\"),\n ]\n\n def run_lemur(self) -> Data:\n \"\"\"Use the LeMUR task endpoint to input the LLM prompt.\"\"\"\n aai.settings.api_key = self.api_key\n\n if not self.transcription_result and not self.transcript_ids:\n error = \"Either a Transcription Result or Transcript IDs must be provided\"\n self.status = error\n return Data(data={\"error\": error})\n elif self.transcription_result and self.transcription_result.data.get(\"error\"):\n # error message from the previous step\n self.status = self.transcription_result.data[\"error\"]\n return self.transcription_result\n elif self.endpoint == \"task\" and not self.prompt:\n self.status = \"No prompt specified for the task endpoint\"\n return Data(data={\"error\": \"No prompt specified\"})\n elif self.endpoint == \"question-answer\" and not self.questions:\n error = \"No Questions were provided for the question-answer endpoint\"\n self.status = error\n return Data(data={\"error\": error})\n\n # Check for valid transcripts\n transcript_ids = None\n if self.transcription_result and \"id\" in self.transcription_result.data:\n transcript_ids = [self.transcription_result.data[\"id\"]]\n elif self.transcript_ids:\n transcript_ids = self.transcript_ids.split(\",\")\n transcript_ids = [t.strip() for t in transcript_ids]\n \n if not transcript_ids:\n error = \"Either a valid Transcription Result 
or valid Transcript IDs must be provided\"\n self.status = error\n return Data(data={\"error\": error})\n\n # Get TranscriptGroup and check if there is any error\n transcript_group = aai.TranscriptGroup(transcript_ids=transcript_ids)\n transcript_group, failures = transcript_group.wait_for_completion(return_failures=True)\n if failures:\n error = f\"Getting transcriptions failed: {failures[0]}\"\n self.status = error\n return Data(data={\"error\": error})\n \n for t in transcript_group.transcripts:\n if t.status == aai.TranscriptStatus.error:\n self.status = t.error\n return Data(data={\"error\": t.error})\n\n # Perform LeMUR action\n try:\n response = self.perform_lemur_action(transcript_group, self.endpoint)\n result = Data(data=response)\n self.status = result\n return result\n except Exception as e:\n error = f\"An Error happened: {str(e)}\"\n self.status = error\n return Data(data={\"error\": error})\n\n def perform_lemur_action(self, transcript_group: aai.TranscriptGroup, endpoint: str) -> dict:\n print(\"Endpoint:\", endpoint, type(endpoint))\n if endpoint == \"task\":\n result = transcript_group.lemur.task(\n prompt=self.prompt,\n final_model=self.get_final_model(self.final_model),\n temperature=self.temperature,\n max_output_size=self.max_output_size,\n )\n elif endpoint == \"summary\":\n result = transcript_group.lemur.summarize(\n final_model=self.get_final_model(self.final_model),\n temperature=self.temperature,\n max_output_size=self.max_output_size,\n )\n elif endpoint == \"question-answer\":\n questions = self.questions.split(\",\")\n questions = [aai.LemurQuestion(question=q) for q in questions]\n result = transcript_group.lemur.question(\n questions=questions,\n final_model=self.get_final_model(self.final_model),\n temperature=self.temperature,\n max_output_size=self.max_output_size,\n )\n else:\n raise ValueError(f\"Endpoint not supported: {endpoint}\")\n\n return result.dict()\n \n def get_final_model(self, model_name: str) -> aai.LemurModel:\n 
if model_name == \"claude3_5_sonnet\":\n return aai.LemurModel.claude3_5_sonnet\n elif model_name == \"claude3_opus\":\n return aai.LemurModel.claude3_opus\n elif model_name == \"claude3_haiku\":\n return aai.LemurModel.claude3_haiku\n elif model_name == \"claude3_sonnet\":\n return aai.LemurModel.claude3_sonnet\n else:\n raise ValueError(f\"Model name not supported: {model_name}\")\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "endpoint": {
+ "trace_as_metadata": true,
+ "options": [
+ "task",
+ "summary",
+ "question-answer"
+ ],
+ "combobox": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "endpoint",
+ "value": "task",
+ "display_name": "Endpoint",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The LeMUR endpoint to use. For 'summary' and 'question-answer', no prompt input is needed. See https://www.assemblyai.com/docs/api-reference/lemur/ for more info.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "DropdownInput"
+ },
+ "final_model": {
+ "trace_as_metadata": true,
+ "options": [
+ "claude3_5_sonnet",
+ "claude3_opus",
+ "claude3_haiku",
+ "claude3_sonnet"
+ ],
+ "combobox": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "final_model",
+ "value": "claude3_5_sonnet",
+ "display_name": "Final Model",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The model that is used for the final prompt after compression is performed",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "DropdownInput"
+ },
+ "max_output_size": {
+ "trace_as_metadata": true,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "max_output_size",
+ "value": 2000,
+ "display_name": " Max Output Size",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Max output size in tokens, up to 4000",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "prompt": {
+ "trace_as_input": true,
+ "multiline": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "prompt",
+ "value": "",
+ "display_name": "Input Prompt",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "The text to prompt the model",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MultilineInput"
+ },
+ "questions": {
+ "trace_as_input": true,
+ "multiline": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "questions",
+ "value": "",
+ "display_name": "Questions",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Comma-separated list of your questions. Only used if Endpoint is 'question-answer'",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MultilineInput"
+ },
+ "temperature": {
+ "trace_as_metadata": true,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "temperature",
+ "value": 0,
+ "display_name": "Temperature",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The temperature to use for the model",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ },
+ "transcript_ids": {
+ "trace_as_input": true,
+ "multiline": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "transcript_ids",
+ "value": "",
+ "display_name": "Transcript IDs",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Comma-separated list of transcript IDs. LeMUR can perform actions over multiple transcripts. If provided, the Transcription Result is ignored.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MultilineInput"
+ }
+ },
+ "description": "Apply Large Language Models to spoken data using the AssemblyAI LeMUR framework",
+ "icon": "AssemblyAI",
+ "base_classes": [
+ "Data"
+ ],
+ "display_name": "AssemblyAI LeMUR",
+ "documentation": "https://www.assemblyai.com/docs/lemur",
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": false,
+ "outputs": [
+ {
+ "types": [
+ "Data"
+ ],
+ "selected": "Data",
+ "name": "lemur_response",
+ "display_name": "LeMUR Response",
+ "method": "run_lemur",
+ "value": "__UNDEFINED__",
+ "cache": true
+ }
+ ],
+ "field_order": [
+ "api_key",
+ "transcription_result",
+ "prompt",
+ "final_model",
+ "temperature",
+ "max_output_size",
+ "endpoint",
+ "questions",
+ "transcript_ids"
+ ],
+ "beta": false,
+ "edited": false,
+ "lf_version": "1.0.18"
+ },
+ "id": "AssemblyAILeMUR-jzwHZ",
+ "description": "Apply Large Language Models to spoken data using the AssemblyAI LeMUR framework",
+ "display_name": "AssemblyAI LeMUR"
+ },
+ "selected": false,
+ "width": 384,
+ "height": 454,
+ "positionAbsolute": {
+ "x": -875.6482330011189,
+ "y": 887.1705799007382
+ },
+ "dragging": false
+ },
+ {
+ "id": "ParseData-th7JM",
+ "type": "genericNode",
+ "position": {
+ "x": -862.5843195492909,
+ "y": -56.71774780191424
+ },
+ "data": {
+ "type": "ParseData",
+ "node": {
+ "template": {
+ "_type": "Component",
+ "data": {
+ "trace_as_metadata": true,
+ "list": false,
+ "trace_as_input": true,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "data",
+ "value": "",
+ "display_name": "Data",
+ "advanced": false,
+ "input_types": [
+ "Data"
+ ],
+ "dynamic": false,
+ "info": "The data to convert to text.",
+ "title_case": false,
+ "type": "other",
+ "_input_type": "DataInput"
+ },
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n name = \"ParseData\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "sep": {
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "sep",
+ "value": "\n",
+ "display_name": "Separator",
+ "advanced": true,
+ "dynamic": false,
+ "info": "",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "StrInput"
+ },
+ "template": {
+ "trace_as_input": true,
+ "multiline": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "template",
+ "value": "{text}",
+ "display_name": "Template",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MultilineInput"
+ }
+ },
+ "description": "Convert Data into plain text following a specified template.",
+ "icon": "braces",
+ "base_classes": [
+ "Message"
+ ],
+ "display_name": "Parse Data",
+ "documentation": "",
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": false,
+ "outputs": [
+ {
+ "types": [
+ "Message"
+ ],
+ "selected": "Message",
+ "name": "text",
+ "display_name": "Text",
+ "method": "parse_data",
+ "value": "__UNDEFINED__",
+ "cache": true
+ }
+ ],
+ "field_order": [
+ "data",
+ "template",
+ "sep"
+ ],
+ "beta": false,
+ "edited": false,
+ "lf_version": "1.0.18"
+ },
+ "id": "ParseData-th7JM"
+ },
+ "selected": false,
+ "width": 384,
+ "height": 368,
+ "positionAbsolute": {
+ "x": -862.5843195492909,
+ "y": -56.71774780191424
+ },
+ "dragging": false
+ }
+ ],
+ "edges": [
+ {
+ "source": "AssemblyAITranscriptionJobCreator-Idt7P",
+ "sourceHandle": "{ÅdataTypeÅ:ÅAssemblyAITranscriptionJobCreatorÅ,ÅidÅ:ÅAssemblyAITranscriptionJobCreator-Idt7PÅ,ÅnameÅ:Åtranscript_idÅ,Åoutput_typesÅ:[ÅDataÅ]}",
+ "target": "AssemblyAITranscriptionJobPoller-F46nf",
+ "targetHandle": "{ÅfieldNameÅ:Åtranscript_idÅ,ÅidÅ:ÅAssemblyAITranscriptionJobPoller-F46nfÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}",
+ "data": {
+ "targetHandle": {
+ "fieldName": "transcript_id",
+ "id": "AssemblyAITranscriptionJobPoller-F46nf",
+ "inputTypes": [
+ "Data"
+ ],
+ "type": "other"
+ },
+ "sourceHandle": {
+ "dataType": "AssemblyAITranscriptionJobCreator",
+ "id": "AssemblyAITranscriptionJobCreator-Idt7P",
+ "name": "transcript_id",
+ "output_types": [
+ "Data"
+ ]
+ }
+ },
+ "id": "reactflow__edge-AssemblyAITranscriptionJobCreator-Idt7P{ÅdataTypeÅ:ÅAssemblyAITranscriptionJobCreatorÅ,ÅidÅ:ÅAssemblyAITranscriptionJobCreator-Idt7PÅ,ÅnameÅ:Åtranscript_idÅ,Åoutput_typesÅ:[ÅDataÅ]}-AssemblyAITranscriptionJobPoller-F46nf{ÅfieldNameÅ:Åtranscript_idÅ,ÅidÅ:ÅAssemblyAITranscriptionJobPoller-F46nfÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}",
+ "animated": false,
+ "className": "",
+ "selected": false
+ },
+ {
+ "source": "AssemblyAITranscriptionJobPoller-F46nf",
+ "sourceHandle": "{ÅdataTypeÅ:ÅAssemblyAITranscriptionJobPollerÅ,ÅidÅ:ÅAssemblyAITranscriptionJobPoller-F46nfÅ,ÅnameÅ:Åtranscription_resultÅ,Åoutput_typesÅ:[ÅDataÅ]}",
+ "target": "AssemblyAIGetSubtitles-3sjU6",
+ "targetHandle": "{ÅfieldNameÅ:Åtranscription_resultÅ,ÅidÅ:ÅAssemblyAIGetSubtitles-3sjU6Å,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}",
+ "data": {
+ "targetHandle": {
+ "fieldName": "transcription_result",
+ "id": "AssemblyAIGetSubtitles-3sjU6",
+ "inputTypes": [
+ "Data"
+ ],
+ "type": "other"
+ },
+ "sourceHandle": {
+ "dataType": "AssemblyAITranscriptionJobPoller",
+ "id": "AssemblyAITranscriptionJobPoller-F46nf",
+ "name": "transcription_result",
+ "output_types": [
+ "Data"
+ ]
+ }
+ },
+ "id": "reactflow__edge-AssemblyAITranscriptionJobPoller-F46nf{ÅdataTypeÅ:ÅAssemblyAITranscriptionJobPollerÅ,ÅidÅ:ÅAssemblyAITranscriptionJobPoller-F46nfÅ,ÅnameÅ:Åtranscription_resultÅ,Åoutput_typesÅ:[ÅDataÅ]}-AssemblyAIGetSubtitles-3sjU6{ÅfieldNameÅ:Åtranscription_resultÅ,ÅidÅ:ÅAssemblyAIGetSubtitles-3sjU6Å,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}",
+ "animated": false,
+ "className": ""
+ },
+ {
+ "source": "AssemblyAITranscriptionJobPoller-F46nf",
+ "sourceHandle": "{ÅdataTypeÅ:ÅAssemblyAITranscriptionJobPollerÅ,ÅidÅ:ÅAssemblyAITranscriptionJobPoller-F46nfÅ,ÅnameÅ:Åtranscription_resultÅ,Åoutput_typesÅ:[ÅDataÅ]}",
+ "target": "ParseData-th7JM",
+ "targetHandle": "{ÅfieldNameÅ:ÅdataÅ,ÅidÅ:ÅParseData-th7JMÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}",
+ "data": {
+ "targetHandle": {
+ "fieldName": "data",
+ "id": "ParseData-th7JM",
+ "inputTypes": [
+ "Data"
+ ],
+ "type": "other"
+ },
+ "sourceHandle": {
+ "dataType": "AssemblyAITranscriptionJobPoller",
+ "id": "AssemblyAITranscriptionJobPoller-F46nf",
+ "name": "transcription_result",
+ "output_types": [
+ "Data"
+ ]
+ }
+ },
+ "id": "reactflow__edge-AssemblyAITranscriptionJobPoller-F46nf{ÅdataTypeÅ:ÅAssemblyAITranscriptionJobPollerÅ,ÅidÅ:ÅAssemblyAITranscriptionJobPoller-F46nfÅ,ÅnameÅ:Åtranscription_resultÅ,Åoutput_typesÅ:[ÅDataÅ]}-ParseData-th7JM{ÅfieldNameÅ:ÅdataÅ,ÅidÅ:ÅParseData-th7JMÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}",
+ "animated": false,
+ "className": ""
+ },
+ {
+ "source": "Prompt-IO8Cq",
+ "sourceHandle": "{ÅdataTypeÅ:ÅPromptÅ,ÅidÅ:ÅPrompt-IO8CqÅ,ÅnameÅ:ÅpromptÅ,Åoutput_typesÅ:[ÅMessageÅ]}",
+ "target": "AssemblyAILeMUR-jzwHZ",
+ "targetHandle": "{ÅfieldNameÅ:ÅpromptÅ,ÅidÅ:ÅAssemblyAILeMUR-jzwHZÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}",
+ "data": {
+ "targetHandle": {
+ "fieldName": "prompt",
+ "id": "AssemblyAILeMUR-jzwHZ",
+ "inputTypes": [
+ "Message"
+ ],
+ "type": "str"
+ },
+ "sourceHandle": {
+ "dataType": "Prompt",
+ "id": "Prompt-IO8Cq",
+ "name": "prompt",
+ "output_types": [
+ "Message"
+ ]
+ }
+ },
+ "id": "reactflow__edge-Prompt-IO8Cq{ÅdataTypeÅ:ÅPromptÅ,ÅidÅ:ÅPrompt-IO8CqÅ,ÅnameÅ:ÅpromptÅ,Åoutput_typesÅ:[ÅMessageÅ]}-AssemblyAILeMUR-jzwHZ{ÅfieldNameÅ:ÅpromptÅ,ÅidÅ:ÅAssemblyAILeMUR-jzwHZÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}",
+ "animated": false,
+ "className": ""
+ },
+ {
+ "source": "AssemblyAITranscriptionJobPoller-F46nf",
+ "sourceHandle": "{ÅdataTypeÅ:ÅAssemblyAITranscriptionJobPollerÅ,ÅidÅ:ÅAssemblyAITranscriptionJobPoller-F46nfÅ,ÅnameÅ:Åtranscription_resultÅ,Åoutput_typesÅ:[ÅDataÅ]}",
+ "target": "AssemblyAILeMUR-jzwHZ",
+ "targetHandle": "{ÅfieldNameÅ:Åtranscription_resultÅ,ÅidÅ:ÅAssemblyAILeMUR-jzwHZÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}",
+ "data": {
+ "targetHandle": {
+ "fieldName": "transcription_result",
+ "id": "AssemblyAILeMUR-jzwHZ",
+ "inputTypes": [
+ "Data"
+ ],
+ "type": "other"
+ },
+ "sourceHandle": {
+ "dataType": "AssemblyAITranscriptionJobPoller",
+ "id": "AssemblyAITranscriptionJobPoller-F46nf",
+ "name": "transcription_result",
+ "output_types": [
+ "Data"
+ ]
+ }
+ },
+ "id": "reactflow__edge-AssemblyAITranscriptionJobPoller-F46nf{ÅdataTypeÅ:ÅAssemblyAITranscriptionJobPollerÅ,ÅidÅ:ÅAssemblyAITranscriptionJobPoller-F46nfÅ,ÅnameÅ:Åtranscription_resultÅ,Åoutput_typesÅ:[ÅDataÅ]}-AssemblyAILeMUR-jzwHZ{ÅfieldNameÅ:Åtranscription_resultÅ,ÅidÅ:ÅAssemblyAILeMUR-jzwHZÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}",
+ "animated": false,
+ "className": ""
+ }
+ ],
+ "viewport": {
+ "x": 733.3920447354355,
+ "y": -42.8262727047815,
+ "zoom": 0.2612816498236053
+ }
+ },
+ "user_id": "9c01eee4-17dd-460e-8c52-bba36d635a9d",
+ "folder_id": "54fc9211-d42d-4c3f-a932-ee4987f61988",
+ "description": "Transcribe and analyze audio with AssemblyAI",
+ "icon_bg_color": null,
+ "updated_at": "2024-09-26T14:55:47+00:00",
+ "webhook": false,
+ "id": "fa69381c-d1c4-4535-bc23-bc2fb4956e1e"
+}
\ No newline at end of file
diff --git a/docs/docs/Integrations/Google/_category_.json b/docs/docs/Integrations/Google/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..8fd84656cebb5820e674f7b887d24fba401eaf08
--- /dev/null
+++ b/docs/docs/Integrations/Google/_category_.json
@@ -0,0 +1 @@
+{ "position": 2, "label": "Google" }
diff --git a/docs/docs/Integrations/Google/integrations-setup-google-cloud-vertex-ai-langflow.md b/docs/docs/Integrations/Google/integrations-setup-google-cloud-vertex-ai-langflow.md
new file mode 100644
index 0000000000000000000000000000000000000000..0f5e0cf4eda1632d12777a54ee0f809841fd087a
--- /dev/null
+++ b/docs/docs/Integrations/Google/integrations-setup-google-cloud-vertex-ai-langflow.md
@@ -0,0 +1,39 @@
+---
+title: 'Integrate Google Cloud Vertex AI with Langflow'
+slug: /integrations-setup-google-cloud-vertex-ai-langflow
+sidebar_position: 2
+description: "A comprehensive guide on creating a Google OAuth app, obtaining tokens, and integrating them with Langflow's Google components."
+---
+
+Langflow integrates with the [Google Vertex AI API](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) for authenticating the [Vertex AI embeddings model](/components-embedding-models#vertexai-embeddings) and [Vertex AI](/components-models#vertexai) components.
+
+Learn how to create a service account JSON in Google Cloud to authenticate Langflow's Vertex AI components.
+
+## Create a service account with Vertex AI access
+
+1. Select and enable your Google Cloud project.
+For more information, see [Create a Google Cloud project](https://developers.google.com/workspace/guides/create-project).
+2. Create a service account in your Google Cloud project.
+For more information, see [Create a service account](https://developers.google.com/workspace/guides/create-credentials#service-account).
+3. Assign the **Vertex AI Service Agent** role to your new account.
+This role allows Langflow to access Vertex AI resources.
+For more information, see [Vertex AI access control with IAM](https://cloud.google.com/vertex-ai/docs/general/access-control).
+4. To generate a new JSON key for the service account, navigate to your service account.
+5. Click **Add Key**, and then click **Create new key**.
+6. Under **Key type**, select **JSON**, and then click **Create**.
+A JSON private key file is downloaded.
+Now that you have a service account and a JSON private key, you need to configure the credentials in Langflow components.
+
+## Configure credentials in Langflow components
+
+With your service account configured and your credentials JSON file created, follow these steps to authenticate the Langflow application.
+
+1. Create a new project in Langflow.
+2. From the components sidebar, drag and drop either the **Vertex AI** or **Vertex AI Embeddings** component to your workspace.
+3. In the Vertex AI component's **Credentials** field, add the service account JSON file.
+4. Confirm the component can access the Vertex AI resources.
+Connect a **Chat input** and **Chat output** component to the Vertex AI component.
+A successful chat confirms the component has access to the Vertex AI resources.
+
+
+
diff --git a/docs/docs/Integrations/Google/integrations-setup-google-oauth-langflow.md b/docs/docs/Integrations/Google/integrations-setup-google-oauth-langflow.md
new file mode 100644
index 0000000000000000000000000000000000000000..0bfcd3a6320922ceac4bf760dce72a7a5bb29f4b
--- /dev/null
+++ b/docs/docs/Integrations/Google/integrations-setup-google-oauth-langflow.md
@@ -0,0 +1,69 @@
+---
+title: Integrate Google OAuth with Langflow
+slug: /integrations-setup-google-oauth-langflow
+sidebar_position: 3
+description: "A comprehensive guide on creating a Google OAuth app, obtaining tokens, and integrating them with Langflow's Google components."
+---
+
+import TOCInline from '@theme/TOCInline';
+
+Langflow integrates with [Google OAuth](https://developers.google.com/identity/protocols/oauth2) for authenticating the [Gmail loader](/components-data#gmail-loader), [Google Drive loader](/components-data#google-drive-loader), and [Google Drive Search](/components-data#google-drive-search) components.
+
+Learn how to create an OAuth app in Google Cloud, obtain the necessary credentials and access tokens, and add them to Langflow's Google components.
+
+## Create an OAuth Application in Google Cloud {#5b8981b15d86192d17b0e5725c1f95e7}
+
+1. Navigate to the [Google Cloud Console](https://console.cloud.google.com/).
+
+2. Click **Select a project**, and then click **New Project** to create a new project.
+
+
+
+3. To enable APIs for the project, select **APIs & Services**, and then click **Library**. Enable the APIs you need for your project. For example, if your flow uses the Google Drive component, enable the Google Drive API.
+4. To navigate to the OAuth consent screen, click **APIs & Services**, and then click **OAuth consent screen**.
+5. Populate your OAuth consent screen with the application name, user support email, required [scopes](https://developers.google.com/identity/protocols/oauth2/scopes), and authorized domains.
+6. To create an **OAuth Client ID**, navigate to **Clients**, and then click **Create Client**.
+7. Choose **Desktop app** as the application type, and then name your client ID.
+8. Click **Create**. A Client ID and Client Secret are created. Download the credentials as a JSON file to your local machine and save it securely.
+
+
+
+---
+
+## Retrieve Access and Refresh Tokens
+
+With your OAuth application configured and your credentials JSON file created, follow these steps to authenticate the Langflow application.
+
+1. Create a new project in Langflow.
+2. Add a **Google OAuth Token** component to your flow.
+3. In the **Credentials File** field of the Google OAuth Token component, enter the path to your **Credentials File**, the JSON file containing the Client ID credentials you downloaded from Google in the previous steps.
+4. To authenticate your application, in the **Google OAuth Token** component, click **Play**.
+A new tab opens in the browser to authenticate your application using your Google Cloud account. You must authenticate the application with the same Google account that created the OAuth credentials.
+
+:::info
+If a new tab does not open automatically, check the Langflow **Logs** for the Google authentication URL. Open this URL in your browser to complete the authentication.
+:::
+
+5. After successful authentication, your Langflow application can now request and refresh tokens. These tokens enable Langflow to interact with Google services on your behalf and execute the requests you've specified.
+
+## Create a flow with Google Drive loader
+
+For a pre-built JSON file of a flow that uses the Google Drive loader component, download the Google Drive Document Translation Example Flow JSON to your local machine.
+
+In this example, the **Google Drive loader** component loads a text file hosted on Google Drive, translates the text to Spanish, and returns it to a chat output.
+
+1. Download the Google Drive Document Translation Example Flow JSON to your local machine.
+2. To import the downloaded JSON to Langflow, click **Options**, and then select **Import**.
+3. In the **Credentials File** field of the Google OAuth Token component, enter the path to your **Credentials File**, the JSON file containing the Client ID credentials you downloaded from Google in the previous steps.
+4. In the Google Drive loader component, in the `JSON String of the Service Account Token` field, enter the JSON string containing the token returned in the output of the Google OAuth Token component.
+
+The example flow includes a **Parse data** component to convert the `data` output of the Google OAuth Token component to the `text` input of the JSON Cleaner component.
+
+5. To allow the Langflow component to access the file in Google Drive, copy the Google Drive File ID from the document's URL.
+:::info
+The file ID is located between `/d/` and `/edit` in a Google Drive document's URL.
+For example, in the URL `https://drive.google.com/file/d/1a2b3c4D5E6F7gHI8J9klmnopQ/edit`, the File ID is `1a2b3c4D5E6F7gHI8J9klmnopQ`.
+:::
+6. In the Google Drive loader component, in the **Document ID** field, paste the document URL.
+7. Click the **Chat output** component, and then click **Play**.
+The chat output should display a translated document.
diff --git a/docs/docs/Integrations/Notion/Conversational_Notion_Agent.json b/docs/docs/Integrations/Notion/Conversational_Notion_Agent.json
new file mode 100644
index 0000000000000000000000000000000000000000..2b8b4063d855db1755ba9002ad59b35f169d7049
--- /dev/null
+++ b/docs/docs/Integrations/Notion/Conversational_Notion_Agent.json
@@ -0,0 +1 @@
+{"id":"e070f0be-edc4-4512-bb0f-e53307062a26","data":{"nodes":[{"id":"AddContentToPage-ZezUn","type":"genericNode","position":{"x":1416.217259177943,"y":1709.6205867919527},"data":{"type":"AddContentToPage","node":{"template":{"_type":"Component","block_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"block_id","value":"","display_name":"Page/Block ID","advanced":true,"dynamic":false,"info":"The ID of the page/block to add the content.","title_case":false,"type":"str","_input_type":"StrInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nfrom typing import Dict, Any, Union\nfrom markdown import markdown\nfrom bs4 import BeautifulSoup\nimport requests\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom pydantic import BaseModel, Field\n\n\nclass AddContentToPage(LCToolComponent):\n display_name: str = \"Add Content to Page \"\n description: str = \"Convert markdown text to Notion blocks and append them to a Notion page.\"\n documentation: str = \"https://developers.notion.com/reference/patch-block-children\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n MultilineInput(\n name=\"markdown_text\",\n display_name=\"Markdown Text\",\n info=\"The markdown text to convert to Notion blocks.\",\n ),\n StrInput(\n name=\"block_id\",\n display_name=\"Page/Block ID\",\n info=\"The ID of the page/block to add the content.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n 
Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class AddContentToPageSchema(BaseModel):\n markdown_text: str = Field(..., description=\"The markdown text to convert to Notion blocks.\")\n block_id: str = Field(..., description=\"The ID of the page/block to add the content.\")\n\n def run_model(self) -> Data:\n result = self._add_content_to_page(self.markdown_text, self.block_id)\n return Data(data=result, text=json.dumps(result))\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"add_content_to_notion_page\",\n description=\"Convert markdown text to Notion blocks and append them to a Notion page.\",\n func=self._add_content_to_page,\n args_schema=self.AddContentToPageSchema,\n )\n\n def _add_content_to_page(self, markdown_text: str, block_id: str) -> Union[Dict[str, Any], str]:\n try:\n html_text = markdown(markdown_text)\n soup = BeautifulSoup(html_text, \"html.parser\")\n blocks = self.process_node(soup)\n\n url = f\"https://api.notion.com/v1/blocks/{block_id}/children\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"children\": blocks,\n }\n\n response = requests.patch(url, headers=headers, json=data)\n response.raise_for_status()\n\n return response.json()\n except requests.exceptions.RequestException as e:\n error_message = f\"Error: Failed to add content to Notion page. {str(e)}\"\n if hasattr(e, \"response\") and e.response is not None:\n error_message += f\" Status code: {e.response.status_code}, Response: {e.response.text}\"\n return error_message\n except Exception as e:\n return f\"Error: An unexpected error occurred while adding content to Notion page. 
{str(e)}\"\n\n def process_node(self, node):\n blocks = []\n if isinstance(node, str):\n text = node.strip()\n if text:\n if text.startswith(\"#\"):\n heading_level = text.count(\"#\", 0, 6)\n heading_text = text[heading_level:].strip()\n if heading_level == 1:\n blocks.append(self.create_block(\"heading_1\", heading_text))\n elif heading_level == 2:\n blocks.append(self.create_block(\"heading_2\", heading_text))\n elif heading_level == 3:\n blocks.append(self.create_block(\"heading_3\", heading_text))\n else:\n blocks.append(self.create_block(\"paragraph\", text))\n elif node.name == \"h1\":\n blocks.append(self.create_block(\"heading_1\", node.get_text(strip=True)))\n elif node.name == \"h2\":\n blocks.append(self.create_block(\"heading_2\", node.get_text(strip=True)))\n elif node.name == \"h3\":\n blocks.append(self.create_block(\"heading_3\", node.get_text(strip=True)))\n elif node.name == \"p\":\n code_node = node.find(\"code\")\n if code_node:\n code_text = code_node.get_text()\n language, code = self.extract_language_and_code(code_text)\n blocks.append(self.create_block(\"code\", code, language=language))\n elif self.is_table(str(node)):\n blocks.extend(self.process_table(node))\n else:\n blocks.append(self.create_block(\"paragraph\", node.get_text(strip=True)))\n elif node.name == \"ul\":\n blocks.extend(self.process_list(node, \"bulleted_list_item\"))\n elif node.name == \"ol\":\n blocks.extend(self.process_list(node, \"numbered_list_item\"))\n elif node.name == \"blockquote\":\n blocks.append(self.create_block(\"quote\", node.get_text(strip=True)))\n elif node.name == \"hr\":\n blocks.append(self.create_block(\"divider\", \"\"))\n elif node.name == \"img\":\n blocks.append(self.create_block(\"image\", \"\", image_url=node.get(\"src\")))\n elif node.name == \"a\":\n blocks.append(self.create_block(\"bookmark\", node.get_text(strip=True), link_url=node.get(\"href\")))\n elif node.name == \"table\":\n blocks.extend(self.process_table(node))\n\n for child in 
node.children:\n if isinstance(child, str):\n continue\n blocks.extend(self.process_node(child))\n\n return blocks\n\n def extract_language_and_code(self, code_text):\n lines = code_text.split(\"\\n\")\n language = lines[0].strip()\n code = \"\\n\".join(lines[1:]).strip()\n return language, code\n\n def is_code_block(self, text):\n return text.startswith(\"```\")\n\n def extract_code_block(self, text):\n lines = text.split(\"\\n\")\n language = lines[0].strip(\"`\").strip()\n code = \"\\n\".join(lines[1:]).strip(\"`\").strip()\n return language, code\n\n def is_table(self, text):\n rows = text.split(\"\\n\")\n if len(rows) < 2:\n return False\n\n has_separator = False\n for i, row in enumerate(rows):\n if \"|\" in row:\n cells = [cell.strip() for cell in row.split(\"|\")]\n cells = [cell for cell in cells if cell] # Remove empty cells\n if i == 1 and all(set(cell) <= set(\"-|\") for cell in cells):\n has_separator = True\n elif not cells:\n return False\n\n return has_separator and len(rows) >= 3\n\n def process_list(self, node, list_type):\n blocks = []\n for item in node.find_all(\"li\"):\n item_text = item.get_text(strip=True)\n checked = item_text.startswith(\"[x]\")\n is_checklist = item_text.startswith(\"[ ]\") or checked\n\n if is_checklist:\n item_text = item_text.replace(\"[x]\", \"\").replace(\"[ ]\", \"\").strip()\n blocks.append(self.create_block(\"to_do\", item_text, checked=checked))\n else:\n blocks.append(self.create_block(list_type, item_text))\n return blocks\n\n def process_table(self, node):\n blocks = []\n header_row = node.find(\"thead\").find(\"tr\") if node.find(\"thead\") else None\n body_rows = node.find(\"tbody\").find_all(\"tr\") if node.find(\"tbody\") else []\n\n if header_row or body_rows:\n table_width = max(\n len(header_row.find_all([\"th\", \"td\"])) if header_row else 0,\n max(len(row.find_all([\"th\", \"td\"])) for row in body_rows),\n )\n\n table_block = self.create_block(\"table\", \"\", table_width=table_width, 
has_column_header=bool(header_row))\n blocks.append(table_block)\n\n if header_row:\n header_cells = [cell.get_text(strip=True) for cell in header_row.find_all([\"th\", \"td\"])]\n header_row_block = self.create_block(\"table_row\", header_cells)\n blocks.append(header_row_block)\n\n for row in body_rows:\n cells = [cell.get_text(strip=True) for cell in row.find_all([\"th\", \"td\"])]\n row_block = self.create_block(\"table_row\", cells)\n blocks.append(row_block)\n\n return blocks\n\n def create_block(self, block_type: str, content: str, **kwargs) -> Dict[str, Any]:\n block: dict[str, Any] = {\n \"object\": \"block\",\n \"type\": block_type,\n block_type: {},\n }\n\n if block_type in [\n \"paragraph\",\n \"heading_1\",\n \"heading_2\",\n \"heading_3\",\n \"bulleted_list_item\",\n \"numbered_list_item\",\n \"quote\",\n ]:\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n elif block_type == \"to_do\":\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n block[block_type][\"checked\"] = kwargs.get(\"checked\", False)\n elif block_type == \"code\":\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n block[block_type][\"language\"] = kwargs.get(\"language\", \"plain text\")\n elif block_type == \"image\":\n block[block_type] = {\"type\": \"external\", \"external\": {\"url\": kwargs.get(\"image_url\", \"\")}}\n elif block_type == \"divider\":\n pass\n elif block_type == \"bookmark\":\n block[block_type][\"url\"] = kwargs.get(\"link_url\", \"\")\n elif block_type == \"table\":\n block[block_type][\"table_width\"] = kwargs.get(\"table_width\", 0)\n block[block_type][\"has_column_header\"] = kwargs.get(\"has_column_header\", False)\n block[block_type][\"has_row_header\"] = kwargs.get(\"has_row_header\", False)\n elif block_type == \"table_row\":\n block[block_type][\"cells\"] = 
[[{\"type\": \"text\", \"text\": {\"content\": cell}} for cell in content]]\n\n return block\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"markdown_text":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"markdown_text","value":"","display_name":"Markdown Text","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The markdown text to convert to Notion blocks.","title_case":false,"type":"str","_input_type":"MultilineInput"},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Convert markdown text to Notion blocks and append them to a Notion page.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Add Content to Page ","documentation":"https://developers.notion.com/reference/patch-block-children","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["markdown_text","block_id","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"AddContentToPage-ZezUn","description":"Convert markdown text to Notion blocks and append them to a Notion page.","display_name":"Add Content to Page 
"},"selected":false,"width":384,"height":330,"dragging":false,"positionAbsolute":{"x":1416.217259177943,"y":1709.6205867919527}},{"id":"NotionPageCreator-6SCB5","type":"genericNode","position":{"x":1413.9782390799146,"y":2051.645785494985},"data":{"type":"NotionPageCreator","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nfrom typing import Dict, Any, Union\nimport requests\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom langflow.io import Output\n\nclass NotionPageCreator(LCToolComponent):\n display_name: str = \"Create Page \"\n description: str = \"A component for creating Notion pages.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/page-create\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"database_id\",\n display_name=\"Database ID\",\n info=\"The ID of the Notion database.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n MultilineInput(\n name=\"properties_json\",\n display_name=\"Properties (JSON)\",\n info=\"The properties of the new page as a JSON string.\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionPageCreatorSchema(BaseModel):\n database_id: str = Field(..., description=\"The ID of the Notion database.\")\n properties_json: str = Field(..., description=\"The properties of the new page as a JSON string.\")\n\n def run_model(self) -> Data:\n result = 
self._create_notion_page(self.database_id, self.properties_json)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the created page data\n output = \"Created page properties:\\n\"\n for prop_name, prop_value in result.get(\"properties\", {}).items():\n output += f\"{prop_name}: {prop_value}\\n\"\n return Data(text=output, data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"create_notion_page\",\n description=\"Create a new page in a Notion database. IMPORTANT: Use the tool to check the Database properties for more details before using this tool.\",\n func=self._create_notion_page,\n args_schema=self.NotionPageCreatorSchema,\n )\n\n def _create_notion_page(self, database_id: str, properties_json: str) -> Union[Dict[str, Any], str]:\n if not database_id or not properties_json:\n return \"Invalid input. Please provide 'database_id' and 'properties_json'.\"\n\n try:\n properties = json.loads(properties_json)\n except json.JSONDecodeError as e:\n return f\"Invalid properties format. Please provide a valid JSON string. Error: {str(e)}\"\n\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"parent\": {\"database_id\": database_id},\n \"properties\": properties,\n }\n\n try:\n response = requests.post(\"https://api.notion.com/v1/pages\", headers=headers, json=data)\n response.raise_for_status()\n result = response.json()\n return result\n except requests.exceptions.RequestException as e:\n error_message = f\"Failed to create Notion page. 
Error: {str(e)}\"\n if hasattr(e, \"response\") and e.response is not None:\n error_message += f\" Status code: {e.response.status_code}, Response: {e.response.text}\"\n return error_message\n\n def __call__(self, *args, **kwargs):\n return self._create_notion_page(*args, **kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"database_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"database_id","value":"","display_name":"Database ID","advanced":true,"dynamic":false,"info":"The ID of the Notion database.","title_case":false,"type":"str","_input_type":"StrInput"},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"properties_json":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"properties_json","value":"","display_name":"Properties (JSON)","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The properties of the new page as a JSON string.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"A component for creating Notion pages.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Create Page 
","documentation":"https://docs.langflow.org/integrations/notion/page-create","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["database_id","notion_secret","properties_json"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionPageCreator-6SCB5","description":"A component for creating Notion pages.","display_name":"Create Page "},"selected":false,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":1413.9782390799146,"y":2051.645785494985}},{"id":"NotionDatabaseProperties-aeWil","type":"genericNode","position":{"x":1004.5753613670959,"y":1713.914531491452},"data":{"type":"NotionDatabaseProperties","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import Dict, Union\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom langflow.io import Output\n\nclass NotionDatabaseProperties(LCToolComponent):\n display_name: str = \"List Database Properties \"\n description: str = \"Retrieve properties of a Notion database.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/list-database-properties\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"database_id\",\n display_name=\"Database ID\",\n info=\"The ID of the Notion database.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n 
display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionDatabasePropertiesSchema(BaseModel):\n database_id: str = Field(..., description=\"The ID of the Notion database.\")\n\n def run_model(self) -> Data:\n result = self._fetch_database_properties(self.database_id)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the properties\n return Data(text=str(result), data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_database_properties\",\n description=\"Retrieve properties of a Notion database. Input should include the database ID.\",\n func=self._fetch_database_properties,\n args_schema=self.NotionDatabasePropertiesSchema,\n )\n\n def _fetch_database_properties(self, database_id: str) -> Union[Dict, str]:\n url = f\"https://api.notion.com/v1/databases/{database_id}\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\", # Use the latest supported version\n }\n try:\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n data = response.json()\n properties = data.get(\"properties\", {})\n return properties\n except requests.exceptions.RequestException as e:\n return f\"Error fetching Notion database properties: {str(e)}\"\n except ValueError as e:\n return f\"Error parsing Notion API response: {str(e)}\"\n except Exception as e:\n return f\"An unexpected error occurred: 
{str(e)}\"\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"database_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"database_id","value":"","display_name":"Database ID","advanced":true,"dynamic":false,"info":"The ID of the Notion database.","title_case":false,"type":"str","_input_type":"StrInput"},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Retrieve properties of a Notion database.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Database Properties ","documentation":"https://docs.langflow.org/integrations/notion/list-database-properties","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["database_id","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionDatabaseProperties-aeWil","description":"Retrieve properties of a Notion database.","display_name":"List Database Properties 
"},"selected":false,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":1004.5753613670959,"y":1713.914531491452}},{"id":"NotionListPages-znA3w","type":"genericNode","position":{"x":1006.1848442547046,"y":2022.7880909242833},"data":{"type":"NotionListPages","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nimport json\nfrom typing import Dict, Any, List, Optional\nfrom pydantic import BaseModel, Field\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionListPages(LCToolComponent):\n display_name: str = \"List Pages \"\n description: str = (\n \"Query a Notion database with filtering and sorting. \"\n \"The input should be a JSON string containing the 'filter' and 'sorts' objects. \"\n \"Example input:\\n\"\n '{\"filter\": {\"property\": \"Status\", \"select\": {\"equals\": \"Done\"}}, \"sorts\": [{\"timestamp\": \"created_time\", \"direction\": \"descending\"}]}'\n )\n documentation: str = \"https://docs.langflow.org/integrations/notion/list-pages\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n StrInput(\n name=\"database_id\",\n display_name=\"Database ID\",\n info=\"The ID of the Notion database to query.\",\n ),\n MultilineInput(\n name=\"query_json\",\n display_name=\"Database query (JSON)\",\n info=\"A JSON string containing the filters and sorts that will be used for querying the database. 
Leave empty for no filters or sorts.\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n class NotionListPagesSchema(BaseModel):\n database_id: str = Field(..., description=\"The ID of the Notion database to query.\")\n query_json: Optional[str] = Field(\n default=\"\",\n description=\"A JSON string containing the filters and sorts for querying the database. Leave empty for no filters or sorts.\",\n )\n\n def run_model(self) -> List[Data]:\n result = self._query_notion_database(self.database_id, self.query_json)\n\n if isinstance(result, str):\n # An error occurred, return it as a single record\n return [Data(text=result)]\n\n records = []\n combined_text = f\"Pages found: {len(result)}\\n\\n\"\n\n for page in result:\n page_data = {\n \"id\": page[\"id\"],\n \"url\": page[\"url\"],\n \"created_time\": page[\"created_time\"],\n \"last_edited_time\": page[\"last_edited_time\"],\n \"properties\": page[\"properties\"],\n }\n\n text = (\n f\"id: {page['id']}\\n\"\n f\"url: {page['url']}\\n\"\n f\"created_time: {page['created_time']}\\n\"\n f\"last_edited_time: {page['last_edited_time']}\\n\"\n f\"properties: {json.dumps(page['properties'], indent=2)}\\n\\n\"\n )\n\n combined_text += text\n records.append(Data(text=text, **page_data))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_list_pages\",\n description=self.description,\n func=self._query_notion_database,\n args_schema=self.NotionListPagesSchema,\n )\n\n def _query_notion_database(self, database_id: str, query_json: Optional[str] = None) -> List[Dict[str, Any]] | str:\n url = f\"https://api.notion.com/v1/databases/{database_id}/query\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n 
}\n\n query_payload = {}\n if query_json and query_json.strip():\n try:\n query_payload = json.loads(query_json)\n except json.JSONDecodeError as e:\n return f\"Invalid JSON format for query: {str(e)}\"\n\n try:\n response = requests.post(url, headers=headers, json=query_payload)\n response.raise_for_status()\n results = response.json()\n return results[\"results\"]\n except requests.exceptions.RequestException as e:\n return f\"Error querying Notion database: {str(e)}\"\n except KeyError:\n return \"Unexpected response format from Notion API\"\n except Exception as e:\n return f\"An unexpected error occurred: {str(e)}\"\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"database_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"database_id","value":"","display_name":"Database ID","advanced":true,"dynamic":false,"info":"The ID of the Notion database to query.","title_case":false,"type":"str","_input_type":"StrInput"},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"query_json":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"query_json","value":"","display_name":"Database query (JSON)","advanced":true,"input_types":["Message"],"dynamic":false,"info":"A JSON string containing the filters and sorts that will be used for querying the database. Leave empty for no filters or sorts.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Query a Notion database with filtering and sorting. 
The input should be a JSON string containing the 'filter' and 'sorts' objects. Example input:\n{\"filter\": {\"property\": \"Status\", \"select\": {\"equals\": \"Done\"}}, \"sorts\": [{\"timestamp\": \"created_time\", \"direction\": \"descending\"}]}","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Pages ","documentation":"https://docs.langflow.org/integrations/notion/list-pages","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["notion_secret","database_id","query_json"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionListPages-znA3w","description":"Query a Notion database with filtering and sorting. The input should be a JSON string containing the 'filter' and 'sorts' objects. 
Example input:\n{\"filter\": {\"property\": \"Status\", \"select\": {\"equals\": \"Done\"}}, \"sorts\": [{\"timestamp\": \"created_time\", \"direction\": \"descending\"}]}","display_name":"List Pages "},"selected":false,"width":384,"height":470,"dragging":false,"positionAbsolute":{"x":1006.1848442547046,"y":2022.7880909242833}},{"id":"NotionUserList-C3eGn","type":"genericNode","position":{"x":2260.15497405973,"y":1717.4551881467207},"data":{"type":"NotionUserList","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import List, Dict\nfrom pydantic import BaseModel\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionUserList(LCToolComponent):\n display_name = \"List Users \"\n description = \"Retrieve users from Notion.\"\n documentation = \"https://docs.langflow.org/integrations/notion/list-users\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionUserListSchema(BaseModel):\n pass\n\n def run_model(self) -> List[Data]:\n users = self._list_users()\n records = []\n combined_text = \"\"\n\n for user in users:\n output = \"User:\\n\"\n for key, value in user.items():\n output += f\"{key.replace('_', ' ').title()}: {value}\\n\"\n output += \"________________________\\n\"\n\n combined_text += output\n records.append(Data(text=output, data=user))\n\n self.status = records\n return 
records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_list_users\",\n description=\"Retrieve users from Notion.\",\n func=self._list_users,\n args_schema=self.NotionUserListSchema,\n )\n\n def _list_users(self) -> List[Dict]:\n url = \"https://api.notion.com/v1/users\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n\n data = response.json()\n results = data[\"results\"]\n\n users = []\n for user in results:\n user_data = {\n \"id\": user[\"id\"],\n \"type\": user[\"type\"],\n \"name\": user.get(\"name\", \"\"),\n \"avatar_url\": user.get(\"avatar_url\", \"\"),\n }\n users.append(user_data)\n\n return users\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Retrieve users from Notion.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Users 
","documentation":"https://docs.langflow.org/integrations/notion/list-users","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionUserList-C3eGn","description":"Retrieve users from Notion.","display_name":"List Users "},"selected":true,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":2260.15497405973,"y":1717.4551881467207}},{"id":"NotionPageContent-SlL21","type":"genericNode","position":{"x":1826.4242329724448,"y":1715.6365113286927},"data":{"type":"NotionPageContent","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom langflow.io import Output\n\nclass NotionPageContent(LCToolComponent):\n display_name = \"Page Content Viewer \"\n description = \"Retrieve the content of a Notion page as plain text.\"\n documentation = \"https://docs.langflow.org/integrations/notion/page-content-viewer\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"page_id\",\n display_name=\"Page ID\",\n info=\"The ID of the Notion page to retrieve.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n 
Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionPageContentSchema(BaseModel):\n page_id: str = Field(..., description=\"The ID of the Notion page to retrieve.\")\n\n def run_model(self) -> Data:\n result = self._retrieve_page_content(self.page_id)\n if isinstance(result, str) and result.startswith(\"Error:\"):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the content\n return Data(text=result, data={\"content\": result})\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_page_content\",\n description=\"Retrieve the content of a Notion page as plain text.\",\n func=self._retrieve_page_content,\n args_schema=self.NotionPageContentSchema,\n )\n\n def _retrieve_page_content(self, page_id: str) -> str:\n blocks_url = f\"https://api.notion.com/v1/blocks/{page_id}/children?page_size=100\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\",\n }\n try:\n blocks_response = requests.get(blocks_url, headers=headers)\n blocks_response.raise_for_status()\n blocks_data = blocks_response.json()\n return self.parse_blocks(blocks_data.get(\"results\", []))\n except requests.exceptions.RequestException as e:\n error_message = f\"Error: Failed to retrieve Notion page content. {str(e)}\"\n if hasattr(e, \"response\") and e.response is not None:\n error_message += f\" Status code: {e.response.status_code}, Response: {e.response.text}\"\n return error_message\n except Exception as e:\n return f\"Error: An unexpected error occurred while retrieving Notion page content. 
{str(e)}\"\n\n def parse_blocks(self, blocks: list) -> str:\n content = \"\"\n for block in blocks:\n block_type = block.get(\"type\")\n if block_type in [\"paragraph\", \"heading_1\", \"heading_2\", \"heading_3\", \"quote\"]:\n content += self.parse_rich_text(block[block_type].get(\"rich_text\", [])) + \"\\n\\n\"\n elif block_type in [\"bulleted_list_item\", \"numbered_list_item\"]:\n content += self.parse_rich_text(block[block_type].get(\"rich_text\", [])) + \"\\n\"\n elif block_type == \"to_do\":\n content += self.parse_rich_text(block[\"to_do\"].get(\"rich_text\", [])) + \"\\n\"\n elif block_type == \"code\":\n content += self.parse_rich_text(block[\"code\"].get(\"rich_text\", [])) + \"\\n\\n\"\n elif block_type == \"image\":\n content += f\"[Image: {block['image'].get('external', {}).get('url', 'No URL')}]\\n\\n\"\n elif block_type == \"divider\":\n content += \"---\\n\\n\"\n return content.strip()\n\n def parse_rich_text(self, rich_text: list) -> str:\n return \"\".join(segment.get(\"plain_text\", \"\") for segment in rich_text)\n\n def __call__(self, *args, **kwargs):\n return self._retrieve_page_content(*args, **kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"page_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"page_id","value":"","display_name":"Page ID","advanced":true,"dynamic":false,"info":"The ID of the Notion page to retrieve.","title_case":false,"type":"str","_input_type":"StrInput"}},"description":"Retrieve the content of a Notion page as plain 
text.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Page Content Viewer ","documentation":"https://docs.langflow.org/integrations/notion/page-content-viewer","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["page_id","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionPageContent-SlL21","description":"Retrieve the content of a Notion page as plain text.","display_name":"Page Content Viewer "},"selected":false,"width":384,"height":330,"dragging":false,"positionAbsolute":{"x":1826.4242329724448,"y":1715.6365113286927}},{"id":"NotionSearch-VS2mI","type":"genericNode","position":{"x":2258.1166047519732,"y":2034.3959294952945},"data":{"type":"NotionSearch","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import Dict, Any, List\nfrom pydantic import BaseModel, Field\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, DropdownInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionSearch(LCToolComponent):\n display_name: str = \"Search \"\n description: str = \"Searches all pages and databases that have been shared with an integration. 
The search field can be an empty value to show all values from that search\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/search\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n StrInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The text that the API compares page and database titles against.\",\n ),\n DropdownInput(\n name=\"filter_value\",\n display_name=\"Filter Type\",\n info=\"Limits the results to either only pages or only databases.\",\n options=[\"page\", \"database\"],\n value=\"page\",\n ),\n DropdownInput(\n name=\"sort_direction\",\n display_name=\"Sort Direction\",\n info=\"The direction to sort the results.\",\n options=[\"ascending\", \"descending\"],\n value=\"descending\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionSearchSchema(BaseModel):\n query: str = Field(..., description=\"The search query text.\")\n filter_value: str = Field(default=\"page\", description=\"Filter type: 'page' or 'database'.\")\n sort_direction: str = Field(default=\"descending\", description=\"Sort direction: 'ascending' or 'descending'.\")\n\n def run_model(self) -> List[Data]:\n results = self._search_notion(self.query, self.filter_value, self.sort_direction)\n records = []\n combined_text = f\"Results found: {len(results)}\\n\\n\"\n\n for result in results:\n result_data = {\n \"id\": result[\"id\"],\n \"type\": result[\"object\"],\n \"last_edited_time\": result[\"last_edited_time\"],\n }\n\n if result[\"object\"] == \"page\":\n result_data[\"title_or_url\"] = result[\"url\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['url']}\\n\"\n elif result[\"object\"] == \"database\":\n if \"title\" in result and 
isinstance(result[\"title\"], list) and len(result[\"title\"]) > 0:\n result_data[\"title_or_url\"] = result[\"title\"][0][\"plain_text\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['title'][0]['plain_text']}\\n\"\n else:\n result_data[\"title_or_url\"] = \"N/A\"\n text = f\"id: {result['id']}\\ntitle_or_url: N/A\\n\"\n\n text += f\"type: {result['object']}\\nlast_edited_time: {result['last_edited_time']}\\n\\n\"\n combined_text += text\n records.append(Data(text=text, data=result_data))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_search\",\n description=\"Search Notion pages and databases. Input should include the search query and optionally filter type and sort direction.\",\n func=self._search_notion,\n args_schema=self.NotionSearchSchema,\n )\n\n def _search_notion(\n self, query: str, filter_value: str = \"page\", sort_direction: str = \"descending\"\n ) -> List[Dict[str, Any]]:\n url = \"https://api.notion.com/v1/search\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"query\": query,\n \"filter\": {\"value\": filter_value, \"property\": \"object\"},\n \"sort\": {\"direction\": sort_direction, \"timestamp\": \"last_edited_time\"},\n }\n\n response = requests.post(url, headers=headers, json=data)\n response.raise_for_status()\n\n results = response.json()\n return results[\"results\"]\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"filter_value":{"trace_as_metadata":true,"options":["page","database"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"filter_value","value":"page","display_name":"Filter Type","advanced":true,"dynamic":false,"info":"Limits the results to either only pages or only 
databases.","title_case":false,"type":"str","_input_type":"DropdownInput"},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"query":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"query","value":"","display_name":"Search Query","advanced":true,"dynamic":false,"info":"The text that the API compares page and database titles against.","title_case":false,"type":"str","_input_type":"StrInput"},"sort_direction":{"trace_as_metadata":true,"options":["ascending","descending"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sort_direction","value":"descending","display_name":"Sort Direction","advanced":true,"dynamic":false,"info":"The direction to sort the results.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"Searches all pages and databases that have been shared with an integration. 
The search field can be an empty value to show all values from that search","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Search ","documentation":"https://docs.langflow.org/integrations/notion/search","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["notion_secret","query","filter_value","sort_direction"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionSearch-VS2mI","description":"Searches all pages and databases that have been shared with an integration.","display_name":"Search "},"selected":false,"width":384,"height":386,"dragging":false,"positionAbsolute":{"x":2258.1166047519732,"y":2034.3959294952945}},{"id":"NotionPageUpdate-6FyYd","type":"genericNode","position":{"x":1827.0574354713603,"y":2055.9948126656136},"data":{"type":"NotionPageUpdate","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nimport requests\nfrom typing import Dict, Any, Union\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom loguru import logger\nfrom langflow.io import Output\n\nclass NotionPageUpdate(LCToolComponent):\n display_name: str = \"Update Page Property \"\n description: str = \"Update the properties of a Notion page.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/page-update\"\n icon = 
\"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"page_id\",\n display_name=\"Page ID\",\n info=\"The ID of the Notion page to update.\",\n ),\n MultilineInput(\n name=\"properties\",\n display_name=\"Properties\",\n info=\"The properties to update on the page (as a JSON string or a dictionary).\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionPageUpdateSchema(BaseModel):\n page_id: str = Field(..., description=\"The ID of the Notion page to update.\")\n properties: Union[str, Dict[str, Any]] = Field(\n ..., description=\"The properties to update on the page (as a JSON string or a dictionary).\"\n )\n\n def run_model(self) -> Data:\n result = self._update_notion_page(self.page_id, self.properties)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the updated page data\n output = \"Updated page properties:\\n\"\n for prop_name, prop_value in result.get(\"properties\", {}).items():\n output += f\"{prop_name}: {prop_value}\\n\"\n return Data(text=output, data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"update_notion_page\",\n description=\"Update the properties of a Notion page. 
IMPORTANT: Use the tool to check the Database properties for more details before using this tool.\",\n func=self._update_notion_page,\n args_schema=self.NotionPageUpdateSchema,\n )\n\n def _update_notion_page(self, page_id: str, properties: Union[str, Dict[str, Any]]) -> Union[Dict[str, Any], str]:\n url = f\"https://api.notion.com/v1/pages/{page_id}\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\", # Use the latest supported version\n }\n\n # Parse properties if it's a string\n if isinstance(properties, str):\n try:\n parsed_properties = json.loads(properties)\n except json.JSONDecodeError as e:\n error_message = f\"Invalid JSON format for properties: {str(e)}\"\n logger.error(error_message)\n return error_message\n\n else:\n parsed_properties = properties\n\n data = {\"properties\": parsed_properties}\n\n try:\n logger.info(f\"Sending request to Notion API: URL: {url}, Data: {json.dumps(data)}\")\n response = requests.patch(url, headers=headers, json=data)\n response.raise_for_status()\n updated_page = response.json()\n\n logger.info(f\"Successfully updated Notion page. 
Response: {json.dumps(updated_page)}\")\n return updated_page\n except requests.exceptions.HTTPError as e:\n error_message = f\"HTTP Error occurred: {str(e)}\"\n if e.response is not None:\n error_message += f\"\\nStatus code: {e.response.status_code}\"\n error_message += f\"\\nResponse body: {e.response.text}\"\n logger.error(error_message)\n return error_message\n except requests.exceptions.RequestException as e:\n error_message = f\"An error occurred while making the request: {str(e)}\"\n logger.error(error_message)\n return error_message\n except Exception as e:\n error_message = f\"An unexpected error occurred: {str(e)}\"\n logger.error(error_message)\n return error_message\n\n def __call__(self, *args, **kwargs):\n return self._update_notion_page(*args, **kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":true,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"page_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"page_id","value":"","display_name":"Page ID","advanced":true,"dynamic":false,"info":"The ID of the Notion page to update.","title_case":false,"type":"str","_input_type":"StrInput"},"properties":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"properties","value":"","display_name":"Properties","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The properties to update on the page (as a JSON string or a dictionary).","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Update 
the properties of a Notion page.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Update Page Property ","documentation":"https://docs.langflow.org/integrations/notion/page-update","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["page_id","properties","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionPageUpdate-6FyYd","description":"Update the properties of a Notion page.","display_name":"Update Page Property "},"selected":false,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":1827.0574354713603,"y":2055.9948126656136}},{"id":"ToolCallingAgent-50Gcd","type":"genericNode","position":{"x":2186.0530739759893,"y":612.1744804997304},"data":{"type":"ToolCallingAgent","node":{"template":{"_type":"Component","chat_history":{"trace_as_metadata":true,"list":true,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"chat_history","value":"","display_name":"Chat History","advanced":false,"input_types":["Data"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"DataInput"},"llm":{"trace_as_metadata":true,"list":false,"required":true,"placeholder":"","show":true,"name":"llm","value":"","display_name":"Language 
Model","advanced":false,"input_types":["LanguageModel"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"tools":{"trace_as_metadata":true,"list":true,"required":false,"placeholder":"","show":true,"name":"tools","value":"","display_name":"Tools","advanced":false,"input_types":["Tool","BaseTool"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from typing import Optional, List\n\nfrom langchain.agents import create_tool_calling_agent\nfrom langchain_core.prompts import ChatPromptTemplate, PromptTemplate, HumanMessagePromptTemplate\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.inputs import MultilineInput\nfrom langflow.inputs.inputs import HandleInput, DataInput\nfrom langflow.schema import Data\n\n\nclass ToolCallingAgentComponent(LCToolsAgentComponent):\n display_name: str = \"Tool Calling Agent\"\n description: str = \"Agent that uses tools\"\n icon = \"LangChain\"\n beta = True\n name = \"ToolCallingAgent\"\n\n inputs = LCToolsAgentComponent._base_inputs + [\n HandleInput(name=\"llm\", display_name=\"Language Model\", input_types=[\"LanguageModel\"], required=True),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"System Prompt\",\n info=\"System prompt for the agent.\",\n value=\"You are a helpful assistant\",\n ),\n MultilineInput(\n name=\"user_prompt\", display_name=\"Prompt\", info=\"This prompt must contain 'input' key.\", value=\"{input}\"\n ),\n DataInput(name=\"chat_history\", display_name=\"Chat History\", is_list=True, advanced=True),\n ]\n\n def get_chat_history_data(self) -> Optional[List[Data]]:\n return self.chat_history\n\n def create_agent_runnable(self):\n if \"input\" not in self.user_prompt:\n raise ValueError(\"Prompt must contain 'input' key.\")\n messages = [\n (\"system\", self.system_prompt),\n (\"placeholder\", 
\"{chat_history}\"),\n HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=[\"input\"], template=self.user_prompt)),\n (\"placeholder\", \"{agent_scratchpad}\"),\n ]\n prompt = ChatPromptTemplate.from_messages(messages)\n return create_tool_calling_agent(self.llm, self.tools, prompt)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"handle_parsing_errors":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"handle_parsing_errors","value":true,"display_name":"Handle Parse Errors","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Input","advanced":false,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageTextInput"},"max_iterations":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"max_iterations","value":15,"display_name":"Max Iterations","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"int","_input_type":"IntInput"},"system_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_prompt","value":"","display_name":"System Prompt","advanced":false,"input_types":["Message"],"dynamic":false,"info":"System prompt for the 
agent.","title_case":false,"type":"str","_input_type":"MultilineInput"},"user_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"user_prompt","value":"{input}","display_name":"Prompt","advanced":true,"input_types":["Message"],"dynamic":false,"info":"This prompt must contain 'input' key.","title_case":false,"type":"str","_input_type":"MultilineInput"},"verbose":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"verbose","value":true,"display_name":"Verbose","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Agent that uses tools","icon":"LangChain","base_classes":["AgentExecutor","Message"],"display_name":"Tool Calling Agent","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["AgentExecutor"],"selected":"AgentExecutor","name":"agent","display_name":"Agent","method":"build_agent","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Message"],"selected":"Message","name":"response","display_name":"Response","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","handle_parsing_errors","verbose","max_iterations","tools","llm","system_prompt","user_prompt","chat_history"],"beta":true,"edited":false,"lf_version":"1.0.17"},"id":"ToolCallingAgent-50Gcd"},"selected":false,"width":384,"height":532,"dragging":false,"positionAbsolute":{"x":2186.0530739759893,"y":612.1744804997304}},{"id":"ChatOutput-TSCup","type":"genericNode","position":{"x":2649.190603849412,"y":841.0466487848925},"data":{"type":"ChatOutput","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import 
BoolInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.memory import store_message\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n name = \"ChatOutput\"\n\n inputs = [\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n def message_response(self) -> Message:\n message = Message(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n )\n if (\n self.session_id\n and isinstance(message, Message)\n and isinstance(message.text, str)\n and self.should_store_message\n ):\n store_message(\n message,\n flow_id=self.graph.flow_id,\n )\n self.message.value = message\n\n self.status = message\n return message\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"data_template":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"data_template","value":"{text}","display_name":"Data Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Text","advanced":false,"input_types":["Message"],"dynamic":false,"info":"Message to be passed as output.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"sender":{"trace_as_metadata":true,"options":["Machine","User"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sender","value":"Machine","display_name":"Sender Type","advanced":true,"dynamic":false,"info":"Type of sender.","title_case":false,"type":"str","_input_type":"DropdownInput"},"sender_name":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sender_name","value":"AI","display_name":"Sender Name","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Name of the sender.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"session_id":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"session_id","value":"","display_name":"Session ID","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The session ID of the chat. 
If empty, the current session ID parameter will be used.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"should_store_message":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"should_store_message","value":true,"display_name":"Store Messages","advanced":true,"dynamic":false,"info":"Store the message in the history.","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Display a chat message in the Playground.","icon":"ChatOutput","base_classes":["Message"],"display_name":"Chat Output","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"message","display_name":"Message","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","should_store_message","sender","sender_name","session_id","data_template"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ChatOutput-TSCup"},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":2649.190603849412,"y":841.0466487848925},"dragging":false},{"id":"ChatInput-bcq6D","type":"genericNode","position":{"x":557.6262725075026,"y":724.8518930903978},"data":{"type":"ChatInput","node":{"template":{"_type":"Component","files":{"trace_as_metadata":true,"file_path":"","fileTypes":["txt","md","mdx","csv","json","yaml","yml","xml","html","htm","pdf","docx","py","sh","sql","js","ts","tsx","jpg","jpeg","png","bmp","image"],"list":true,"required":false,"placeholder":"","show":true,"name":"files","value":"","display_name":"Files","advanced":true,"dynamic":false,"info":"Files to be sent with the message.","title_case":false,"type":"file","_input_type":"FileInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom 
langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, FileInput, MessageTextInput, MultilineInput, Output\nfrom langflow.memory import store_message\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, MESSAGE_SENDER_NAME_USER\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"ChatInput\"\n name = \"ChatInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n def message_response(self) -> Message:\n message = Message(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n )\n\n if (\n self.session_id\n and isinstance(message, Message)\n and isinstance(message.text, str)\n and self.should_store_message\n ):\n store_message(\n message,\n flow_id=self.graph.flow_id,\n )\n self.message.value = message\n\n self.status = message\n return message\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"input_value":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"list users","display_name":"Text","advanced":false,"input_types":["Message"],"dynamic":false,"info":"Message to be passed as input.","title_case":false,"type":"str","_input_type":"MultilineInput"},"sender":{"trace_as_metadata":true,"options":["Machine","User"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sender","value":"User","display_name":"Sender Type","advanced":true,"dynamic":false,"info":"Type of sender.","title_case":false,"type":"str","_input_type":"DropdownInput"},"sender_name":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sender_name","value":"User","display_name":"Sender Name","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Name of the 
sender.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"session_id":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"session_id","value":"","display_name":"Session ID","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The session ID of the chat. If empty, the current session ID parameter will be used.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"should_store_message":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"should_store_message","value":true,"display_name":"Store Messages","advanced":true,"dynamic":false,"info":"Store the message in the history.","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Get chat inputs from the Playground.","icon":"ChatInput","base_classes":["Message"],"display_name":"Chat Input","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"message","display_name":"Message","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","should_store_message","sender","sender_name","session_id","files"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ChatInput-bcq6D"},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":557.6262725075026,"y":724.8518930903978},"dragging":false},{"id":"ToolkitComponent-2lNG0","type":"genericNode","position":{"x":1731.8884789245508,"y":1378.7846304343796},"data":{"type":"ToolkitComponent","node":{"template":{"_type":"Component","tools":{"trace_as_metadata":true,"list":true,"required":false,"placeholder":"","show":true,"name":"tools","value":"","display_name":"Tools","advanced":false,"input_types":["Tool"],"dynamic":false,"info":"List of tools to 
combine.","title_case":false,"type":"other","_input_type":"HandleInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from typing import List\r\nfrom langflow.custom import Component\r\nfrom langflow.inputs import HandleInput, MessageTextInput\r\nfrom langflow.template import Output\r\nfrom langflow.field_typing import Tool, Embeddings\r\nfrom langchain.tools.base import BaseTool, StructuredTool\r\nfrom langflow.schema import Data\r\n\r\nclass ToolkitComponent(Component):\r\n display_name = \"Toolkit\"\r\n description = \"Combines multiple tools into a single list of tools.\"\r\n icon = \"pocket-knife\"\r\n\r\n inputs = [\r\n HandleInput(\r\n name=\"tools\",\r\n display_name=\"Tools\",\r\n input_types=[\"Tool\"],\r\n info=\"List of tools to combine.\",\r\n is_list=True,\r\n ),\r\n ]\r\n\r\n outputs = [\r\n Output(display_name=\"Tools\", name=\"generated_tools\", method=\"generate_toolkit\"),\r\n Output(display_name=\"Tool Data\", name=\"tool_data\", method=\"generate_tool_data\"),\r\n ]\r\n\r\n def generate_toolkit(self) -> List[BaseTool]:\r\n combined_tools = []\r\n name_count = {}\r\n for index, tool in enumerate(self.tools):\r\n self.log(f\"Processing tool {index}: {type(tool)}\")\r\n if isinstance(tool, (BaseTool, StructuredTool)):\r\n processed_tool = tool\r\n elif hasattr(tool, 'build_tool'):\r\n processed_tool = tool.build_tool()\r\n else:\r\n self.log(f\"Unsupported tool type: {type(tool)}. 
Attempting to process anyway.\")\r\n processed_tool = tool\r\n\r\n original_name = getattr(processed_tool, 'name', f\"UnnamedTool_{index}\")\r\n self.log(f\"Original tool name: {original_name}\")\r\n\r\n if original_name not in name_count:\r\n name_count[original_name] = 0\r\n final_name = original_name\r\n else:\r\n name_count[original_name] += 1\r\n final_name = f\"{original_name}_{name_count[original_name]}\"\r\n\r\n if hasattr(processed_tool, 'name'):\r\n processed_tool.name = final_name\r\n\r\n self.log(f\"Final tool name: {final_name}\")\r\n\r\n if isinstance(processed_tool, StructuredTool) and hasattr(processed_tool, 'args_schema'):\r\n processed_tool.args_schema.name = f\"{final_name}_Schema\"\r\n\r\n combined_tools.append(processed_tool)\r\n\r\n debug_info = \"\\n\".join([f\"Tool {i}: {getattr(tool, 'name', f'UnnamedTool_{i}')} (Original: {getattr(tool, '_original_name', 'N/A')}) - Type: {type(tool)}\" for i, tool in enumerate(combined_tools)])\r\n self.log(\"Final toolkit composition:\")\r\n self.log(debug_info)\r\n\r\n\r\n self.status = combined_tools\r\n return combined_tools\r\n\r\n def generate_tool_data(self) -> List[Data]:\r\n tool_data = []\r\n for tool in self.generate_toolkit():\r\n tool_data.append(Data(\r\n data={\r\n \"name\": getattr(tool, 'name', 'Unnamed Tool'),\r\n \"description\": getattr(tool, 'description', 'No description available')\r\n }\r\n ))\r\n return tool_data","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false}},"description":"Combines multiple tools into a single list of 
tools.","icon":"pocket-knife","base_classes":["BaseTool","Data"],"display_name":"Toolkit","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["BaseTool"],"selected":"BaseTool","name":"generated_tools","display_name":"Tools","method":"generate_toolkit","value":"__UNDEFINED__","cache":true},{"types":["Data"],"selected":"Data","name":"tool_data","display_name":"Tool Data","method":"generate_tool_data","value":"__UNDEFINED__","cache":true,"hidden":true}],"field_order":["tools"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"ToolkitComponent-2lNG0"},"selected":false,"width":384,"height":292,"dragging":false,"positionAbsolute":{"x":1731.8884789245508,"y":1378.7846304343796}},{"id":"OpenAIModel-BJWIg","type":"genericNode","position":{"x":1718.9773974162958,"y":603.4642741725065},"data":{"type":"OpenAIModel","node":{"template":{"_type":"Component","api_key":{"load_from_db":true,"required":false,"placeholder":"","show":true,"name":"api_key","value":"","display_name":"OpenAI API Key","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The OpenAI API Key to use for the OpenAI model.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import operator\nfrom functools import reduce\n\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI 
LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = LCModelComponent._base_inputs + [\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return\n if isinstance(e, 
BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Input","advanced":true,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageInput"},"json_mode":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"json_mode","value":false,"display_name":"JSON Mode","advanced":true,"dynamic":false,"info":"If True, it will output JSON regardless of passing a schema.","title_case":false,"type":"bool","_input_type":"BoolInput"},"max_tokens":{"trace_as_metadata":true,"range_spec":{"step_type":"float","min":0,"max":128000,"step":0.1},"list":false,"required":false,"placeholder":"","show":true,"name":"max_tokens","value":"","display_name":"Max Tokens","advanced":true,"dynamic":false,"info":"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.","title_case":false,"type":"int","_input_type":"IntInput"},"model_kwargs":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"model_kwargs","value":{},"display_name":"Model Kwargs","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"dict","_input_type":"DictInput"},"model_name":{"trace_as_metadata":true,"options":["gpt-4o-mini","gpt-4o","gpt-4-turbo","gpt-4-turbo-preview","gpt-4","gpt-3.5-turbo","gpt-3.5-turbo-0125"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"model_name","value":"gpt-4o","display_name":"Model Name","advanced":false,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"DropdownInput"},"openai_api_base":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"openai_api_base","value":"","display_name":"OpenAI API Base","advanced":true,"dynamic":false,"info":"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.","title_case":false,"type":"str","_input_type":"StrInput"},"output_schema":{"trace_as_input":true,"list":true,"required":false,"placeholder":"","show":true,"name":"output_schema","value":{},"display_name":"Schema","advanced":true,"dynamic":false,"info":"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.","title_case":false,"type":"dict","_input_type":"DictInput"},"seed":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"seed","value":1,"display_name":"Seed","advanced":true,"dynamic":false,"info":"The seed controls the reproducibility of the job.","title_case":false,"type":"int","_input_type":"IntInput"},"stream":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"stream","value":false,"display_name":"Stream","advanced":true,"dynamic":false,"info":"Stream the response from the model. Streaming works only in Chat.","title_case":false,"type":"bool","_input_type":"BoolInput"},"system_message":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_message","value":"","display_name":"System Message","advanced":true,"input_types":["Message"],"dynamic":false,"info":"System message to pass to the model.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"temperature":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"temperature","value":"0.2","display_name":"Temperature","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"float","_input_type":"FloatInput"}},"description":"Generates text using OpenAI LLMs.","icon":"OpenAI","base_classes":["LanguageModel","Message"],"display_name":"OpenAI","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text_output","display_name":"Text","method":"text_response","value":"__UNDEFINED__","cache":true},{"types":["LanguageModel"],"selected":"LanguageModel","name":"model_output","display_name":"Language 
Model","method":"build_model","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","system_message","stream","max_tokens","model_kwargs","json_mode","output_schema","model_name","openai_api_base","api_key","temperature","seed"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"OpenAIModel-BJWIg"},"selected":false,"width":384,"height":433,"positionAbsolute":{"x":1718.9773974162958,"y":603.4642741725065},"dragging":false},{"id":"Memory-CTQWu","type":"genericNode","position":{"x":1240.7186213296432,"y":1059.5754404393747},"data":{"type":"Memory","node":{"template":{"_type":"Component","memory":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"memory","value":"","display_name":"External Memory","advanced":true,"input_types":["BaseChatMessageHistory"],"dynamic":false,"info":"Retrieve messages from an external memory. If empty, it will use the Langflow tables.","title_case":false,"type":"other","_input_type":"HandleInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langchain.memory import ConversationBufferMemory\n\nfrom langflow.custom import Component\nfrom langflow.field_typing import BaseChatMemory\nfrom langflow.helpers.data import data_to_text\nfrom langflow.inputs import HandleInput\nfrom langflow.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output\nfrom langflow.memory import LCBuiltinChatMemory, get_messages\nfrom langflow.schema import Data\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Chat Memory\"\n description = \"Retrieves stored chat messages from Langflow tables or an external memory.\"\n icon = \"message-square-more\"\n name = \"Memory\"\n\n inputs = [\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"BaseChatMessageHistory\"],\n 
info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Messages (Data)\", name=\"messages\", method=\"retrieve_messages\"),\n Output(display_name=\"Messages (Text)\", name=\"messages_text\", method=\"retrieve_messages_as_text\"),\n Output(display_name=\"Memory\", name=\"lc_memory\", method=\"build_lc_memory\"),\n ]\n\n def retrieve_messages(self) -> Data:\n sender = self.sender\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender == \"Machine and User\":\n sender = None\n\n if self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = self.memory.messages\n # langchain memories are supposed to return messages in ascending order\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender:\n expected_type = MESSAGE_SENDER_AI if sender == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n stored = get_messages(\n sender=sender,\n sender_name=sender_name,\n session_id=session_id,\n limit=n_messages,\n order=order,\n )\n self.status = stored\n return stored\n\n def retrieve_messages_as_text(self) -> Message:\n stored_text = data_to_text(self.template, self.retrieve_messages())\n self.status = stored_text\n return Message(text=stored_text)\n\n def build_lc_memory(self) -> BaseChatMemory:\n if self.memory:\n chat_memory = self.memory\n else:\n chat_memory = LCBuiltinChatMemory(flow_id=self.flow_id, session_id=self.session_id)\n return 
ConversationBufferMemory(chat_memory=chat_memory)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"n_messages":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"n_messages","value":100,"display_name":"Number of Messages","advanced":true,"dynamic":false,"info":"Number of messages to retrieve.","title_case":false,"type":"int","_input_type":"IntInput"},"order":{"trace_as_metadata":true,"options":["Ascending","Descending"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"order","value":"Ascending","display_name":"Order","advanced":true,"dynamic":false,"info":"Order of the messages.","title_case":false,"type":"str","_input_type":"DropdownInput"},"sender":{"trace_as_metadata":true,"options":["Machine","User","Machine and User"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sender","value":"Machine and User","display_name":"Sender Type","advanced":true,"dynamic":false,"info":"Filter by sender type.","title_case":false,"type":"str","_input_type":"DropdownInput"},"sender_name":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sender_name","value":"","display_name":"Sender Name","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Filter by sender name.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"session_id":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"session_id","value":"","display_name":"Session ID","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The session ID of the chat. 
If empty, the current session ID parameter will be used.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"template":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"{sender_name}: {text}","display_name":"Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The template to use for formatting the data. It can contain the keys {text}, {sender} or any other key in the message data.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Retrieves stored chat messages from Langflow tables or an external memory.","icon":"message-square-more","base_classes":["BaseChatMemory","Data","Message"],"display_name":"Chat Memory","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"messages","display_name":"Messages (Data)","method":"retrieve_messages","value":"__UNDEFINED__","cache":true},{"types":["Message"],"selected":"Message","name":"messages_text","display_name":"Messages 
(Text)","method":"retrieve_messages_as_text","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["BaseChatMemory"],"selected":"BaseChatMemory","name":"lc_memory","display_name":"Memory","method":"build_lc_memory","value":"__UNDEFINED__","cache":true,"hidden":true}],"field_order":["memory","sender","sender_name","n_messages","session_id","order","template"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"Memory-CTQWu"},"selected":false,"width":384,"height":244,"dragging":false,"positionAbsolute":{"x":1240.7186213296432,"y":1059.5754404393747}},{"id":"Prompt-0dWZu","type":"genericNode","position":{"x":1227.4862876736101,"y":616.3826667128244},"data":{"type":"Prompt","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(\n self,\n ) -> Message:\n prompt = Message.from_template_and_variables(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n 
template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"\n This function is called after the code validation is done.\n \"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"template":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"\nYou are a Notion Agent, an AI assistant designed to help users interact with their Notion workspace. Your role is to understand user requests, utilize the appropriate Notion tools to fulfill these requests, and communicate clearly with the user throughout the process.\n\nGeneral Guidelines:\n\n1. Carefully analyze each user request to determine which tool(s) you need to use.\n\n2. Before using any tool, ensure you have all the necessary information. If you need more details, ask the user clear and concise questions.\n\n3. When using a tool, provide a brief explanation to the user about what you're doing and why.\n\n4. 
After using a tool, interpret the results for the user in a clear, concise manner.\n\n5. If a task requires multiple steps, outline your plan to the user before proceeding.\n\n6. If you encounter an error or limitation, explain it to the user and suggest possible solutions or alternative approaches.\n\n7. Always maintain a helpful and professional tone in your interactions.\n\n8. Be proactive in offering suggestions or alternatives if the user's initial request can't be fulfilled exactly as stated.\n\n9. When providing information or results, focus on relevance and clarity. Summarize when necessary, but provide details when they're important.\n\n10. If a user's request is unclear or could be interpreted in multiple ways, ask for clarification before proceeding.\n\n11. After completing a task, summarize what was accomplished and suggest any relevant next steps or additional actions the user might want to take.\n\n12. If a user asks about capabilities you don't have or tools you can't access, clearly explain your limitations and suggest alternative ways to assist if possible.\n\nRemember, your primary goal is to assist the user effectively with their Notion-related tasks using the provided tools. Always strive for clarity, accuracy, and helpfulness in your interactions. 
Adapt your communication style to the user's level of technical understanding and familiarity with Notion.\n\nNow, you're ready to assist the user\n\nToday is: {CURRENT_DATE}\n","display_name":"Template","advanced":false,"dynamic":false,"info":"","title_case":false,"type":"prompt","_input_type":"PromptInput"},"CURRENT_DATE":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"CURRENT_DATE","display_name":"CURRENT_DATE","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"}},"description":"Create a prompt template with dynamic variables.","icon":"prompts","is_input":null,"is_output":null,"is_composition":null,"base_classes":["Message"],"name":"","display_name":"Prompt","documentation":"","custom_fields":{"template":["CURRENT_DATE"]},"output_types":[],"full_path":null,"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"prompt","hidden":null,"display_name":"Prompt Message","method":"build_prompt","value":"__UNDEFINED__","cache":true}],"field_order":["template"],"beta":false,"error":null,"edited":false,"lf_version":"1.0.17"},"id":"Prompt-0dWZu"},"selected":false,"width":384,"height":416,"positionAbsolute":{"x":1227.4862876736101,"y":616.3826667128244},"dragging":false},{"id":"CurrentDateComponent-NSNQ8","type":"genericNode","position":{"x":1092.5108512311297,"y":868.3249850335523},"data":{"type":"CurrentDateComponent","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from datetime import datetime\r\nfrom zoneinfo import ZoneInfo\r\nfrom typing import List\r\n\r\nfrom langflow.custom import Component\r\nfrom langflow.io import DropdownInput, Output\r\nfrom langflow.schema.message import Message\r\n\r\nclass 
CurrentDateComponent(Component):\r\n display_name = \"Current Date đ°ī¸\"\r\n description = \"Returns the current date and time in the selected timezone.\"\r\n icon = \"clock\"\r\n\r\n inputs = [\r\n DropdownInput(\r\n name=\"timezone\",\r\n display_name=\"Timezone\",\r\n options=[\r\n \"UTC\",\r\n \"US/Eastern\",\r\n \"US/Central\",\r\n \"US/Mountain\",\r\n \"US/Pacific\",\r\n \"Europe/London\",\r\n \"Europe/Paris\",\r\n \"Asia/Tokyo\",\r\n \"Australia/Sydney\",\r\n \"America/Sao_Paulo\",\r\n \"America/Cuiaba\",\r\n ],\r\n value=\"UTC\",\r\n info=\"Select the timezone for the current date and time.\",\r\n ),\r\n ]\r\n\r\n outputs = [\r\n Output(display_name=\"Current Date\", name=\"current_date\", method=\"get_current_date\"),\r\n ]\r\n\r\n def get_current_date(self) -> Message:\r\n try:\r\n tz = ZoneInfo(self.timezone)\r\n current_date = datetime.now(tz).strftime(\"%Y-%m-%d %H:%M:%S %Z\")\r\n result = f\"Current date and time in {self.timezone}: {current_date}\"\r\n self.status = result\r\n return Message(text=result)\r\n except Exception as e:\r\n error_message = f\"Error: {str(e)}\"\r\n self.status = error_message\r\n return Message(text=error_message)","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"timezone":{"trace_as_metadata":true,"options":["UTC","US/Eastern","US/Central","US/Mountain","US/Pacific","Europe/London","Europe/Paris","Asia/Tokyo","Australia/Sydney","America/Sao_Paulo","America/Cuiaba"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"timezone","value":"UTC","display_name":"Timezone","advanced":false,"dynamic":false,"info":"Select the timezone for the current date and time.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"Returns the current date and time in the selected timezone.","icon":"clock","base_classes":["Message"],"display_name":"Current 
Date","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"current_date","display_name":"Current Date","method":"get_current_date","value":"__UNDEFINED__","cache":true}],"field_order":["timezone"],"beta":false,"edited":true,"official":false,"lf_version":"1.0.17"},"id":"CurrentDateComponent-NSNQ8","showNode":false},"selected":false,"width":96,"height":96,"dragging":false,"positionAbsolute":{"x":1092.5108512311297,"y":868.3249850335523}}],"edges":[{"source":"ChatInput-bcq6D","target":"ToolCallingAgent-50Gcd","sourceHandle":"{ÅdataTypeÅ:ÅChatInputÅ,ÅidÅ:ÅChatInput-bcq6DÅ,ÅnameÅ:ÅmessageÅ,Åoutput_typesÅ:[ÅMessageÅ]}","targetHandle":"{ÅfieldNameÅ:Åinput_valueÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","id":"reactflow__edge-ChatInput-bcq6D{ÅdataTypeÅ:ÅChatInputÅ,ÅidÅ:ÅChatInput-bcq6DÅ,ÅnameÅ:ÅmessageÅ,Åoutput_typesÅ:[ÅMessageÅ]}-ToolCallingAgent-50Gcd{ÅfieldNameÅ:Åinput_valueÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"input_value","id":"ToolCallingAgent-50Gcd","inputTypes":["Message"],"type":"str"},"sourceHandle":{"dataType":"ChatInput","id":"ChatInput-bcq6D","name":"message","output_types":["Message"]}},"selected":false,"className":""},{"source":"ToolCallingAgent-50Gcd","target":"ChatOutput-TSCup","sourceHandle":"{ÅdataTypeÅ:ÅToolCallingAgentÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅnameÅ:ÅresponseÅ,Åoutput_typesÅ:[ÅMessageÅ]}","targetHandle":"{ÅfieldNameÅ:Åinput_valueÅ,ÅidÅ:ÅChatOutput-TSCupÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","id":"reactflow__edge-ToolCallingAgent-50Gcd{ÅdataTypeÅ:ÅToolCallingAgentÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅnameÅ:ÅresponseÅ,Åoutput_typesÅ:[ÅMessageÅ]}-ChatOutput-TSCup{ÅfieldNameÅ:Åinput_valueÅ,ÅidÅ:ÅChatOutput-TSCupÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"input_value","id":"ChatOutput-TSCup","inputTypes":["
Message"],"type":"str"},"sourceHandle":{"dataType":"ToolCallingAgent","id":"ToolCallingAgent-50Gcd","name":"response","output_types":["Message"]}},"selected":false,"className":""},{"source":"ToolkitComponent-2lNG0","target":"ToolCallingAgent-50Gcd","sourceHandle":"{ÅdataTypeÅ:ÅToolkitComponentÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅnameÅ:Ågenerated_toolsÅ,Åoutput_typesÅ:[ÅBaseToolÅ]}","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","id":"reactflow__edge-ToolkitComponent-2lNG0{ÅdataTypeÅ:ÅToolkitComponentÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅnameÅ:Ågenerated_toolsÅ,Åoutput_typesÅ:[ÅBaseToolÅ]}-ToolCallingAgent-50Gcd{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-50Gcd","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"ToolkitComponent","id":"ToolkitComponent-2lNG0","name":"generated_tools","output_types":["BaseTool"]}},"selected":false,"className":""},{"source":"NotionPageUpdate-6FyYd","sourceHandle":"{ÅdataTypeÅ:ÅNotionPageUpdateÅ,ÅidÅ:ÅNotionPageUpdate-6FyYdÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionPageUpdate","id":"NotionPageUpdate-6FyYd","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionPageUpdate-6FyYd{ÅdataTypeÅ:ÅNotionPageUpdateÅ,ÅidÅ:ÅNotionPageUpdate-6FyYdÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolkitComponent-2lNG0{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","className":"","selected":false},{"source":"NotionPageCreator-6SCB5","sourceHandle":"{ÅdataTypeÅ:ÅNotionPageCrea
torÅ,ÅidÅ:ÅNotionPageCreator-6SCB5Å,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionPageCreator","id":"NotionPageCreator-6SCB5","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionPageCreator-6SCB5{ÅdataTypeÅ:ÅNotionPageCreatorÅ,ÅidÅ:ÅNotionPageCreator-6SCB5Å,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolkitComponent-2lNG0{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","className":"","selected":false},{"source":"AddContentToPage-ZezUn","sourceHandle":"{ÅdataTypeÅ:ÅAddContentToPageÅ,ÅidÅ:ÅAddContentToPage-ZezUnÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"AddContentToPage","id":"AddContentToPage-ZezUn","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-AddContentToPage-ZezUn{ÅdataTypeÅ:ÅAddContentToPageÅ,ÅidÅ:ÅAddContentToPage-ZezUnÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolkitComponent-2lNG0{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","className":"","selected":false},{"source":"NotionDatabaseProperties-aeWil","sourceHandle":"{ÅdataTypeÅ:ÅNotionDatabasePropertiesÅ,ÅidÅ:ÅNotionDatabaseProperties-aeWilÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldNa
me":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionDatabaseProperties","id":"NotionDatabaseProperties-aeWil","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionDatabaseProperties-aeWil{ÅdataTypeÅ:ÅNotionDatabasePropertiesÅ,ÅidÅ:ÅNotionDatabaseProperties-aeWilÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolkitComponent-2lNG0{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","className":"","selected":false},{"source":"NotionListPages-znA3w","sourceHandle":"{ÅdataTypeÅ:ÅNotionListPagesÅ,ÅidÅ:ÅNotionListPages-znA3wÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionListPages","id":"NotionListPages-znA3w","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionListPages-znA3w{ÅdataTypeÅ:ÅNotionListPagesÅ,ÅidÅ:ÅNotionListPages-znA3wÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolkitComponent-2lNG0{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","className":"","selected":false},{"source":"NotionPageContent-SlL21","sourceHandle":"{ÅdataTypeÅ:ÅNotionPageContentÅ,ÅidÅ:ÅNotionPageContent-SlL21Å,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionPageContent","id":"NotionPageContent-SlL21","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionPageContent-SlL2
1{ÅdataTypeÅ:ÅNotionPageContentÅ,ÅidÅ:ÅNotionPageContent-SlL21Å,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolkitComponent-2lNG0{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","className":"","selected":false},{"source":"NotionUserList-C3eGn","sourceHandle":"{ÅdataTypeÅ:ÅNotionUserListÅ,ÅidÅ:ÅNotionUserList-C3eGnÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionUserList","id":"NotionUserList-C3eGn","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionUserList-C3eGn{ÅdataTypeÅ:ÅNotionUserListÅ,ÅidÅ:ÅNotionUserList-C3eGnÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolkitComponent-2lNG0{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","className":"","selected":false},{"source":"NotionSearch-VS2mI","sourceHandle":"{ÅdataTypeÅ:ÅNotionSearchÅ,ÅidÅ:ÅNotionSearch-VS2mIÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolkitComponent-2lNG0","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolkitComponent-2lNG0","inputTypes":["Tool"],"type":"other"},"sourceHandle":{"dataType":"NotionSearch","id":"NotionSearch-VS2mI","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionSearch-VS2mI{ÅdataTypeÅ:ÅNotionSearchÅ,ÅidÅ:ÅNotionSearch-VS2mIÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolkitComponent-2lNG0{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolkitComponent-2lNG0Å,ÅinputTypesÅ:[ÅToolÅ],ÅtypeÅ:ÅotherÅ}","className":"","selected":false},{"source":"OpenAIModel-BJWIg","sourceHandle":"{ÅdataTypeÅ:ÅOpenAIModelÅ,ÅidÅ:
ÅOpenAIModel-BJWIgÅ,ÅnameÅ:Åmodel_outputÅ,Åoutput_typesÅ:[ÅLanguageModelÅ]}","target":"ToolCallingAgent-50Gcd","targetHandle":"{ÅfieldNameÅ:ÅllmÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅinputTypesÅ:[ÅLanguageModelÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"llm","id":"ToolCallingAgent-50Gcd","inputTypes":["LanguageModel"],"type":"other"},"sourceHandle":{"dataType":"OpenAIModel","id":"OpenAIModel-BJWIg","name":"model_output","output_types":["LanguageModel"]}},"id":"reactflow__edge-OpenAIModel-BJWIg{ÅdataTypeÅ:ÅOpenAIModelÅ,ÅidÅ:ÅOpenAIModel-BJWIgÅ,ÅnameÅ:Åmodel_outputÅ,Åoutput_typesÅ:[ÅLanguageModelÅ]}-ToolCallingAgent-50Gcd{ÅfieldNameÅ:ÅllmÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅinputTypesÅ:[ÅLanguageModelÅ],ÅtypeÅ:ÅotherÅ}","className":"","selected":false},{"source":"Memory-CTQWu","sourceHandle":"{ÅdataTypeÅ:ÅMemoryÅ,ÅidÅ:ÅMemory-CTQWuÅ,ÅnameÅ:ÅmessagesÅ,Åoutput_typesÅ:[ÅDataÅ]}","target":"ToolCallingAgent-50Gcd","targetHandle":"{ÅfieldNameÅ:Åchat_historyÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"chat_history","id":"ToolCallingAgent-50Gcd","inputTypes":["Data"],"type":"other"},"sourceHandle":{"dataType":"Memory","id":"Memory-CTQWu","name":"messages","output_types":["Data"]}},"id":"reactflow__edge-Memory-CTQWu{ÅdataTypeÅ:ÅMemoryÅ,ÅidÅ:ÅMemory-CTQWuÅ,ÅnameÅ:ÅmessagesÅ,Åoutput_typesÅ:[ÅDataÅ]}-ToolCallingAgent-50Gcd{ÅfieldNameÅ:Åchat_historyÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}","className":"","selected":false},{"source":"Prompt-0dWZu","sourceHandle":"{ÅdataTypeÅ:ÅPromptÅ,ÅidÅ:ÅPrompt-0dWZuÅ,ÅnameÅ:ÅpromptÅ,Åoutput_typesÅ:[ÅMessageÅ]}","target":"ToolCallingAgent-50Gcd","targetHandle":"{ÅfieldNameÅ:Åsystem_promptÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"system_prompt","id":"ToolCallingAgent-50Gcd","inputTypes":["Message"],"type":"str"},"sourceHandle":{"dataType":"Prompt","id":"Prompt-0dWZu","name":"prompt","outp
ut_types":["Message"]}},"id":"reactflow__edge-Prompt-0dWZu{ÅdataTypeÅ:ÅPromptÅ,ÅidÅ:ÅPrompt-0dWZuÅ,ÅnameÅ:ÅpromptÅ,Åoutput_typesÅ:[ÅMessageÅ]}-ToolCallingAgent-50Gcd{ÅfieldNameÅ:Åsystem_promptÅ,ÅidÅ:ÅToolCallingAgent-50GcdÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","className":"","selected":false},{"source":"CurrentDateComponent-NSNQ8","sourceHandle":"{ÅdataTypeÅ:ÅCurrentDateComponentÅ,ÅidÅ:ÅCurrentDateComponent-NSNQ8Å,ÅnameÅ:Åcurrent_dateÅ,Åoutput_typesÅ:[ÅMessageÅ]}","target":"Prompt-0dWZu","targetHandle":"{ÅfieldNameÅ:ÅCURRENT_DATEÅ,ÅidÅ:ÅPrompt-0dWZuÅ,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"CURRENT_DATE","id":"Prompt-0dWZu","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"CurrentDateComponent","id":"CurrentDateComponent-NSNQ8","name":"current_date","output_types":["Message"]}},"id":"reactflow__edge-CurrentDateComponent-NSNQ8{ÅdataTypeÅ:ÅCurrentDateComponentÅ,ÅidÅ:ÅCurrentDateComponent-NSNQ8Å,ÅnameÅ:Åcurrent_dateÅ,Åoutput_typesÅ:[ÅMessageÅ]}-Prompt-0dWZu{ÅfieldNameÅ:ÅCURRENT_DATEÅ,ÅidÅ:ÅPrompt-0dWZuÅ,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","className":"","selected":false}],"viewport":{"x":97.72528949998423,"y":-211.85229348429561,"zoom":0.41621432461249197}},"description":"This flow creates an AI assistant that interacts with your Notion workspace. It understands natural language requests, performs actions in Notion (like creating pages or searching for information), and provides helpful responses. To use it, simply start a conversation by asking the agent to perform a Notion-related task, and it will guide you through the process, making it easy to manage your Notion workspace through chat.","name":"Conversational Notion Agent","last_tested_version":"1.0.17","endpoint_name":null,"is_component":false}
\ No newline at end of file
diff --git a/docs/docs/Integrations/Notion/Meeting_Notes_Agent.json b/docs/docs/Integrations/Notion/Meeting_Notes_Agent.json
new file mode 100644
index 0000000000000000000000000000000000000000..56d8e74e7a3050f99af16b1c9dbd25339279e811
--- /dev/null
+++ b/docs/docs/Integrations/Notion/Meeting_Notes_Agent.json
@@ -0,0 +1 @@
+{"id":"b6de0fdb-31a2-40bf-b921-719bc0890a0e","data":{"nodes":[{"id":"TextInput-iJPEJ","type":"genericNode","position":{"x":94.43614181571661,"y":387.24602783243165},"data":{"type":"TextInput","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.io.text import TextComponent\nfrom langflow.io import MultilineInput, Output\nfrom langflow.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get text inputs from the Playground.\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n message = Message(\n text=self.input_value,\n )\n return message\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"input_value":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"Good morning. Thanks for joining this project review meeting. We've got quite a few tasks to discuss, especially some Notion-related ones. Shall we get started?\n\nMorning, Felipe. Absolutely, let's dive in. I see we have several projects and tasks on our plate.\n\nGreat. Let's begin with the AI Content Gen project. I'm currently working on \"Montar base agente seletor de cortes.\" It's in progress, and I'm aiming to complete it by June 14th. Have you had a chance to look at this task, Cezar?\n\nI haven't been directly involved with that one. Can you give me an overview of what it entails?\n\nOf course. 
Essentially, we're building a base agent that can intelligently select and edit content. It's part of our larger AI-driven content generation initiative. The challenge is creating an algorithm that can understand context and make smart editing decisions.\n\nInteresting. How's the progress so far?\n\nIt's coming along. I've set up the basic framework, but fine-tuning the selection criteria is proving to be more complex than initially anticipated. I might need an extra day or two beyond the June 14th deadline.\n\nUnderstood, Felipe. Keep me posted if you need any resources or if the deadline needs to be adjusted. By the way, I've been meaning to ask - have you had a chance to look into that new NLP library I mentioned last week? I think it could be useful for this project.\n\nActually, Cezar, I haven't gotten to that yet. Should we add it as a new task? Maybe \"Evaluate NLP library for content selection\"?\n\nGood idea. Let's add that to our task list with a due date of next Friday. Now, moving on to the next task in this project - \"Create Notion Task Automation.\" It's assigned to you and set for June 19th, but you haven't started it yet, right? This is where I'd like to focus our discussion today.\n\nThat's correct. So, the goal is to streamline our workflow by automating certain tasks within Notion. I'm thinking we could create scripts or use Notion's API to automatically create, assign, and update tasks based on certain triggers or schedules.\n\nThat sounds like it could save us a lot of time. What specific automations are you considering?\n\nI'm glad you asked, Cezar. I'm thinking of a few key areas:\n1. Automatic task creation based on project milestones\n2. Assigning tasks to team members based on their expertise and current workload\n3. Updating task statuses based on linked database entries\n4. Generating weekly progress reports\n5. Setting up reminders for overdue tasks\n\nThose all sound valuable. 
Have you looked into the technical requirements for implementing these?\n\nI've done some initial research. Notion's API seems robust enough to handle these automations. We'll likely need to use a combination of Notion's API and a server to run our scripts. I'm thinking of using Node.js for this.\n\nGood thinking. Do you foresee any challenges?\n\nThe main challenge will be ensuring our automations are flexible enough to handle different project structures and team dynamics. We'll need to build in some configurability.\n\nAgreed. Let's make sure we involve the team in defining these automations. Their input will be crucial for making this truly useful. Oh, and speaking of team input, I think we should add a task for \"Conduct team survey on Notion pain points.\" This could help us prioritize which automations to tackle first.\n\nThat's an excellent idea, Cezar. I'll create that task and aim to complete the survey by next Wednesday. Now, I see we have another Notion-related task: \"Subir Notion Agent no Langflow Prod.\" Can you remind me what this entails?\n\nYes, this task is about deploying our Notion integration agent to the Langflow production environment. It's not started yet, but it's a crucial step in making our Notion automations available to the whole team.\n\nI see. What's the timeline for this?\n\nWe haven't set a specific deadline yet, but I think we should aim to complete this shortly after the automation task. Let's tentatively say by the end of June?\n\nSounds reasonable. Make sure to coordinate with the DevOps team for a smooth deployment. And while we're on the topic of deployment, we should probably add a task for \"Create documentation for Notion Agent usage.\" We want to make sure the team knows how to use these new tools once they're available.\n\nYou're right, Felipe. I'll add that to our task list. Now, switching gears a bit, let's talk about the Internal Projects. 
I see you're working on \"Crypto Links\" - it's in progress.\n\nAh yes, our blockchain initiative. It's moving forward. I'm researching various blockchain platforms and their potential applications for our projects. I'm particularly interested in smart contract capabilities.\n\nInteresting. Keep me updated on any promising findings. By the way, have you considered reaching out to any blockchain experts for consultation? It might be worth adding a task for \"Schedule blockchain expert consultation.\"\n\nThat's a great suggestion, Cezar. I'll add it to my to-do list. Now, for the Internal Tasks, I see you're assigned to \"Revisar modos do Charlinho, preparar para open source.\" What's the status on that?\n\nI haven't started yet, but it's on my radar. The deadline is June 7th, so I'll be diving into it this week. Essentially, we need to review and refine Charlinho's modes before we open-source the project.\n\nSounds good. Let me know if you need any assistance with that. Oh, and don't forget we need to add a task for \"Prepare Charlinho documentation for open source.\" We want to make sure our project is well-documented when we release it.\n\nYou're right, Felipe. I'll make sure to include that in our task list. Now, I see you have several tasks assigned to you in the Internal Tasks section. Can you give me a quick rundown?\n\nOf course. I'm working on finding a freelancer to create flows in ComfyUI - that's in progress and due May 28th. I'm also handling the conception of the Agent UI, due May 30th. Both are moving along well.\n\nThere's also a task to \"Check, install and test Gladia to use a bot in Google Meet.\" That's in progress, and I'm collaborating with C on it.\n\nThat's quite a workload. How are you managing all these tasks?\n\nIt's challenging, but I'm prioritizing based on deadlines and dependencies. The Notion automation project is a high priority because it'll help us manage tasks more efficiently in the long run.\n\nGood strategy, Felipe. 
Is there anything you need from me or the team to help move these tasks forward?\n\nActually, yes. For the \"pegar os arquivos necessÃĄrios para tentarmos montar um stinger com ffmpeg\" task, I could use some input on which files are critical for this. It's a low-priority task due June 2nd, but any insights would be helpful.\n\nI'll review our asset library and send you a list of potential files by tomorrow. Oh, and let's add a task for \"Create ffmpeg stinger tutorial\" once we figure out the process. It could be useful for the team in the future.\n\nGreat idea, Cezar. I'll add that to our backlog. Anything else we should discuss?\n\nI think we've covered the major points. Oh, one last thing - for the \"Create Notion Task Automation\" project, I was thinking of setting up a series of short daily meetings next week to keep everyone aligned. What do you think?\n\nThat's a good idea. Maybe 15-minute stand-ups? We can use those to address any roadblocks quickly. And let's add a task for \"Set up Notion Automation progress tracking board\" to help visualize our progress during these stand-ups.\n\nPerfect. I'll send out calendar invites this afternoon and create that tracking board task. Any final thoughts or concerns, Cezar?\n\nNot from my side. I think we have a clear path forward, especially with the Notion-related tasks and the new items we've added to our list.\n\nAgreed. Let's plan to reconvene next week to check on progress, particularly for the Notion automation project and these new tasks we've discussed. Thanks for the comprehensive update, Felipe.\n\nThank you, Cezar. 
I'll send out a summary of our discussion and action items shortly, including all the new tasks we've identified during this meeting.\n","display_name":"Text","advanced":false,"input_types":["Message"],"dynamic":false,"info":"Text to be passed as input.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Get text inputs from the Playground.","icon":"type","base_classes":["Message"],"display_name":"Meeting Transcript","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text","display_name":"Text","method":"text_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"TextInput-iJPEJ"},"selected":false,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":94.43614181571661,"y":387.24602783243165}},{"id":"NotionUserList-TvIKS","type":"genericNode","position":{"x":80.49204196902156,"y":741.0568511678105},"data":{"type":"NotionUserList","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import List, Dict\nfrom pydantic import BaseModel\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionUserList(LCToolComponent):\n display_name = \"List Users \"\n description = \"Retrieve users from Notion.\"\n documentation = \"https://docs.langflow.org/integrations/notion/list-users\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n 
Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionUserListSchema(BaseModel):\n pass\n\n def run_model(self) -> List[Data]:\n users = self._list_users()\n records = []\n combined_text = \"\"\n\n for user in users:\n output = \"User:\\n\"\n for key, value in user.items():\n output += f\"{key.replace('_', ' ').title()}: {value}\\n\"\n output += \"________________________\\n\"\n\n combined_text += output\n records.append(Data(text=output, data=user))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_list_users\",\n description=\"Retrieve users from Notion.\",\n func=self._list_users,\n args_schema=self.NotionUserListSchema,\n )\n\n def _list_users(self) -> List[Dict]:\n url = \"https://api.notion.com/v1/users\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n\n data = response.json()\n results = data[\"results\"]\n\n users = []\n for user in results:\n user_data = {\n \"id\": user[\"id\"],\n \"type\": user[\"type\"],\n \"name\": user.get(\"name\", \"\"),\n \"avatar_url\": user.get(\"avatar_url\", \"\"),\n }\n users.append(user_data)\n\n return users\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Retrieve users from 
Notion.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Users ","documentation":"https://docs.langflow.org/integrations/notion/list-users","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true,"hidden":true}],"field_order":["notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionUserList-TvIKS","description":"Retrieve users from Notion.","display_name":"List Users "},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":80.49204196902156,"y":741.0568511678105},"dragging":false},{"id":"NotionSearch-M66HF","type":"genericNode","position":{"x":1095.6934863134345,"y":407.8718765800806},"data":{"type":"NotionSearch","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import Dict, Any, List\nfrom pydantic import BaseModel, Field\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, DropdownInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionSearch(LCToolComponent):\n display_name: str = \"Search \"\n description: str = \"Searches all pages and databases that have been shared with an integration. 
The search field can be an empty value to show all values from that search\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/search\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n StrInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The text that the API compares page and database titles against.\",\n ),\n DropdownInput(\n name=\"filter_value\",\n display_name=\"Filter Type\",\n info=\"Limits the results to either only pages or only databases.\",\n options=[\"page\", \"database\"],\n value=\"page\",\n ),\n DropdownInput(\n name=\"sort_direction\",\n display_name=\"Sort Direction\",\n info=\"The direction to sort the results.\",\n options=[\"ascending\", \"descending\"],\n value=\"descending\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionSearchSchema(BaseModel):\n query: str = Field(..., description=\"The search query text.\")\n filter_value: str = Field(default=\"page\", description=\"Filter type: 'page' or 'database'.\")\n sort_direction: str = Field(default=\"descending\", description=\"Sort direction: 'ascending' or 'descending'.\")\n\n def run_model(self) -> List[Data]:\n results = self._search_notion(self.query, self.filter_value, self.sort_direction)\n records = []\n combined_text = f\"Results found: {len(results)}\\n\\n\"\n\n for result in results:\n result_data = {\n \"id\": result[\"id\"],\n \"type\": result[\"object\"],\n \"last_edited_time\": result[\"last_edited_time\"],\n }\n\n if result[\"object\"] == \"page\":\n result_data[\"title_or_url\"] = result[\"url\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['url']}\\n\"\n elif result[\"object\"] == \"database\":\n if \"title\" in result and 
isinstance(result[\"title\"], list) and len(result[\"title\"]) > 0:\n result_data[\"title_or_url\"] = result[\"title\"][0][\"plain_text\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['title'][0]['plain_text']}\\n\"\n else:\n result_data[\"title_or_url\"] = \"N/A\"\n text = f\"id: {result['id']}\\ntitle_or_url: N/A\\n\"\n\n text += f\"type: {result['object']}\\nlast_edited_time: {result['last_edited_time']}\\n\\n\"\n combined_text += text\n records.append(Data(text=text, data=result_data))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_search\",\n description=\"Search Notion pages and databases. Input should include the search query and optionally filter type and sort direction.\",\n func=self._search_notion,\n args_schema=self.NotionSearchSchema,\n )\n\n def _search_notion(\n self, query: str, filter_value: str = \"page\", sort_direction: str = \"descending\"\n ) -> List[Dict[str, Any]]:\n url = \"https://api.notion.com/v1/search\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"query\": query,\n \"filter\": {\"value\": filter_value, \"property\": \"object\"},\n \"sort\": {\"direction\": sort_direction, \"timestamp\": \"last_edited_time\"},\n }\n\n response = requests.post(url, headers=headers, json=data)\n response.raise_for_status()\n\n results = response.json()\n return results[\"results\"]\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"filter_value":{"trace_as_metadata":true,"options":["page","database"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"filter_value","value":"database","display_name":"Filter Type","advanced":true,"dynamic":false,"info":"Limits the results to either only pages or only 
databases.","title_case":false,"type":"str","_input_type":"DropdownInput"},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"query":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"query","value":"","display_name":"Search Query","advanced":true,"dynamic":false,"info":"The text that the API compares page and database titles against.","title_case":false,"type":"str","_input_type":"StrInput"},"sort_direction":{"trace_as_metadata":true,"options":["ascending","descending"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sort_direction","value":"descending","display_name":"Sort Direction","advanced":true,"dynamic":false,"info":"The direction to sort the results.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"Searches all pages and databases that have been shared with an integration. 
The search field can be an empty value to show all values from that search","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Search ","documentation":"https://docs.langflow.org/integrations/notion/search","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true,"hidden":false}],"field_order":["notion_secret","query","filter_value","sort_direction"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionSearch-M66HF","description":"Searches all pages and databases that have been shared with an integration.","display_name":"Search "},"selected":false,"width":384,"height":386,"positionAbsolute":{"x":1095.6934863134345,"y":407.8718765800806},"dragging":false},{"id":"Prompt-19rub","type":"genericNode","position":{"x":688.7954025956392,"y":456.4686463487848},"data":{"type":"Prompt","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", 
method=\"build_prompt\"),\n ]\n\n async def build_prompt(\n self,\n ) -> Message:\n prompt = Message.from_template_and_variables(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"\n This function is called after the code validation is done.\n \"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"template":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"\nYou are an AI assistant specialized in analyzing meeting transcripts and identifying tasks. 
Your goal is to extract relevant tasks from the given transcript, search for related existing tasks in Notion, and provide a comprehensive list of tasks with their current status and any needed updates.\n\nYou have access to the following input:\n\n\n{TRANSCRIPT}\n\n\n\n{USERS}\n\n\nFollow these steps to complete your task:\n\n1. Carefully read through the transcript and identify any mentioned tasks, action items, or follow-ups.\n\n2. For each identified task:\n a. Use the notion_search tool to find if there's an existing related task in Notion.\n b. If a related task is found, note its ID and current status.\n c. If no related task is found, mark it as a new task.\n\n3. For each task (existing or new), determine:\n a. The task name or description\n b. The assigned person (if mentioned)\n c. The current status (for existing tasks) or suggested status (for new tasks)\n d. Any updates or changes mentioned in the transcript\n\n4. Compile your findings into a list of tasks using the following format:\n\n\n\n[Notion page ID if existing, or \"NEW\" if new task]\n[Task name or description]\n[Assigned person, if mentioned]\n[Current status for existing tasks, or suggested status for new tasks]\n[Any updates or changes mentioned in the transcript]\n\n\n\nRemember to focus on tasks that are directly related to the meeting discussion. 
Do not include general conversation topics or unrelated mentions as tasks.\n\nProvide your final output in the format specified above, with each task enclosed in its own tags within the overall structure.\n\nToday is: {CURRENT_DATE}\n\n\n\n","display_name":"Template","advanced":false,"dynamic":false,"info":"","title_case":false,"type":"prompt","_input_type":"PromptInput"},"TRANSCRIPT":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"TRANSCRIPT","display_name":"TRANSCRIPT","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"},"USERS":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"USERS","display_name":"USERS","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"},"CURRENT_DATE":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"CURRENT_DATE","display_name":"CURRENT_DATE","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"}},"description":"Create a prompt template with dynamic variables.","icon":"prompts","is_input":null,"is_output":null,"is_composition":null,"base_classes":["Message"],"name":"","display_name":"Prompt","documentation":"","custom_fields":{"template":["TRANSCRIPT","USERS","CURRENT_DATE"]},"output_types":[],"full_path":null,"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"prompt","hidden":null,"display_name":"Prompt 
Message","method":"build_prompt","value":"__UNDEFINED__","cache":true}],"field_order":["template"],"beta":false,"error":null,"edited":false,"lf_version":"1.0.17"},"id":"Prompt-19rub"},"selected":false,"width":384,"height":588,"positionAbsolute":{"x":688.7954025956392,"y":456.4686463487848},"dragging":false},{"id":"ParseData-aNk1v","type":"genericNode","position":{"x":540.4151030255898,"y":834.2819856588019},"data":{"type":"ParseData","node":{"template":{"_type":"Component","data":{"trace_as_metadata":true,"list":false,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"data","value":"","display_name":"Data","advanced":false,"input_types":["Data"],"dynamic":false,"info":"The data to convert to text.","title_case":false,"type":"other","_input_type":"DataInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n name = \"ParseData\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"sep":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sep","value":"\n","display_name":"Separator","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"StrInput"},"template":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"{text}","display_name":"Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Convert Data into plain text following a specified template.","icon":"braces","base_classes":["Message"],"display_name":"Parse Data","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text","display_name":"Text","method":"parse_data","value":"__UNDEFINED__","cache":true}],"field_order":["data","template","sep"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ParseData-aNk1v","showNode":false},"selected":false,"width":96,"height":96,"dragging":false,"positionAbsolute":{"x":540.4151030255898,"y":834.2819856588019}},{"id":"ToolCallingAgent-rVWeq","type":"genericNode","position":{"x":1566.291217492157,"y":583.6687094567968},"data":{"type":"ToolCallingAgent","node":{"template":{"_type":"Component","chat_history":{"trace_as_metadata":true,"list":true,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"chat_history","value":"","display_name":"Chat History","advanced":true,"input_types":["Data"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"DataInput"},"llm":{"trace_as_metadata":true,"list":false,"required":true,"placeholder":"","show":true,"name":"llm","value":"","display_name":"Language Model","advanced":false,"input_types":["LanguageModel"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"tools":{"trace_as_metadata":true,"list":true,"required":false,"placeholder":"","show":true,"name":"tools","value":"","display_name":"Tools","advanced":false,"input_types":["Tool","BaseTool"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from typing import Optional, 
List\n\nfrom langchain.agents import create_tool_calling_agent\nfrom langchain_core.prompts import ChatPromptTemplate, PromptTemplate, HumanMessagePromptTemplate\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.inputs import MultilineInput\nfrom langflow.inputs.inputs import HandleInput, DataInput\nfrom langflow.schema import Data\n\n\nclass ToolCallingAgentComponent(LCToolsAgentComponent):\n display_name: str = \"Tool Calling Agent\"\n description: str = \"Agent that uses tools\"\n icon = \"LangChain\"\n beta = True\n name = \"ToolCallingAgent\"\n\n inputs = LCToolsAgentComponent._base_inputs + [\n HandleInput(name=\"llm\", display_name=\"Language Model\", input_types=[\"LanguageModel\"], required=True),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"System Prompt\",\n info=\"System prompt for the agent.\",\n value=\"You are a helpful assistant\",\n ),\n MultilineInput(\n name=\"user_prompt\", display_name=\"Prompt\", info=\"This prompt must contain 'input' key.\", value=\"{input}\"\n ),\n DataInput(name=\"chat_history\", display_name=\"Chat History\", is_list=True, advanced=True),\n ]\n\n def get_chat_history_data(self) -> Optional[List[Data]]:\n return self.chat_history\n\n def create_agent_runnable(self):\n if \"input\" not in self.user_prompt:\n raise ValueError(\"Prompt must contain 'input' key.\")\n messages = [\n (\"system\", self.system_prompt),\n (\"placeholder\", \"{chat_history}\"),\n HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=[\"input\"], template=self.user_prompt)),\n (\"placeholder\", \"{agent_scratchpad}\"),\n ]\n prompt = ChatPromptTemplate.from_messages(messages)\n return create_tool_calling_agent(self.llm, self.tools, 
prompt)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"handle_parsing_errors":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"handle_parsing_errors","value":true,"display_name":"Handle Parse Errors","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"Analyze this meeting","display_name":"Input","advanced":true,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageTextInput"},"max_iterations":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"max_iterations","value":15,"display_name":"Max Iterations","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"int","_input_type":"IntInput"},"system_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_prompt","value":"","display_name":"System Prompt","advanced":false,"input_types":["Message"],"dynamic":false,"info":"System prompt for the agent.","title_case":false,"type":"str","_input_type":"MultilineInput"},"user_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"user_prompt","value":"{input}","display_name":"Prompt","advanced":true,"input_types":["Message"],"dynamic":false,"info":"This prompt must contain 'input' 
key.","title_case":false,"type":"str","_input_type":"MultilineInput"},"verbose":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"verbose","value":true,"display_name":"Verbose","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Agent that uses tools","icon":"LangChain","base_classes":["AgentExecutor","Message"],"display_name":"Tool Calling Agent","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["AgentExecutor"],"selected":"AgentExecutor","name":"agent","display_name":"Agent","method":"build_agent","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Message"],"selected":"Message","name":"response","display_name":"Response","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","handle_parsing_errors","verbose","max_iterations","tools","llm","system_prompt","user_prompt","chat_history"],"beta":true,"edited":false,"lf_version":"1.0.17"},"id":"ToolCallingAgent-rVWeq"},"selected":false,"width":384,"height":398,"positionAbsolute":{"x":1566.291217492157,"y":583.6687094567968},"dragging":false},{"id":"OpenAIModel-Ht8xI","type":"genericNode","position":{"x":1097.0545781920632,"y":805.60631548423},"data":{"type":"OpenAIModel","node":{"template":{"_type":"Component","api_key":{"load_from_db":false,"required":false,"placeholder":"","show":true,"name":"api_key","value":"","display_name":"OpenAI API Key","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The OpenAI API Key to use for the OpenAI model.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import operator\nfrom functools import reduce\n\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langchain_openai import 
ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = LCModelComponent._base_inputs + [\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return\n if isinstance(e, 
BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Input","advanced":true,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageInput"},"json_mode":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"json_mode","value":false,"display_name":"JSON Mode","advanced":true,"dynamic":false,"info":"If True, it will output JSON regardless of passing a schema.","title_case":false,"type":"bool","_input_type":"BoolInput"},"max_tokens":{"trace_as_metadata":true,"range_spec":{"step_type":"float","min":0,"max":128000,"step":0.1},"list":false,"required":false,"placeholder":"","show":true,"name":"max_tokens","value":"","display_name":"Max Tokens","advanced":true,"dynamic":false,"info":"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.","title_case":false,"type":"int","_input_type":"IntInput"},"model_kwargs":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"model_kwargs","value":{},"display_name":"Model Kwargs","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"dict","_input_type":"DictInput"},"model_name":{"trace_as_metadata":true,"options":["gpt-4o-mini","gpt-4o","gpt-4-turbo","gpt-4-turbo-preview","gpt-4","gpt-3.5-turbo","gpt-3.5-turbo-0125"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"model_name","value":"gpt-4o","display_name":"Model Name","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"DropdownInput"},"openai_api_base":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"openai_api_base","value":"","display_name":"OpenAI API Base","advanced":true,"dynamic":false,"info":"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.","title_case":false,"type":"str","_input_type":"StrInput"},"output_schema":{"trace_as_input":true,"list":true,"required":false,"placeholder":"","show":true,"name":"output_schema","value":{},"display_name":"Schema","advanced":true,"dynamic":false,"info":"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.","title_case":false,"type":"dict","_input_type":"DictInput"},"seed":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"seed","value":1,"display_name":"Seed","advanced":true,"dynamic":false,"info":"The seed controls the reproducibility of the job.","title_case":false,"type":"int","_input_type":"IntInput"},"stream":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"stream","value":false,"display_name":"Stream","advanced":true,"dynamic":false,"info":"Stream the response from the model. Streaming works only in Chat.","title_case":false,"type":"bool","_input_type":"BoolInput"},"system_message":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_message","value":"","display_name":"System Message","advanced":true,"input_types":["Message"],"dynamic":false,"info":"System message to pass to the model.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"temperature":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"temperature","value":0.1,"display_name":"Temperature","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"float","_input_type":"FloatInput"}},"description":"Generates text using OpenAI LLMs.","icon":"OpenAI","base_classes":["LanguageModel","Message"],"display_name":"OpenAI","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text_output","display_name":"Text","method":"text_response","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["LanguageModel"],"selected":"LanguageModel","name":"model_output","display_name":"Language 
Model","method":"build_model","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","system_message","stream","max_tokens","model_kwargs","json_mode","output_schema","model_name","openai_api_base","api_key","temperature","seed"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"OpenAIModel-Ht8xI"},"selected":false,"width":384,"height":302,"dragging":false,"positionAbsolute":{"x":1097.0545781920632,"y":805.60631548423}},{"id":"Prompt-Lbxk6","type":"genericNode","position":{"x":3042.6844997246735,"y":416.83992118486856},"data":{"type":"Prompt","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(\n self,\n ) -> Message:\n prompt = Message.from_template_and_variables(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return 
frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"\n This function is called after the code validation is done.\n \"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"template":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"\nYou are an AI assistant responsible for updating tasks in Notion based on the information provided from a meeting analysis. Your goal is to create new tasks and update existing ones using the Notion API tools available to you, and then provide a summary in a simple markdown format suitable for a chat interface.\n\nYou have access to the following inputs:\n\n\n{TASK_LIST}\n\n\n\n{DATABASES}\n\n\n\n{USERS}\n\n\nFollow these steps to update the tasks in Notion and generate a markdown summary:\n\n1. Identify the Task database ID from the provided list.\n\n2. Before processing any tasks, retrieve the database properties for the Task database:\n a. 
Use the notion_database_properties and carefully review the properties, their types, and any options for select or multi-select properties.\n b. Pay attention to the properties format for further usage.\n\n3. For each task in the task list:\n a. If the task ID is \"NEW\", create a new task using the create_notion_page tool.\n b. If the task has an existing ID, update the task using the update_notion_page tool.\n c. Remember to use the properties from the DB retrieved from the notion_database_properties tool\n\n4. When creating a new task:\n a. Use the create_notion_page tool.\n b. Include the task name, assignee (if available), status, and any other relevant properties based on the database structure.\n c. Ensure that the property names and types match exactly with what you retrieved from the notion_database_properties call.\n\n5. When updating an existing task:\n a. Use the update_notion_page tool.\n b. Update the status, assignee, or any other relevant properties mentioned in the field.\n c. Ensure that the property names and types match exactly with what you retrieved from the notion_database_properties call.\n\n6. After each function call, wait for the before proceeding to the next task.\n\n7. If you encounter any errors during the process, note them and continue with the next task.\n\n8. Provide a summary of your actions for each task in a simple markdown format. Use the following structure:\n # Task Update Summary\n\n ## Created Tasks\n - **[Task Name]**: Assigned to [Assignee], Status: [Status]\n - Details: [Brief description of the new task]\n\n ## Updated Tasks\n - **[Task Name]** (ID: [Notion Page ID])\n - Changes: [Brief description of changes]\n - Status: [Success/Error]\n\n ## Errors\n - **[Task Name or ID]**: [Description of the error encountered]\n\n\nRemember to use the exact property names, types, and options as specified in the Notion database properties you retrieved at the beginning. 
This is crucial for ensuring that all updates and creations are done correctly.\n\nIf you encounter any errors or uncertainties, include them in the Errors section of the markdown summary. With enough detail to the user understand the issues.\n\nProvide your final output as a complete markdown document containing all the tasks you've processed, whether they were created, updated, or encountered errors. Use only basic markdown formatting (headers, bold, lists) to ensure compatibility with chat interfaces. Do not include any XML tags or complex formatting in your final output.\n\nToday is: {CURRENT_DATE}\n\n","display_name":"Template","advanced":false,"dynamic":false,"info":"","title_case":false,"type":"prompt","_input_type":"PromptInput"},"TASK_LIST":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"TASK_LIST","display_name":"TASK_LIST","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"},"DATABASES":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"DATABASES","display_name":"DATABASES","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"},"USERS":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"USERS","display_name":"USERS","advanced":false,"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"},"CURRENT_DATE":{"field_type":"str","required":false,"placeholder":"","list":false,"show":true,"multiline":true,"value":"","fileTypes":[],"file_path":"","password":false,"name":"CURRENT_DATE","display_name":"CURRENT_DATE","advanced":false,
"input_types":["Message","Text"],"dynamic":false,"info":"","load_from_db":false,"title_case":false,"type":"str"}},"description":"Create a prompt template with dynamic variables.","icon":"prompts","is_input":null,"is_output":null,"is_composition":null,"base_classes":["Message"],"name":"","display_name":"Prompt","documentation":"","custom_fields":{"template":["TASK_LIST","DATABASES","USERS","CURRENT_DATE"]},"output_types":[],"full_path":null,"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"prompt","hidden":null,"display_name":"Prompt Message","method":"build_prompt","value":"__UNDEFINED__","cache":true}],"field_order":["template"],"beta":false,"error":null,"edited":false},"id":"Prompt-Lbxk6"},"selected":false,"width":384,"height":674,"positionAbsolute":{"x":3042.6844997246735,"y":416.83992118486856},"dragging":false},{"id":"ToolCallingAgent-GurdE","type":"genericNode","position":{"x":3974.1377259893243,"y":867.4647271037014},"data":{"type":"ToolCallingAgent","node":{"template":{"_type":"Component","chat_history":{"trace_as_metadata":true,"list":true,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"chat_history","value":"","display_name":"Chat History","advanced":true,"input_types":["Data"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"DataInput"},"llm":{"trace_as_metadata":true,"list":false,"required":true,"placeholder":"","show":true,"name":"llm","value":"","display_name":"Language 
Model","advanced":false,"input_types":["LanguageModel"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"tools":{"trace_as_metadata":true,"list":true,"required":false,"placeholder":"","show":true,"name":"tools","value":"","display_name":"Tools","advanced":false,"input_types":["Tool","BaseTool"],"dynamic":false,"info":"","title_case":false,"type":"other","_input_type":"HandleInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from typing import Optional, List\n\nfrom langchain.agents import create_tool_calling_agent\nfrom langchain_core.prompts import ChatPromptTemplate, PromptTemplate, HumanMessagePromptTemplate\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.inputs import MultilineInput\nfrom langflow.inputs.inputs import HandleInput, DataInput\nfrom langflow.schema import Data\n\n\nclass ToolCallingAgentComponent(LCToolsAgentComponent):\n display_name: str = \"Tool Calling Agent\"\n description: str = \"Agent that uses tools\"\n icon = \"LangChain\"\n beta = True\n name = \"ToolCallingAgent\"\n\n inputs = LCToolsAgentComponent._base_inputs + [\n HandleInput(name=\"llm\", display_name=\"Language Model\", input_types=[\"LanguageModel\"], required=True),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"System Prompt\",\n info=\"System prompt for the agent.\",\n value=\"You are a helpful assistant\",\n ),\n MultilineInput(\n name=\"user_prompt\", display_name=\"Prompt\", info=\"This prompt must contain 'input' key.\", value=\"{input}\"\n ),\n DataInput(name=\"chat_history\", display_name=\"Chat History\", is_list=True, advanced=True),\n ]\n\n def get_chat_history_data(self) -> Optional[List[Data]]:\n return self.chat_history\n\n def create_agent_runnable(self):\n if \"input\" not in self.user_prompt:\n raise ValueError(\"Prompt must contain 'input' key.\")\n messages = [\n (\"system\", self.system_prompt),\n (\"placeholder\", 
\"{chat_history}\"),\n HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=[\"input\"], template=self.user_prompt)),\n (\"placeholder\", \"{agent_scratchpad}\"),\n ]\n prompt = ChatPromptTemplate.from_messages(messages)\n return create_tool_calling_agent(self.llm, self.tools, prompt)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"handle_parsing_errors":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"handle_parsing_errors","value":true,"display_name":"Handle Parse Errors","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"Do your task.","display_name":"Input","advanced":true,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageTextInput"},"max_iterations":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"max_iterations","value":15,"display_name":"Max Iterations","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"int","_input_type":"IntInput"},"system_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_prompt","value":"","display_name":"System Prompt","advanced":false,"input_types":["Message"],"dynamic":false,"info":"System prompt for the 
agent.","title_case":false,"type":"str","_input_type":"MultilineInput"},"user_prompt":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"user_prompt","value":"{input}","display_name":"Prompt","advanced":true,"input_types":["Message"],"dynamic":false,"info":"This prompt must contain 'input' key.","title_case":false,"type":"str","_input_type":"MultilineInput"},"verbose":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"verbose","value":true,"display_name":"Verbose","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Agent that uses tools","icon":"LangChain","base_classes":["AgentExecutor","Message"],"display_name":"Tool Calling Agent","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["AgentExecutor"],"selected":"AgentExecutor","name":"agent","display_name":"Agent","method":"build_agent","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Message"],"selected":"Message","name":"response","display_name":"Response","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","handle_parsing_errors","verbose","max_iterations","tools","llm","system_prompt","user_prompt","chat_history"],"beta":true,"edited":false,"lf_version":"1.0.17"},"id":"ToolCallingAgent-GurdE"},"selected":false,"width":384,"height":398,"positionAbsolute":{"x":3974.1377259893243,"y":867.4647271037014},"dragging":false},{"id":"OpenAIModel-OTfnt","type":"genericNode","position":{"x":3513.5648778762093,"y":710.2099422974287},"data":{"type":"OpenAIModel","node":{"template":{"_type":"Component","api_key":{"load_from_db":false,"required":false,"placeholder":"","show":true,"name":"api_key","value":"","display_name":"OpenAI API 
Key","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The OpenAI API Key to use for the OpenAI model.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import operator\nfrom functools import reduce\n\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = LCModelComponent._base_inputs + [\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n if openai_api_key:\n api_key = SecretStr(openai_api_key)\n else:\n api_key = None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n 
return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Input","advanced":true,"input_types":["Message"],"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"MessageInput"},"json_mode":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"json_mode","value":false,"display_name":"JSON Mode","advanced":true,"dynamic":false,"info":"If True, it will output JSON regardless of passing a schema.","title_case":false,"type":"bool","_input_type":"BoolInput"},"max_tokens":{"trace_as_metadata":true,"range_spec":{"step_type":"float","min":0,"max":128000,"step":0.1},"list":false,"required":false,"placeholder":"","show":true,"name":"max_tokens","value":"","display_name":"Max Tokens","advanced":true,"dynamic":false,"info":"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.","title_case":false,"type":"int","_input_type":"IntInput"},"model_kwargs":{"trace_as_input":true,"list":false,"required":false,"placeholder":"","show":true,"name":"model_kwargs","value":{},"display_name":"Model Kwargs","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"dict","_input_type":"DictInput"},"model_name":{"trace_as_metadata":true,"options":["gpt-4o-mini","gpt-4o","gpt-4-turbo","gpt-4-turbo-preview","gpt-4","gpt-3.5-turbo","gpt-3.5-turbo-0125"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"model_name","value":"gpt-4o","display_name":"Model Name","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"DropdownInput"},"openai_api_base":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"openai_api_base","value":"","display_name":"OpenAI API Base","advanced":true,"dynamic":false,"info":"The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.","title_case":false,"type":"str","_input_type":"StrInput"},"output_schema":{"trace_as_input":true,"list":true,"required":false,"placeholder":"","show":true,"name":"output_schema","value":{},"display_name":"Schema","advanced":true,"dynamic":false,"info":"The schema for the Output of the model. You must pass the word JSON in the prompt. 
If left blank, JSON mode will be disabled.","title_case":false,"type":"dict","_input_type":"DictInput"},"seed":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"seed","value":1,"display_name":"Seed","advanced":true,"dynamic":false,"info":"The seed controls the reproducibility of the job.","title_case":false,"type":"int","_input_type":"IntInput"},"stream":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"stream","value":false,"display_name":"Stream","advanced":true,"dynamic":false,"info":"Stream the response from the model. Streaming works only in Chat.","title_case":false,"type":"bool","_input_type":"BoolInput"},"system_message":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"system_message","value":"","display_name":"System Message","advanced":true,"input_types":["Message"],"dynamic":false,"info":"System message to pass to the model.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"temperature":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"temperature","value":0.1,"display_name":"Temperature","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"float","_input_type":"FloatInput"}},"description":"Generates text using OpenAI LLMs.","icon":"OpenAI","base_classes":["LanguageModel","Message"],"display_name":"OpenAI","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text_output","display_name":"Text","method":"text_response","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["LanguageModel"],"selected":"LanguageModel","name":"model_output","display_name":"Language 
Model","method":"build_model","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","system_message","stream","max_tokens","model_kwargs","json_mode","output_schema","model_name","openai_api_base","api_key","temperature","seed"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"OpenAIModel-OTfnt"},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":3513.5648778762093,"y":710.2099422974287},"dragging":false},{"id":"AddContentToPage-vrAvx","type":"genericNode","position":{"x":2649.2991466550634,"y":1050.6250104897197},"data":{"type":"AddContentToPage","node":{"template":{"_type":"Component","block_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"block_id","value":"","display_name":"Page/Block ID","advanced":true,"dynamic":false,"info":"The ID of the page/block to add the content.","title_case":false,"type":"str","_input_type":"StrInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nfrom typing import Dict, Any, Union\nfrom markdown import markdown\nfrom bs4 import BeautifulSoup\nimport requests\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom pydantic import BaseModel, Field\n\n\nclass AddContentToPage(LCToolComponent):\n display_name: str = \"Add Content to Page \"\n description: str = \"Convert markdown text to Notion blocks and append them to a Notion page.\"\n documentation: str = \"https://developers.notion.com/reference/patch-block-children\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n MultilineInput(\n name=\"markdown_text\",\n display_name=\"Markdown Text\",\n info=\"The markdown text to convert to Notion blocks.\",\n ),\n StrInput(\n 
name=\"block_id\",\n display_name=\"Page/Block ID\",\n info=\"The ID of the page/block to add the content.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class AddContentToPageSchema(BaseModel):\n markdown_text: str = Field(..., description=\"The markdown text to convert to Notion blocks.\")\n block_id: str = Field(..., description=\"The ID of the page/block to add the content.\")\n\n def run_model(self) -> Data:\n result = self._add_content_to_page(self.markdown_text, self.block_id)\n return Data(data=result, text=json.dumps(result))\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"add_content_to_notion_page\",\n description=\"Convert markdown text to Notion blocks and append them to a Notion page.\",\n func=self._add_content_to_page,\n args_schema=self.AddContentToPageSchema,\n )\n\n def _add_content_to_page(self, markdown_text: str, block_id: str) -> Union[Dict[str, Any], str]:\n try:\n html_text = markdown(markdown_text)\n soup = BeautifulSoup(html_text, \"html.parser\")\n blocks = self.process_node(soup)\n\n url = f\"https://api.notion.com/v1/blocks/{block_id}/children\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"children\": blocks,\n }\n\n response = requests.patch(url, headers=headers, json=data)\n response.raise_for_status()\n\n return response.json()\n except requests.exceptions.RequestException as e:\n error_message = f\"Error: Failed to add content to Notion page. 
{str(e)}\"\n if hasattr(e, \"response\") and e.response is not None:\n error_message += f\" Status code: {e.response.status_code}, Response: {e.response.text}\"\n return error_message\n except Exception as e:\n return f\"Error: An unexpected error occurred while adding content to Notion page. {str(e)}\"\n\n def process_node(self, node):\n blocks = []\n if isinstance(node, str):\n text = node.strip()\n if text:\n if text.startswith(\"#\"):\n heading_level = text.count(\"#\", 0, 6)\n heading_text = text[heading_level:].strip()\n if heading_level == 1:\n blocks.append(self.create_block(\"heading_1\", heading_text))\n elif heading_level == 2:\n blocks.append(self.create_block(\"heading_2\", heading_text))\n elif heading_level == 3:\n blocks.append(self.create_block(\"heading_3\", heading_text))\n else:\n blocks.append(self.create_block(\"paragraph\", text))\n elif node.name == \"h1\":\n blocks.append(self.create_block(\"heading_1\", node.get_text(strip=True)))\n elif node.name == \"h2\":\n blocks.append(self.create_block(\"heading_2\", node.get_text(strip=True)))\n elif node.name == \"h3\":\n blocks.append(self.create_block(\"heading_3\", node.get_text(strip=True)))\n elif node.name == \"p\":\n code_node = node.find(\"code\")\n if code_node:\n code_text = code_node.get_text()\n language, code = self.extract_language_and_code(code_text)\n blocks.append(self.create_block(\"code\", code, language=language))\n elif self.is_table(str(node)):\n blocks.extend(self.process_table(node))\n else:\n blocks.append(self.create_block(\"paragraph\", node.get_text(strip=True)))\n elif node.name == \"ul\":\n blocks.extend(self.process_list(node, \"bulleted_list_item\"))\n elif node.name == \"ol\":\n blocks.extend(self.process_list(node, \"numbered_list_item\"))\n elif node.name == \"blockquote\":\n blocks.append(self.create_block(\"quote\", node.get_text(strip=True)))\n elif node.name == \"hr\":\n blocks.append(self.create_block(\"divider\", \"\"))\n elif node.name == \"img\":\n 
blocks.append(self.create_block(\"image\", \"\", image_url=node.get(\"src\")))\n elif node.name == \"a\":\n blocks.append(self.create_block(\"bookmark\", node.get_text(strip=True), link_url=node.get(\"href\")))\n elif node.name == \"table\":\n blocks.extend(self.process_table(node))\n\n for child in node.children:\n if isinstance(child, str):\n continue\n blocks.extend(self.process_node(child))\n\n return blocks\n\n def extract_language_and_code(self, code_text):\n lines = code_text.split(\"\\n\")\n language = lines[0].strip()\n code = \"\\n\".join(lines[1:]).strip()\n return language, code\n\n def is_code_block(self, text):\n return text.startswith(\"```\")\n\n def extract_code_block(self, text):\n lines = text.split(\"\\n\")\n language = lines[0].strip(\"`\").strip()\n code = \"\\n\".join(lines[1:]).strip(\"`\").strip()\n return language, code\n\n def is_table(self, text):\n rows = text.split(\"\\n\")\n if len(rows) < 2:\n return False\n\n has_separator = False\n for i, row in enumerate(rows):\n if \"|\" in row:\n cells = [cell.strip() for cell in row.split(\"|\")]\n cells = [cell for cell in cells if cell] # Remove empty cells\n if i == 1 and all(set(cell) <= set(\"-|\") for cell in cells):\n has_separator = True\n elif not cells:\n return False\n\n return has_separator and len(rows) >= 3\n\n def process_list(self, node, list_type):\n blocks = []\n for item in node.find_all(\"li\"):\n item_text = item.get_text(strip=True)\n checked = item_text.startswith(\"[x]\")\n is_checklist = item_text.startswith(\"[ ]\") or checked\n\n if is_checklist:\n item_text = item_text.replace(\"[x]\", \"\").replace(\"[ ]\", \"\").strip()\n blocks.append(self.create_block(\"to_do\", item_text, checked=checked))\n else:\n blocks.append(self.create_block(list_type, item_text))\n return blocks\n\n def process_table(self, node):\n blocks = []\n header_row = node.find(\"thead\").find(\"tr\") if node.find(\"thead\") else None\n body_rows = node.find(\"tbody\").find_all(\"tr\") if 
node.find(\"tbody\") else []\n\n if header_row or body_rows:\n table_width = max(\n len(header_row.find_all([\"th\", \"td\"])) if header_row else 0,\n max(len(row.find_all([\"th\", \"td\"])) for row in body_rows),\n )\n\n table_block = self.create_block(\"table\", \"\", table_width=table_width, has_column_header=bool(header_row))\n blocks.append(table_block)\n\n if header_row:\n header_cells = [cell.get_text(strip=True) for cell in header_row.find_all([\"th\", \"td\"])]\n header_row_block = self.create_block(\"table_row\", header_cells)\n blocks.append(header_row_block)\n\n for row in body_rows:\n cells = [cell.get_text(strip=True) for cell in row.find_all([\"th\", \"td\"])]\n row_block = self.create_block(\"table_row\", cells)\n blocks.append(row_block)\n\n return blocks\n\n def create_block(self, block_type: str, content: str, **kwargs) -> Dict[str, Any]:\n block: dict[str, Any] = {\n \"object\": \"block\",\n \"type\": block_type,\n block_type: {},\n }\n\n if block_type in [\n \"paragraph\",\n \"heading_1\",\n \"heading_2\",\n \"heading_3\",\n \"bulleted_list_item\",\n \"numbered_list_item\",\n \"quote\",\n ]:\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n elif block_type == \"to_do\":\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n block[block_type][\"checked\"] = kwargs.get(\"checked\", False)\n elif block_type == \"code\":\n block[block_type][\"rich_text\"] = [\n {\n \"type\": \"text\",\n \"text\": {\n \"content\": content,\n },\n }\n ]\n block[block_type][\"language\"] = kwargs.get(\"language\", \"plain text\")\n elif block_type == \"image\":\n block[block_type] = {\"type\": \"external\", \"external\": {\"url\": kwargs.get(\"image_url\", \"\")}}\n elif block_type == \"divider\":\n pass\n elif block_type == \"bookmark\":\n block[block_type][\"url\"] = kwargs.get(\"link_url\", \"\")\n elif block_type == \"table\":\n 
block[block_type][\"table_width\"] = kwargs.get(\"table_width\", 0)\n block[block_type][\"has_column_header\"] = kwargs.get(\"has_column_header\", False)\n block[block_type][\"has_row_header\"] = kwargs.get(\"has_row_header\", False)\n elif block_type == \"table_row\":\n block[block_type][\"cells\"] = [[{\"type\": \"text\", \"text\": {\"content\": cell}} for cell in content]]\n\n return block\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"markdown_text":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"markdown_text","value":"","display_name":"Markdown Text","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The markdown text to convert to Notion blocks.","title_case":false,"type":"str","_input_type":"MultilineInput"},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Convert markdown text to Notion blocks and append them to a Notion page.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Add Content to Page 
","documentation":"https://developers.notion.com/reference/patch-block-children","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["markdown_text","block_id","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"AddContentToPage-vrAvx","description":"Convert markdown text to Notion blocks and append them to a Notion page.","display_name":"Add Content to Page "},"selected":false,"width":384,"height":330,"positionAbsolute":{"x":2649.2991466550634,"y":1050.6250104897197},"dragging":false},{"id":"NotionPageCreator-Exc7f","type":"genericNode","position":{"x":3050.8201437255634,"y":1391.0449862668834},"data":{"type":"NotionPageCreator","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nfrom typing import Dict, Any, Union\nimport requests\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom langflow.io import Output\n\nclass NotionPageCreator(LCToolComponent):\n display_name: str = \"Create Page \"\n description: str = \"A component for creating Notion pages.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/page-create\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"database_id\",\n display_name=\"Database ID\",\n info=\"The ID of the Notion database.\",\n ),\n SecretStrInput(\n 
name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n MultilineInput(\n name=\"properties_json\",\n display_name=\"Properties (JSON)\",\n info=\"The properties of the new page as a JSON string.\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionPageCreatorSchema(BaseModel):\n database_id: str = Field(..., description=\"The ID of the Notion database.\")\n properties_json: str = Field(..., description=\"The properties of the new page as a JSON string.\")\n\n def run_model(self) -> Data:\n result = self._create_notion_page(self.database_id, self.properties_json)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the created page data\n output = \"Created page properties:\\n\"\n for prop_name, prop_value in result.get(\"properties\", {}).items():\n output += f\"{prop_name}: {prop_value}\\n\"\n return Data(text=output, data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"create_notion_page\",\n description=\"Create a new page in a Notion database. IMPORTANT: Use the tool to check the Database properties for more details before using this tool.\",\n func=self._create_notion_page,\n args_schema=self.NotionPageCreatorSchema,\n )\n\n def _create_notion_page(self, database_id: str, properties_json: str) -> Union[Dict[str, Any], str]:\n if not database_id or not properties_json:\n return \"Invalid input. Please provide 'database_id' and 'properties_json'.\"\n\n try:\n properties = json.loads(properties_json)\n except json.JSONDecodeError as e:\n return f\"Invalid properties format. Please provide a valid JSON string. 
Error: {str(e)}\"\n\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"parent\": {\"database_id\": database_id},\n \"properties\": properties,\n }\n\n try:\n response = requests.post(\"https://api.notion.com/v1/pages\", headers=headers, json=data)\n response.raise_for_status()\n result = response.json()\n return result\n except requests.exceptions.RequestException as e:\n error_message = f\"Failed to create Notion page. Error: {str(e)}\"\n if hasattr(e, \"response\") and e.response is not None:\n error_message += f\" Status code: {e.response.status_code}, Response: {e.response.text}\"\n return error_message\n\n def __call__(self, *args, **kwargs):\n return self._create_notion_page(*args, **kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"database_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"database_id","value":"","display_name":"Database ID","advanced":true,"dynamic":false,"info":"The ID of the Notion database.","title_case":false,"type":"str","_input_type":"StrInput"},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"properties_json":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"properties_json","value":"","display_name":"Properties (JSON)","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The properties of the new page as a JSON 
string.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"A component for creating Notion pages.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Create Page ","documentation":"https://docs.langflow.org/integrations/notion/page-create","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["database_id","notion_secret","properties_json"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionPageCreator-Exc7f","description":"A component for creating Notion pages.","display_name":"Create Page "},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":3050.8201437255634,"y":1391.0449862668834},"dragging":false},{"id":"NotionDatabaseProperties-IjzLV","type":"genericNode","position":{"x":3053.0023230574693,"y":1061.535907149244},"data":{"type":"NotionDatabaseProperties","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import Dict, Union\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom langflow.io import Output\n\nclass NotionDatabaseProperties(LCToolComponent):\n display_name: str = \"List Database Properties \"\n description: str = \"Retrieve properties of a Notion database.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/list-database-properties\"\n 
icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"database_id\",\n display_name=\"Database ID\",\n info=\"The ID of the Notion database.\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionDatabasePropertiesSchema(BaseModel):\n database_id: str = Field(..., description=\"The ID of the Notion database.\")\n\n def run_model(self) -> Data:\n result = self._fetch_database_properties(self.database_id)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the properties\n return Data(text=str(result), data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_database_properties\",\n description=\"Retrieve properties of a Notion database. 
Input should include the database ID.\",\n func=self._fetch_database_properties,\n args_schema=self.NotionDatabasePropertiesSchema,\n )\n\n def _fetch_database_properties(self, database_id: str) -> Union[Dict, str]:\n url = f\"https://api.notion.com/v1/databases/{database_id}\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\", # Use the latest supported version\n }\n try:\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n data = response.json()\n properties = data.get(\"properties\", {})\n return properties\n except requests.exceptions.RequestException as e:\n return f\"Error fetching Notion database properties: {str(e)}\"\n except ValueError as e:\n return f\"Error parsing Notion API response: {str(e)}\"\n except Exception as e:\n return f\"An unexpected error occurred: {str(e)}\"\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"database_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"database_id","value":"","display_name":"Database ID","advanced":true,"dynamic":false,"info":"The ID of the Notion database.","title_case":false,"type":"str","_input_type":"StrInput"},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Retrieve properties of a Notion database.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Database Properties 
","documentation":"https://docs.langflow.org/integrations/notion/list-database-properties","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["database_id","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionDatabaseProperties-IjzLV","description":"Retrieve properties of a Notion database.","display_name":"List Database Properties "},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":3053.0023230574693,"y":1061.535907149244},"dragging":false},{"id":"NotionPageUpdate-bexvy","type":"genericNode","position":{"x":2649.2991466550625,"y":1385.262204377853},"data":{"type":"NotionPageUpdate","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import json\nimport requests\nfrom typing import Dict, Any, Union\nfrom pydantic import BaseModel, Field\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, MultilineInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\nfrom loguru import logger\nfrom langflow.io import Output\n\nclass NotionPageUpdate(LCToolComponent):\n display_name: str = \"Update Page Property \"\n description: str = \"Update the properties of a Notion page.\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/page-update\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n StrInput(\n name=\"page_id\",\n display_name=\"Page ID\",\n info=\"The ID of the Notion page to update.\",\n ),\n MultilineInput(\n 
name=\"properties\",\n display_name=\"Properties\",\n info=\"The properties to update on the page (as a JSON string or a dictionary).\",\n ),\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionPageUpdateSchema(BaseModel):\n page_id: str = Field(..., description=\"The ID of the Notion page to update.\")\n properties: Union[str, Dict[str, Any]] = Field(\n ..., description=\"The properties to update on the page (as a JSON string or a dictionary).\"\n )\n\n def run_model(self) -> Data:\n result = self._update_notion_page(self.page_id, self.properties)\n if isinstance(result, str):\n # An error occurred, return it as text\n return Data(text=result)\n else:\n # Success, return the updated page data\n output = \"Updated page properties:\\n\"\n for prop_name, prop_value in result.get(\"properties\", {}).items():\n output += f\"{prop_name}: {prop_value}\\n\"\n return Data(text=output, data=result)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"update_notion_page\",\n description=\"Update the properties of a Notion page. 
IMPORTANT: Use the tool to check the Database properties for more details before using this tool.\",\n func=self._update_notion_page,\n args_schema=self.NotionPageUpdateSchema,\n )\n\n def _update_notion_page(self, page_id: str, properties: Union[str, Dict[str, Any]]) -> Union[Dict[str, Any], str]:\n url = f\"https://api.notion.com/v1/pages/{page_id}\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\", # Use the latest supported version\n }\n\n # Parse properties if it's a string\n if isinstance(properties, str):\n try:\n parsed_properties = json.loads(properties)\n except json.JSONDecodeError as e:\n error_message = f\"Invalid JSON format for properties: {str(e)}\"\n logger.error(error_message)\n return error_message\n\n else:\n parsed_properties = properties\n\n data = {\"properties\": parsed_properties}\n\n try:\n logger.info(f\"Sending request to Notion API: URL: {url}, Data: {json.dumps(data)}\")\n response = requests.patch(url, headers=headers, json=data)\n response.raise_for_status()\n updated_page = response.json()\n\n logger.info(f\"Successfully updated Notion page. 
Response: {json.dumps(updated_page)}\")\n return updated_page\n except requests.exceptions.HTTPError as e:\n error_message = f\"HTTP Error occurred: {str(e)}\"\n if e.response is not None:\n error_message += f\"\\nStatus code: {e.response.status_code}\"\n error_message += f\"\\nResponse body: {e.response.text}\"\n logger.error(error_message)\n return error_message\n except requests.exceptions.RequestException as e:\n error_message = f\"An error occurred while making the request: {str(e)}\"\n logger.error(error_message)\n return error_message\n except Exception as e:\n error_message = f\"An unexpected error occurred: {str(e)}\"\n logger.error(error_message)\n return error_message\n\n def __call__(self, *args, **kwargs):\n return self._update_notion_page(*args, **kwargs)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"page_id":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"page_id","value":"","display_name":"Page ID","advanced":true,"dynamic":false,"info":"The ID of the Notion page to update.","title_case":false,"type":"str","_input_type":"StrInput"},"properties":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"properties","value":"","display_name":"Properties","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The properties to update on the page (as a JSON string or a 
dictionary).","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Update the properties of a Notion page.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"Update Page Property ","documentation":"https://docs.langflow.org/integrations/notion/page-update","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true}],"field_order":["page_id","properties","notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionPageUpdate-bexvy","description":"Update the properties of a Notion page.","display_name":"Update Page Property "},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":2649.2991466550625,"y":1385.262204377853},"dragging":false},{"id":"NotionSearch-EdSJb","type":"genericNode","position":{"x":2435.4455721283834,"y":357.45573905064634},"data":{"type":"NotionSearch","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import Dict, Any, List\nfrom pydantic import BaseModel, Field\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput, StrInput, DropdownInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionSearch(LCToolComponent):\n display_name: str = \"Search \"\n description: str = \"Searches all pages and databases that have been shared with an integration. 
The search field can be an empty value to show all values from that search\"\n documentation: str = \"https://docs.langflow.org/integrations/notion/search\"\n icon = \"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n StrInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The text that the API compares page and database titles against.\",\n ),\n DropdownInput(\n name=\"filter_value\",\n display_name=\"Filter Type\",\n info=\"Limits the results to either only pages or only databases.\",\n options=[\"page\", \"database\"],\n value=\"page\",\n ),\n DropdownInput(\n name=\"sort_direction\",\n display_name=\"Sort Direction\",\n info=\"The direction to sort the results.\",\n options=[\"ascending\", \"descending\"],\n value=\"descending\",\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionSearchSchema(BaseModel):\n query: str = Field(..., description=\"The search query text.\")\n filter_value: str = Field(default=\"page\", description=\"Filter type: 'page' or 'database'.\")\n sort_direction: str = Field(default=\"descending\", description=\"Sort direction: 'ascending' or 'descending'.\")\n\n def run_model(self) -> List[Data]:\n results = self._search_notion(self.query, self.filter_value, self.sort_direction)\n records = []\n combined_text = f\"Results found: {len(results)}\\n\\n\"\n\n for result in results:\n result_data = {\n \"id\": result[\"id\"],\n \"type\": result[\"object\"],\n \"last_edited_time\": result[\"last_edited_time\"],\n }\n\n if result[\"object\"] == \"page\":\n result_data[\"title_or_url\"] = result[\"url\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['url']}\\n\"\n elif result[\"object\"] == \"database\":\n if \"title\" in result and 
isinstance(result[\"title\"], list) and len(result[\"title\"]) > 0:\n result_data[\"title_or_url\"] = result[\"title\"][0][\"plain_text\"]\n text = f\"id: {result['id']}\\ntitle_or_url: {result['title'][0]['plain_text']}\\n\"\n else:\n result_data[\"title_or_url\"] = \"N/A\"\n text = f\"id: {result['id']}\\ntitle_or_url: N/A\\n\"\n\n text += f\"type: {result['object']}\\nlast_edited_time: {result['last_edited_time']}\\n\\n\"\n combined_text += text\n records.append(Data(text=text, data=result_data))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_search\",\n description=\"Search Notion pages and databases. Input should include the search query and optionally filter type and sort direction.\",\n func=self._search_notion,\n args_schema=self.NotionSearchSchema,\n )\n\n def _search_notion(\n self, query: str, filter_value: str = \"page\", sort_direction: str = \"descending\"\n ) -> List[Dict[str, Any]]:\n url = \"https://api.notion.com/v1/search\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Content-Type\": \"application/json\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n data = {\n \"query\": query,\n \"filter\": {\"value\": filter_value, \"property\": \"object\"},\n \"sort\": {\"direction\": sort_direction, \"timestamp\": \"last_edited_time\"},\n }\n\n response = requests.post(url, headers=headers, json=data)\n response.raise_for_status()\n\n results = response.json()\n return results[\"results\"]\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"filter_value":{"trace_as_metadata":true,"options":["page","database"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"filter_value","value":"database","display_name":"Filter Type","advanced":true,"dynamic":false,"info":"Limits the results to either only pages or only 
databases.","title_case":false,"type":"str","_input_type":"DropdownInput"},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"query":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"query","value":"","display_name":"Search Query","advanced":true,"dynamic":false,"info":"The text that the API compares page and database titles against.","title_case":false,"type":"str","_input_type":"StrInput"},"sort_direction":{"trace_as_metadata":true,"options":["ascending","descending"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sort_direction","value":"descending","display_name":"Sort Direction","advanced":true,"dynamic":false,"info":"The direction to sort the results.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"List All Databases","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Databases","documentation":"https://docs.langflow.org/integrations/notion/search","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true,"hidden":false},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true,"hidden":true}],"field_order":["notion_secret","query","filter_value","sort_direction"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionSearch-EdSJb","description":"Searches all pages and databases that have been shared with an integration.","display_name":"Search 
"},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":2435.4455721283834,"y":357.45573905064634},"dragging":false},{"id":"ParseData-vYVwu","type":"genericNode","position":{"x":2871.5903532688335,"y":563.1965154816405},"data":{"type":"ParseData","node":{"template":{"_type":"Component","data":{"trace_as_metadata":true,"list":false,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"data","value":"","display_name":"Data","advanced":false,"input_types":["Data"],"dynamic":false,"info":"The data to convert to text.","title_case":false,"type":"other","_input_type":"DataInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n name = \"ParseData\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"sep":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sep","value":"\n","display_name":"Separator","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"StrInput"},"template":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"{text}","display_name":"Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Convert Data into plain text following a specified template.","icon":"braces","base_classes":["Message"],"display_name":"Parse Data","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text","display_name":"Text","method":"parse_data","value":"__UNDEFINED__","cache":true}],"field_order":["data","template","sep"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ParseData-vYVwu","showNode":false},"selected":false,"width":96,"height":96,"positionAbsolute":{"x":2871.5903532688335,"y":563.1965154816405},"dragging":false},{"id":"ChatOutput-zBv53","type":"genericNode","position":{"x":4429.812566227955,"y":940.6072472757681},"data":{"type":"ChatOutput","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.memory import store_message\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"ChatOutput\"\n name = \"ChatOutput\"\n\n inputs = [\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, 
MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n def message_response(self) -> Message:\n message = Message(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n )\n if (\n self.session_id\n and isinstance(message, Message)\n and isinstance(message.text, str)\n and self.should_store_message\n ):\n store_message(\n message,\n flow_id=self.graph.flow_id,\n )\n self.message.value = message\n\n self.status = message\n return message\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"data_template":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"data_template","value":"{text}","display_name":"Data Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"input_value":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"input_value","value":"","display_name":"Text","advanced":false,"input_types":["Message"],"dynamic":false,"info":"Message to be passed as output.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"sender":{"trace_as_metadata":true,"options":["Machine","User"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"sender","value":"Machine","display_name":"Sender Type","advanced":true,"dynamic":false,"info":"Type of sender.","title_case":false,"type":"str","_input_type":"DropdownInput"},"sender_name":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sender_name","value":"AI","display_name":"Sender Name","advanced":true,"input_types":["Message"],"dynamic":false,"info":"Name of the sender.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"session_id":{"trace_as_input":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"session_id","value":"","display_name":"Session ID","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The session ID of the chat. 
If empty, the current session ID parameter will be used.","title_case":false,"type":"str","_input_type":"MessageTextInput"},"should_store_message":{"trace_as_metadata":true,"list":false,"required":false,"placeholder":"","show":true,"name":"should_store_message","value":true,"display_name":"Store Messages","advanced":true,"dynamic":false,"info":"Store the message in the history.","title_case":false,"type":"bool","_input_type":"BoolInput"}},"description":"Display a chat message in the Playground.","icon":"ChatOutput","base_classes":["Message"],"display_name":"Chat Output","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"message","display_name":"Message","method":"message_response","value":"__UNDEFINED__","cache":true}],"field_order":["input_value","should_store_message","sender","sender_name","session_id","data_template"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ChatOutput-zBv53"},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":4429.812566227955,"y":940.6072472757681},"dragging":false},{"id":"NotionUserList-wFEb1","type":"genericNode","position":{"x":2390.6365450681037,"y":694.4867003504073},"data":{"type":"NotionUserList","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"import requests\nfrom typing import List, Dict\nfrom pydantic import BaseModel\nfrom langflow.io import Output\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.field_typing import Tool\nfrom langchain.tools import StructuredTool\n\n\nclass NotionUserList(LCToolComponent):\n display_name = \"List Users \"\n description = \"Retrieve users from Notion.\"\n documentation = \"https://docs.langflow.org/integrations/notion/list-users\"\n icon = 
\"NotionDirectoryLoader\"\n\n inputs = [\n SecretStrInput(\n name=\"notion_secret\",\n display_name=\"Notion Secret\",\n info=\"The Notion integration token.\",\n required=True,\n ),\n ]\n outputs = [\n Output(name=\"example_output\", display_name=\"Data\", method=\"run_model\"),\n Output(name=\"example_tool_output\", display_name=\"Tool\", method=\"build_tool\"),\n ]\n\n class NotionUserListSchema(BaseModel):\n pass\n\n def run_model(self) -> List[Data]:\n users = self._list_users()\n records = []\n combined_text = \"\"\n\n for user in users:\n output = \"User:\\n\"\n for key, value in user.items():\n output += f\"{key.replace('_', ' ').title()}: {value}\\n\"\n output += \"________________________\\n\"\n\n combined_text += output\n records.append(Data(text=output, data=user))\n\n self.status = records\n return records\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"notion_list_users\",\n description=\"Retrieve users from Notion.\",\n func=self._list_users,\n args_schema=self.NotionUserListSchema,\n )\n\n def _list_users(self) -> List[Dict]:\n url = \"https://api.notion.com/v1/users\"\n headers = {\n \"Authorization\": f\"Bearer {self.notion_secret}\",\n \"Notion-Version\": \"2022-06-28\",\n }\n\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n\n data = response.json()\n results = data[\"results\"]\n\n users = []\n for user in results:\n user_data = {\n \"id\": user[\"id\"],\n \"type\": user[\"type\"],\n \"name\": user.get(\"name\", \"\"),\n \"avatar_url\": user.get(\"avatar_url\", \"\"),\n }\n users.append(user_data)\n\n return users\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"notion_secret":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"notion_secret","value":"","display_name":"Notion Secret","advanced":false,"input_types":["Message"],"dynamic":false,"info":"The Notion 
integration token.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"}},"description":"Retrieve users from Notion.","icon":"NotionDirectoryLoader","base_classes":["Data","Tool"],"display_name":"List Users ","documentation":"https://docs.langflow.org/integrations/notion/list-users","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Data"],"selected":"Data","name":"example_output","display_name":"Data","method":"run_model","value":"__UNDEFINED__","cache":true},{"types":["Tool"],"selected":"Tool","name":"example_tool_output","display_name":"Tool","method":"build_tool","value":"__UNDEFINED__","cache":true,"hidden":true}],"field_order":["notion_secret"],"beta":false,"edited":true,"lf_version":"1.0.17"},"id":"NotionUserList-wFEb1","description":"Retrieve users from Notion.","display_name":"List Users "},"selected":false,"width":384,"height":302,"positionAbsolute":{"x":2390.6365450681037,"y":694.4867003504073},"dragging":false},{"id":"ParseData-WKjW6","type":"genericNode","position":{"x":2877.571533084884,"y":856.8480898893301},"data":{"type":"ParseData","node":{"template":{"_type":"Component","data":{"trace_as_metadata":true,"list":false,"trace_as_input":true,"required":false,"placeholder":"","show":true,"name":"data","value":"","display_name":"Data","advanced":false,"input_types":["Data"],"dynamic":false,"info":"The data to convert to text.","title_case":false,"type":"other","_input_type":"DataInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n name = 
\"ParseData\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\"),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(display_name=\"Text\", name=\"text\", method=\"parse_data\"),\n ]\n\n def parse_data(self) -> Message:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n\n result_string = data_to_text(template, data, sep=self.sep)\n self.status = result_string\n return Message(text=result_string)\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"sep":{"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"sep","value":"\n","display_name":"Separator","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"str","_input_type":"StrInput"},"template":{"trace_as_input":true,"multiline":true,"trace_as_metadata":true,"load_from_db":false,"list":false,"required":false,"placeholder":"","show":true,"name":"template","value":"{text}","display_name":"Template","advanced":true,"input_types":["Message"],"dynamic":false,"info":"The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.","title_case":false,"type":"str","_input_type":"MultilineInput"}},"description":"Convert Data into plain text following a specified template.","icon":"braces","base_classes":["Message"],"display_name":"Parse Data","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"text","display_name":"Text","method":"parse_data","value":"__UNDEFINED__","cache":true}],"field_order":["data","template","sep"],"beta":false,"edited":false,"lf_version":"1.0.17"},"id":"ParseData-WKjW6","showNode":false},"selected":false,"width":96,"height":96,"positionAbsolute":{"x":2877.571533084884,"y":856.8480898893301},"dragging":false},{"id":"CurrentDateComponent-WOwNq","type":"genericNode","position":{"x":536.7929500860405,"y":617.6055631700241},"data":{"type":"CurrentDateComponent","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from datetime import datetime\r\nfrom zoneinfo import ZoneInfo\r\nfrom typing import List\r\n\r\nfrom langflow.custom import Component\r\nfrom langflow.io import DropdownInput, Output\r\nfrom langflow.schema.message import Message\r\n\r\nclass CurrentDateComponent(Component):\r\n display_name = \"Current Date đ°ī¸\"\r\n description = \"Returns the current date and time in the selected timezone.\"\r\n icon = \"clock\"\r\n\r\n inputs = [\r\n DropdownInput(\r\n name=\"timezone\",\r\n display_name=\"Timezone\",\r\n options=[\r\n \"UTC\",\r\n \"US/Eastern\",\r\n \"US/Central\",\r\n \"US/Mountain\",\r\n \"US/Pacific\",\r\n \"Europe/London\",\r\n \"Europe/Paris\",\r\n \"Asia/Tokyo\",\r\n \"Australia/Sydney\",\r\n \"America/Sao_Paulo\",\r\n \"America/Cuiaba\",\r\n ],\r\n value=\"UTC\",\r\n info=\"Select the timezone for the current date and time.\",\r\n ),\r\n ]\r\n\r\n outputs = [\r\n 
Output(display_name=\"Current Date\", name=\"current_date\", method=\"get_current_date\"),\r\n ]\r\n\r\n def get_current_date(self) -> Message:\r\n try:\r\n tz = ZoneInfo(self.timezone)\r\n current_date = datetime.now(tz).strftime(\"%Y-%m-%d %H:%M:%S %Z\")\r\n result = f\"Current date and time in {self.timezone}: {current_date}\"\r\n self.status = result\r\n return Message(text=result)\r\n except Exception as e:\r\n error_message = f\"Error: {str(e)}\"\r\n self.status = error_message\r\n return Message(text=error_message)","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"timezone":{"trace_as_metadata":true,"options":["UTC","US/Eastern","US/Central","US/Mountain","US/Pacific","Europe/London","Europe/Paris","Asia/Tokyo","Australia/Sydney","America/Sao_Paulo","America/Cuiaba"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"timezone","value":"UTC","display_name":"Timezone","advanced":false,"dynamic":false,"info":"Select the timezone for the current date and time.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"Returns the current date and time in the selected timezone.","icon":"clock","base_classes":["Message"],"display_name":"Current Date","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"current_date","display_name":"Current 
Date","method":"get_current_date","value":"__UNDEFINED__","cache":true}],"field_order":["timezone"],"beta":false,"edited":true},"id":"CurrentDateComponent-WOwNq","showNode":false},"selected":false,"width":96,"height":96,"dragging":false,"positionAbsolute":{"x":536.7929500860405,"y":617.6055631700241}},{"id":"CurrentDateComponent-PZ8xJ","type":"genericNode","position":{"x":2871.6341688682833,"y":453.3374434097356},"data":{"type":"CurrentDateComponent","node":{"template":{"_type":"Component","code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from datetime import datetime\r\nfrom zoneinfo import ZoneInfo\r\nfrom typing import List\r\n\r\nfrom langflow.custom import Component\r\nfrom langflow.io import DropdownInput, Output\r\nfrom langflow.schema.message import Message\r\n\r\nclass CurrentDateComponent(Component):\r\n display_name = \"Current Date đ°ī¸\"\r\n description = \"Returns the current date and time in the selected timezone.\"\r\n icon = \"clock\"\r\n\r\n inputs = [\r\n DropdownInput(\r\n name=\"timezone\",\r\n display_name=\"Timezone\",\r\n options=[\r\n \"UTC\",\r\n \"US/Eastern\",\r\n \"US/Central\",\r\n \"US/Mountain\",\r\n \"US/Pacific\",\r\n \"Europe/London\",\r\n \"Europe/Paris\",\r\n \"Asia/Tokyo\",\r\n \"Australia/Sydney\",\r\n \"America/Sao_Paulo\",\r\n \"America/Cuiaba\",\r\n ],\r\n value=\"UTC\",\r\n info=\"Select the timezone for the current date and time.\",\r\n ),\r\n ]\r\n\r\n outputs = [\r\n Output(display_name=\"Current Date\", name=\"current_date\", method=\"get_current_date\"),\r\n ]\r\n\r\n def get_current_date(self) -> Message:\r\n try:\r\n tz = ZoneInfo(self.timezone)\r\n current_date = datetime.now(tz).strftime(\"%Y-%m-%d %H:%M:%S %Z\")\r\n result = f\"Current date and time in {self.timezone}: {current_date}\"\r\n self.status = result\r\n return Message(text=result)\r\n except Exception as e:\r\n error_message = f\"Error: {str(e)}\"\r\n self.status = error_message\r\n return 
Message(text=error_message)","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"timezone":{"trace_as_metadata":true,"options":["UTC","US/Eastern","US/Central","US/Mountain","US/Pacific","Europe/London","Europe/Paris","Asia/Tokyo","Australia/Sydney","America/Sao_Paulo","America/Cuiaba"],"combobox":false,"required":false,"placeholder":"","show":true,"name":"timezone","value":"UTC","display_name":"Timezone","advanced":false,"dynamic":false,"info":"Select the timezone for the current date and time.","title_case":false,"type":"str","_input_type":"DropdownInput"}},"description":"Returns the current date and time in the selected timezone.","icon":"clock","base_classes":["Message"],"display_name":"Current Date","documentation":"","custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Message"],"selected":"Message","name":"current_date","display_name":"Current 
Date","method":"get_current_date","value":"__UNDEFINED__","cache":true}],"field_order":["timezone"],"beta":false,"edited":true,"official":false},"id":"CurrentDateComponent-PZ8xJ","showNode":false},"selected":false,"width":96,"height":96,"dragging":false,"positionAbsolute":{"x":2871.6341688682833,"y":453.3374434097356}}],"edges":[{"source":"TextInput-iJPEJ","sourceHandle":"{ÅdataTypeÅ:ÅTextInputÅ,ÅidÅ:ÅTextInput-iJPEJÅ,ÅnameÅ:ÅtextÅ,Åoutput_typesÅ:[ÅMessageÅ]}","target":"Prompt-19rub","targetHandle":"{ÅfieldNameÅ:ÅTRANSCRIPTÅ,ÅidÅ:ÅPrompt-19rubÅ,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"TRANSCRIPT","id":"Prompt-19rub","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"TextInput","id":"TextInput-iJPEJ","name":"text","output_types":["Message"]}},"id":"reactflow__edge-TextInput-iJPEJ{ÅdataTypeÅ:ÅTextInputÅ,ÅidÅ:ÅTextInput-iJPEJÅ,ÅnameÅ:ÅtextÅ,Åoutput_typesÅ:[ÅMessageÅ]}-Prompt-19rub{ÅfieldNameÅ:ÅTRANSCRIPTÅ,ÅidÅ:ÅPrompt-19rubÅ,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","selected":false,"className":""},{"source":"NotionUserList-TvIKS","sourceHandle":"{ÅdataTypeÅ:ÅNotionUserListÅ,ÅidÅ:ÅNotionUserList-TvIKSÅ,ÅnameÅ:Åexample_outputÅ,Åoutput_typesÅ:[ÅDataÅ]}","target":"ParseData-aNk1v","targetHandle":"{ÅfieldNameÅ:ÅdataÅ,ÅidÅ:ÅParseData-aNk1vÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"data","id":"ParseData-aNk1v","inputTypes":["Data"],"type":"other"},"sourceHandle":{"dataType":"NotionUserList","id":"NotionUserList-TvIKS","name":"example_output","output_types":["Data"]}},"id":"reactflow__edge-NotionUserList-TvIKS{ÅdataTypeÅ:ÅNotionUserListÅ,ÅidÅ:ÅNotionUserList-TvIKSÅ,ÅnameÅ:Åexample_outputÅ,Åoutput_typesÅ:[ÅDataÅ]}-ParseData-aNk1v{ÅfieldNameÅ:ÅdataÅ,ÅidÅ:ÅParseData-aNk1vÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}","selected":false,"className":""},{"source":"ParseData-aNk1v","sourceHandle":"{ÅdataTypeÅ:ÅParseDataÅ,ÅidÅ:ÅParseData-aNk1vÅ,ÅnameÅ:ÅtextÅ,Åoutput_typesÅ:[ÅMessageÅ]}"
,"target":"Prompt-19rub","targetHandle":"{ÅfieldNameÅ:ÅUSERSÅ,ÅidÅ:ÅPrompt-19rubÅ,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"USERS","id":"Prompt-19rub","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"ParseData","id":"ParseData-aNk1v","name":"text","output_types":["Message"]}},"id":"reactflow__edge-ParseData-aNk1v{ÅdataTypeÅ:ÅParseDataÅ,ÅidÅ:ÅParseData-aNk1vÅ,ÅnameÅ:ÅtextÅ,Åoutput_typesÅ:[ÅMessageÅ]}-Prompt-19rub{ÅfieldNameÅ:ÅUSERSÅ,ÅidÅ:ÅPrompt-19rubÅ,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","selected":false,"className":""},{"source":"Prompt-19rub","sourceHandle":"{ÅdataTypeÅ:ÅPromptÅ,ÅidÅ:ÅPrompt-19rubÅ,ÅnameÅ:ÅpromptÅ,Åoutput_typesÅ:[ÅMessageÅ]}","target":"ToolCallingAgent-rVWeq","targetHandle":"{ÅfieldNameÅ:Åsystem_promptÅ,ÅidÅ:ÅToolCallingAgent-rVWeqÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"system_prompt","id":"ToolCallingAgent-rVWeq","inputTypes":["Message"],"type":"str"},"sourceHandle":{"dataType":"Prompt","id":"Prompt-19rub","name":"prompt","output_types":["Message"]}},"id":"reactflow__edge-Prompt-19rub{ÅdataTypeÅ:ÅPromptÅ,ÅidÅ:ÅPrompt-19rubÅ,ÅnameÅ:ÅpromptÅ,Åoutput_typesÅ:[ÅMessageÅ]}-ToolCallingAgent-rVWeq{ÅfieldNameÅ:Åsystem_promptÅ,ÅidÅ:ÅToolCallingAgent-rVWeqÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","selected":false,"className":""},{"source":"NotionSearch-M66HF","sourceHandle":"{ÅdataTypeÅ:ÅNotionSearchÅ,ÅidÅ:ÅNotionSearch-M66HFÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolCallingAgent-rVWeq","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-rVWeqÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-rVWeq","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"NotionSearch","id":"NotionSearch-M66HF","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionSearch-M66HF{ÅdataTypeÅ:ÅNotionSearchÅ,ÅidÅ:ÅN
otionSearch-M66HFÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolCallingAgent-rVWeq{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-rVWeqÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","selected":false,"className":""},{"source":"OpenAIModel-Ht8xI","sourceHandle":"{ÅdataTypeÅ:ÅOpenAIModelÅ,ÅidÅ:ÅOpenAIModel-Ht8xIÅ,ÅnameÅ:Åmodel_outputÅ,Åoutput_typesÅ:[ÅLanguageModelÅ]}","target":"ToolCallingAgent-rVWeq","targetHandle":"{ÅfieldNameÅ:ÅllmÅ,ÅidÅ:ÅToolCallingAgent-rVWeqÅ,ÅinputTypesÅ:[ÅLanguageModelÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"llm","id":"ToolCallingAgent-rVWeq","inputTypes":["LanguageModel"],"type":"other"},"sourceHandle":{"dataType":"OpenAIModel","id":"OpenAIModel-Ht8xI","name":"model_output","output_types":["LanguageModel"]}},"id":"reactflow__edge-OpenAIModel-Ht8xI{ÅdataTypeÅ:ÅOpenAIModelÅ,ÅidÅ:ÅOpenAIModel-Ht8xIÅ,ÅnameÅ:Åmodel_outputÅ,Åoutput_typesÅ:[ÅLanguageModelÅ]}-ToolCallingAgent-rVWeq{ÅfieldNameÅ:ÅllmÅ,ÅidÅ:ÅToolCallingAgent-rVWeqÅ,ÅinputTypesÅ:[ÅLanguageModelÅ],ÅtypeÅ:ÅotherÅ}","selected":false,"className":""},{"source":"ToolCallingAgent-rVWeq","sourceHandle":"{ÅdataTypeÅ:ÅToolCallingAgentÅ,ÅidÅ:ÅToolCallingAgent-rVWeqÅ,ÅnameÅ:ÅresponseÅ,Åoutput_typesÅ:[ÅMessageÅ]}","target":"Prompt-Lbxk6","targetHandle":"{ÅfieldNameÅ:ÅTASK_LISTÅ,ÅidÅ:ÅPrompt-Lbxk6Å,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"TASK_LIST","id":"Prompt-Lbxk6","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"ToolCallingAgent","id":"ToolCallingAgent-rVWeq","name":"response","output_types":["Message"]}},"id":"reactflow__edge-ToolCallingAgent-rVWeq{ÅdataTypeÅ:ÅToolCallingAgentÅ,ÅidÅ:ÅToolCallingAgent-rVWeqÅ,ÅnameÅ:ÅresponseÅ,Åoutput_typesÅ:[ÅMessageÅ]}-Prompt-Lbxk6{ÅfieldNameÅ:ÅTASK_LISTÅ,ÅidÅ:ÅPrompt-Lbxk6Å,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","selected":false,"className":""},{"source":"OpenAIModel-OTfnt","sourceHandle":"{ÅdataTypeÅ:ÅOpenAIModelÅ,ÅidÅ:ÅOpenAIModel-OTfntÅ,ÅnameÅ:Åmodel_out
putÅ,Åoutput_typesÅ:[ÅLanguageModelÅ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{ÅfieldNameÅ:ÅllmÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅLanguageModelÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"llm","id":"ToolCallingAgent-GurdE","inputTypes":["LanguageModel"],"type":"other"},"sourceHandle":{"dataType":"OpenAIModel","id":"OpenAIModel-OTfnt","name":"model_output","output_types":["LanguageModel"]}},"id":"reactflow__edge-OpenAIModel-OTfnt{ÅdataTypeÅ:ÅOpenAIModelÅ,ÅidÅ:ÅOpenAIModel-OTfntÅ,ÅnameÅ:Åmodel_outputÅ,Åoutput_typesÅ:[ÅLanguageModelÅ]}-ToolCallingAgent-GurdE{ÅfieldNameÅ:ÅllmÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅLanguageModelÅ],ÅtypeÅ:ÅotherÅ}","selected":false,"className":""},{"source":"Prompt-Lbxk6","sourceHandle":"{ÅdataTypeÅ:ÅPromptÅ,ÅidÅ:ÅPrompt-Lbxk6Å,ÅnameÅ:ÅpromptÅ,Åoutput_typesÅ:[ÅMessageÅ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{ÅfieldNameÅ:Åsystem_promptÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"system_prompt","id":"ToolCallingAgent-GurdE","inputTypes":["Message"],"type":"str"},"sourceHandle":{"dataType":"Prompt","id":"Prompt-Lbxk6","name":"prompt","output_types":["Message"]}},"id":"reactflow__edge-Prompt-Lbxk6{ÅdataTypeÅ:ÅPromptÅ,ÅidÅ:ÅPrompt-Lbxk6Å,ÅnameÅ:ÅpromptÅ,Åoutput_typesÅ:[ÅMessageÅ]}-ToolCallingAgent-GurdE{ÅfieldNameÅ:Åsystem_promptÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","selected":false,"className":""},{"source":"AddContentToPage-vrAvx","sourceHandle":"{ÅdataTypeÅ:ÅAddContentToPageÅ,ÅidÅ:ÅAddContentToPage-vrAvxÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-GurdE","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"AddContentToPage","id":"AddConten
tToPage-vrAvx","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-AddContentToPage-vrAvx{ÅdataTypeÅ:ÅAddContentToPageÅ,ÅidÅ:ÅAddContentToPage-vrAvxÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolCallingAgent-GurdE{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","selected":false,"className":""},{"source":"NotionPageCreator-Exc7f","sourceHandle":"{ÅdataTypeÅ:ÅNotionPageCreatorÅ,ÅidÅ:ÅNotionPageCreator-Exc7fÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-GurdE","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"NotionPageCreator","id":"NotionPageCreator-Exc7f","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionPageCreator-Exc7f{ÅdataTypeÅ:ÅNotionPageCreatorÅ,ÅidÅ:ÅNotionPageCreator-Exc7fÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolCallingAgent-GurdE{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","selected":false,"className":""},{"source":"NotionDatabaseProperties-IjzLV","sourceHandle":"{ÅdataTypeÅ:ÅNotionDatabasePropertiesÅ,ÅidÅ:ÅNotionDatabaseProperties-IjzLVÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-GurdE","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"NotionDatabaseProperties","id":"NotionDatabaseProperties-IjzLV","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionDatabaseProperties-IjzLV{ÅdataTypeÅ:ÅNotionDatabasePropertiesÅ,ÅidÅ:ÅNotionDatab
aseProperties-IjzLVÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolCallingAgent-GurdE{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","selected":false,"className":""},{"source":"NotionPageUpdate-bexvy","sourceHandle":"{ÅdataTypeÅ:ÅNotionPageUpdateÅ,ÅidÅ:ÅNotionPageUpdate-bexvyÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}","target":"ToolCallingAgent-GurdE","targetHandle":"{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"tools","id":"ToolCallingAgent-GurdE","inputTypes":["Tool","BaseTool"],"type":"other"},"sourceHandle":{"dataType":"NotionPageUpdate","id":"NotionPageUpdate-bexvy","name":"example_tool_output","output_types":["Tool"]}},"id":"reactflow__edge-NotionPageUpdate-bexvy{ÅdataTypeÅ:ÅNotionPageUpdateÅ,ÅidÅ:ÅNotionPageUpdate-bexvyÅ,ÅnameÅ:Åexample_tool_outputÅ,Åoutput_typesÅ:[ÅToolÅ]}-ToolCallingAgent-GurdE{ÅfieldNameÅ:ÅtoolsÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅinputTypesÅ:[ÅToolÅ,ÅBaseToolÅ],ÅtypeÅ:ÅotherÅ}","selected":false,"className":""},{"source":"NotionSearch-EdSJb","sourceHandle":"{ÅdataTypeÅ:ÅNotionSearchÅ,ÅidÅ:ÅNotionSearch-EdSJbÅ,ÅnameÅ:Åexample_outputÅ,Åoutput_typesÅ:[ÅDataÅ]}","target":"ParseData-vYVwu","targetHandle":"{ÅfieldNameÅ:ÅdataÅ,ÅidÅ:ÅParseData-vYVwuÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"data","id":"ParseData-vYVwu","inputTypes":["Data"],"type":"other"},"sourceHandle":{"dataType":"NotionSearch","id":"NotionSearch-EdSJb","name":"example_output","output_types":["Data"]}},"id":"reactflow__edge-NotionSearch-EdSJb{ÅdataTypeÅ:ÅNotionSearchÅ,ÅidÅ:ÅNotionSearch-EdSJbÅ,ÅnameÅ:Åexample_outputÅ,Åoutput_typesÅ:[ÅDataÅ]}-ParseData-vYVwu{ÅfieldNameÅ:ÅdataÅ,ÅidÅ:ÅParseData-vYVwuÅ,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}","selected":false,"className":""},{"source":"ParseData-vYVwu","sourceHandle":"{ÅdataTypeÅ:ÅParseDataÅ,ÅidÅ:ÅParseData-vYVwuÅ,ÅnameÅ:ÅtextÅ,Åoutput_
typesÅ:[ÅMessageÅ]}","target":"Prompt-Lbxk6","targetHandle":"{ÅfieldNameÅ:ÅDATABASESÅ,ÅidÅ:ÅPrompt-Lbxk6Å,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"DATABASES","id":"Prompt-Lbxk6","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"ParseData","id":"ParseData-vYVwu","name":"text","output_types":["Message"]}},"id":"reactflow__edge-ParseData-vYVwu{ÅdataTypeÅ:ÅParseDataÅ,ÅidÅ:ÅParseData-vYVwuÅ,ÅnameÅ:ÅtextÅ,Åoutput_typesÅ:[ÅMessageÅ]}-Prompt-Lbxk6{ÅfieldNameÅ:ÅDATABASESÅ,ÅidÅ:ÅPrompt-Lbxk6Å,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","selected":false,"className":""},{"source":"ToolCallingAgent-GurdE","sourceHandle":"{ÅdataTypeÅ:ÅToolCallingAgentÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅnameÅ:ÅresponseÅ,Åoutput_typesÅ:[ÅMessageÅ]}","target":"ChatOutput-zBv53","targetHandle":"{ÅfieldNameÅ:Åinput_valueÅ,ÅidÅ:ÅChatOutput-zBv53Å,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"input_value","id":"ChatOutput-zBv53","inputTypes":["Message"],"type":"str"},"sourceHandle":{"dataType":"ToolCallingAgent","id":"ToolCallingAgent-GurdE","name":"response","output_types":["Message"]}},"id":"reactflow__edge-ToolCallingAgent-GurdE{ÅdataTypeÅ:ÅToolCallingAgentÅ,ÅidÅ:ÅToolCallingAgent-GurdEÅ,ÅnameÅ:ÅresponseÅ,Åoutput_typesÅ:[ÅMessageÅ]}-ChatOutput-zBv53{ÅfieldNameÅ:Åinput_valueÅ,ÅidÅ:ÅChatOutput-zBv53Å,ÅinputTypesÅ:[ÅMessageÅ],ÅtypeÅ:ÅstrÅ}","selected":false,"className":""},{"source":"NotionUserList-wFEb1","sourceHandle":"{ÅdataTypeÅ:ÅNotionUserListÅ,ÅidÅ:ÅNotionUserList-wFEb1Å,ÅnameÅ:Åexample_outputÅ,Åoutput_typesÅ:[ÅDataÅ]}","target":"ParseData-WKjW6","targetHandle":"{ÅfieldNameÅ:ÅdataÅ,ÅidÅ:ÅParseData-WKjW6Å,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}","data":{"targetHandle":{"fieldName":"data","id":"ParseData-WKjW6","inputTypes":["Data"],"type":"other"},"sourceHandle":{"dataType":"NotionUserList","id":"NotionUserList-wFEb1","name":"example_output","output_types":["Data"]}},"id":"reactflow__edge-NotionUserList-
wFEb1{ÅdataTypeÅ:ÅNotionUserListÅ,ÅidÅ:ÅNotionUserList-wFEb1Å,ÅnameÅ:Åexample_outputÅ,Åoutput_typesÅ:[ÅDataÅ]}-ParseData-WKjW6{ÅfieldNameÅ:ÅdataÅ,ÅidÅ:ÅParseData-WKjW6Å,ÅinputTypesÅ:[ÅDataÅ],ÅtypeÅ:ÅotherÅ}","className":""},{"source":"ParseData-WKjW6","sourceHandle":"{ÅdataTypeÅ:ÅParseDataÅ,ÅidÅ:ÅParseData-WKjW6Å,ÅnameÅ:ÅtextÅ,Åoutput_typesÅ:[ÅMessageÅ]}","target":"Prompt-Lbxk6","targetHandle":"{ÅfieldNameÅ:ÅUSERSÅ,ÅidÅ:ÅPrompt-Lbxk6Å,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"USERS","id":"Prompt-Lbxk6","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"ParseData","id":"ParseData-WKjW6","name":"text","output_types":["Message"]}},"id":"reactflow__edge-ParseData-WKjW6{ÅdataTypeÅ:ÅParseDataÅ,ÅidÅ:ÅParseData-WKjW6Å,ÅnameÅ:ÅtextÅ,Åoutput_typesÅ:[ÅMessageÅ]}-Prompt-Lbxk6{ÅfieldNameÅ:ÅUSERSÅ,ÅidÅ:ÅPrompt-Lbxk6Å,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","className":""},{"source":"CurrentDateComponent-WOwNq","sourceHandle":"{ÅdataTypeÅ:ÅCurrentDateComponentÅ,ÅidÅ:ÅCurrentDateComponent-WOwNqÅ,ÅnameÅ:Åcurrent_dateÅ,Åoutput_typesÅ:[ÅMessageÅ]}","target":"Prompt-19rub","targetHandle":"{ÅfieldNameÅ:ÅCURRENT_DATEÅ,ÅidÅ:ÅPrompt-19rubÅ,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"CURRENT_DATE","id":"Prompt-19rub","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"CurrentDateComponent","id":"CurrentDateComponent-WOwNq","name":"current_date","output_types":["Message"]}},"id":"reactflow__edge-CurrentDateComponent-WOwNq{ÅdataTypeÅ:ÅCurrentDateComponentÅ,ÅidÅ:ÅCurrentDateComponent-WOwNqÅ,ÅnameÅ:Åcurrent_dateÅ,Åoutput_typesÅ:[ÅMessageÅ]}-Prompt-19rub{ÅfieldNameÅ:ÅCURRENT_DATEÅ,ÅidÅ:ÅPrompt-19rubÅ,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","className":""},{"source":"CurrentDateComponent-PZ8xJ","sourceHandle":"{ÅdataTypeÅ:ÅCurrentDateComponentÅ,ÅidÅ:ÅCurrentDateComponent-PZ8xJÅ,ÅnameÅ:Åcurrent_dateÅ,Åoutput_typesÅ:[ÅMessageÅ]}","target":"Prompt-Lbxk6","targe
tHandle":"{ÅfieldNameÅ:ÅCURRENT_DATEÅ,ÅidÅ:ÅPrompt-Lbxk6Å,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","data":{"targetHandle":{"fieldName":"CURRENT_DATE","id":"Prompt-Lbxk6","inputTypes":["Message","Text"],"type":"str"},"sourceHandle":{"dataType":"CurrentDateComponent","id":"CurrentDateComponent-PZ8xJ","name":"current_date","output_types":["Message"]}},"id":"reactflow__edge-CurrentDateComponent-PZ8xJ{ÅdataTypeÅ:ÅCurrentDateComponentÅ,ÅidÅ:ÅCurrentDateComponent-PZ8xJÅ,ÅnameÅ:Åcurrent_dateÅ,Åoutput_typesÅ:[ÅMessageÅ]}-Prompt-Lbxk6{ÅfieldNameÅ:ÅCURRENT_DATEÅ,ÅidÅ:ÅPrompt-Lbxk6Å,ÅinputTypesÅ:[ÅMessageÅ,ÅTextÅ],ÅtypeÅ:ÅstrÅ}","className":""}],"viewport":{"x":-65.48833753518215,"y":119.49034539812101,"zoom":0.5588906662759379}},"description":"The Notion Agent for Meeting Notes is an AI-powered tool that automatically processes meeting transcripts and updates your Notion workspace accordingly. It identifies tasks, action items, and key points from your meetings, then creates new tasks or updates existing ones in Notion without manual input.\n\nTo use it, simply add your API Keys and provide a meeting transcript. The agent will analyze it, interact with your Notion workspace to make necessary updates, and give you a summary of actions taken. This streamlines your workflow, ensuring important meeting outcomes are captured and organized in Notion effortlessly.","name":"Notion Agent - Meeting Notes ","last_tested_version":"1.0.17.dev8","endpoint_name":null,"is_component":false}
\ No newline at end of file
diff --git a/docs/docs/Integrations/Notion/_category_.json b/docs/docs/Integrations/Notion/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..c245462efa38537c129dabc63838396954786ded
--- /dev/null
+++ b/docs/docs/Integrations/Notion/_category_.json
@@ -0,0 +1 @@
+{"position":5, "label":"Notion"}
\ No newline at end of file
diff --git a/docs/docs/Integrations/Notion/integrations-notion.md b/docs/docs/Integrations/Notion/integrations-notion.md
new file mode 100644
index 0000000000000000000000000000000000000000..6664991a07173ce707f44c053f4608148d27a3bf
--- /dev/null
+++ b/docs/docs/Integrations/Notion/integrations-notion.md
@@ -0,0 +1,89 @@
+---
+title: Setup
+sidebar_position: 0
+slug: /integrations/notion/setup
+---
+
+# Set up a Notion App
+
+To use Notion components in Langflow, you first need to create a Notion integration and configure it with the necessary capabilities. This guide will walk you through the process of setting up a Notion integration and granting it access to your Notion databases.
+
+## Prerequisites
+
+- A Notion account with access to the workspace where you want to use the integration.
+- Admin permissions in the Notion workspace to create and manage integrations.
+
+## Create a Notion Integration
+
+1. Go to the [Notion Integrations](https://www.notion.so/my-integrations) page.
+2. Click on the "New integration" button.
+3. Give your integration a name and select the workspace where you want to use it.
+4. Click "Submit" to create the integration.
+
+:::info
+When creating the integration, make sure to enable the necessary capabilities based on your requirements. Refer to the [Notion Integration Capabilities](https://developers.notion.com/reference/capabilities) documentation for more information on each capability.
+:::
+
+
+## Configure Integration Capabilities
+
+After creating the integration, you need to configure its capabilities to define what actions it can perform and what data it can access.
+
+1. In the integration settings page, go to the **Capabilities** tab.
+2. Enable the required capabilities for your integration. For example:
+ - If your integration needs to read data from Notion, enable the "Read content" capability.
+ - If your integration needs to create new content in Notion, enable the "Insert content" capability.
+ - If your integration needs to update existing content in Notion, enable the "Update content" capability.
+3. Configure the user information access level based on your integration's requirements.
+4. Save the changes.
+
+## Obtain Integration Token
+
+To authenticate your integration with Notion, you need to obtain an integration token.
+
+1. In the integration settings page, go to the "Secrets" tab.
+2. Copy the "Internal Integration Token" value. This token will be used to authenticate your integration with Notion.
+
+:::warning
+Your integration token is a sensitive piece of information. Make sure to keep it secure and never share it publicly. Store it safely in your Langflow configuration or environment variables.
+:::
+
+## Grant Integration Access to Notion Databases
+
+For your integration to interact with Notion databases, you need to grant it access to the specific databases it will be working with.
+
+1. Open the Notion database that you want your integration to access.
+2. Click on the "Share" button in the top-right corner of the page.
+3. In the "Invite" section, select your integration from the list.
+4. Click "Invite" to grant the integration access to the database.
+
+:::info
+If your database contains references to other databases, you need to grant the integration access to those referenced databases as well. Repeat steps 1-4 for each referenced database to ensure your integration has the necessary access.
+:::
+
+## Build with Notion Components in Langflow
+
+Once you have set up your Notion integration and granted it access to the required databases, you can start using the Notion components in Langflow.
+
+Langflow provides the following Notion components:
+
+- **Search**: Searches all pages and databases that have been shared with the integration. You can filter results to either pages or databases and specify the sort direction.
+- **List Users**: Retrieves a list of users from the Notion workspace.
+- **List Database Properties**: Retrieves the properties of a specified Notion database.
+- **Create Page**: Creates a new page in a specified Notion database with the provided properties.
+- **Update Page Property**: Updates the properties of an existing Notion page.
+- **Add Content to Page**: Converts markdown text to Notion blocks and appends them to a specified Notion page.
+- **List Pages**: Queries a Notion database with filtering and sorting options.
+- **Page Content Viewer**: Retrieves the content of a Notion page as plain text.
+
+Each of these components outputs both "Data" and "Tool":
+- The "Data" output can be used directly in your Langflow for further processing or display.
+- The "Tool" output can be utilized in Langflow Agents, allowing them to interact with Notion programmatically.
+
+
+## Additional Resources
+
+- [Notion API Documentation](https://developers.notion.com/docs/getting-started)
+- [Notion Integration Capabilities](https://developers.notion.com/reference/capabilities)
+
+If you encounter any issues or have questions, please reach out to our support team or consult the Langflow community forums.
diff --git a/docs/docs/Integrations/Notion/notion-agent-conversational.md b/docs/docs/Integrations/Notion/notion-agent-conversational.md
new file mode 100644
index 0000000000000000000000000000000000000000..150c6e53c6e5bad2ac134f325881dddd54fb6ddb
--- /dev/null
+++ b/docs/docs/Integrations/Notion/notion-agent-conversational.md
@@ -0,0 +1,145 @@
+---
+title: Notion Conversational Agent
+sidebar_position: 2
+slug: /integrations/notion/notion-agent-conversational
+---
+
+The Notion Conversational Agent is an AI-powered assistant that interacts with your Notion workspace through natural language conversations. This flow performs Notion-related tasks like creating pages, searching for information, and managing content, all through a chat interface.
+
+## Prerequisites
+
+---
+
+- [Notion App](/integrations/notion/setup)
+- [Notion account and API key](https://www.notion.so/my-integrations)
+- [OpenAI API key](https://platform.openai.com/account/api-keys)
+- [Download the Conversational Agent Flow](./Conversational_Notion_Agent.json) (Download link)
+
+
+
+## Flow Components
+
+---
+
+
+### Input and Output
+- **Chat Input**: Accepts user queries and commands
+- **Chat Output**: Displays the agent's responses
+
+### Language Model
+- **OpenAI Model**: Processes user input and generates responses
+
+### Agent and Tools
+- **Tool Calling Agent**: Coordinates the use of various Notion tools based on user input
+- **Toolkit**: Combines multiple Notion-specific tools into a single toolkit
+- **Notion Tools**: Various components for interacting with Notion, including:
+ - List Users
+ - List Database Properties
+ - List Pages
+ - Page Content Viewer
+ - Create Page
+ - Update Page Property
+ - Add Content to Page
+ - Search
+
+### Memory and Prompt
+- **Chat Memory**: Stores conversation history
+- **Prompt**: Provides system instructions and context for the agent
+- **Current Date**: Supplies the current date and time for context
+
+
+## Run the Conversational Notion Agent
+
+---
+
+1. Open Langflow and create a new project.
+2. Add the components listed above to your flow canvas, or download the [Conversational Agent Flow](./Conversational_Notion_Agent.json) (Download link) and **Import** the JSON file into Langflow.
+3. Connect the components as shown in the flow diagram.
+4. Input the Notion and OpenAI API keys in their respective components.
+5. To start the flow, click the "Play" button in Langflow.
+6. In the chat interface, enter a Notion-related request or question. For example:
+ - "Create a new page in my task database"
+ - "Search for pages about project management"
+ - "List all users in my Notion workspace"
+7. The agent will process your request, use the appropriate Notion tools, and provide a response.
+8. Continue the conversation, asking follow-up questions or giving new commands as needed.
+
+## Example Interactions
+
+---
+
+```
+User: List all the users in my Notion workspace.
+
+Agent: Certainly! I'll use the Notion User List tool to retrieve that information for you. One moment please.
+
+[Agent uses the Notion User List tool]
+
+Here's a list of users in your Notion workspace:
+
+1. John Doe (john@example.com)
+2. Jane Smith (jane@example.com)
+3. Alex Johnson (alex@example.com)
+
+Is there anything specific you'd like to know about these users or any other Notion-related task I can help you with?
+
+User: Create a new page in my Projects database with the title "Website Redesign".
+
+Agent: Certainly! I'd be happy to create a new page in your Projects database with the title "Website Redesign". I'll use the Notion Page Creator tool to do this. First, let me check the properties of your Projects database to ensure we include all necessary information.
+
+[Agent uses the Notion Database Properties tool]
+
+I've retrieved the properties for your Projects database. Now, I'll create the new page with the title "Website Redesign". One moment please.
+
+[Agent uses the Notion Page Creator tool]
+
+Great news! I've successfully created a new page in your Projects database with the title "Website Redesign". Here are the details:
+
+- Title: Website Redesign
+- Page ID: [page_id_here]
+- URL: [page_url_here]
+
+Is there any additional information you'd like me to add to this new project page, such as a description, due date, or assigned team members?
+
+User: Yes, please add a description: "Redesign company website to improve user experience and modernize the look."
+
+Agent: Certainly! I'd be happy to add that description to the "Website Redesign" project page. I'll use the Add Content to Page tool to update the page with this information.
+
+[Agent uses the Add Content to Page tool]
+
+I've successfully added the description to your "Website Redesign" project page. Here's a summary of what I've done:
+
+1. Located the "Website Redesign" page using the page ID from our previous interaction.
+2. Added the following content to the page:
+
+ Description: Redesign company website to improve user experience and modernize the look.
+
+The description has been added as a new text block on the page. Is there anything else you'd like me to add or modify on this project page?
+
+```
+
+
+## Customization
+
+---
+
+The flow can be customized to meet your team's specific needs.
+
+Customize this flow by:
+
+1. Adjusting the system prompt to change the agent's behavior or knowledge base.
+2. Adding or removing Notion tools based on your specific needs.
+3. Modifying the OpenAI model parameters (e.g., temperature) to adjust the agent's response style.
+
+## Troubleshooting
+
+---
+
+If you encounter issues:
+
+1. Ensure all API keys are correctly set and have the necessary permissions.
+2. Check that your Notion integration has access to the relevant pages and databases.
+3. Verify that all components are properly connected in the flow.
+4. Review the Langflow logs for any error messages.
+
+For more advanced usage and integration options, refer to the [Notion API documentation](https://developers.notion.com/) and [Langflow documentation](/).
\ No newline at end of file
diff --git a/docs/docs/Integrations/Notion/notion-agent-meeting-notes.md b/docs/docs/Integrations/Notion/notion-agent-meeting-notes.md
new file mode 100644
index 0000000000000000000000000000000000000000..7980bc15fd6c85bc8777829b9267aa8c631a1112
--- /dev/null
+++ b/docs/docs/Integrations/Notion/notion-agent-meeting-notes.md
@@ -0,0 +1,176 @@
+---
+title: Notion Meeting Notes Agent
+sidebar_position: 1
+slug: /integrations/notion/notion-agent-meeting-notes
+---
+
+The Notion Agent for Meeting Notes is an AI-powered tool that automatically processes meeting transcripts and updates your Notion workspace. It identifies tasks, action items, and key points from your meetings, then creates new tasks or updates existing ones in Notion without manual input.
+
+## Prerequisites
+---
+
+- [Notion App](/integrations/notion/setup)
+- [Notion API key](https://www.notion.so/my-integrations)
+- [OpenAI API key](https://platform.openai.com/account/api-keys)
+- [Download the Meeting Notes Agent Flow](./Meeting_Notes_Agent.json) (Download link)
+
+:::warning
+
+Before using this flow, ensure you have obtained the necessary API keys from Notion and OpenAI. These keys are essential for the flow to function properly. Keep them secure and do not share them publicly.
+
+:::
+
+## Components
+
+---
+
+
+
+
+
+### Meeting Transcript (Text Input)
+
+This component allows users to input the meeting transcript directly into the flow.
+
+### List Users (Notion Component)
+
+- **Purpose**: Retrieves a list of users from the Notion workspace.
+- **Input**: Notion Secret (API key)
+- **Output**: List of user data
+
+### List Databases (Notion Component)
+
+- **Purpose**: Searches and lists all databases in the Notion workspace.
+- **Input**:
+ - Notion Secret (API key)
+ - Query (optional)
+ - Filter Type (default: database)
+ - Sort Direction
+- **Output**: List of database data
+
+### Prompt
+
+This component creates a dynamic prompt template using the following inputs:
+- Meeting Transcript
+- List of Users
+- List of Databases
+- Current Date
+
+### Meeting Summarizer (Tool Calling Agent)
+
+- **Purpose**: Analyzes the meeting transcript and identifies tasks and action items.
+- **Inputs**:
+ - System Prompt (from the Prompt component)
+ - Language Model (OpenAI)
+ - Tools:
+ - Notion Search
+ - List Database Properties
+ - Create Page
+ - Update Page Property
+ - Add Content to Page
+
+
+
+### Notion Agent (Tool Calling Agent)
+
+- **Purpose**: Executes actions in Notion based on the meeting summary.
+- **Inputs**:
+ - System Prompt (from the second Prompt component)
+ - Language Model (OpenAI)
+ - Tools:
+ - List Database Properties
+ - Create Page
+ - Update Page Property
+ - Add Content to Page
+
+### Notion Components (Tools)
+
+#### List Database Properties
+
+- **Purpose**: Retrieves the properties of a specified Notion database.
+- **Input**:
+ - Database ID
+ - Notion Secret (API key)
+
+#### Create Page
+
+- **Purpose**: Creates a new page in a Notion database.
+- **Inputs**:
+ - Database ID
+ - Notion Secret (API key)
+ - Properties (JSON)
+
+#### Update Page Property
+
+- **Purpose**: Updates the properties of an existing Notion page.
+- **Inputs**:
+ - Page ID
+ - Notion Secret (API key)
+ - Properties to update
+
+#### Add Content to Page
+
+- **Purpose**: Converts markdown text to Notion blocks and appends them to a specified Notion page.
+- **Inputs**:
+ - Page/Block ID
+ - Notion Secret (API key)
+ - Markdown text
+
+### Chat Output
+
+Displays the final output of the Notion Agent in the Playground.
+
+## Flow Process
+
+---
+
+1. The user inputs a meeting transcript.
+2. The flow retrieves the list of Notion users and databases.
+3. A prompt is generated using the transcript, user list, database list, and current date.
+4. The Meeting Summarizer analyzes the transcript and identifies tasks and action items.
+5. The Notion Agent uses the meeting summary to:
+ - Create new pages for new tasks
+ - Update existing pages for existing tasks
+ - Add content to pages with meeting notes
+6. The Chat Output displays a summary of actions taken in Notion.
+
+## Run the Notion Meeting Notes flow
+
+---
+
+To run the Notion Agent for Meeting Notes:
+
+1. Open Langflow and create a new project.
+2. Add the components listed above to your flow canvas, or download the [Flow Meeting Agent Flow](./Meeting_Notes_Agent.json)(Download link) and **Import** the JSON file into Langflow.
+3. Connect the components as shown in the flow diagram.
+4. Input the Notion and OpenAI API keys in their respective components.
+5. Paste your meeting transcript into the Meeting Transcript component.
+6. Run the flow by clicking **Play** on the **Chat Output** component.
+7. Review the output in the Chat Output component, which will summarize the actions taken in your Notion workspace.
+
+For optimal results, use detailed meeting transcripts. The quality of the output depends on the comprehensiveness of the input provided.
+
+## Customization
+
+---
+
+The flow can be customized to meet your team's specific needs.
+
+Customize this flow by:
+
+1. Adjusting the system prompt to change the agent's behavior or knowledge base.
+2. Adding or removing Notion tools based on your specific needs.
+3. Modifying the OpenAI model parameters (e.g., temperature) to adjust the agent's response style.
+
+## Troubleshooting
+
+---
+
+If you encounter issues:
+
+1. Ensure all API keys are correctly set and have the necessary permissions.
+2. Check that your Notion integration has access to the relevant pages and databases.
+3. Verify that all components are properly connected in the flow.
+4. Review the Langflow logs for any error messages.
+
+For more advanced usage and integration options, refer to the [Notion API documentation](https://developers.notion.com/) and [Langflow documentation](/).
\ No newline at end of file
diff --git a/docs/docs/Integrations/Notion/notion_conversational_agent_tools.png b/docs/docs/Integrations/Notion/notion_conversational_agent_tools.png
new file mode 100644
index 0000000000000000000000000000000000000000..bfedc554b89cc62389ed79567c80dd1ecb709c1d
--- /dev/null
+++ b/docs/docs/Integrations/Notion/notion_conversational_agent_tools.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7542eda614fbb215e6b77577362e78d3e144017a9e2d80b57ba084e8801b10d
+size 134025
diff --git a/docs/docs/Integrations/Notion/notion_meeting_agent_part_1.png b/docs/docs/Integrations/Notion/notion_meeting_agent_part_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..24c6284e38c5d06b60799d9f9c76318b6c4c960a
--- /dev/null
+++ b/docs/docs/Integrations/Notion/notion_meeting_agent_part_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bdf068f3bf8d406b8ed0fa3496b74a654f29a545d7f5a8d14c289c0fc980c6e3
+size 115417
diff --git a/docs/docs/Integrations/Notion/notion_meeting_agent_part_2.png b/docs/docs/Integrations/Notion/notion_meeting_agent_part_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..fc1141c01f26d9e31e13bd6c5cc561576e2ed42b
--- /dev/null
+++ b/docs/docs/Integrations/Notion/notion_meeting_agent_part_2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd834610ac86c204c3902839305711a2f2f42d71ac303262488ed6c80b352cc3
+size 181004
diff --git a/docs/docs/Integrations/_category_.json b/docs/docs/Integrations/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..6aed9c635f45a827895d8e5909512949e937445f
--- /dev/null
+++ b/docs/docs/Integrations/_category_.json
@@ -0,0 +1 @@
+{"position":9, "label":"Integrations"}
\ No newline at end of file
diff --git a/docs/docs/Integrations/assemblyai-components.png b/docs/docs/Integrations/assemblyai-components.png
new file mode 100644
index 0000000000000000000000000000000000000000..95d34bf45ac459abe1e298a9da9e0c45d11d90c9
--- /dev/null
+++ b/docs/docs/Integrations/assemblyai-components.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c727ea84e49bd5035db145125b092d26fa6ae81700182b34ea1114e7788e394f
+size 238172
diff --git a/docs/docs/Integrations/integrations-assemblyai.md b/docs/docs/Integrations/integrations-assemblyai.md
new file mode 100644
index 0000000000000000000000000000000000000000..3c9ec98b4015b37e1bb974f2718974abaa9d6398
--- /dev/null
+++ b/docs/docs/Integrations/integrations-assemblyai.md
@@ -0,0 +1,165 @@
+---
+title: AssemblyAI
+sidebar_position: 1
+slug: /integrations-assemblyai
+---
+
+
+
+# AssemblyAI
+
+The AssemblyAI components allow you to apply powerful Speech AI models to your app for tasks like:
+
+- Transcribing audio and video files
+- Formatting transcripts
+- Generating subtitles
+- Applying LLMs to audio files
+
+More info about AssemblyAI:
+
+- [Website](https://www.assemblyai.com/)
+- [AssemblyAI API Docs](https://www.assemblyai.com/docs)
+- [Get a Free API key](https://www.assemblyai.com/dashboard/signup)
+
+
+## Prerequisites
+
+You need an **AssemblyAI API key**. After creating a free account, you'll find the API key in your dashboard. [Get a Free API key here](https://www.assemblyai.com/dashboard/signup).
+
+Enter the key in the *AssemblyAI API Key* field in all components that require the key.
+
+(Optional): To use LeMUR, you need to upgrade your AssemblyAI account, since this is not included in the free account.
+
+## Components
+
+
+
+### AssemblyAI Start Transcript
+
+This component allows you to submit an audio or video file for transcription.
+
+**Tip**: You can freeze the path of this component to only submit the file once.
+
+- **Input**:
+ - AssemblyAI API Key: Your API key.
+ - Audio File: The audio or video file to transcribe.
+ - Speech Model (Optional): Select the class of models. Default is *Best*. See [speech models](https://www.assemblyai.com/docs/speech-to-text/speech-recognition#select-the-speech-model-with-best-and-nano) for more info.
+ - Automatic Language Detection (Optional): Enable automatic language detection.
+ - Language (Optional): The language of the audio file. Can be set manually if automatic language detection is disabled.
+ See [supported languages](https://www.assemblyai.com/docs/getting-started/supported-languages) for a list of supported language codes.
+ - Enable Speaker Labels (Optional): Detect speakers in an audio file and what each speaker said.
+ - Expected Number of Speakers (Optional): Set the expected number of speakers, if Speaker Labels is enabled.
+ - Audio File URL (Optional): The URL of the audio or video file to transcribe. Can be used instead of *Audio File*.
+ - Punctuate (Optional): Apply punctuation. Default is true.
+ - Format Text (Optional): Apply casing and text formatting. Default is true.
+
+- **Output**:
+ - Transcript ID: The id of the transcript
+
+
+### AssemblyAI Poll Transcript
+
+This component allows you to poll the transcript. It checks the status of the transcript every few seconds until the transcription is completed.
+
+- **Input**:
+ - AssemblyAI API Key: Your API key.
+ - Polling Interval (Optional): The polling interval in seconds. Default is 3.
+
+- **Output**:
+ - Transcription Result: The AssemblyAI JSON response of a completed transcript. Contains the text and other info.
+
+
+### AssemblyAI Get Subtitles
+
+This component allows you to generate subtitles in SRT or VTT format.
+
+- **Input**:
+ - AssemblyAI API Key: Your API key.
+ - Transcription Result: The output of the *Poll Transcript* component.
+ - Subtitle Format: The format of the captions (SRT or VTT).
+ - Character per Caption (Optional): The maximum number of characters per caption (0 for no limit).
+
+- **Output**:
+ - Subtitles: A JSON response with the `subtitles` field containing the captions in SRT or VTT format.
+
+
+### AssemblyAI LeMUR
+
+This component allows you to apply Large Language Models to spoken data using the [AssemblyAI LeMUR framework](https://www.assemblyai.com/docs/lemur).
+
+LeMUR automatically ingests the transcript as additional context, making it easy to apply LLMs to audio data. You can use it for tasks like summarizing audio, extracting insights, or asking questions.
+
+- **Input**:
+ - AssemblyAI API Key: Your API key.
+ - Transcription Result: The output of the *Poll Transcript* component.
+ - Input Prompt: The text to prompt the model. You can type your prompt in this field or connect it to a *Prompt* component.
+ - Final Model: The model that is used for the final prompt after compression is performed. Default is Claude 3.5 Sonnet.
+ - Temperature (Optional): The temperature to use for the model. Default is 0.0.
+ - Max Output Size (Optional): Max output size in tokens, up to 4000. Default is 2000.
+ - Endpoint (Optional): The LeMUR endpoint to use. Default is "task". For "summary" and "question-answer", no prompt input is needed. See [LeMUR API docs](https://www.assemblyai.com/docs/api-reference/lemur/) for more info.
+ - Questions (Optional): Comma-separated list of your questions. Only used if *Endpoint* is "question-answer".
+ - Transcript IDs (Optional): Comma-separated list of transcript IDs. LeMUR can perform actions over multiple transcripts. If provided, the *Transcription Result* is ignored.
+
+- **Output**:
+ - LeMUR Response: The generated LLM response.
+
+### AssemblyAI List Transcripts
+
+This component can be used as a standalone component to list all previously generated transcripts.
+
+- **Input**:
+ - AssemblyAI API Key: Your API key.
+ - Limit (Optional): Maximum number of transcripts to retrieve. Default is 20, use 0 for all.
+ - Filter (Optional): Filter by transcript status.
+ - Created On (Optional): Only get transcripts created on this date (YYYY-MM-DD).
+ - Throttled Only (Optional): Only get throttled transcripts, overrides the status filter
+
+- **Output**:
+ - Transcript List: A list of all transcripts with info such as the transcript ID, the status, and the data.
+
+
+## Flow Process
+
+1. The user inputs an audio or video file.
+2. The user can also input an LLM prompt. In this example, we want to generate a summary of the transcript.
+3. The flow submits the audio file for transcription.
+4. The flow checks the status of the transcript every few seconds until transcription is completed.
+5. The flow parses the transcription result and outputs the transcribed text.
+6. The flow also generates subtitles.
+7. The flow applies the LLM prompt to generate a summary.
+8. As a standalone component, all transcripts can be listed.
+
+## Run the Transcription and Speech AI Flow
+
+To run the Transcription and Speech AI Flow:
+
+1. Open Langflow and create a new project.
+2. Add the components listed above to your flow canvas, or download the [AssemblyAI Transcription and Speech AI Flow](./AssemblyAI_Flow.json)(Download link) and **Import** the JSON file into Langflow.
+3. Connect the components as shown in the flow diagram. **Tip**: Freeze the path of the *Start Transcript* component to only submit the file once.
+4. Input the AssemblyAI API key in all components that require the key (Start Transcript, Poll Transcript, Get Subtitles, LeMUR, List Transcripts).
+5. Select an audio or video file in the *Start Transcript* component.
+6. Run the flow by clicking **Play** on the *Parse Data* component. Make sure that the specified template is `{text}`.
+7. To generate subtitles, click **Play** on the *Get Subtitles* component.
+8. To apply an LLM to your audio file, click **Play** on the *LeMUR* component. Note that you need an upgraded AssemblyAI account to use LeMUR.
+9. To list all transcripts, click **Play** on the *List Transcripts* component.
+
+
+## Customization
+
+The flow can be customized by:
+
+1. Modifying the parameters in the *Start Transcript* component.
+2. Modifying the subtitle format in the *Get Subtitles* component.
+3. Modifying the LLM prompt for input of the *LeMUR* component.
+4. Modifying the LLM parameters (e.g., temperature) in the *LeMUR* component.
+
+## Troubleshooting
+
+If you encounter issues:
+
+1. Ensure the API key is correctly set in all components that require the key.
+2. To use LeMUR, you need to upgrade your AssemblyAI account, since this is not included in the free account.
+3. Verify that all components are properly connected in the flow.
+4. Review the Langflow logs for any error messages.
+
+For more advanced usage, refer to the [AssemblyAI API documentation](https://www.assemblyai.com/docs/). If you need more help, you can reach out to the [AssemblyAI support](https://www.assemblyai.com/contact/support).
diff --git a/docs/docs/Integrations/integrations-langfuse.md b/docs/docs/Integrations/integrations-langfuse.md
new file mode 100644
index 0000000000000000000000000000000000000000..2e64536f05da2226f63d8e0a956057078d4e0452
--- /dev/null
+++ b/docs/docs/Integrations/integrations-langfuse.md
@@ -0,0 +1,78 @@
+---
+title: Langfuse
+sidebar_position: 2
+slug: /integrations-langfuse
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Integrate Langfuse with Langflow
+
+[Langfuse](https://langfuse.com/) is an observability and analytics platform specifically designed for language models and AI applications.
+
+This guide walks you through how to configure Langflow to collect [tracing](https://langfuse.com/docs/tracing) data about your flow executions and automatically send the data to Langfuse.
+
+## Prerequisites
+
+- A project in Langflow with a runnable flow
+- A Langfuse Cloud account in any [data region](https://langfuse.com/faq/all/cloud-data-regions)
+- A Langfuse organization and project
+
+## Create Langfuse project credentials
+
+1. In Langfuse, go to your project settings, and then create a new set of API keys.
+
+2. Copy the following API key information:
+
+ - Secret Key
+ - Public Key
+ - Host URL
+
+## Set your Langfuse credentials as environment variables
+
+Set your Langfuse project credentials as environment variables in the same environment where you run Langflow.
+
+You can use any method you prefer to set environment variables.
+The following examples show how to set environment variables in a terminal session (Linux or macOS) and in a command prompt session (Windows):
+
+
+
+
+```
+export LANGFUSE_SECRET_KEY=SECRET_KEY
+export LANGFUSE_PUBLIC_KEY=PUBLIC_KEY
+export LANGFUSE_HOST=HOST_URL
+```
+
+
+
+```
+set LANGFUSE_SECRET_KEY=SECRET_KEY
+set LANGFUSE_PUBLIC_KEY=PUBLIC_KEY
+set LANGFUSE_HOST=HOST_URL
+```
+
+
+
+
+Replace `SECRET_KEY`, `PUBLIC_KEY`, and `HOST_URL` with the API key information you copied from Langfuse.
+
+## Start Langflow and run a flow
+
+1. Start Langflow in the same terminal or environment where you set the environment variables:
+
+ ```bash
+ python -m langflow run
+ ```
+
+2. In Langflow, open an existing project, and then run a flow.
+
+## View tracing data in Langfuse
+
+Langflow automatically collects and sends tracing data about the flow execution to Langfuse.
+You can view the collected data in your Langfuse project dashboard.
+
+## Disable the Langfuse integration
+
+To disable the Langfuse integration, remove the environment variables you set in the previous steps and restart Langflow.
diff --git a/docs/docs/Integrations/integrations-langsmith.md b/docs/docs/Integrations/integrations-langsmith.md
new file mode 100644
index 0000000000000000000000000000000000000000..957ee9d6b3c459c918c7618f82e643d9c9bfc673
--- /dev/null
+++ b/docs/docs/Integrations/integrations-langsmith.md
@@ -0,0 +1,33 @@
+---
+title: LangSmith
+sidebar_position: 3
+slug: /integrations-langsmith
+---
+
+
+
+LangSmith is a full-lifecycle DevOps service from LangChain that provides monitoring and observability. To integrate with Langflow, just add your LangChain API key as a Langflow environment variable and you are good to go!
+
+
+## Step-by-step Configuration {#b912579a43984f9a92921232b67c885d}
+
+
+---
+
+1. Obtain your LangChain API key from [https://smith.langchain.com](https://smith.langchain.com/)
+2. Add the following keys to Langflow .env file:
+
+`LANGCHAIN_API_KEY="your-api-key"` and `LANGCHAIN_PROJECT="your-project-name"`
+
+
+or export the environment variables in your terminal:
+
+
+`export LANGCHAIN_API_KEY="your-api-key"` and `export LANGCHAIN_PROJECT="your-project-name"`
+
+3. Restart Langflow using `langflow run --env-file .env`
+4. Run a project in Langflow.
+5. View the Langsmith dashboard for monitoring and observability.
+
+
+
diff --git a/docs/docs/Integrations/integrations-langwatch.md b/docs/docs/Integrations/integrations-langwatch.md
new file mode 100644
index 0000000000000000000000000000000000000000..626c1b8b5f43414aa93fb623144d13db9663757e
--- /dev/null
+++ b/docs/docs/Integrations/integrations-langwatch.md
@@ -0,0 +1,40 @@
+---
+title: LangWatch
+sidebar_position: 4
+slug: /integrations-langwatch
+---
+
+
+
+# LangWatch {#938674091aac4d9d9aa4aa6eb5c215b4}
+
+
+LangWatch is an all-in-one LLMOps platform for monitoring, observability, analytics, evaluations, and alerting, helping you get user insights and improve your LLM workflows.
+
+
+To integrate with Langflow, just add your LangWatch API key as a Langflow environment variable and you are good to go!
+
+
+## Step-by-step Configuration {#6f1d56ff6063417491d100d522dfcf1a}
+
+1. Obtain your LangWatch API key from [https://app.langwatch.ai/](https://app.langwatch.ai/)
+2. Add the following key to Langflow .env file:
+
+```shell
+LANGWATCH_API_KEY="your-api-key"
+```
+
+
+or export it in your terminal:
+
+
+```shell
+export LANGWATCH_API_KEY="your-api-key"
+```
+
+3. Restart Langflow using `langflow run --env-file .env`
+4. Run a project in Langflow.
+5. View the LangWatch dashboard for monitoring and observability.
+
+
+
diff --git a/docs/docs/Starter-Projects/_category_.json b/docs/docs/Starter-Projects/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..f780f2a6315fbae1dac5eba2d2c6f6afd353f050
--- /dev/null
+++ b/docs/docs/Starter-Projects/_category_.json
@@ -0,0 +1 @@
+{"position":2, "label":"Starter Projects"}
\ No newline at end of file
diff --git a/docs/docs/Starter-Projects/starter-projects-basic-prompting.md b/docs/docs/Starter-Projects/starter-projects-basic-prompting.md
new file mode 100644
index 0000000000000000000000000000000000000000..947fac5249f7a891d8e4cf6481f2ad3c0561e4bd
--- /dev/null
+++ b/docs/docs/Starter-Projects/starter-projects-basic-prompting.md
@@ -0,0 +1,63 @@
+---
+title: Basic Prompting
+sidebar_position: 0
+slug: /starter-projects-basic-prompting
+---
+
+
+
+Prompts serve as the inputs to a large language model (LLM), acting as the interface between human instructions and computational tasks.
+
+
+By submitting natural language requests in a prompt to an LLM, you can obtain answers, generate text, and solve problems.
+
+
+This article demonstrates how to use Langflow's prompt tools to issue basic prompts to an LLM, and how various prompting strategies can affect your outcomes.
+
+
+## Prerequisites {#20bd7bc51ce04e2fb4922c95f00870d3}
+
+
+---
+
+- [Langflow installed and running](/get-started-installation)
+- [OpenAI API key created](https://platform.openai.com/)
+
+## Create the basic prompting flow {#19d5305239c841548a695e2bf7839e7a}
+
+
+1. From the Langflow dashboard, click **New Flow**.
+
+
+2. Select **Basic Prompting**.
+
+
+3. The **Basic Prompting** flow is created.
+
+
+
+
+
+This flow allows you to chat with the **OpenAI** component through the **Prompt** component.
+
+
+Examine the **Prompt** component. The **Template** field instructs the LLM to `Answer the user as if you were a pirate.` This should be interesting...
+
+
+4. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**.
+
+ 1. In the **Variable Name** field, enter `openai_api_key`.
+ 2. In the **Value** field, paste your OpenAI API Key (`sk-...`).
+ 3. Click **Save Variable**.
+
+
+## Run the basic prompting flow {#ce52f8e6b491452a9dfb069feb962eed}
+
+1. Click the **Playground** button on the control panel (bottom right side of the workspace). This is where you can interact with your AI.
+2. Type a message and press Enter. The bot should respond in a markedly piratical manner!
+
+## Modify the prompt for a different result {#3ab045fcbe774c8fb3adc528f9042ba0}
+
+1. To modify your prompt results, in the **Prompt** template, click the **Template** field. The **Edit Prompt** window opens.
+2. Change `Answer the user as if you were a pirate` to a different character, perhaps `Answer the user as if you were Hermione Granger.`
+3. Run the workflow again. The response will be markedly different.
diff --git a/docs/docs/Starter-Projects/starter-projects-blog-writer.md b/docs/docs/Starter-Projects/starter-projects-blog-writer.md
new file mode 100644
index 0000000000000000000000000000000000000000..212ddaa2a47cb644f4b63f3483bc086329d8a3a9
--- /dev/null
+++ b/docs/docs/Starter-Projects/starter-projects-blog-writer.md
@@ -0,0 +1,59 @@
+---
+title: Blog Writer
+sidebar_position: 1
+slug: /starter-projects-blog-writer
+---
+
+Build a Blog Writer flow for a one-shot application using OpenAI.
+
+This flow extends the Basic Prompting flow with the **URL** and **Parse data** components that fetch content from multiple URLs and convert the loaded data into plain text.
+
+OpenAI uses this loaded data to generate a blog post, as instructed by the **Text input** component.
+
+
+## Prerequisites {#899268e6c12c49b59215373a38287507}
+
+
+---
+
+- [Langflow installed and running](/get-started-installation)
+- [OpenAI API key created](https://platform.openai.com/)
+
+
+## Create the blog writer flow {#0c1a9c65b7d640f693ec3aad963416ff}
+
+1. From the Langflow dashboard, click **New Flow**.
+2. Select **Blog Writer**.
+3. The **Blog Writer** flow is created.
+
+
+
+
+This flow creates a one-shot article generator with **Prompt**, **OpenAI**, and **Chat Output** components, augmented with reference content and instructions from the **URL** and **Text Input** components.
+
+The **URL** component extracts raw text and metadata from one or more web links.
+The **Parse Data** component converts the data coming from the **URL** component into plain text to feed the prompt.
+
+To examine the flow's prompt, click the **Template** field of the **Prompt** component.
+
+```plain
+Reference 1:
+
+{references}
+
+---
+
+{instructions}
+
+Blog:
+```
+
+The `{instructions}` value is received from the **Text input** component, and one or more `{references}` are received from a list of URLs parsed from the **URL** component.
+
+
+### Run the blog writer flow {#b93be7a567f5400293693b31b8d0f81a}
+
+1. Click the **Playground** button. Here you can chat with the AI that has access to the **URL** content.
+2. Click the **Lighting Bolt** icon to run it.
+3. To write about something different, change the values in the **URL** component and adjust the instructions on the left side bar of the **Playground**. Try again and see what the LLM constructs.
+
diff --git a/docs/docs/Starter-Projects/starter-projects-document-qa.md b/docs/docs/Starter-Projects/starter-projects-document-qa.md
new file mode 100644
index 0000000000000000000000000000000000000000..1bf82162892067ed5cd89ff121afbdb66f5aad0b
--- /dev/null
+++ b/docs/docs/Starter-Projects/starter-projects-document-qa.md
@@ -0,0 +1,42 @@
+---
+title: Document QA
+sidebar_position: 2
+slug: /starter-projects-document-qa
+---
+
+
+
+Build a question-and-answer chatbot with a document loaded from local memory.
+
+
+## Prerequisites {#6555c100a30e4a21954af25e2e05403a}
+
+
+---
+
+- [Langflow installed and running](/get-started-installation)
+- [OpenAI API key created](https://platform.openai.com/)
+
+
+## Create the document QA flow {#204500104f024553aab2b633bb99f603}
+
+1. From the Langflow dashboard, click **New Flow**.
+2. Select **Document QA**.
+3. The **Document QA** flow is created.
+
+
+
+
+This flow is composed of a standard chatbot with the **Chat Input**, **Prompt**, **OpenAI**, and **Chat Output** components, but it also incorporates a **File** component, which loads a file from your local machine. **Parse Data** is used to convert the data from **File** into the **Prompt** component as `{Document}`. The **Prompt** component is instructed to answer questions based on the contents of `{Document}`. This gives the **OpenAI** component context it would not otherwise have access to.
+
+
+### Run the document QA flow {#f58fcc2b9e594156a829b1772b6a7191}
+
+
+1. To select a document to load, in the **File** component, click the **Path** field. Select a local file, and then click **Open**. The file name appears in the field.
+
+2. Click the **Playground** button. Here you can chat with the AI that has access to your document's content.
+
+
+3. Type in a question about the document content and press Enter. You should see a contextual response.
+
diff --git a/docs/docs/Starter-Projects/starter-projects-memory-chatbot.md b/docs/docs/Starter-Projects/starter-projects-memory-chatbot.md
new file mode 100644
index 0000000000000000000000000000000000000000..efd1bdbb69445b7e719bd2e669273140dc76daaf
--- /dev/null
+++ b/docs/docs/Starter-Projects/starter-projects-memory-chatbot.md
@@ -0,0 +1,77 @@
+---
+title: Memory Chatbot
+sidebar_position: 3
+slug: /starter-projects-memory-chatbot
+---
+
+
+
+This flow extends the [basic prompting](/starter-projects-basic-prompting) flow to include a chat memory. This makes the AI remember previous user inputs.
+
+
+## Prerequisites {#a71d73e99b1543bbba827207503cf31f}
+
+
+---
+
+- [Langflow installed and running](/get-started-installation)
+- [OpenAI API key created](https://platform.openai.com/)
+
+
+## Create the memory chatbot flow
+
+1. From the Langflow dashboard, click **New Flow**.
+2. Select **Memory Chatbot**.
+3. The **Memory Chatbot** flow is created.
+
+
+
+
+This flow uses the same components as the Basic Prompting one, but extends it with a **Chat Memory** component. This component retrieves previous messages and sends them to the **Prompt** component to fill a part of the **Template** with context.
+
+
+By clicking the template, you'll see the prompt editor like below:
+
+
+This gives the **OpenAI** component a memory of previous chat messages.
+
+
+## Run the memory chatbot flow
+
+1. Open the Playground.
+2. Type multiple questions. In the **Memories** tab, your queries are logged in order. Up to 100 queries are stored by default. Try telling the AI your name and asking `What is my name?` on a second message, or `What is the first subject I asked you about?` to validate that previous knowledge is taking effect.
+
+:::tip
+
+Check and adjust advanced parameters by opening the Advanced Settings of the **Chat Memory** component.
+
+:::
+
+
+
+## Use Session ID with the memory chatbot flow {#4e68c3c0750942f98c45c1c45d7ffbbe}
+
+
+---
+
+
+`SessionID` is a unique identifier in Langflow that stores conversation sessions between the AI and a user. A `SessionID` is created when a conversation is initiated, and then associated with all subsequent messages during that session.
+
+
+In the **Memory Chatbot** flow you created, the **Chat Memory** component references past interactions by **Session ID**. You can demonstrate this by modifying the **Session ID** value to switch between conversation histories.
+
+1. In the **Session ID** field of the **Chat Memory** and **Chat Input** components, add a **Session ID** value like `MySessionID`.
+2. Now, once you send a new message in the **Playground**, you should have a new memory created on the **Memories** tab.
+3. Notice how your conversation is being stored in different memory sessions.
+
+:::tip
+
+Every chat component in Langflow comes with a `SessionID`. It defaults to the flow ID. Explore how changing it affects what the AI remembers.
+
+:::
+
+
+
+
+Learn more about memories in the [Chat Memory](/guides-chat-memory) section.
+
diff --git a/docs/docs/Starter-Projects/starter-projects-sequential-agent.md b/docs/docs/Starter-Projects/starter-projects-sequential-agent.md
new file mode 100644
index 0000000000000000000000000000000000000000..7060f02a9358c6cb6cf4f9bd54fe0b8b38951ff5
--- /dev/null
+++ b/docs/docs/Starter-Projects/starter-projects-sequential-agent.md
@@ -0,0 +1,53 @@
+---
+title: Sequential tasks agent
+sidebar_position: 4
+slug: /starter-projects-sequential-agent
+---
+
+Build a **Sequential Tasks Agent** flow for a multi-agent application using multiple **Agent** components.
+
+Each agent has an LLM model and a unique set of tools at its disposal, with **Prompt** components connected to the **Agent Instructions** fields to control the agent's behavior. For example, the **Researcher Agent** has a **Tavily AI Search** component connected as a tool. The **Prompt** instructs the agent how to answer your query, format the response, and pass the query and research results on to the next agent in the flow.
+
+Each successive agent in the flow builds on the work of the previous agent, creating a chain of reasoning for solving complex problems.
+
+## Prerequisites
+- [An OpenAI API key](https://platform.openai.com/)
+- [A Tavily AI API key](https://www.tavily.com/)
+
+## Open Langflow and create a new flow
+
+1. Click **New Flow**, and then select **Sequential Tasks Agent**.
+This opens a starter template with the necessary components to run the flow.
+
+
+
+The Sequential Tasks Agent flow consists of these components:
+
+* The **Agent** components use the connected LLM to analyze the user's input and select among the connected tools to complete the tasks.
+* The **Chat Input** component accepts user input to the chat.
+* The **Prompt** component combines the user input with a user-defined prompt.
+* The **Chat Output** component prints the flow's output to the chat.
+* The **YFinance** tool component provides access to financial data from Yahoo Finance.
+* The **Tavily AI Search** tool component performs AI-powered web searches.
+* The **Calculator** tool component performs mathematical calculations.
+
+## Run the Sequential Tasks Agent flow
+
+1. Add your OpenAI API key to the **Agent** components.
+2. Add your Tavily API key to the **Tavily** component.
+3. Click **Playground** to start a chat session with the template's default question.
+
+```plain
+Should I invest in Tesla (TSLA) stock right now?
+Please analyze the company's current position, market trends,
+financial health, and provide a clear investment recommendation.
+```
+
+This question provides clear instructions to the agents about how to proceed and what question to answer.
+
+4. In the **Playground**, inspect the answers to see how the agents use the **Tavily AI Search** tool to research the query, the **YFinance** tool to analyze the stock data, and the **Calculator** to determine if the stock is a wise investment.
+5. Ask similar questions to see how the agents use the tools to answer your queries.
+
+## Next steps
+
+To create your own multi-agent flow, see [Create a problem solving agent](/agents-tool-calling-agent-component).
\ No newline at end of file
diff --git a/docs/docs/Starter-Projects/starter-projects-simple-agent.md b/docs/docs/Starter-Projects/starter-projects-simple-agent.md
new file mode 100644
index 0000000000000000000000000000000000000000..52879c4ad220ed858ee108bd64d8ed9c583993c1
--- /dev/null
+++ b/docs/docs/Starter-Projects/starter-projects-simple-agent.md
@@ -0,0 +1,58 @@
+---
+title: Simple agent
+sidebar_position: 6
+slug: /starter-projects-simple-agent
+---
+
+Build a **Simple Agent** flow for an agentic application using the Tool-calling agent.
+
+An **agent** uses an LLM as its "brain" to select among the connected tools and complete its tasks.
+
+In this flow, the **Tool-calling agent** reasons using an **Open AI** LLM to solve math problems. It will select the **Calculator** tool for simpler math, and the **Python REPL** tool (with the Python `math` library) for more complex problems.
+
+## Prerequisites
+
+To use this flow, you need an OpenAI API key.
+
+## Open Langflow and start a new flow
+
+Click **New Flow**, and then select the **Simple Agent** flow.
+
+This opens a starter flow with the necessary components to run an agentic application using the Tool-calling agent.
+
+## Simple Agent flow
+
+
+
+The **Simple Agent** flow consists of these components:
+
+* The **Tool calling agent** component uses the connected LLM to reason through the user's input and select among the connected tools to complete its task.
+* The **Python REPL tool** component executes Python code in a REPL (Read-Evaluate-Print Loop) interpreter.
+* The **Calculator** component performs basic arithmetic operations.
+* The **Chat Input** component accepts user input to the chat.
+* The **Prompt** component combines the user input with a user-defined prompt.
+* The **Chat Output** component prints the flow's output to the chat.
+* The **OpenAI** model component sends the user input and prompt to the OpenAI API and receives a response.
+
+## Run the Simple Agent flow
+
+1. Add your credentials to the Open AI component.
+2. In the **Chat output** component, click ▶️ **Play** to start the end-to-end application flow.
+ A **Chat output built successfully** message and a ✅ check on all components indicate that the flow ran successfully.
+3. Click **Playground** to start a chat session.
+4. Enter a simple math problem, like `2 + 2`, and then make sure the bot responds with the correct answer.
+5. To confirm the REPL interpreter is working, prompt the `math` library directly with `math.sqrt(4)` and see if the bot responds with `2.0`.
+6. The agent will also reason through more complex word problems. For example, prompt the agent with the following math problem:
+
+```plain
+The equation (24x^2 + 25x - 47)/(ax - 2) = -8x - 3 - 53/(ax - 2) is true for all values of x ≠ 2/a, where a is a constant.
+What is the value of a?
+A) -16
+B) -3
+C) 3
+D) 16
+```
+
+The agent should respond with `B`.
+
+Now that your query has completed the journey from **Chat input** to **Chat output**, you have completed the **Simple Agent** flow.
diff --git a/docs/docs/Starter-Projects/starter-projects-travel-planning-agent.md b/docs/docs/Starter-Projects/starter-projects-travel-planning-agent.md
new file mode 100644
index 0000000000000000000000000000000000000000..c34704d06c2947f095cf19e61ef970f7784bf84a
--- /dev/null
+++ b/docs/docs/Starter-Projects/starter-projects-travel-planning-agent.md
@@ -0,0 +1,48 @@
+---
+title: Travel planning agent
+sidebar_position: 8
+slug: /starter-projects-travel-planning-agent
+---
+
+Build a **Travel Planning Agent** flow for an agentic application using multiple Tool-calling agents.
+
+An **agent** uses an LLM as its "brain" to select among the connected tools and complete its tasks.
+
+In this flow, multiple **Tool-calling agents** reason using an **Open AI** LLM to plan a travel journey. Each agent is given a different responsibility defined by its **System Prompt** field.
+
+The **Chat input** defines where the user wants to go, and passes the result to the **City Selection** agent. The **Local Expert** agent then adds information based on the selected cities, and the **Travel Concierge** assembles a seven-day travel plan in Markdown.
+
+All agents have access to the **Search API** and **URL Content Fetcher** components, while only the Travel Concierge can use the **Calculator** for computing the trip costs.
+
+## Prerequisites
+
+To use this flow, you need an [OpenAI API key](https://platform.openai.com/) and a [Search API key](https://www.searchapi.io/).
+
+## Open Langflow and start a new flow
+
+Click **New Flow**, and then select the **Travel Planning Agent** flow.
+
+This opens a starter flow with the necessary components to run an agentic application using multiple Tool-calling agents.
+
+## Create the travel planning agent flow
+
+
+
+The **Travel Planning Agent** flow consists of these components:
+
+* Multiple **Tool calling agent** components that use the connected LLM to reason through the user's input and select among the connected tools to complete their tasks.
+* The **Calculator** component performs basic arithmetic operations.
+* The **URL Content Fetcher** component scrapes content from a given URL.
+* The **Chat Input** component accepts user input to the chat.
+* The **Chat Output** component prints the flow's output to the chat.
+* The **OpenAI** model component sends the user input and prompt to the OpenAI API and receives a response.
+
+## Run the travel planning agent flow
+
+1. Add your credentials to the OpenAI and Search API components.
+2. In the **Chat output** component, click ▶️ Play to start the end-to-end application flow.
+   A **Chat output built successfully** message and a ✅ Check on all components indicate that the flow ran successfully.
+3. Click **Playground** to start a chat session.
+ You should receive a detailed, helpful answer to the journey defined in the **Chat input** component.
+
+Now that your query has completed the journey from **Chat input** to **Chat output**, you have completed the **Travel Planning Agent** flow.
diff --git a/docs/docs/Starter-Projects/starter-projects-vector-store-rag.md b/docs/docs/Starter-Projects/starter-projects-vector-store-rag.md
new file mode 100644
index 0000000000000000000000000000000000000000..3f3c770fef4677505d6569457e6932e8b90806c4
--- /dev/null
+++ b/docs/docs/Starter-Projects/starter-projects-vector-store-rag.md
@@ -0,0 +1,85 @@
+---
+title: Vector Store RAG
+sidebar_position: 4
+slug: /starter-projects-vector-store-rag
+---
+
+
+
+Retrieval Augmented Generation, or RAG, is a pattern for augmenting an LLM's responses with your own data: relevant content is retrieved at query time and supplied to the model as context, with no retraining required.
+
+
+RAG is backed by a **vector store**, a vector database which stores embeddings of the ingested data.
+
+
+This enables **vector search**, a more powerful and context-aware search.
+
+
+We've chosen [Astra DB](https://astra.datastax.com/signup?utm_source=langflow-pre-release&utm_medium=referral&utm_campaign=langflow-announcement&utm_content=create-a-free-astra-db-account) as the vector database for this starter flow, but you can follow along with any of Langflow's vector database options.
+
+
+## Prerequisites {#6aa2c6dff6894eccadc39d4903d79e66}
+
+
+---
+
+- [Langflow installed and running](https://docs.langflow.org/get-started-installation)
+- [OpenAI API key](https://platform.openai.com/)
+- [An Astra DB vector database created](https://docs.datastax.com/en/astra-db-serverless/get-started/quickstart.html) with:
+ - Application Token
+ - API Endpoint
+
+
+## Create the vector store RAG flow
+
+1. From the Langflow dashboard, click **New Flow**.
+2. Select **Vector Store RAG**.
+3. The **Vector Store RAG** flow is created.
+
+
+
+
+The vector store RAG flow is built of two separate flows for ingestion and query.
+
+
+The **ingestion** part (bottom of the screen) populates the vector store with data from a local file. It ingests data from a file (**File**), splits it into chunks (**Split Text**), indexes it in Astra DB (**Astra DB**), and computes embeddings for the chunks using an embedding model (**OpenAI Embeddings**).
+
+
+:::tip
+
+Embeddings are numerical vectors that represent data meaningfully. They enable efficient similarity searches in vector stores by placing similar items close together in the vector space, enhancing search and recommendation tasks.
+
+:::
+
+
+
+
+This part creates a searchable index to be queried for contextual similarity.
+
+
+The **query** part (top of the screen) allows users to retrieve embedded vector store data. Components:
+
+- **Chat Input** defines where to send the user input (coming from the Playground).
+- **OpenAI Embeddings** is the model used to generate embeddings from the user input.
+- **Astra DB** retrieves the most relevant chunks from the Astra DB database (here, used for search, not ingestion).
+- **Parse Data** converts chunks coming from the **Astra DB** component into plain text to feed a prompt.
+- **Prompt** takes in the user input and the retrieved chunks as text and builds a prompt for the model.
+- **OpenAI** takes in the prompt to generate a response.
+- **Chat Output** component displays the response in the Playground.
+1. To create an environment variable for the **OpenAI** component, in the **OpenAI API Key** field, click the **Globe** button, and then click **Add New Variable**.
+ 1. In the **Variable Name** field, enter `openai_api_key`.
+ 2. In the **Value** field, paste your OpenAI API Key (`sk-...`).
+ 3. Click **Save Variable**.
+1. To create environment variables for the **Astra DB** and **Astra DB Search** components:
+ 1. In the **Token** field, click the **Globe** button, and then click **Add New Variable**.
+ 2. In the **Variable Name** field, enter `astra_token`.
+ 3. In the **Value** field, paste your Astra application token (`AstraCS:WSnyFUhRxsrg...`).
+ 4. Click **Save Variable**.
+ 5. Repeat the above steps for the **API Endpoint** field, pasting your Astra API Endpoint instead (`https://ASTRA_DB_ID-ASTRA_DB_REGION.apps.astra.datastax.com`).
+ 6. Add the global variable to both the **Astra DB** and **Astra DB Search** components.
+
+## Run the Vector Store RAG flow
+
+1. Click the **Playground** button. Here you can chat with the AI that uses context from the database you created.
+2. Type a message and press Enter. (Try something like "What topics do you know about?")
+3. The bot will respond with a summary of the data you've embedded.
diff --git a/docs/docs/Workspace/_category_.json b/docs/docs/Workspace/_category_.json
new file mode 100644
index 0000000000000000000000000000000000000000..8e39087bac09e96c55ceb44def28f0bfb66d0286
--- /dev/null
+++ b/docs/docs/Workspace/_category_.json
@@ -0,0 +1 @@
+{"position":4, "label":"Workspace"}
\ No newline at end of file
diff --git a/docs/docs/Workspace/workspace-api.md b/docs/docs/Workspace/workspace-api.md
new file mode 100644
index 0000000000000000000000000000000000000000..b85283c47a21aed448abbaa693558467a0c18621
--- /dev/null
+++ b/docs/docs/Workspace/workspace-api.md
@@ -0,0 +1,218 @@
+---
+title: API
+sidebar_position: 2
+slug: /workspace-api
+---
+
+import ReactPlayer from "react-player";
+
+The **API** section presents code templates for integrating your flow into external applications.
+
+
+
+
+
+### cURL {#4eb287a8424349c4b0b436a6703de5f3}
+
+
+The **cURL** tab displays sample code for posting a query to your flow. Modify the `input_value` to change your input message. Copy the code and run it to post a query to your flow and get the result.
+
+
+### Python API {#fb7db14e6330418389562ef647aa2354}
+
+
+The **Python API** tab displays code to interact with your flow using the Python HTTP requests library.
+
+
+### Python Code {#7af87438549b4972907ac310a4193067}
+
+
+The **Python Code** tab displays code to interact with your flow's `.json` file using the Langflow runtime.
+
+
+### Tweaks {#5680600063724590ac2302b4ddeea867}
+
+
+The **Tweaks** tab displays the available parameters for your flow. Modifying the parameters changes the code parameters across all windows. For example, changing the **Chat Input** component's `input_value` will change that value across all API calls.
+
+
+## Send image files to your flow with the API
+
+Send image files to the Langflow API for AI analysis.
+
+The default file limit is 100 MB. To configure this value, change the `LANGFLOW_MAX_FILE_SIZE_UPLOAD` environment variable.
+For more information, see [Supported environment variables](/environment-variables#supported-variables).
+
+1. To send an image to your flow with the API, POST the image file to the `v1/files/upload/` endpoint of your flow.
+
+```curl
+curl -X POST "http://127.0.0.1:7860/api/v1/files/upload/a430cc57-06bb-4c11-be39-d3d4de68d2c4" \
+ -H "Content-Type: multipart/form-data" \
+ -F "file=@image-file.png"
+```
+
+The API returns the image file path in the format `"file_path":"<flow_id>/<timestamp>_<file_name>"`.
+
+```json
+{"flowId":"a430cc57-06bb-4c11-be39-d3d4de68d2c4","file_path":"a430cc57-06bb-4c11-be39-d3d4de68d2c4/2024-11-27_14-47-50_image-file.png"}
+```
+
+2. Post the image file to the **Chat Input** component of a **Basic prompting** flow.
+Pass the file path value as an input in the **Tweaks** section of the curl call to Langflow.
+
+```curl
+curl -X POST \
+ "http://127.0.0.1:7860/api/v1/run/a430cc57-06bb-4c11-be39-d3d4de68d2c4?stream=false" \
+ -H 'Content-Type: application/json'\
+ -d '{
+ "output_type": "chat",
+ "input_type": "chat",
+ "tweaks": {
+ "ChatInput-b67sL": {
+ "files": "a430cc57-06bb-4c11-be39-d3d4de68d2c4/2024-11-27_14-47-50_image-file.png",
+ "input_value": "what do you see?"
+ }
+}}'
+```
+
+Your chatbot describes the image file you sent.
+
+```plain
+"text": "This flowchart appears to represent a complex system for processing financial inquiries using various AI agents and tools. Here's a breakdown of its components and how they might work together..."
+```
+
+## Chat Widget {#48f121a6cb3243979a341753da0c2700}
+
+
+---
+
+
+The **Chat Widget HTML** tab displays code that can be inserted in the `<body>` of your HTML to interact with your flow.
+
+
+The **Langflow Chat Widget** is a powerful web component that enables communication with a Langflow project. The widget provides an embeddable chat interface, allowing you to integrate Langflow into web applications effortlessly.
+
+
+You can get the HTML code embedded with the chat by clicking the Code button at the Sidebar after building a flow.
+
+
+Clicking the Chat Widget HTML tab, you'll get the code to be inserted. Read below to learn how to use it with HTML, React and Angular.
+
+
+### Embed your flow into HTML {#6e84db2f2a0d451db6fa03c57e9bf9a4}
+
+
+The Chat Widget can be embedded into any HTML page, inside a `<body>` tag, as demonstrated in the video below.
+
+
+### Embed your flow with React {#fe5d3b1c42e74e4c84ebc9d1799b7665}
+
+
+To embed the Chat Widget using React, insert the Chat Widget `<script>` tag into the React *index.html* file, inside the `<head>` tag.
+
+
+Declare your Web Component and encapsulate it in a React component.
+
+
+```javascript
+declare global { namespace JSX { interface IntrinsicElements { "langflow-chat": any; } }}export default function ChatWidget({ className }) { return (
+ );
+
+ // Then render the specific content based on type
+ let contentData: ReactNode | null = null;
+ switch (content.type) {
+ case "text":
+ contentData = (
+