Commit 5c221d7
Parent(s): 620331c

feat: Add reasoning effort configuration for AI models

This commit introduces a new feature for configuring reasoning effort in AI models:
- Added support for reasoning effort in OpenAI and Anthropic models
- Created a new ReasoningEffortSelector component
- Updated chat session memory to store reasoning effort
- Implemented dynamic reasoning effort selection in the chat interface
- Added type definitions for reasoning effort
- Enhanced model configuration to support reasoning parameters
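
In short, a single stored reasoning-effort value is translated into provider-specific model options. The following is a minimal sketch of that mapping as the manager.ts change below implements it; the helper name `resolveReasoningOptions`, the return type, and the literal provider strings are illustrative assumptions, not code added by this commit.

// Sketch only: how one ChatCompletionReasoningEffort value becomes
// provider-specific options. Budgets (32000/16000/8000) and the "disabled"
// sentinel come from the manager.ts diff; everything else is illustrative.
import { ChatCompletionReasoningEffort } from "openai/resources/chat/completions";

type ProviderReasoningOptions =
  | { reasoningEffort?: ChatCompletionReasoningEffort }                 // OpenAI: value passed straight through
  | { thinking?: { type: "enabled"; budget_tokens: number } };          // Anthropic: mapped to a thinking budget

function resolveReasoningOptions(
  provider: "openai" | "anthropic",
  effort: ChatCompletionReasoningEffort | null | undefined,
  isReasoningModel: boolean,
): ProviderReasoningOptions {
  if (!isReasoningModel) return {};
  if (provider === "openai") {
    // OpenAI reasoning models accept the effort value directly ("low" | "medium" | "high").
    return { reasoningEffort: effort ?? undefined };
  }
  // Anthropic has no effort parameter; the commit maps effort to an extended-thinking token budget.
  if (!effort || String(effort) === "disabled") return { thinking: undefined };
  const budget = effort === "high" ? 32000 : effort === "medium" ? 16000 : 8000; // low
  return { thinking: { type: "enabled", budget_tokens: budget } };
}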
- bun.lock +4 -3
- package.json +2 -1
- src/hooks/use-chat.ts +1 -0
- src/lib/chat/manager.ts +29 -5
- src/lib/chat/memory.ts +8 -1
- src/lib/chat/types.ts +2 -1
- src/lib/config/types.ts +9 -0
- src/pages/chat/components/Input.tsx +15 -0
- src/pages/chat/components/Messages.tsx +0 -6
- src/pages/chat/components/ReasoningEffortSelector.tsx +88 -0
- src/pages/chat/page.tsx +44 -4
- src/pages/chat/types.ts +4 -0
bun.lock
CHANGED

@@ -7,11 +7,12 @@
     "@clerk/clerk-react": "^5.23.0",
     "@hookform/resolvers": "^4.1.1",
     "@huggingface/inference": "2",
-    "@langchain/anthropic": "^0.3.
+    "@langchain/anthropic": "^0.3.14",
     "@langchain/community": "^0.3.32",
     "@langchain/core": "^0.3.40",
     "@langchain/google-genai": "^0.1.8",
     "@langchain/ollama": "^0.2.0",
+    "@langchain/openai": "^0.4.4",
     "@radix-ui/react-accordion": "^1.2.3",
     "@radix-ui/react-alert-dialog": "^1.1.6",
     "@radix-ui/react-aspect-ratio": "^1.1.2",
@@ -107,7 +108,7 @@
 
     "@ampproject/remapping": ["@ampproject/[email protected]", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw=="],
 
-    "@anthropic-ai/sdk": ["@anthropic-ai/sdk@0.
+    "@anthropic-ai/sdk": ["@anthropic-ai/sdk@0.37.0", "", { "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", "node-fetch": "^2.6.7" } }, "sha512-tHjX2YbkUBwEgg0JZU3EFSSAQPoK4qQR/NFYa8Vtzd5UAyXzZksCw2In69Rml4R/TyHPBfRYaLK35XiOe33pjw=="],
 
     "@babel/code-frame": ["@babel/[email protected]", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.25.9", "js-tokens": "^4.0.0", "picocolors": "^1.0.0" } }, "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ=="],
 
@@ -267,7 +268,7 @@
 
     "@jridgewell/trace-mapping": ["@jridgewell/[email protected]", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" } }, "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ=="],
 
-    "@langchain/anthropic": ["@langchain/[email protected].
+    "@langchain/anthropic": ["@langchain/[email protected].14", "", { "dependencies": { "@anthropic-ai/sdk": "^0.37.0", "fast-xml-parser": "^4.4.1", "zod": "^3.22.4", "zod-to-json-schema": "^3.22.4" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" } }, "sha512-zfix+qo/coIkgjTYpadp71IAWGXriIfImYLwMr1HnFsit4/RN9DU+aEOdm0nTwycbaneUpwWs5yfje8IKWHfsA=="],
 
     "@langchain/community": ["@langchain/[email protected]", "", { "dependencies": { "@langchain/openai": ">=0.2.0 <0.5.0", "binary-extensions": "^2.2.0", "expr-eval": "^2.0.2", "flat": "^5.0.2", "js-yaml": "^4.1.0", "langchain": ">=0.2.3 <0.3.0 || >=0.3.4 <0.4.0", "langsmith": ">=0.2.8 <0.4.0", "uuid": "^10.0.0", "zod": "^3.22.3", "zod-to-json-schema": "^3.22.5" }, "peerDependencies": { "@arcjet/redact": "^v1.0.0-alpha.23", "@aws-crypto/sha256-js": "^5.0.0", "@aws-sdk/client-bedrock-agent-runtime": "^3.749.0", "@aws-sdk/client-bedrock-runtime": "^3.749.0", "@aws-sdk/client-dynamodb": "^3.749.0", "@aws-sdk/client-kendra": "^3.749.0", "@aws-sdk/client-lambda": "^3.749.0", "@aws-sdk/client-s3": "^3.749.0", "@aws-sdk/client-sagemaker-runtime": "^3.749.0", "@aws-sdk/client-sfn": "^3.749.0", "@aws-sdk/credential-provider-node": "^3.388.0", "@azure/search-documents": "^12.0.0", "@azure/storage-blob": "^12.15.0", "@browserbasehq/sdk": "*", "@browserbasehq/stagehand": "^1.0.0", "@clickhouse/client": "^0.2.5", "@cloudflare/ai": "*", "@datastax/astra-db-ts": "^1.0.0", "@elastic/elasticsearch": "^8.4.0", "@getmetal/metal-sdk": "*", "@getzep/zep-cloud": "^1.0.6", "@getzep/zep-js": "^0.9.0", "@gomomento/sdk": "^1.51.1", "@gomomento/sdk-core": "^1.51.1", "@google-ai/generativelanguage": "*", "@google-cloud/storage": "^6.10.1 || ^7.7.0", "@gradientai/nodejs-sdk": "^1.2.0", "@huggingface/inference": "^2.6.4", "@huggingface/transformers": "^3.2.3", "@ibm-cloud/watsonx-ai": "*", "@lancedb/lancedb": "^0.12.0", "@langchain/core": ">=0.2.21 <0.4.0", "@layerup/layerup-security": "^1.5.12", "@libsql/client": "^0.14.0", "@mendable/firecrawl-js": "^1.4.3", "@mlc-ai/web-llm": "*", "@mozilla/readability": "*", "@neondatabase/serverless": "*", "@notionhq/client": "^2.2.10", "@opensearch-project/opensearch": "*", "@pinecone-database/pinecone": "*", "@planetscale/database": "^1.8.0", "@premai/prem-sdk": "^0.3.25", "@qdrant/js-client-rest": "^1.8.2", "@raycast/api": "^1.55.2", "@rockset/client": "^0.9.1", "@smithy/eventstream-codec": "^2.0.5", "@smithy/protocol-http": "^3.0.6", "@smithy/signature-v4": "^2.0.10", "@smithy/util-utf8": "^2.0.0", "@spider-cloud/spider-client": "^0.0.21", "@supabase/supabase-js": "^2.45.0", "@tensorflow-models/universal-sentence-encoder": "*", "@tensorflow/tfjs-converter": "*", "@tensorflow/tfjs-core": "*", "@upstash/ratelimit": "^1.1.3 || ^2.0.3", "@upstash/redis": "^1.20.6", "@upstash/vector": "^1.1.1", "@vercel/kv": "*", "@vercel/postgres": "*", "@writerai/writer-sdk": "^0.40.2", "@xata.io/client": "^0.28.0", "@zilliz/milvus2-sdk-node": ">=2.3.5", "apify-client": "^2.7.1", "assemblyai": "^4.6.0", "better-sqlite3": ">=9.4.0 <12.0.0", "cassandra-driver": "^4.7.2", "cborg": "^4.1.1", "cheerio": "^1.0.0-rc.12", "chromadb": "*", "closevector-common": "0.1.3", "closevector-node": "0.1.6", "closevector-web": "0.1.6", "cohere-ai": "*", "convex": "^1.3.1", "crypto-js": "^4.2.0", "d3-dsv": "^2.0.0", "discord.js": "^14.14.1", "dria": "^0.0.3", "duck-duck-scrape": "^2.2.5", "epub2": "^3.0.1", "fast-xml-parser": "*", "firebase-admin": "^11.9.0 || ^12.0.0", "google-auth-library": "*", "googleapis": "*", "hnswlib-node": "^3.0.0", "html-to-text": "^9.0.5", "ibm-cloud-sdk-core": "*", "ignore": "^5.2.0", "interface-datastore": "^8.2.11", "ioredis": "^5.3.2", "it-all": "^3.0.4", "jsdom": "*", "jsonwebtoken": "^9.0.2", "llmonitor": "^0.5.9", "lodash": "^4.17.21", "lunary": "^0.7.10", "mammoth": "^1.6.0", "mongodb": ">=5.2.0", "mysql2": "^3.9.8", "neo4j-driver": "*", "notion-to-md": "^3.1.0", "officeparser": "^4.0.4", "openai": "*", "pdf-parse": "1.1.1", "pg": "^8.11.0", "pg-copy-streams": "^6.0.5", "pickleparser": "^0.2.1", "playwright": "^1.32.1", "portkey-ai": "^0.1.11", "puppeteer": "*", "pyodide": ">=0.24.1 <0.27.0", "redis": "*", "replicate": "*", "sonix-speech-recognition": "^2.1.1", "srt-parser-2": "^1.2.3", "typeorm": "^0.3.20", "typesense": "^1.5.3", "usearch": "^1.1.1", "voy-search": "0.6.2", "weaviate-ts-client": "*", "web-auth-library": "^1.0.3", "word-extractor": "*", "ws": "^8.14.2", "youtubei.js": "*" }, "optionalPeers": ["@arcjet/redact", "@aws-crypto/sha256-js", "@aws-sdk/client-bedrock-agent-runtime", "@aws-sdk/client-bedrock-runtime", "@aws-sdk/client-dynamodb", "@aws-sdk/client-kendra", "@aws-sdk/client-lambda", "@aws-sdk/client-s3", "@aws-sdk/client-sagemaker-runtime", "@aws-sdk/client-sfn", "@aws-sdk/credential-provider-node", "@azure/search-documents", "@azure/storage-blob", "@browserbasehq/sdk", "@clickhouse/client", "@cloudflare/ai", "@datastax/astra-db-ts", "@elastic/elasticsearch", "@getmetal/metal-sdk", "@getzep/zep-cloud", "@getzep/zep-js", "@gomomento/sdk", "@gomomento/sdk-core", "@google-ai/generativelanguage", "@google-cloud/storage", "@gradientai/nodejs-sdk", "@huggingface/inference", "@huggingface/transformers", "@lancedb/lancedb", "@layerup/layerup-security", "@libsql/client", "@mendable/firecrawl-js", "@mlc-ai/web-llm", "@mozilla/readability", "@neondatabase/serverless", "@notionhq/client", "@opensearch-project/opensearch", "@pinecone-database/pinecone", "@planetscale/database", "@premai/prem-sdk", "@qdrant/js-client-rest", "@raycast/api", "@rockset/client", "@smithy/eventstream-codec", "@smithy/protocol-http", "@smithy/signature-v4", "@smithy/util-utf8", "@spider-cloud/spider-client", "@supabase/supabase-js", "@tensorflow-models/universal-sentence-encoder", "@tensorflow/tfjs-converter", "@tensorflow/tfjs-core", "@upstash/ratelimit", "@upstash/redis", "@upstash/vector", "@vercel/kv", "@vercel/postgres", "@writerai/writer-sdk", "@xata.io/client", "@zilliz/milvus2-sdk-node", "apify-client", "assemblyai", "better-sqlite3", "cassandra-driver", "cborg", "cheerio", "chromadb", "closevector-common", "closevector-node", "closevector-web", "cohere-ai", "convex", "crypto-js", "d3-dsv", "discord.js", "dria", "duck-duck-scrape", "epub2", "fast-xml-parser", "firebase-admin", "google-auth-library", "googleapis", "hnswlib-node", "html-to-text", "ignore", "interface-datastore", "ioredis", "it-all", "jsdom", "jsonwebtoken", "llmonitor", "lodash", "lunary", "mammoth", "mongodb", "mysql2", "neo4j-driver", "notion-to-md", "officeparser", "pdf-parse", "pg", "pg-copy-streams", "pickleparser", "playwright", "portkey-ai", "puppeteer", "pyodide", "redis", "replicate", "sonix-speech-recognition", "srt-parser-2", "typeorm", "typesense", "usearch", "voy-search", "weaviate-ts-client", "web-auth-library", "word-extractor", "ws", "youtubei.js"] }, "sha512-5AvGyjIFheXdBUSiIWNwc40rI8fXYiHV0UA3ncbBVu5fTwWur+mAQvl2ZsgyxBBKm4VuoCcuh6U6I7b1kiOYBQ=="],
 
package.json
CHANGED

@@ -15,11 +15,12 @@
     "@clerk/clerk-react": "^5.23.0",
     "@hookform/resolvers": "^4.1.1",
     "@huggingface/inference": "2",
-    "@langchain/anthropic": "^0.3.
+    "@langchain/anthropic": "^0.3.14",
     "@langchain/community": "^0.3.32",
     "@langchain/core": "^0.3.40",
     "@langchain/google-genai": "^0.1.8",
     "@langchain/ollama": "^0.2.0",
+    "@langchain/openai": "^0.4.4",
     "@radix-ui/react-accordion": "^1.2.3",
     "@radix-ui/react-alert-dialog": "^1.1.6",
     "@radix-ui/react-aspect-ratio": "^1.1.2",
src/hooks/use-chat.ts
CHANGED

@@ -70,6 +70,7 @@ export const generateMessage = async (
   setStreamingHumanMessage(new HumanMessage(chatInput));
   setStreamingAIMessageChunks([]);
 
+  // Note: The ChatManager.chat method retrieves the reasoningEffort from the chat session in the database
   const messageIterator = chatManager.chat(chatId, chatInput, chatAttachments);
 
   for await (const event of messageIterator) {
src/lib/chat/manager.ts
CHANGED

@@ -18,6 +18,7 @@ import { Document } from "@langchain/core/documents";
 import { HumanMessage, ToolMessage } from "@langchain/core/messages";
 import { IChatSession } from "./types";
 import { ChatHFInference } from "./chat-hf";
+import { ChatCompletionReasoningEffort } from "openai/resources/chat/completions";
 
 // Define an error interface for better type safety
 interface ErrorWithMessage {
@@ -79,7 +80,7 @@ export class ChatManager {
     this.controller = new AbortController();
   }
 
-  private async getChatModel(modelName: string): Promise<BaseChatModel> {
+  private async getChatModel(modelName: string, reasoningEffort?: ChatCompletionReasoningEffort): Promise<BaseChatModel> {
     // Ensure config is loaded
     if (!this.config) {
       await this.initializeConfig();
@@ -103,16 +104,29 @@
         return new ChatOpenAI({
           modelName: this.config.openai_model && this.config.openai_model.trim() !== '' ? this.config.openai_model : model.model,
           apiKey: this.config.openai_api_key,
+          reasoningEffort: model.isReasoning ? reasoningEffort : undefined,
+          maxCompletionTokens: -1,
           configuration: {
             baseURL: this.config.openai_base_url && this.config.openai_base_url.trim() !== '' ? this.config.openai_base_url : undefined,
           }
         });
 
-      case PROVIDERS.anthropic:
+      case PROVIDERS.anthropic: {
+        const isThinkingDisabled = !reasoningEffort || String(reasoningEffort) === "disabled";
+
         return new ChatAnthropic({
           modelName: model.model,
           apiKey: this.config.anthropic_api_key,
+          maxTokens: 64000,
+          thinking: model.isReasoning && !isThinkingDisabled ? {
+            type: "enabled",
+            budget_tokens:
+              reasoningEffort === "high" ? 32000 :
+              reasoningEffort === "medium" ? 16000 :
+              8000 // low
+          } : undefined // disabled
         });
+      }
 
       case PROVIDERS.gemini:
         return new ChatGoogleGenerativeAI({
@@ -430,8 +444,15 @@
 
     const chatSession = await memory.db.table("sessions").get(sessionId);
 
-    this.model = await this.getChatModel(
-
+    this.model = await this.getChatModel(
+      chatSession?.model || this.config.default_chat_model,
+      chatSession?.reasoningEffort as ChatCompletionReasoningEffort
+    );
+    try {
+      this.embeddings = await this.getEmbeddingModel(chatSession?.embedding_model || this.config.default_embedding_model || null);
+    } catch (error) {
+      console.log(error)
+    }
 
     const agent = await this.getAgent(chatSession?.enabled_tools || []);
 
@@ -455,11 +476,13 @@
     for await (const event of eventStream) {
       if (event.event === "on_chat_model_stream") {
         const chunk = event.data?.chunk;
+        console.log(chunk)
         if (chunk) {
           currentResponse += chunk;
           yield { type: "stream", content: chunk };
         }
       } else if (event.event === "on_chat_model_end") {
+        console.log(event)
         yield { type: "end", content: currentResponse, usageMetadata: event.data?.output?.usage_metadata };
       } else if (event.event === "on_tool_start") {
         yield { type: "tool_start", name: event.name, input: event.data?.input };
@@ -485,8 +508,9 @@
   async chatChain(
     input: string | HumanMessage,
     systemPrompt?: string,
+    reasoningEffort?: ChatCompletionReasoningEffort,
   ) {
-    const model = await this.getChatModel(this.config.default_chat_model);
+    const model = await this.getChatModel(this.config.default_chat_model, reasoningEffort);
     const humanMessage = typeof input === "string" ? new HumanMessage(input) : input;
     return await model.invoke([
       { type: "system", content: systemPrompt || "You are a helpful assistant" },
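
For orientation, a minimal caller-side sketch of the extended chatChain signature. Only the optional third reasoningEffort parameter comes from this commit; the nameChat wrapper, the import paths, and the prompt text are illustrative assumptions modeled on how page.tsx calls it.

// Sketch only: forwarding a session's stored effort into the new optional
// third parameter of ChatManager.chatChain. `chatManager` and `session` are
// assumed to come from the surrounding app code.
import { ChatCompletionReasoningEffort } from "openai/resources/chat/completions";
import { ChatManager } from "@/lib/chat/manager";
import { IChatSession } from "@/lib/chat/types";

async function nameChat(chatManager: ChatManager, session: IChatSession, userInput: string) {
  const result = await chatManager.chatChain(
    `Generate a very concise name for this chat: "${userInput}"`,   // illustrative prompt
    "You are a helpful assistant that generates concise chat names.",
    session.reasoningEffort as ChatCompletionReasoningEffort,       // forwarded to getChatModel()
  );
  return String(result.content);
}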
src/lib/chat/memory.ts
CHANGED

@@ -10,6 +10,8 @@ import {
   HumanMessage,
 } from "@langchain/core/messages";
 import { IConfig, CHAT_MODELS, EMBEDDING_MODELS } from "@/lib/config/types";
+import { ChatCompletionReasoningEffort } from "openai/resources/chat/completions";
+import { PROVIDERS } from "@/lib/config/types";
 
 // Create a singleton instance of ChatHistoryDB
 export class ChatHistoryDB extends Dexie {
@@ -19,7 +21,7 @@ export class ChatHistoryDB extends Dexie {
   private constructor() {
     super("chat_history");
     this.version(1).stores({
-      sessions: "id, title, createdAt, updatedAt, model, embedding_model, enabled_tools, messages",
+      sessions: "id, title, createdAt, updatedAt, model, embedding_model, enabled_tools, messages, reasoningEffort",
     });
   }
 
@@ -70,6 +72,11 @@ export class DexieChatMemory extends BaseChatMessageHistory {
       embedding_model: embeddingModel?.model || '',
       enabled_tools: [],
       messages: [],
+      reasoningEffort: chatModel.isReasoning
+        ? (chatModel.provider === PROVIDERS.anthropic
+          ? "disabled" as ChatCompletionReasoningEffort
+          : "low" as ChatCompletionReasoningEffort)
+        : null as ChatCompletionReasoningEffort,
     };
 
     await this.db.sessions.put(this.chatHistory);
src/lib/chat/types.ts
CHANGED

@@ -1,4 +1,5 @@
 import { StoredMessage } from "@langchain/core/messages";
+import { ChatCompletionReasoningEffort } from "openai/resources/chat/completions";
 
 export interface IChatSession {
   id: string;
@@ -9,7 +10,7 @@ export interface IChatSession {
   model: string;
   embedding_model: string;
   enabled_tools: string[];
-
+  reasoningEffort?: ChatCompletionReasoningEffort | null | undefined;
 }
 
 export type ToolType =
src/lib/config/types.ts
CHANGED

@@ -118,6 +118,15 @@ export const BASE_CHAT_MODELS: ChatModel[] = [
     modalities: [MODALITIES.image, MODALITIES.pdf],
     isReasoning: false,
   },
+  {
+    name: 'Claude 3.7 Sonnet',
+    provider: PROVIDERS.anthropic,
+    model: 'claude-3-7-sonnet-20250219',
+    description: 'Anthropic Claude 3.7 Sonnet',
+    modalities: [MODALITIES.image, MODALITIES.pdf],
+    isReasoning: true,
+    reasoningArgs: ['disabled', 'low', 'medium', 'high'],
+  },
   {
     name: 'Gemini 2.0 Flash',
     provider: PROVIDERS.gemini,
src/pages/chat/components/Input.tsx
CHANGED

@@ -6,6 +6,9 @@ import { InputProps } from "../types";
 import { ModelSelector } from "./ModelSelector";
 import { AttachmentDropdown } from "./AttachmentDropdown";
 import { DocumentBadgesScrollArea } from "./DocumentBadgesScrollArea";
+import { ReasoningEffortSelector } from "./ReasoningEffortSelector";
+import { PROVIDERS } from "@/lib/config/types";
+import { ChatCompletionReasoningEffort } from "openai/resources/chat/completions";
 
 export const Input = React.memo(({
   input,
@@ -26,6 +29,10 @@ export const Input = React.memo(({
   selectedModelName,
   isGenerating,
   stopGenerating,
+  selectedModelProvider,
+  isModelReasoning,
+  reasoningEffort,
+  onReasoningEffortChange,
 }: InputProps) => {
   return (
     <div className="flex flex-col w-1/2 mx-auto bg-muted rounded-md p-1">
@@ -59,6 +66,14 @@
         enabledChatModels={enabledChatModels}
         onModelChange={onModelChange}
       />
+      {isModelReasoning && selectedModelProvider && onReasoningEffortChange && (
+        <ReasoningEffortSelector
+          selectedReasoningEffort={reasoningEffort as ChatCompletionReasoningEffort}
+          provider={selectedModelProvider as PROVIDERS}
+          isReasoning={isModelReasoning}
+          onReasoningEffortChange={onReasoningEffortChange as (effort: ChatCompletionReasoningEffort) => void}
+        />
+      )}
       <AttachmentDropdown
         isUrlInputOpen={isUrlInputOpen}
         setIsUrlInputOpen={setIsUrlInputOpen}
src/pages/chat/components/Messages.tsx
CHANGED

@@ -28,12 +28,6 @@ export const Messages = React.memo(({
   const handleScroll = React.useCallback((event: Event) => {
     const viewport = event.target as HTMLDivElement;
     const isNotAtBottom = viewport.scrollHeight - viewport.scrollTop - viewport.clientHeight > 10;
-    console.log('Scroll position:', {
-      scrollHeight: viewport.scrollHeight,
-      scrollTop: viewport.scrollTop,
-      clientHeight: viewport.clientHeight,
-      isNotAtBottom
-    });
     setShowScrollToBottom(isNotAtBottom);
   }, []);
 
src/pages/chat/components/ReasoningEffortSelector.tsx
ADDED

@@ -0,0 +1,88 @@
+import React from "react";
+import { Button } from "@/components/ui/button";
+import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu";
+import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip";
+import { Check, Brain } from "lucide-react";
+import { ChatCompletionReasoningEffort } from "openai/resources/chat/completions";
+import { PROVIDERS } from "@/lib/config/types";
+
+interface ReasoningEffortSelectorProps {
+  selectedReasoningEffort: ChatCompletionReasoningEffort | null | undefined;
+  provider: PROVIDERS;
+  isReasoning: boolean;
+  onReasoningEffortChange: (effort: ChatCompletionReasoningEffort) => void;
+}
+
+interface ReasoningOption {
+  value: ChatCompletionReasoningEffort;
+  label: string;
+  description: string;
+}
+
+// Define the reasoning effort options
+const REASONING_EFFORT_OPTIONS: Record<string, ReasoningOption[]> = {
+  [PROVIDERS.openai]: [
+    { value: "low" as ChatCompletionReasoningEffort, label: "Low", description: "Minimal reasoning, faster responses" },
+    { value: "medium" as ChatCompletionReasoningEffort, label: "Medium", description: "Balanced reasoning and speed" },
+    { value: "high" as ChatCompletionReasoningEffort, label: "High", description: "Thorough reasoning, slower responses" }
+  ],
+  [PROVIDERS.anthropic]: [
+    { value: "disabled" as ChatCompletionReasoningEffort, label: "Disabled", description: "No explicit reasoning" },
+    { value: "low" as ChatCompletionReasoningEffort, label: "Low", description: "Minimal reasoning (~8K tokens)" },
+    { value: "medium" as ChatCompletionReasoningEffort, label: "Medium", description: "Balanced reasoning (~16K tokens)" },
+    { value: "high" as ChatCompletionReasoningEffort, label: "High", description: "Thorough reasoning (~32K tokens)" }
+  ]
+};
+
+export const ReasoningEffortSelector = React.memo(({
+  selectedReasoningEffort,
+  provider,
+  isReasoning,
+  onReasoningEffortChange
+}: ReasoningEffortSelectorProps) => {
+  if (!isReasoning) return null;
+
+  const options = REASONING_EFFORT_OPTIONS[provider] || REASONING_EFFORT_OPTIONS[PROVIDERS.openai];
+  const selectedOption = options.find((opt: ReasoningOption) => opt.value === selectedReasoningEffort) || options[0];
+
+  return (
+    <DropdownMenu>
+      <DropdownMenuTrigger asChild>
+        <Button
+          variant="ghost"
+          className="h-8 p-1 justify-start font-normal flex items-center gap-1"
+          title="Reasoning Effort"
+        >
+          <Brain className="h-4 w-4" />
+          <span className="truncate">{selectedOption.label}</span>
+        </Button>
+      </DropdownMenuTrigger>
+      <DropdownMenuContent className="w-[250px]">
+        {options.map((option: ReasoningOption) => (
+          <TooltipProvider key={option.value}>
+            <Tooltip>
+              <TooltipTrigger asChild>
+                <DropdownMenuItem
+                  className="p-3"
+                  onSelect={() => onReasoningEffortChange(option.value)}
+                >
+                  <div className="flex items-center gap-2 w-full">
+                    {option.value === selectedReasoningEffort && (
+                      <Check className="h-4 w-4 shrink-0" />
+                    )}
+                    <span className="flex-grow">{option.label}</span>
+                  </div>
+                </DropdownMenuItem>
+              </TooltipTrigger>
+              <TooltipContent side="right" align="start" className="max-w-[300px]">
+                <p>{option.description}</p>
+              </TooltipContent>
+            </Tooltip>
+          </TooltipProvider>
+        ))}
+      </DropdownMenuContent>
+    </DropdownMenu>
+  );
+});
+
+ReasoningEffortSelector.displayName = "ReasoningEffortSelector";
src/pages/chat/page.tsx
CHANGED

@@ -16,6 +16,7 @@ import { DocumentManager } from "@/lib/document/manager";
 import { useLoading } from "@/contexts/loading-context";
 import { Alert, AlertTitle, AlertDescription } from "@/components/ui/alert";
 import { AlertCircle } from "lucide-react";
+import { ChatCompletionReasoningEffort } from "openai/resources/chat/completions";
 
 export function ChatPage() {
   const { id } = useParams();
@@ -60,6 +61,11 @@
     return model?.provider;
   }, [selectedModel]);
 
+  const isModelReasoning = React.useMemo(() => {
+    const model = CHAT_MODELS.find(model => model.model === selectedModel);
+    return model?.isReasoning || false;
+  }, [selectedModel]);
+
   const handleModelChange = React.useCallback(async (model: string) => {
     if (!config) return;
 
@@ -71,9 +77,23 @@
     } else {
       const session = await chatHistoryDB.sessions.get(id);
       if (session) {
+        // Get the new model details
+        const newModel = CHAT_MODELS.find(m => m.model === model);
+
+        // Set default reasoning effort if the model supports it
+        let reasoningEffort = session.reasoningEffort;
+        if (newModel?.isReasoning && !reasoningEffort) {
+          reasoningEffort = newModel.provider === PROVIDERS.anthropic ?
+            "disabled" as ChatCompletionReasoningEffort :
+            "low" as ChatCompletionReasoningEffort;
+        } else if (!newModel?.isReasoning) {
+          reasoningEffort = null;
+        }
+
         await chatHistoryDB.sessions.update(id, {
           ...session,
           model,
+          reasoningEffort,
           updatedAt: Date.now()
         });
       }
@@ -82,6 +102,21 @@
     setError(null); // Clear any previous errors when changing models
   }, [config, id, setSelectedModel, configManager, chatHistoryDB.sessions]);
 
+  const handleReasoningEffortChange = React.useCallback(async (effort: string) => {
+    if (!id || id === "new" || !chatSession) return;
+
+    try {
+      await chatHistoryDB.sessions.update(id, {
+        ...chatSession,
+        reasoningEffort: effort as ChatCompletionReasoningEffort,
+        updatedAt: Date.now()
+      });
+    } catch (error) {
+      console.error("Error updating reasoning effort:", error);
+      toast.error("Failed to update reasoning effort");
+    }
+  }, [id, chatSession, chatHistoryDB.sessions]);
+
   const handleSendMessage = React.useCallback(async () => {
     // Clear any previous errors
     setError(null);
@@ -127,7 +162,8 @@
       if (isNewChat && chatId) {
        const chatName = await chatManager.chatChain(
          `Based on this user message, generate a very concise (max 40 chars) but descriptive name for this chat: "${input}"`,
-          "You are a helpful assistant that generates concise chat names. Respond only with the name, no quotes or explanation."
+          "You are a helpful assistant that generates concise chat names. Respond only with the name, no quotes or explanation.",
+          chatSession?.reasoningEffort as ChatCompletionReasoningEffort
        );
        await chatHistoryDB.sessions.update(chatId, {
          name: String(chatName.content)
@@ -141,7 +177,7 @@
        setError("An unknown error occurred while sending your message");
      }
    }
-  }, [id, input, attachments, isGenerating, chatManager, navigate, chatHistoryDB.sessions, selectedModelProvider, config]);
+  }, [id, input, attachments, isGenerating, chatManager, navigate, chatHistoryDB.sessions, selectedModelProvider, config, chatSession]);
 
   const handleAttachmentFileUpload = React.useCallback(async (event: React.ChangeEvent<HTMLInputElement>) => {
     const files = event.target.files;
@@ -213,7 +249,7 @@
      }
    };
    // Remove messages after the edited message
-    const newMessages = updatedMessages.slice(0, editingMessageIndex
+    const newMessages = updatedMessages.slice(0, editingMessageIndex);
 
    await chatHistoryDB.sessions.update(id, {
      ...chatSession,
@@ -277,7 +313,7 @@
    const content = message.data.content;
 
    // Remove messages after the current message
-    const newMessages = messages.slice(0, index
+    const newMessages = messages.slice(0, index);
 
    await chatHistoryDB.sessions.update(id, {
      ...chatSession,
@@ -354,6 +390,10 @@
        selectedModelName={selectedModelName}
        isGenerating={isGenerating}
        stopGenerating={stopGenerating}
+        selectedModelProvider={selectedModelProvider}
+        isModelReasoning={isModelReasoning}
+        reasoningEffort={chatSession?.reasoningEffort}
+        onReasoningEffortChange={handleReasoningEffortChange}
      />
      <FilePreviewDialog
        document={previewDocument}
src/pages/chat/types.ts
CHANGED

@@ -30,8 +30,12 @@ export interface InputProps {
   urlInput: string;
   selectedModelName: string;
   isGenerating: boolean;
+  selectedModelProvider?: string;
+  isModelReasoning?: boolean;
+  reasoningEffort?: string | null;
   onInputChange: (value: string) => void;
   onModelChange: (model: string) => void;
+  onReasoningEffortChange?: (effort: string) => void;
   onSendMessage: () => void;
   setPreviewDocument: (doc: IDocument | null) => void;
   setIsUrlInputOpen: (open: boolean) => void;