diff --git a/CHANGELOG.md b/CHANGELOG.md index 295e72e2415d38162286933ed83fada5a827c065..7aad55b8ff78a04c8da713a68c579ec32ee8019e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,40 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.3.35] - 2024-10-26 + +### Added + +- **📁 Robust File Handling**: Enhanced file input handling for chat. If the content extraction fails or is empty, users will now receive a clear warning, preventing silent failures and ensuring you always know what's happening with your uploads. +- **🌍 New Language Support**: Introduced Hungarian translations and updated French translations, expanding the platform's language accessibility for a more global user base. + +### Fixed + +- **📚 Knowledge Base Loading Issue**: Resolved a critical bug where the Knowledge Base was not loading, ensuring smooth access to your stored documents and improving information retrieval in RAG-enhanced workflows. +- **🛠️ Tool Parameters Issue**: Fixed an error where tools were not functioning correctly when required parameters were missing, ensuring reliable tool performance and more efficient task completions. +- **🔗 Merged Response Loss in Multi-Model Chats**: Addressed an issue where responses in multi-model chat workflows were being deleted after follow-up queries, improving consistency and ensuring smoother interactions across models. + +## [0.3.34] - 2024-10-26 + +### Added + +- **🔧 Feedback Export Enhancements**: Feedback history data can now be exported to JSON, allowing for seamless integration in RLHF processing and further analysis. +- **🗂️ Embedding Model Lazy Loading**: Search functionality for leaderboard reranking is now more efficient, as embedding models are lazy-loaded only when needed, optimizing performance. +- **🎨 Rich Text Input Toggle**: Users can now switch back to legacy textarea input for chat if they prefer simpler text input, though rich text is still the default until deprecation. +- **🛠️ Improved Tool Calling Mechanism**: Enhanced method for parsing and calling tools, improving the reliability and robustness of tool function calls. +- **🌐 Globalization Enhancements**: Updates to internationalization (i18n) support, further refining multi-language compatibility and accuracy. + +### Fixed + +- **🖥️ Folder Rename Fix for Firefox**: Addressed a persistent issue where users could not rename folders by pressing Enter in Firefox, now ensuring seamless folder management across browsers. +- **🔠 Tiktoken Model Text Splitter Issue**: Resolved an issue where the tiktoken text splitter wasn’t working in Docker installations, restoring full functionality for token-based text splitting. +- **💼 S3 File Upload Issue**: Fixed a problem affecting S3 file uploads, ensuring smooth operations for those who store files on cloud storage. +- **🔒 Strict-Transport-Security Crash**: Resolved a crash when setting the Strict-Transport-Security (HSTS) header, improving stability and security. +- **🚫 OIDC Boolean Access Fix**: Addressed an issue with boolean values not being accessed correctly during OIDC logins, ensuring login reliability. +- **⚙️ Rich Text Paste Behavior**: Refined paste behavior in rich text input to make it smoother and more intuitive when pasting various content types.
+- **🔨 Model Exclusion for Arena Fix**: Corrected the filter function that was not properly excluding models from the arena, improving model management. +- **🏷️ "Tags Generation Prompt" Fix**: Addressed an issue preventing custom "tags generation prompts" from registering properly, ensuring custom prompts work seamlessly. + ## [0.3.33] - 2024-10-24 ### Added diff --git a/Dockerfile b/Dockerfile index c360ff3298cd7187a74bb9d2c15095b4be11e2f0..91238b4d4ea6a26d1472ba6d8181cdabbcc7e235 100644 --- a/Dockerfile +++ b/Dockerfile @@ -77,7 +77,7 @@ ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \ SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models" ## Tiktoken model settings ## -ENV TIKTOKEN_ENCODING_NAME="$USE_TIKTOKEN_ENCODING_NAME" \ +ENV TIKTOKEN_ENCODING_NAME="cl100k_base" \ TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken" ## Hugging Face download cache ## diff --git a/backend/open_webui/apps/audio/main.py b/backend/open_webui/apps/audio/main.py index 636e9bd2339c202c32fcc6ab376ce7520312f090..a1cc75b43b2490421bb6d342e0c1cc75d23cc00f 100644 --- a/backend/open_webui/apps/audio/main.py +++ b/backend/open_webui/apps/audio/main.py @@ -522,7 +522,8 @@ def transcription( else: data = transcribe(file_path) - return data + file_path = file_path.split("/")[-1] + return {**data, "filename": file_path} except Exception as e: log.exception(e) raise HTTPException( diff --git a/backend/open_webui/apps/ollama/main.py b/backend/open_webui/apps/ollama/main.py index a161db52e3b11ecfed971839b7b7e7b77605e6af..f0f8877f49aba97f2f8fceaa155b59e5921e9118 100644 --- a/backend/open_webui/apps/ollama/main.py +++ b/backend/open_webui/apps/ollama/main.py @@ -692,7 +692,7 @@ class GenerateCompletionForm(BaseModel): options: Optional[dict] = None system: Optional[str] = None template: Optional[str] = None - context: Optional[str] = None + context: Optional[list[int]] = None stream: Optional[bool] = True raw: Optional[bool] = None keep_alive: Optional[Union[int, str]] = None @@ -739,7 +739,7 @@ class GenerateChatCompletionForm(BaseModel): format: Optional[str] = None options: Optional[dict] = None template: Optional[str] = None - stream: Optional[bool] = None + stream: Optional[bool] = True keep_alive: Optional[Union[int, str]] = None diff --git a/backend/open_webui/apps/retrieval/main.py b/backend/open_webui/apps/retrieval/main.py index 2040089ddbee5e119b445615c39143ad2a4e33b4..49772d5dc9ab4405eecc9200ce524ed22a251b7b 100644 --- a/backend/open_webui/apps/retrieval/main.py +++ b/backend/open_webui/apps/retrieval/main.py @@ -14,6 +14,7 @@ from typing import Iterator, Optional, Sequence, Union from fastapi import Depends, FastAPI, File, Form, HTTPException, UploadFile, status from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseModel +import tiktoken from open_webui.storage.provider import Storage @@ -666,8 +667,13 @@ def save_docs_to_vector_db( add_start_index=True, ) elif app.state.config.TEXT_SPLITTER == "token": + log.info( + f"Using token text splitter: {app.state.config.TIKTOKEN_ENCODING_NAME}" + ) + + tiktoken.get_encoding(str(app.state.config.TIKTOKEN_ENCODING_NAME)) text_splitter = TokenTextSplitter( - encoding_name=app.state.config.TIKTOKEN_ENCODING_NAME, + encoding_name=str(app.state.config.TIKTOKEN_ENCODING_NAME), chunk_size=app.state.config.CHUNK_SIZE, chunk_overlap=app.state.config.CHUNK_OVERLAP, add_start_index=True, diff --git a/backend/open_webui/apps/retrieval/vector/dbs/chroma.py b/backend/open_webui/apps/retrieval/vector/dbs/chroma.py index 
15bdc8ff2c6c9d3deacb39bb7fb8cec193b7cca6..cb4d6283f6052c28548a2968857b6173254dbb26 100644 --- a/backend/open_webui/apps/retrieval/vector/dbs/chroma.py +++ b/backend/open_webui/apps/retrieval/vector/dbs/chroma.py @@ -13,11 +13,22 @@ from open_webui.config import ( CHROMA_HTTP_SSL, CHROMA_TENANT, CHROMA_DATABASE, + CHROMA_CLIENT_AUTH_PROVIDER, + CHROMA_CLIENT_AUTH_CREDENTIALS, ) class ChromaClient: def __init__(self): + settings_dict = { + "allow_reset": True, + "anonymized_telemetry": False, + } + if CHROMA_CLIENT_AUTH_PROVIDER is not None: + settings_dict["chroma_client_auth_provider"] = CHROMA_CLIENT_AUTH_PROVIDER + if CHROMA_CLIENT_AUTH_CREDENTIALS is not None: + settings_dict["chroma_client_auth_credentials"] = CHROMA_CLIENT_AUTH_CREDENTIALS + if CHROMA_HTTP_HOST != "": self.client = chromadb.HttpClient( host=CHROMA_HTTP_HOST, @@ -26,12 +37,12 @@ class ChromaClient: ssl=CHROMA_HTTP_SSL, tenant=CHROMA_TENANT, database=CHROMA_DATABASE, - settings=Settings(allow_reset=True, anonymized_telemetry=False), + settings=Settings(**settings_dict), ) else: self.client = chromadb.PersistentClient( path=CHROMA_DATA_PATH, - settings=Settings(allow_reset=True, anonymized_telemetry=False), + settings=Settings(**settings_dict), tenant=CHROMA_TENANT, database=CHROMA_DATABASE, ) diff --git a/backend/open_webui/apps/webui/models/files.py b/backend/open_webui/apps/webui/models/files.py index 7838ebf8429bf2b3f2d7ee85beac1082e8824eb8..bb2a7bf9603fe1fcec45e07a5ebc69c4dd54ccea 100644 --- a/backend/open_webui/apps/webui/models/files.py +++ b/backend/open_webui/apps/webui/models/files.py @@ -73,6 +73,8 @@ class FileModelResponse(BaseModel): created_at: int # timestamp in epoch updated_at: int # timestamp in epoch + model_config = ConfigDict(extra="allow") + class FileMetadataResponse(BaseModel): id: str diff --git a/backend/open_webui/apps/webui/routers/evaluations.py b/backend/open_webui/apps/webui/routers/evaluations.py index 2a7154565ced2a6277ec34b5fc2ee00bcc42084b..5756d434cb74735bde4fd65db18457964e781cc1 100644 --- a/backend/open_webui/apps/webui/routers/evaluations.py +++ b/backend/open_webui/apps/webui/routers/evaluations.py @@ -5,6 +5,7 @@ from pydantic import BaseModel from open_webui.apps.webui.models.users import Users, UserModel from open_webui.apps.webui.models.feedbacks import ( FeedbackModel, + FeedbackResponse, FeedbackForm, Feedbacks, ) @@ -55,27 +56,15 @@ async def update_config( } -@router.get("/feedbacks", response_model=list[FeedbackModel]) -async def get_feedbacks(user=Depends(get_verified_user)): - feedbacks = Feedbacks.get_feedbacks_by_user_id(user.id) - return feedbacks - - -@router.delete("/feedbacks", response_model=bool) -async def delete_feedbacks(user=Depends(get_verified_user)): - success = Feedbacks.delete_feedbacks_by_user_id(user.id) - return success - - -class FeedbackUserModel(FeedbackModel): +class FeedbackUserResponse(FeedbackResponse): user: Optional[UserModel] = None -@router.get("/feedbacks/all", response_model=list[FeedbackUserModel]) +@router.get("/feedbacks/all", response_model=list[FeedbackUserResponse]) async def get_all_feedbacks(user=Depends(get_admin_user)): feedbacks = Feedbacks.get_all_feedbacks() return [ - FeedbackUserModel( + FeedbackUserResponse( **feedback.model_dump(), user=Users.get_user_by_id(feedback.user_id) ) for feedback in feedbacks @@ -88,6 +77,29 @@ async def delete_all_feedbacks(user=Depends(get_admin_user)): return success +@router.get("/feedbacks/all/export", response_model=list[FeedbackModel]) +async def 
get_all_feedbacks(user=Depends(get_admin_user)): + feedbacks = Feedbacks.get_all_feedbacks() + return [ + FeedbackModel( + **feedback.model_dump(), user=Users.get_user_by_id(feedback.user_id) + ) + for feedback in feedbacks + ] + + +@router.get("/feedbacks/user", response_model=list[FeedbackUserResponse]) +async def get_feedbacks(user=Depends(get_verified_user)): + feedbacks = Feedbacks.get_feedbacks_by_user_id(user.id) + return feedbacks + + +@router.delete("/feedbacks", response_model=bool) +async def delete_feedbacks(user=Depends(get_verified_user)): + success = Feedbacks.delete_feedbacks_by_user_id(user.id) + return success + + @router.post("/feedback", response_model=FeedbackModel) async def create_feedback( request: Request, diff --git a/backend/open_webui/apps/webui/routers/files.py b/backend/open_webui/apps/webui/routers/files.py index 8edc4a919de173a9d44150ca39c9ce19e24995ff..440f7475a50287ce8f5222225d4af42a19d11101 100644 --- a/backend/open_webui/apps/webui/routers/files.py +++ b/backend/open_webui/apps/webui/routers/files.py @@ -38,7 +38,7 @@ router = APIRouter() ############################ -@router.post("/") +@router.post("/", response_model=FileModelResponse) def upload_file(file: UploadFile = File(...), user=Depends(get_verified_user)): log.info(f"file.content_type: {file.content_type}") try: @@ -73,6 +73,12 @@ def upload_file(file: UploadFile = File(...), user=Depends(get_verified_user)): except Exception as e: log.exception(e) log.error(f"Error processing file: {file_item.id}") + file_item = FileModelResponse( + **{ + **file_item.model_dump(), + "error": str(e.detail) if hasattr(e, "detail") else str(e), + } + ) if file_item: return file_item diff --git a/backend/open_webui/apps/webui/routers/knowledge.py b/backend/open_webui/apps/webui/routers/knowledge.py index be399b80d271ec4bc12ea55f8b680e7f2f49a1aa..c07ccdffd11d333dd280984108535590a81ffd4e 100644 --- a/backend/open_webui/apps/webui/routers/knowledge.py +++ b/backend/open_webui/apps/webui/routers/knowledge.py @@ -47,15 +47,43 @@ async def get_knowledge_items( detail=ERROR_MESSAGES.NOT_FOUND, ) else: - return [ - KnowledgeResponse( - **knowledge.model_dump(), - files=Files.get_file_metadatas_by_ids( - knowledge.data.get("file_ids", []) if knowledge.data else [] - ), + knowledge_bases = [] + + for knowledge in Knowledges.get_knowledge_items(): + + files = [] + if knowledge.data: + files = Files.get_file_metadatas_by_ids( + knowledge.data.get("file_ids", []) + ) + + # Check if all files exist + if len(files) != len(knowledge.data.get("file_ids", [])): + missing_files = list( + set(knowledge.data.get("file_ids", [])) + - set([file.id for file in files]) + ) + if missing_files: + data = knowledge.data or {} + file_ids = data.get("file_ids", []) + + for missing_file in missing_files: + file_ids.remove(missing_file) + + data["file_ids"] = file_ids + Knowledges.update_knowledge_by_id( + id=knowledge.id, form_data=KnowledgeUpdateForm(data=data) + ) + + files = Files.get_file_metadatas_by_ids(file_ids) + + knowledge_bases.append( + KnowledgeResponse( + **knowledge.model_dump(), + files=files, + ) ) - for knowledge in Knowledges.get_knowledge_items() - ] + return knowledge_bases ############################ diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index c5cc591236a745078ecd5d8326e51e012e080547..6348c61c11a7cde5e8df0c5475ad3c8025cd16f4 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -937,6 +937,8 @@ CHROMA_TENANT = os.environ.get("CHROMA_TENANT", 
chromadb.DEFAULT_TENANT) CHROMA_DATABASE = os.environ.get("CHROMA_DATABASE", chromadb.DEFAULT_DATABASE) CHROMA_HTTP_HOST = os.environ.get("CHROMA_HTTP_HOST", "") CHROMA_HTTP_PORT = int(os.environ.get("CHROMA_HTTP_PORT", "8000")) +CHROMA_CLIENT_AUTH_PROVIDER = os.environ.get("CHROMA_CLIENT_AUTH_PROVIDER", "") +CHROMA_CLIENT_AUTH_CREDENTIALS = os.environ.get("CHROMA_CLIENT_AUTH_CREDENTIALS", "") # Comma-separated list of header=value pairs CHROMA_HTTP_HEADERS = os.environ.get("CHROMA_HTTP_HEADERS", "") if CHROMA_HTTP_HEADERS: diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 96b9982dd0c16ac54ba4fd8f7a147ffd28e017d3..7f52d9638b82dc0a7849b78ce762d0187eed55fe 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -439,9 +439,20 @@ async def chat_completion_tools_handler( tool_function_params = result.get("parameters", {}) try: - tool_output = await tools[tool_function_name]["callable"]( - **tool_function_params + required_params = ( + tools[tool_function_name] + .get("spec", {}) + .get("parameters", {}) + .get("required", []) ) + tool_function = tools[tool_function_name]["callable"] + tool_function_params = { + k: v + for k, v in tool_function_params.items() + if k in required_params + } + tool_output = await tool_function(**tool_function_params) + except Exception as e: tool_output = str(e) diff --git a/backend/open_webui/migrations/versions/242a2047eae0_update_chat_table.py b/backend/open_webui/migrations/versions/242a2047eae0_update_chat_table.py index b561abe098a10d3c69144c4fe8eac98a89b9930f..407dee673c5691fa08f8343fa4e016cff121a174 100644 --- a/backend/open_webui/migrations/versions/242a2047eae0_update_chat_table.py +++ b/backend/open_webui/migrations/versions/242a2047eae0_update_chat_table.py @@ -19,17 +19,41 @@ depends_on = None def upgrade(): - # Step 1: Rename current 'chat' column to 'old_chat' - op.alter_column("chat", "chat", new_column_name="old_chat", existing_type=sa.Text) + conn = op.get_bind() + inspector = sa.inspect(conn) - # Step 2: Add new 'chat' column of type JSON - op.add_column("chat", sa.Column("chat", sa.JSON(), nullable=True)) + columns = inspector.get_columns("chat") + column_dict = {col["name"]: col for col in columns} + + chat_column = column_dict.get("chat") + old_chat_exists = "old_chat" in column_dict + + if chat_column: + if isinstance(chat_column["type"], sa.Text): + print("Converting 'chat' column to JSON") + + if old_chat_exists: + print("Dropping old 'old_chat' column") + op.drop_column("chat", "old_chat") + + # Step 1: Rename current 'chat' column to 'old_chat' + print("Renaming 'chat' column to 'old_chat'") + op.alter_column( + "chat", "chat", new_column_name="old_chat", existing_type=sa.Text() + ) + + # Step 2: Add new 'chat' column of type JSON + print("Adding new 'chat' column of type JSON") + op.add_column("chat", sa.Column("chat", sa.JSON(), nullable=True)) + else: + # If the column is already JSON, no need to do anything + pass # Step 3: Migrate data from 'old_chat' to 'chat' chat_table = table( "chat", - sa.Column("id", sa.String, primary_key=True), - sa.Column("old_chat", sa.Text), + sa.Column("id", sa.String(), primary_key=True), + sa.Column("old_chat", sa.Text()), sa.Column("chat", sa.JSON()), ) @@ -50,6 +74,7 @@ def upgrade(): ) # Step 4: Drop 'old_chat' column + print("Dropping 'old_chat' column") op.drop_column("chat", "old_chat") @@ -60,7 +85,7 @@ def downgrade(): # Step 2: Convert 'chat' JSON data back to text and store in 'old_chat' chat_table = table( "chat", - sa.Column("id", sa.String, 
primary_key=True), + sa.Column("id", sa.String(), primary_key=True), sa.Column("chat", sa.JSON()), sa.Column("old_chat", sa.Text()), ) @@ -79,4 +104,4 @@ def downgrade(): op.drop_column("chat", "chat") # Step 4: Rename 'old_chat' back to 'chat' - op.alter_column("chat", "old_chat", new_column_name="chat", existing_type=sa.Text) + op.alter_column("chat", "old_chat", new_column_name="chat", existing_type=sa.Text()) diff --git a/backend/open_webui/storage/provider.py b/backend/open_webui/storage/provider.py index 944a5d2cc53ee36c58e90ecd5979ef6dabf08ad1..7bea0bdae5f79174fc8b869bc381562567f1da81 100644 --- a/backend/open_webui/storage/provider.py +++ b/backend/open_webui/storage/provider.py @@ -44,14 +44,14 @@ class StorageProvider: ) self.bucket_name = S3_BUCKET_NAME - def _upload_to_s3(self, file: BinaryIO, filename: str) -> Tuple[bytes, str]: + def _upload_to_s3(self, file_path: str, filename: str) -> Tuple[bytes, str]: """Handles uploading of the file to S3 storage.""" if not self.s3_client: raise RuntimeError("S3 Client is not initialized.") try: - self.s3_client.upload_fileobj(file, self.bucket_name, filename) - return file.read(), f"s3://{self.bucket_name}/{filename}" + self.s3_client.upload_file(file_path, self.bucket_name, filename) + return open(file_path, "rb").read(), file_path except ClientError as e: raise RuntimeError(f"Error uploading file to S3: {e}") @@ -132,10 +132,11 @@ class StorageProvider: contents = file.read() if not contents: raise ValueError(ERROR_MESSAGES.EMPTY_CONTENT) + contents, file_path = self._upload_to_local(contents, filename) if self.storage_provider == "s3": - return self._upload_to_s3(file, filename) - return self._upload_to_local(contents, filename) + return self._upload_to_s3(file_path, filename) + return contents, file_path def get_file(self, file_path: str) -> str: """Downloads a file either from S3 or the local file system and returns the file path.""" diff --git a/backend/open_webui/utils/oauth.py b/backend/open_webui/utils/oauth.py index e9418b200edefab380469931a0f8103308b371eb..36a659fd772a27bee3eb26f6d1dacded894701cd 100644 --- a/backend/open_webui/utils/oauth.py +++ b/backend/open_webui/utils/oauth.py @@ -162,7 +162,7 @@ class OAuthManager: if not user: # If the user does not exist, check if merging is enabled - if auth_manager_config.OAUTH_MERGE_ACCOUNTS_BY_EMAIL.value: + if auth_manager_config.OAUTH_MERGE_ACCOUNTS_BY_EMAIL: # Check if the user exists by email user = Users.get_user_by_email(email) if user: @@ -176,7 +176,7 @@ class OAuthManager: if not user: # If the user does not exist, check if signups are enabled - if auth_manager_config.ENABLE_OAUTH_SIGNUP.value: + if auth_manager_config.ENABLE_OAUTH_SIGNUP: # Check if an existing user with the same email already exists existing_user = Users.get_user_by_email( user_data.get("email", "").lower() diff --git a/backend/open_webui/utils/security_headers.py b/backend/open_webui/utils/security_headers.py index 085fd513aa6f4367a9428cb5708aa4944be8d87e..a24c5131dacc94a434be37323757891f148fa1b3 100644 --- a/backend/open_webui/utils/security_headers.py +++ b/backend/open_webui/utils/security_headers.py @@ -60,7 +60,7 @@ def set_hsts(value: str): pattern = r"^max-age=(\d+)(;includeSubDomains)?(;preload)?$" match = re.match(pattern, value, re.IGNORECASE) if not match: - return "max-age=31536000;includeSubDomains" + value = "max-age=31536000;includeSubDomains" return {"Strict-Transport-Security": value} diff --git a/backend/requirements.txt b/backend/requirements.txt index 
8f8767b1de16fc47a1e97c92a92ff74c26a66eeb..6bb220920e8892da2ff76e1d8f2ac4f512e93eae 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -40,7 +40,7 @@ langchain-community==0.2.12 langchain-chroma==0.1.4 fake-useragent==1.5.1 -chromadb==0.5.9 +chromadb==0.5.15 pymilvus==2.4.7 qdrant-client~=1.12.0 diff --git a/package-lock.json b/package-lock.json index 21dd1fbfe8aed7cf68388716f5b8c4cde062651e..148493d22b27aa9ac98408e2ab245264872b4ed0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "open-webui", - "version": "0.3.33", + "version": "0.3.35", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "open-webui", - "version": "0.3.33", + "version": "0.3.35", "dependencies": { "@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-python": "^6.1.6", diff --git a/package.json b/package.json index 451d552d4f22f09fc08a1b236bb47c592e7715b4..232e0883d82b09b1973544bbbb806a98da688da6 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "open-webui", - "version": "0.3.33", + "version": "0.3.35", "private": true, "scripts": { "dev": "npm run pyodide:fetch && vite dev --host", diff --git a/src/app.css b/src/app.css index 1890065436dcebbb7161f30bb1ab5370426de39b..d7f2d0e5488a9a9038c3fb3251d768c095037319 100644 --- a/src/app.css +++ b/src/app.css @@ -189,7 +189,7 @@ input[type='number'] { } .ProseMirror { - @apply h-full min-h-fit max-h-full; + @apply h-full min-h-fit max-h-full whitespace-pre-wrap; } .ProseMirror:focus { diff --git a/src/lib/apis/evaluations/index.ts b/src/lib/apis/evaluations/index.ts index d6c062f82d790e0375b552b01432df48910e0178..0ba987cad0a1243e26d3679543c95b8d28340b58 100644 --- a/src/lib/apis/evaluations/index.ts +++ b/src/lib/apis/evaluations/index.ts @@ -93,6 +93,37 @@ export const getAllFeedbacks = async (token: string = '') => { return res; }; +export const exportAllFeedbacks = async (token: string = '') => { + let error = null; + + const res = await fetch(`${WEBUI_API_BASE_URL}/evaluations/feedbacks/all/export`, { + method: 'GET', + headers: { + Accept: 'application/json', + 'Content-Type': 'application/json', + authorization: `Bearer ${token}` + } + }) + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .then((json) => { + return json; + }) + .catch((err) => { + error = err.detail; + console.log(err); + return null; + }); + + if (error) { + throw error; + } + + return res; +}; + export const createNewFeedback = async (token: string, feedback: object) => { let error = null; diff --git a/src/lib/components/admin/Evaluations.svelte b/src/lib/components/admin/Evaluations.svelte index 336ce44bad743a99c729991ae0afe43f14737779..13d51ab8c04d819177f15dcee55017983f4e8716 100644 --- a/src/lib/components/admin/Evaluations.svelte +++ b/src/lib/components/admin/Evaluations.svelte @@ -1,4 +1,7 @@ diff --git a/src/lib/components/chat/MessageInput/VoiceRecording.svelte b/src/lib/components/chat/MessageInput/VoiceRecording.svelte index 2c2032e0dfc316ec96e379db4b61c6f095b284c0..ae93e735069129fb1ab0754b70ba2ae3839d671c 100644 --- a/src/lib/components/chat/MessageInput/VoiceRecording.svelte +++ b/src/lib/components/chat/MessageInput/VoiceRecording.svelte @@ -1,6 +1,6 @@
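Below is a brief, hypothetical usage sketch (not part of the patch itself) showing how the CHROMA_CLIENT_AUTH_PROVIDER and CHROMA_CLIENT_AUTH_CREDENTIALS settings introduced in this changeset could be exercised against an auth-enabled Chroma server. The host, port, provider class path, and token values are placeholder assumptions; the exact auth provider depends on how your Chroma server is configured.

```python
# Hypothetical sketch: forwarding the new Chroma client auth settings,
# mirroring the patched ChromaClient logic. All concrete values are placeholders.
import os

import chromadb
from chromadb.config import Settings

CHROMA_CLIENT_AUTH_PROVIDER = os.environ.get("CHROMA_CLIENT_AUTH_PROVIDER", "")
CHROMA_CLIENT_AUTH_CREDENTIALS = os.environ.get("CHROMA_CLIENT_AUTH_CREDENTIALS", "")

settings_dict = {
    "allow_reset": True,
    "anonymized_telemetry": False,
}

# Only forward the auth settings when they are actually provided.
if CHROMA_CLIENT_AUTH_PROVIDER:
    settings_dict["chroma_client_auth_provider"] = CHROMA_CLIENT_AUTH_PROVIDER
if CHROMA_CLIENT_AUTH_CREDENTIALS:
    settings_dict["chroma_client_auth_credentials"] = CHROMA_CLIENT_AUTH_CREDENTIALS

client = chromadb.HttpClient(
    host="localhost",  # assumed host/port for illustration
    port=8000,
    ssl=False,
    settings=Settings(**settings_dict),
)

print(client.heartbeat())  # simple connectivity check
```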