diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1a56d79bbacf75096b832a9c6ea5773c69023b37..460cc04b96d844bebb8cafe8a4fa8e6016975750 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,17 +5,27 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.4.2] - 2024-11-20
+
+### Fixed
+
+- **📁 Knowledge Files Visibility Issue**: Resolved the bug preventing individual files in knowledge collections from displaying when referenced with '#'.
+- **🔗 OpenAI Endpoint Prefix**: Fixed the issue where certain OpenAI connections that deviate from the official API spec weren’t working correctly with prefixes.
+- **⚔️ Arena Model Access Control**: Corrected an issue where arena model access control settings were not being saved.
+- **🔧 Usage Capability Selector**: Fixed the broken usage capabilities selector in the model editor.
+
 ## [0.4.1] - 2024-11-19
 
 ### Added
 
-- **🛠️ Tool Descriptions on Hover**: When enabled, tool descriptions now appear upon hovering over the tool icon in the message input, giving you more context instantly and improving workflow fluidity.
+- **📊 Enhanced Feedback System**: Introduced a detailed 1-10 rating scale for feedback alongside thumbs up/down, preparing for more precise model fine-tuning and improving feedback quality.
+- **ℹ️ Tool Descriptions on Hover**: Easily access tool descriptions by hovering over the message input, providing a smoother workflow with more context when utilizing tools.
 
 ### Fixed
 
-- **🚫 Graceful Handling of Deleted Users**: Resolved an issue where deleted users caused models, knowledge, prompts, or tools to fail loading in the workspace, ensuring smoother operation and fewer interruptions.
-- **🔗 Proxy Fix for HTTPS Models Endpoint**: Fixed issues with proxies affecting the secure `/api/v1/models/` endpoint, ensuring stable connections and reliable access.
-- **🔒 API Key Creation**: Addressed a bug that previously prevented API keys from being created.
+- **🗑️ Graceful Handling of Deleted Users**: Resolved an issue where deleted users caused workspace items (models, knowledge, prompts, tools) to fail, ensuring reliable workspace loading.
+- **🔑 API Key Creation**: Fixed an issue preventing users from creating new API keys, restoring secure and seamless API management.
+- **🔗 HTTPS Proxy Fix**: Corrected HTTPS proxy issues affecting the '/api/v1/models/' endpoint, ensuring smoother, uninterrupted model management.
 
 ## [0.4.0] - 2024-11-19
 
diff --git a/README.md b/README.md
index 2d5162ec77890cf21478cbbaa58f1075f1e5f052..ac4ba92cca2223b499456c8ba168a76a564fc1f4 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ Open WebUI is an [extensible](https://github.com/open-webui/pipelines), feature-
 
 - 🤝 **Ollama/OpenAI API Integration**: Effortlessly integrate OpenAI-compatible APIs for versatile conversations alongside Ollama models. Customize the OpenAI API URL to link with **LMStudio, GroqCloud, Mistral, OpenRouter, and more**.
 
-- 🧩 **Pipelines, Open WebUI Plugin Support**: Seamlessly integrate custom logic and Python libraries into Open WebUI using [Pipelines Plugin Framework](https://github.com/open-webui/pipelines). Launch your Pipelines instance, set the OpenAI URL to the Pipelines URL, and explore endless possibilities. [Examples](https://github.com/open-webui/pipelines/tree/main/examples) include **Function Calling**, User **Rate Limiting** to control access, **Usage Monitoring** with tools like Langfuse, **Live Translation with LibreTranslate** for multilingual support, **Toxic Message Filtering** and much more.
+- 🛡️ **Granular Permissions and User Groups**: By allowing administrators to create detailed user roles and permissions, we ensure a secure user environment. This granularity not only enhances security but also allows for customized user experiences, fostering a sense of ownership and responsibility amongst users.
 
 - 📱 **Responsive Design**: Enjoy a seamless experience across Desktop PC, Laptop, and Mobile devices.
 
@@ -57,6 +57,8 @@ Open WebUI is an [extensible](https://github.com/open-webui/pipelines), feature-
 
 - 🌐🌍 **Multilingual Support**: Experience Open WebUI in your preferred language with our internationalization (i18n) support. Join us in expanding our supported languages! We're actively seeking contributors!
 
+- 🧩 **Pipelines, Open WebUI Plugin Support**: Seamlessly integrate custom logic and Python libraries into Open WebUI using [Pipelines Plugin Framework](https://github.com/open-webui/pipelines). Launch your Pipelines instance, set the OpenAI URL to the Pipelines URL, and explore endless possibilities. [Examples](https://github.com/open-webui/pipelines/tree/main/examples) include **Function Calling**, User **Rate Limiting** to control access, **Usage Monitoring** with tools like Langfuse, **Live Translation with LibreTranslate** for multilingual support, **Toxic Message Filtering** and much more.
+
 - 🌟 **Continuous Updates**: We are committed to improving Open WebUI with regular updates, fixes, and new features.
 
 Want to learn more about Open WebUI's features? Check out our [Open WebUI documentation](https://docs.openwebui.com/features) for a comprehensive overview!
diff --git a/backend/open_webui/apps/openai/main.py b/backend/open_webui/apps/openai/main.py
index 2e2da944f10514ecefc308d4278654d829c916b6..f9106a9ab45bec88bfadfd347897e315c0d37a10 100644
--- a/backend/open_webui/apps/openai/main.py
+++ b/backend/open_webui/apps/openai/main.py
@@ -313,7 +313,9 @@ async def get_all_models_responses() -> list:
             prefix_id = api_config.get("prefix_id", None)
 
             if prefix_id:
-                for model in response["data"]:
+                for model in (
+                    response if isinstance(response, list) else response.get("data", [])
+                ):
                     model["id"] = f"{prefix_id}.{model['id']}"
 
     log.debug(f"get_all_models:responses() {responses}")
diff --git a/backend/open_webui/apps/retrieval/loaders/youtube.py b/backend/open_webui/apps/retrieval/loaders/youtube.py
new file mode 100644
index 0000000000000000000000000000000000000000..36b8af783be4001a544477ac5e80a635b6d7e321
--- /dev/null
+++ b/backend/open_webui/apps/retrieval/loaders/youtube.py
@@ -0,0 +1,98 @@
+from typing import Any, Dict, Generator, List, Optional, Sequence, Union
+from urllib.parse import parse_qs, urlparse
+from langchain_core.documents import Document
+
+
+ALLOWED_SCHEMES = {"http", "https"}
+ALLOWED_NETLOCS = {
+    "youtu.be",
+    "m.youtube.com",
+    "youtube.com",
+    "www.youtube.com",
+    "www.youtube-nocookie.com",
+    "vid.plus",
+}
+
+
+def _parse_video_id(url: str) -> Optional[str]:
+    """Parse a YouTube URL and return the video ID if valid, otherwise None."""
+    parsed_url = urlparse(url)
+
+    if parsed_url.scheme not in ALLOWED_SCHEMES:
+        return None
+
+    if parsed_url.netloc not in ALLOWED_NETLOCS:
+        return None
+
+    path = parsed_url.path
+
+    if path.endswith("/watch"):
+        query = parsed_url.query
+        parsed_query = parse_qs(query)
+        if "v" in parsed_query:
+            ids = parsed_query["v"]
+            video_id = ids if isinstance(ids, str) else ids[0]
+        else:
+            return None
+    else:
+        path = parsed_url.path.lstrip("/")
+        video_id = path.split("/")[-1]
+
+    if len(video_id) != 11:  # Video IDs are 11 characters long
+        return None
+
+    return video_id
+
+
+class YoutubeLoader:
+    """Load `YouTube` video transcripts."""
+
+    def __init__(
+        self,
+        video_id: str,
+        language: Union[str, Sequence[str]] = "en",
+    ):
+        """Initialize with YouTube video ID."""
+        _video_id = _parse_video_id(video_id)
+        self.video_id = _video_id if _video_id is not None else video_id
+        self._metadata = {"source": video_id}
+        self.language = language
+        if isinstance(language, str):
+            self.language = [language]
+        else:
+            self.language = language
+
+    def load(self) -> List[Document]:
+        """Load YouTube transcripts into `Document` objects."""
+        try:
+            from youtube_transcript_api import (
+                NoTranscriptFound,
+                TranscriptsDisabled,
+                YouTubeTranscriptApi,
+            )
+        except ImportError:
+            raise ImportError(
+                'Could not import "youtube_transcript_api" Python package. '
+                "Please install it with `pip install youtube-transcript-api`."
+            )
+
+        try:
+            transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
+        except Exception as e:
+            print(e)
+            return []
+
+        try:
+            transcript = transcript_list.find_transcript(self.language)
+        except NoTranscriptFound:
+            transcript = transcript_list.find_transcript(["en"])
+
+        transcript_pieces: List[Dict[str, Any]] = transcript.fetch()
+
+        transcript = " ".join(
+            map(
+                lambda transcript_piece: transcript_piece["text"].strip(" "),
+                transcript_pieces,
+            )
+        )
+        return [Document(page_content=transcript, metadata=self._metadata)]
diff --git a/backend/open_webui/apps/retrieval/main.py b/backend/open_webui/apps/retrieval/main.py
index 753239bc46bbfe10550c2cd8f646643145df5f35..e3cca87867f4239ef932ff0debf8272e924ae12a 100644
--- a/backend/open_webui/apps/retrieval/main.py
+++ b/backend/open_webui/apps/retrieval/main.py
@@ -23,6 +23,7 @@ from open_webui.apps.retrieval.vector.connector import VECTOR_DB_CLIENT
 
 # Document loaders
 from open_webui.apps.retrieval.loaders.main import Loader
+from open_webui.apps.retrieval.loaders.youtube import YoutubeLoader
 
 # Web search engines
 from open_webui.apps.retrieval.web.main import SearchResult
@@ -120,9 +121,6 @@ from open_webui.utils.misc import (
 from open_webui.utils.utils import get_admin_user, get_verified_user
 
 from langchain.text_splitter import RecursiveCharacterTextSplitter, TokenTextSplitter
-from langchain_community.document_loaders import (
-    YoutubeLoader,
-)
 from langchain_core.documents import Document
 
 
@@ -1059,12 +1057,10 @@ def process_youtube_video(form_data: ProcessUrlForm, user=Depends(get_verified_u
     if not collection_name:
         collection_name = calculate_sha256_string(form_data.url)[:63]
 
-    loader = YoutubeLoader.from_youtube_url(
-        form_data.url,
-        add_video_info=False,
-        language=app.state.config.YOUTUBE_LOADER_LANGUAGE,
-        translation=app.state.YOUTUBE_LOADER_TRANSLATION,
+    loader = YoutubeLoader(
+        form_data.url, language=app.state.config.YOUTUBE_LOADER_LANGUAGE
     )
+
     docs = loader.load()
     content = " ".join([doc.page_content for doc in docs])
     log.debug(f"text_content: {content}")
diff --git a/backend/open_webui/apps/retrieval/vector/dbs/opensearch.py b/backend/open_webui/apps/retrieval/vector/dbs/opensearch.py
index 57d5636443ce2fa694f9e31bc9f3faa2dbe4ce0e..19a266f366a4e3ae9ee1b8c2f791d0cfbe1c03cb 100644
--- a/backend/open_webui/apps/retrieval/vector/dbs/opensearch.py
+++ b/backend/open_webui/apps/retrieval/vector/dbs/opensearch.py
@@ -1,7 +1,7 @@
 from opensearchpy import OpenSearch
 from typing import Optional
 
-from open_webui.apps.rag.vector.main import VectorItem, SearchResult, GetResult
+from open_webui.apps.retrieval.vector.main import VectorItem, SearchResult, GetResult
 from open_webui.config import (
     OPENSEARCH_URI,
     OPENSEARCH_SSL,
diff --git a/backend/open_webui/apps/webui/routers/knowledge.py b/backend/open_webui/apps/webui/routers/knowledge.py
index 22f0b34c0f1d734b1b86c3b67765942888d1fedb..1508cfb45e73f550d71938bb57e6bae572e6fc3d 100644
--- a/backend/open_webui/apps/webui/routers/knowledge.py
+++ b/backend/open_webui/apps/webui/routers/knowledge.py
@@ -43,6 +43,7 @@ async def get_knowledge(user=Depends(get_verified_user)):
     knowledge_bases = Knowledges.get_knowledge_bases_by_user_id(user.id, "read")
 
     # Get files for each knowledge base
+    knowledge_with_files = []
     for knowledge_base in knowledge_bases:
         files = []
         if knowledge_base.data:
@@ -70,12 +71,14 @@ async def get_knowledge(user=Depends(get_verified_user)):
 
             files = Files.get_file_metadatas_by_ids(file_ids)
 
-        knowledge_base = KnowledgeResponse(
-            **knowledge_base.model_dump(),
-            files=files,
+        knowledge_with_files.append(
+            KnowledgeUserResponse(
+                **knowledge_base.model_dump(),
+                files=files,
+            )
         )
 
-    return knowledge_bases
+    return knowledge_with_files
 
 
 @router.get("/list", response_model=list[KnowledgeUserResponse])
@@ -88,6 +91,7 @@ async def get_knowledge_list(user=Depends(get_verified_user)):
     knowledge_bases = Knowledges.get_knowledge_bases_by_user_id(user.id, "write")
 
     # Get files for each knowledge base
+    knowledge_with_files = []
     for knowledge_base in knowledge_bases:
         files = []
         if knowledge_base.data:
@@ -115,12 +119,13 @@ async def get_knowledge_list(user=Depends(get_verified_user)):
 
             files = Files.get_file_metadatas_by_ids(file_ids)
 
-        knowledge_base = KnowledgeResponse(
-            **knowledge_base.model_dump(),
-            files=files,
+        knowledge_with_files.append(
+            KnowledgeUserResponse(
+                **knowledge_base.model_dump(),
+                files=files,
+            )
         )
-
-    return knowledge_bases
+    return knowledge_with_files
 
 
 ############################
diff --git a/backend/open_webui/apps/webui/routers/models.py b/backend/open_webui/apps/webui/routers/models.py
index 3ed1d686d4832d250f7c86f63bd8b64c33154876..5e24828935458c939fefd906027137734899348f 100644
--- a/backend/open_webui/apps/webui/routers/models.py
+++ b/backend/open_webui/apps/webui/routers/models.py
@@ -83,7 +83,8 @@ async def create_new_model(
 ###########################
 
 
-@router.get("/id/{id}", response_model=Optional[ModelResponse])
+# Note: We're not using the typical url path param here, but instead using a query parameter to allow '/' in the id
+@router.get("/model", response_model=Optional[ModelResponse])
 async def get_model_by_id(id: str, user=Depends(get_verified_user)):
     model = Models.get_model_by_id(id)
     if model:
@@ -105,7 +106,7 @@ async def get_model_by_id(id: str, user=Depends(get_verified_user)):
 ############################
 
 
-@router.post("/id/{id}/toggle", response_model=Optional[ModelResponse])
+@router.post("/model/toggle", response_model=Optional[ModelResponse])
 async def toggle_model_by_id(id: str, user=Depends(get_verified_user)):
     model = Models.get_model_by_id(id)
     if model:
@@ -140,7 +141,7 @@ async def toggle_model_by_id(id: str, user=Depends(get_verified_user)):
 ############################
 
 
-@router.post("/id/{id}/update", response_model=Optional[ModelModel])
+@router.post("/model/update", response_model=Optional[ModelModel])
 async def update_model_by_id(
     id: str,
     form_data: ModelForm,
@@ -163,7 +164,7 @@ async def update_model_by_id(
 ############################
 
 
-@router.delete("/id/{id}/delete", response_model=bool)
+@router.delete("/model/delete", response_model=bool)
 async def delete_model_by_id(id: str, user=Depends(get_verified_user)):
     model = Models.get_model_by_id(id)
     if not model:
diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py
index 2ac63682aa1e349a9b5db52b666b9c6df6c504dd..e61d75f2315915eacbf0a4919f1a341af2e50354 100644
--- a/backend/open_webui/main.py
+++ b/backend/open_webui/main.py
@@ -855,6 +855,11 @@ class PipelineMiddleware(BaseHTTPMiddleware):
                 status_code=status.HTTP_401_UNAUTHORIZED,
                 content={"detail": "Not authenticated"},
             )
+        except HTTPException as e:
+            return JSONResponse(
+                status_code=e.status_code,
+                content={"detail": e.detail},
+            )
 
         model_list = await get_all_models()
         models = {model["id"]: model for model in model_list}
diff --git a/backend/requirements.txt b/backend/requirements.txt
index 62587496cb3946503683831a007ff570c7b9150b..7ea875e629c253c5e5933fcd477501fb50ce5873 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -37,8 +37,8 @@ anthropic
 google-generativeai==0.7.2
 tiktoken
 
-langchain==0.3.5
-langchain-community==0.3.3
+langchain==0.3.7
+langchain-community==0.3.7
 langchain-chroma==0.1.4
 
 fake-useragent==1.5.1
@@ -82,7 +82,7 @@ authlib==1.3.2
 
 black==24.8.0
 langfuse==2.44.0
-youtube-transcript-api==0.6.2
+youtube-transcript-api==0.6.3
 pytube==15.0.0
 
 extract_msg
diff --git a/package-lock.json b/package-lock.json
index 7267c10c658ec91ab5020901e44cc48194251e2e..d32fbdcffa0f3e4eed6e3d1a57b352e741fc4918 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
 {
 	"name": "open-webui",
-	"version": "0.4.1",
+	"version": "0.4.2",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "open-webui",
-			"version": "0.4.1",
+			"version": "0.4.2",
 			"dependencies": {
 				"@codemirror/lang-javascript": "^6.2.2",
 				"@codemirror/lang-python": "^6.1.6",
diff --git a/package.json b/package.json
index f8aa9d37b890a494ddddfb6f54208695c3b78c0c..f95538644df4932715608901c5d374dc4ad9151c 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.4.1",
+	"version": "0.4.2",
 	"private": true,
 	"scripts": {
 		"dev": "npm run pyodide:fetch && vite dev --host",
diff --git a/pyproject.toml b/pyproject.toml
index 1ae65981239235c30e1105be914e08de9b0ae75f..83869f9837f35d6040348754b42de33339ea2244 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,8 +44,8 @@ dependencies = [
     "google-generativeai==0.7.2",
     "tiktoken",
 
-    "langchain==0.3.5",
-    "langchain-community==0.3.3",
+    "langchain==0.3.7",
+    "langchain-community==0.3.7",
     "langchain-chroma==0.1.4",
 
     "fake-useragent==1.5.1",
@@ -88,7 +88,7 @@ dependencies = [
 
     "black==24.8.0",
     "langfuse==2.44.0",
-    "youtube-transcript-api==0.6.2",
+    "youtube-transcript-api==0.6.3",
     "pytube==15.0.0",
 
     "extract_msg",
diff --git a/src/lib/apis/models/index.ts b/src/lib/apis/models/index.ts
index 8fec6829a2fd8c45551bad2d3b6eacb5717c6cc9..a94b13617c4a43c0a0ae966a38490e850341f12e 100644
--- a/src/lib/apis/models/index.ts
+++ b/src/lib/apis/models/index.ts
@@ -97,7 +97,7 @@ export const getModelById = async (token: string, id: string) => {
 	const searchParams = new URLSearchParams();
 	searchParams.append('id', id);
 
-	const res = await fetch(`${WEBUI_API_BASE_URL}/models/id/${id}`, {
+	const res = await fetch(`${WEBUI_API_BASE_URL}/models/model?${searchParams.toString()}`, {
 		method: 'GET',
 		headers: {
 			Accept: 'application/json',
@@ -132,7 +132,7 @@ export const toggleModelById = async (token: string, id: string) => {
 	const searchParams = new URLSearchParams();
 	searchParams.append('id', id);
 
-	const res = await fetch(`${WEBUI_API_BASE_URL}/models/id/${id}/toggle`, {
+	const res = await fetch(`${WEBUI_API_BASE_URL}/models/model/toggle?${searchParams.toString()}`, {
 		method: 'POST',
 		headers: {
 			Accept: 'application/json',
@@ -167,7 +167,7 @@ export const updateModelById = async (token: string, id: string, model: object)
 	const searchParams = new URLSearchParams();
 	searchParams.append('id', id);
 
-	const res = await fetch(`${WEBUI_API_BASE_URL}/models/id/${id}/update`, {
+	const res = await fetch(`${WEBUI_API_BASE_URL}/models/model/update?${searchParams.toString()}`, {
 		method: 'POST',
 		headers: {
 			Accept: 'application/json',
@@ -203,7 +203,7 @@ export const deleteModelById = async (token: string, id: string) => {
 	const searchParams = new URLSearchParams();
 	searchParams.append('id', id);
 
-	const res = await fetch(`${WEBUI_API_BASE_URL}/models/id/${id}/delete`, {
+	const res = await fetch(`${WEBUI_API_BASE_URL}/models/model/delete?${searchParams.toString()}`, {
 		method: 'DELETE',
 		headers: {
 			Accept: 'application/json',
diff --git a/src/lib/components/admin/Settings/Evaluations/ArenaModelModal.svelte b/src/lib/components/admin/Settings/Evaluations/ArenaModelModal.svelte
index 84de432b65d5f9ca3e6937152cc687dd8261ceb2..746ca04e40ba2e6281e8ea2041cfd8b16db3528a 100644
--- a/src/lib/components/admin/Settings/Evaluations/ArenaModelModal.svelte
+++ b/src/lib/components/admin/Settings/Evaluations/ArenaModelModal.svelte
@@ -102,7 +102,7 @@
 			description = model.meta.description;
 			modelIds = model.meta.model_ids || [];
 			filterMode = model.meta?.filter_mode ?? 'include';
-			accessControl = model.meta?.access_control ?? {};
+			accessControl = 'access_control' in model.meta ? model.meta.access_control : {};
 		}
 	};
 
diff --git a/src/lib/components/chat/Messages/RateComment.svelte b/src/lib/components/chat/Messages/RateComment.svelte
index d67b5ede1460726a9bcc49a5ddb3aca1645b8109..61656860915652e8c668c264789cd86ce4ae03cd 100644
--- a/src/lib/components/chat/Messages/RateComment.svelte
+++ b/src/lib/components/chat/Messages/RateComment.svelte
@@ -38,6 +38,7 @@
 
 	let selectedReason = null;
 	let comment = '';
+	let detailedRating = null;
 	let selectedModel = null;
 
 	$: if (message?.annotation?.rating === 1) {
@@ -56,6 +57,7 @@
 		tags = (message?.annotation?.tags ?? []).map((tag) => ({
 			name: tag
 		}));
+		detailedRating = message?.annotation?.details?.rating ?? null;
 	};
 
 	onMount(() => {
@@ -79,7 +81,10 @@
 		dispatch('save', {
 			reason: selectedReason,
 			comment: comment,
-			tags: tags.map((tag) => tag.name)
+			tags: tags.map((tag) => tag.name),
+			details: {
+				rating: detailedRating
+			}
 		});
 
 		toast.success($i18n.t('Thanks for your feedback!'));
@@ -100,7 +105,9 @@
 	id="message-feedback-{message.id}"
 >