github-actions[bot] committed on
Commit
c699d8e
·
1 Parent(s): 40c9478

GitHub deploy: 1b2ae7bb77153abb65fb1a02dbc696e08cf470a1

Browse files
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set.
Files changed (50) hide show
  1. .github/workflows/integration-test.yml +0 -7
  2. backend/apps/images/main.py +3 -5
  3. backend/apps/images/utils/comfyui.py +2 -3
  4. backend/apps/ollama/main.py +309 -84
  5. backend/apps/openai/main.py +2 -5
  6. backend/apps/webui/main.py +2 -2
  7. backend/requirements.txt +6 -6
  8. backend/utils/misc.py +5 -46
  9. cypress/e2e/chat.cy.ts +13 -18
  10. docs/CONTRIBUTING.md +1 -0
  11. pyproject.toml +1 -1
  12. src/app.html +1 -1
  13. src/lib/apis/openai/index.ts +1 -1
  14. src/lib/components/ChangelogModal.svelte +6 -6
  15. src/lib/components/admin/Settings/Documents.svelte +1 -1
  16. src/lib/components/chat/Chat.svelte +16 -16
  17. src/lib/components/chat/Controls/Controls.svelte +1 -3
  18. src/lib/components/chat/MessageInput/CallOverlay.svelte +16 -16
  19. src/lib/components/chat/MessageInput/Documents.svelte +1 -1
  20. src/lib/components/chat/Messages.svelte +4 -4
  21. src/lib/components/chat/Messages/CitationsModal.svelte +2 -2
  22. src/lib/components/chat/Messages/ResponseMessage.svelte +2 -2
  23. src/lib/components/chat/Messages/UserMessage.svelte +7 -7
  24. src/lib/components/chat/ModelSelector/Selector.svelte +1 -1
  25. src/lib/components/chat/Settings/About.svelte +2 -2
  26. src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte +0 -47
  27. src/lib/components/common/Valves.svelte +1 -1
  28. src/lib/components/layout/Sidebar/ChatItem.svelte +6 -6
  29. src/lib/components/playground/Playground.svelte +1 -1
  30. src/lib/components/workspace/Documents.svelte +1 -1
  31. src/lib/components/workspace/Models.svelte +2 -2
  32. src/lib/components/workspace/Models/Knowledge/Selector.svelte +1 -1
  33. src/lib/i18n/locales/ar-BH/translation.json +0 -1
  34. src/lib/i18n/locales/bg-BG/translation.json +0 -1
  35. src/lib/i18n/locales/bn-BD/translation.json +0 -1
  36. src/lib/i18n/locales/ca-ES/translation.json +0 -1
  37. src/lib/i18n/locales/ceb-PH/translation.json +0 -1
  38. src/lib/i18n/locales/de-DE/translation.json +0 -1
  39. src/lib/i18n/locales/dg-DG/translation.json +0 -1
  40. src/lib/i18n/locales/en-GB/translation.json +0 -1
  41. src/lib/i18n/locales/en-US/translation.json +0 -1
  42. src/lib/i18n/locales/es-ES/translation.json +0 -1
  43. src/lib/i18n/locales/fa-IR/translation.json +0 -1
  44. src/lib/i18n/locales/fi-FI/translation.json +0 -1
  45. src/lib/i18n/locales/fr-CA/translation.json +0 -1
  46. src/lib/i18n/locales/fr-FR/translation.json +0 -1
  47. src/lib/i18n/locales/he-IL/translation.json +0 -1
  48. src/lib/i18n/locales/hi-IN/translation.json +0 -1
  49. src/lib/i18n/locales/hr-HR/translation.json +0 -1
  50. src/lib/i18n/locales/id-ID/translation.json +0 -1
.github/workflows/integration-test.yml CHANGED
@@ -15,13 +15,6 @@ jobs:
15
  name: Run Cypress Integration Tests
16
  runs-on: ubuntu-latest
17
  steps:
18
- - name: Maximize build space
19
- uses: AdityaGarg8/[email protected]
20
- with:
21
- remove-android: 'true'
22
- remove-haskell: 'true'
23
- remove-codeql: 'true'
24
-
25
  - name: Checkout Repository
26
  uses: actions/checkout@v4
27
 
 
15
  name: Run Cypress Integration Tests
16
  runs-on: ubuntu-latest
17
  steps:
 
 
 
 
 
 
 
18
  - name: Checkout Repository
19
  uses: actions/checkout@v4
20
 
backend/apps/images/main.py CHANGED
@@ -150,11 +150,10 @@ async def update_engine_url(
150
  else:
151
  url = form_data.AUTOMATIC1111_BASE_URL.strip("/")
152
  try:
153
- r = requests.head(url)
154
- r.raise_for_status()
155
  app.state.config.AUTOMATIC1111_BASE_URL = url
156
  except Exception as e:
157
- raise HTTPException(status_code=400, detail=ERROR_MESSAGES.INVALID_URL)
158
 
159
  if form_data.COMFYUI_BASE_URL == None:
160
  app.state.config.COMFYUI_BASE_URL = COMFYUI_BASE_URL
@@ -163,10 +162,9 @@ async def update_engine_url(
163
 
164
  try:
165
  r = requests.head(url)
166
- r.raise_for_status()
167
  app.state.config.COMFYUI_BASE_URL = url
168
  except Exception as e:
169
- raise HTTPException(status_code=400, detail=ERROR_MESSAGES.INVALID_URL)
170
 
171
  if form_data.AUTOMATIC1111_API_AUTH == None:
172
  app.state.config.AUTOMATIC1111_API_AUTH = AUTOMATIC1111_API_AUTH
 
150
  else:
151
  url = form_data.AUTOMATIC1111_BASE_URL.strip("/")
152
  try:
153
+ r = requests.head(url)
 
154
  app.state.config.AUTOMATIC1111_BASE_URL = url
155
  except Exception as e:
156
+ raise HTTPException(status_code=400, detail="Invalid URL provided.")
157
 
158
  if form_data.COMFYUI_BASE_URL == None:
159
  app.state.config.COMFYUI_BASE_URL = COMFYUI_BASE_URL
 
162
 
163
  try:
164
  r = requests.head(url)
 
165
  app.state.config.COMFYUI_BASE_URL = url
166
  except Exception as e:
167
+ raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
168
 
169
  if form_data.AUTOMATIC1111_API_AUTH == None:
170
  app.state.config.AUTOMATIC1111_API_AUTH = AUTOMATIC1111_API_AUTH
backend/apps/images/utils/comfyui.py CHANGED
@@ -1,5 +1,6 @@
1
  import asyncio
2
  import websocket # NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
 
3
  import json
4
  import urllib.request
5
  import urllib.parse
@@ -397,9 +398,7 @@ async def comfyui_generate_image(
397
  return None
398
 
399
  try:
400
- images = await asyncio.to_thread(
401
- get_images, ws, comfyui_prompt, client_id, base_url
402
- )
403
  except Exception as e:
404
  log.exception(f"Error while receiving images: {e}")
405
  images = None
 
1
  import asyncio
2
  import websocket # NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
3
+ import uuid
4
  import json
5
  import urllib.request
6
  import urllib.parse
 
398
  return None
399
 
400
  try:
401
+ images = await asyncio.to_thread(get_images, ws, comfyui_prompt, client_id, base_url)
 
 
402
  except Exception as e:
403
  log.exception(f"Error while receiving images: {e}")
404
  images = None
backend/apps/ollama/main.py CHANGED
@@ -1,21 +1,27 @@
1
  from fastapi import (
2
  FastAPI,
3
  Request,
 
4
  HTTPException,
5
  Depends,
 
6
  UploadFile,
7
  File,
 
8
  )
9
  from fastapi.middleware.cors import CORSMiddleware
10
  from fastapi.responses import StreamingResponse
 
11
 
12
  from pydantic import BaseModel, ConfigDict
13
 
14
  import os
15
  import re
 
16
  import random
17
  import requests
18
  import json
 
19
  import aiohttp
20
  import asyncio
21
  import logging
@@ -26,11 +32,16 @@ from typing import Optional, List, Union
26
  from starlette.background import BackgroundTask
27
 
28
  from apps.webui.models.models import Models
 
29
  from constants import ERROR_MESSAGES
30
  from utils.utils import (
 
 
31
  get_verified_user,
32
  get_admin_user,
33
  )
 
 
34
 
35
  from config import (
36
  SRC_LOG_LEVELS,
@@ -42,12 +53,7 @@ from config import (
42
  UPLOAD_DIR,
43
  AppConfig,
44
  )
45
- from utils.misc import (
46
- calculate_sha256,
47
- apply_model_params_to_body_ollama,
48
- apply_model_params_to_body_openai,
49
- apply_model_system_prompt_to_body,
50
- )
51
 
52
  log = logging.getLogger(__name__)
53
  log.setLevel(SRC_LOG_LEVELS["OLLAMA"])
@@ -177,7 +183,7 @@ async def post_streaming_url(url: str, payload: str, stream: bool = True):
177
  res = await r.json()
178
  if "error" in res:
179
  error_detail = f"Ollama: {res['error']}"
180
- except Exception:
181
  error_detail = f"Ollama: {e}"
182
 
183
  raise HTTPException(
@@ -232,7 +238,7 @@ async def get_all_models():
232
  async def get_ollama_tags(
233
  url_idx: Optional[int] = None, user=Depends(get_verified_user)
234
  ):
235
- if url_idx is None:
236
  models = await get_all_models()
237
 
238
  if app.state.config.ENABLE_MODEL_FILTER:
@@ -263,7 +269,7 @@ async def get_ollama_tags(
263
  res = r.json()
264
  if "error" in res:
265
  error_detail = f"Ollama: {res['error']}"
266
- except Exception:
267
  error_detail = f"Ollama: {e}"
268
 
269
  raise HTTPException(
@@ -276,7 +282,8 @@ async def get_ollama_tags(
276
  @app.get("/api/version/{url_idx}")
277
  async def get_ollama_versions(url_idx: Optional[int] = None):
278
  if app.state.config.ENABLE_OLLAMA_API:
279
- if url_idx is None:
 
280
  # returns lowest version
281
  tasks = [
282
  fetch_url(f"{url}/api/version")
@@ -316,7 +323,7 @@ async def get_ollama_versions(url_idx: Optional[int] = None):
316
  res = r.json()
317
  if "error" in res:
318
  error_detail = f"Ollama: {res['error']}"
319
- except Exception:
320
  error_detail = f"Ollama: {e}"
321
 
322
  raise HTTPException(
@@ -339,6 +346,8 @@ async def pull_model(
339
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
340
  log.info(f"url: {url}")
341
 
 
 
342
  # Admin should be able to pull models from any source
343
  payload = {**form_data.model_dump(exclude_none=True), "insecure": True}
344
 
@@ -358,7 +367,7 @@ async def push_model(
358
  url_idx: Optional[int] = None,
359
  user=Depends(get_admin_user),
360
  ):
361
- if url_idx is None:
362
  if form_data.name in app.state.MODELS:
363
  url_idx = app.state.MODELS[form_data.name]["urls"][0]
364
  else:
@@ -408,7 +417,7 @@ async def copy_model(
408
  url_idx: Optional[int] = None,
409
  user=Depends(get_admin_user),
410
  ):
411
- if url_idx is None:
412
  if form_data.source in app.state.MODELS:
413
  url_idx = app.state.MODELS[form_data.source]["urls"][0]
414
  else:
@@ -419,13 +428,13 @@ async def copy_model(
419
 
420
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
421
  log.info(f"url: {url}")
422
- r = requests.request(
423
- method="POST",
424
- url=f"{url}/api/copy",
425
- data=form_data.model_dump_json(exclude_none=True).encode(),
426
- )
427
 
428
  try:
 
 
 
 
 
429
  r.raise_for_status()
430
 
431
  log.debug(f"r.text: {r.text}")
@@ -439,7 +448,7 @@ async def copy_model(
439
  res = r.json()
440
  if "error" in res:
441
  error_detail = f"Ollama: {res['error']}"
442
- except Exception:
443
  error_detail = f"Ollama: {e}"
444
 
445
  raise HTTPException(
@@ -455,7 +464,7 @@ async def delete_model(
455
  url_idx: Optional[int] = None,
456
  user=Depends(get_admin_user),
457
  ):
458
- if url_idx is None:
459
  if form_data.name in app.state.MODELS:
460
  url_idx = app.state.MODELS[form_data.name]["urls"][0]
461
  else:
@@ -467,12 +476,12 @@ async def delete_model(
467
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
468
  log.info(f"url: {url}")
469
 
470
- r = requests.request(
471
- method="DELETE",
472
- url=f"{url}/api/delete",
473
- data=form_data.model_dump_json(exclude_none=True).encode(),
474
- )
475
  try:
 
 
 
 
 
476
  r.raise_for_status()
477
 
478
  log.debug(f"r.text: {r.text}")
@@ -486,7 +495,7 @@ async def delete_model(
486
  res = r.json()
487
  if "error" in res:
488
  error_detail = f"Ollama: {res['error']}"
489
- except Exception:
490
  error_detail = f"Ollama: {e}"
491
 
492
  raise HTTPException(
@@ -507,12 +516,12 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_verified_us
507
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
508
  log.info(f"url: {url}")
509
 
510
- r = requests.request(
511
- method="POST",
512
- url=f"{url}/api/show",
513
- data=form_data.model_dump_json(exclude_none=True).encode(),
514
- )
515
  try:
 
 
 
 
 
516
  r.raise_for_status()
517
 
518
  return r.json()
@@ -524,7 +533,7 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_verified_us
524
  res = r.json()
525
  if "error" in res:
526
  error_detail = f"Ollama: {res['error']}"
527
- except Exception:
528
  error_detail = f"Ollama: {e}"
529
 
530
  raise HTTPException(
@@ -547,7 +556,7 @@ async def generate_embeddings(
547
  url_idx: Optional[int] = None,
548
  user=Depends(get_verified_user),
549
  ):
550
- if url_idx is None:
551
  model = form_data.model
552
 
553
  if ":" not in model:
@@ -564,12 +573,12 @@ async def generate_embeddings(
564
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
565
  log.info(f"url: {url}")
566
 
567
- r = requests.request(
568
- method="POST",
569
- url=f"{url}/api/embeddings",
570
- data=form_data.model_dump_json(exclude_none=True).encode(),
571
- )
572
  try:
 
 
 
 
 
573
  r.raise_for_status()
574
 
575
  return r.json()
@@ -581,7 +590,7 @@ async def generate_embeddings(
581
  res = r.json()
582
  if "error" in res:
583
  error_detail = f"Ollama: {res['error']}"
584
- except Exception:
585
  error_detail = f"Ollama: {e}"
586
 
587
  raise HTTPException(
@@ -594,9 +603,10 @@ def generate_ollama_embeddings(
594
  form_data: GenerateEmbeddingsForm,
595
  url_idx: Optional[int] = None,
596
  ):
 
597
  log.info(f"generate_ollama_embeddings {form_data}")
598
 
599
- if url_idx is None:
600
  model = form_data.model
601
 
602
  if ":" not in model:
@@ -613,12 +623,12 @@ def generate_ollama_embeddings(
613
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
614
  log.info(f"url: {url}")
615
 
616
- r = requests.request(
617
- method="POST",
618
- url=f"{url}/api/embeddings",
619
- data=form_data.model_dump_json(exclude_none=True).encode(),
620
- )
621
  try:
 
 
 
 
 
622
  r.raise_for_status()
623
 
624
  data = r.json()
@@ -628,7 +638,7 @@ def generate_ollama_embeddings(
628
  if "embedding" in data:
629
  return data["embedding"]
630
  else:
631
- raise Exception("Something went wrong :/")
632
  except Exception as e:
633
  log.exception(e)
634
  error_detail = "Open WebUI: Server Connection Error"
@@ -637,10 +647,10 @@ def generate_ollama_embeddings(
637
  res = r.json()
638
  if "error" in res:
639
  error_detail = f"Ollama: {res['error']}"
640
- except Exception:
641
  error_detail = f"Ollama: {e}"
642
 
643
- raise Exception(error_detail)
644
 
645
 
646
  class GenerateCompletionForm(BaseModel):
@@ -664,7 +674,8 @@ async def generate_completion(
664
  url_idx: Optional[int] = None,
665
  user=Depends(get_verified_user),
666
  ):
667
- if url_idx is None:
 
668
  model = form_data.model
669
 
670
  if ":" not in model:
@@ -702,18 +713,6 @@ class GenerateChatCompletionForm(BaseModel):
702
  keep_alive: Optional[Union[int, str]] = None
703
 
704
 
705
- def get_ollama_url(url_idx: Optional[int], model: str):
706
- if url_idx is None:
707
- if model not in app.state.MODELS:
708
- raise HTTPException(
709
- status_code=400,
710
- detail=ERROR_MESSAGES.MODEL_NOT_FOUND(model),
711
- )
712
- url_idx = random.choice(app.state.MODELS[model]["urls"])
713
- url = app.state.config.OLLAMA_BASE_URLS[url_idx]
714
- return url
715
-
716
-
717
  @app.post("/api/chat")
718
  @app.post("/api/chat/{url_idx}")
719
  async def generate_chat_completion(
@@ -721,7 +720,12 @@ async def generate_chat_completion(
721
  url_idx: Optional[int] = None,
722
  user=Depends(get_verified_user),
723
  ):
724
- log.debug(f"{form_data.model_dump_json(exclude_none=True).encode()}=")
 
 
 
 
 
725
 
726
  payload = {
727
  **form_data.model_dump(exclude_none=True, exclude=["metadata"]),
@@ -736,21 +740,185 @@ async def generate_chat_completion(
736
  if model_info.base_model_id:
737
  payload["model"] = model_info.base_model_id
738
 
739
- params = model_info.params.model_dump()
740
 
741
- if params:
742
  if payload.get("options") is None:
743
  payload["options"] = {}
744
 
745
- payload["options"] = apply_model_params_to_body_ollama(
746
- params, payload["options"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
747
  )
748
- payload = apply_model_system_prompt_to_body(params, payload, user)
749
 
750
- if ":" not in payload["model"]:
751
- payload["model"] = f"{payload['model']}:latest"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
752
 
753
- url = get_ollama_url(url_idx, payload["model"])
754
  log.info(f"url: {url}")
755
  log.debug(payload)
756
 
@@ -784,28 +952,83 @@ async def generate_openai_chat_completion(
784
  url_idx: Optional[int] = None,
785
  user=Depends(get_verified_user),
786
  ):
787
- completion_form = OpenAIChatCompletionForm(**form_data)
788
- payload = {**completion_form.model_dump(exclude_none=True, exclude=["metadata"])}
 
789
  if "metadata" in payload:
790
  del payload["metadata"]
791
 
792
- model_id = completion_form.model
793
  model_info = Models.get_model_by_id(model_id)
794
 
795
  if model_info:
796
  if model_info.base_model_id:
797
  payload["model"] = model_info.base_model_id
798
 
799
- params = model_info.params.model_dump()
800
 
801
- if params:
802
- payload = apply_model_params_to_body_openai(params, payload)
803
- payload = apply_model_system_prompt_to_body(params, payload, user)
 
 
 
 
 
 
 
 
 
 
 
 
 
804
 
805
- if ":" not in payload["model"]:
806
- payload["model"] = f"{payload['model']}:latest"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
807
 
808
- url = get_ollama_url(url_idx, payload["model"])
 
 
 
 
 
 
 
 
 
 
 
 
809
  log.info(f"url: {url}")
810
 
811
  return await post_streaming_url(
@@ -821,7 +1044,7 @@ async def get_openai_models(
821
  url_idx: Optional[int] = None,
822
  user=Depends(get_verified_user),
823
  ):
824
- if url_idx is None:
825
  models = await get_all_models()
826
 
827
  if app.state.config.ENABLE_MODEL_FILTER:
@@ -876,7 +1099,7 @@ async def get_openai_models(
876
  res = r.json()
877
  if "error" in res:
878
  error_detail = f"Ollama: {res['error']}"
879
- except Exception:
880
  error_detail = f"Ollama: {e}"
881
 
882
  raise HTTPException(
@@ -902,6 +1125,7 @@ def parse_huggingface_url(hf_url):
902
  path_components = parsed_url.path.split("/")
903
 
904
  # Extract the desired output
 
905
  model_file = path_components[-1]
906
 
907
  return model_file
@@ -966,6 +1190,7 @@ async def download_model(
966
  url_idx: Optional[int] = None,
967
  user=Depends(get_admin_user),
968
  ):
 
969
  allowed_hosts = ["https://huggingface.co/", "https://github.com/"]
970
 
971
  if not any(form_data.url.startswith(host) for host in allowed_hosts):
@@ -974,7 +1199,7 @@ async def download_model(
974
  detail="Invalid file_url. Only URLs from allowed hosts are permitted.",
975
  )
976
 
977
- if url_idx is None:
978
  url_idx = 0
979
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
980
 
@@ -997,7 +1222,7 @@ def upload_model(
997
  url_idx: Optional[int] = None,
998
  user=Depends(get_admin_user),
999
  ):
1000
- if url_idx is None:
1001
  url_idx = 0
1002
  ollama_url = app.state.config.OLLAMA_BASE_URLS[url_idx]
1003
 
 
1
  from fastapi import (
2
  FastAPI,
3
  Request,
4
+ Response,
5
  HTTPException,
6
  Depends,
7
+ status,
8
  UploadFile,
9
  File,
10
+ BackgroundTasks,
11
  )
12
  from fastapi.middleware.cors import CORSMiddleware
13
  from fastapi.responses import StreamingResponse
14
+ from fastapi.concurrency import run_in_threadpool
15
 
16
  from pydantic import BaseModel, ConfigDict
17
 
18
  import os
19
  import re
20
+ import copy
21
  import random
22
  import requests
23
  import json
24
+ import uuid
25
  import aiohttp
26
  import asyncio
27
  import logging
 
32
  from starlette.background import BackgroundTask
33
 
34
  from apps.webui.models.models import Models
35
+ from apps.webui.models.users import Users
36
  from constants import ERROR_MESSAGES
37
  from utils.utils import (
38
+ decode_token,
39
+ get_current_user,
40
  get_verified_user,
41
  get_admin_user,
42
  )
43
+ from utils.task import prompt_template
44
+
45
 
46
  from config import (
47
  SRC_LOG_LEVELS,
 
53
  UPLOAD_DIR,
54
  AppConfig,
55
  )
56
+ from utils.misc import calculate_sha256, add_or_update_system_message
 
 
 
 
 
57
 
58
  log = logging.getLogger(__name__)
59
  log.setLevel(SRC_LOG_LEVELS["OLLAMA"])
 
183
  res = await r.json()
184
  if "error" in res:
185
  error_detail = f"Ollama: {res['error']}"
186
+ except:
187
  error_detail = f"Ollama: {e}"
188
 
189
  raise HTTPException(
 
238
  async def get_ollama_tags(
239
  url_idx: Optional[int] = None, user=Depends(get_verified_user)
240
  ):
241
+ if url_idx == None:
242
  models = await get_all_models()
243
 
244
  if app.state.config.ENABLE_MODEL_FILTER:
 
269
  res = r.json()
270
  if "error" in res:
271
  error_detail = f"Ollama: {res['error']}"
272
+ except:
273
  error_detail = f"Ollama: {e}"
274
 
275
  raise HTTPException(
 
282
  @app.get("/api/version/{url_idx}")
283
  async def get_ollama_versions(url_idx: Optional[int] = None):
284
  if app.state.config.ENABLE_OLLAMA_API:
285
+ if url_idx == None:
286
+
287
  # returns lowest version
288
  tasks = [
289
  fetch_url(f"{url}/api/version")
 
323
  res = r.json()
324
  if "error" in res:
325
  error_detail = f"Ollama: {res['error']}"
326
+ except:
327
  error_detail = f"Ollama: {e}"
328
 
329
  raise HTTPException(
 
346
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
347
  log.info(f"url: {url}")
348
 
349
+ r = None
350
+
351
  # Admin should be able to pull models from any source
352
  payload = {**form_data.model_dump(exclude_none=True), "insecure": True}
353
 
 
367
  url_idx: Optional[int] = None,
368
  user=Depends(get_admin_user),
369
  ):
370
+ if url_idx == None:
371
  if form_data.name in app.state.MODELS:
372
  url_idx = app.state.MODELS[form_data.name]["urls"][0]
373
  else:
 
417
  url_idx: Optional[int] = None,
418
  user=Depends(get_admin_user),
419
  ):
420
+ if url_idx == None:
421
  if form_data.source in app.state.MODELS:
422
  url_idx = app.state.MODELS[form_data.source]["urls"][0]
423
  else:
 
428
 
429
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
430
  log.info(f"url: {url}")
 
 
 
 
 
431
 
432
  try:
433
+ r = requests.request(
434
+ method="POST",
435
+ url=f"{url}/api/copy",
436
+ data=form_data.model_dump_json(exclude_none=True).encode(),
437
+ )
438
  r.raise_for_status()
439
 
440
  log.debug(f"r.text: {r.text}")
 
448
  res = r.json()
449
  if "error" in res:
450
  error_detail = f"Ollama: {res['error']}"
451
+ except:
452
  error_detail = f"Ollama: {e}"
453
 
454
  raise HTTPException(
 
464
  url_idx: Optional[int] = None,
465
  user=Depends(get_admin_user),
466
  ):
467
+ if url_idx == None:
468
  if form_data.name in app.state.MODELS:
469
  url_idx = app.state.MODELS[form_data.name]["urls"][0]
470
  else:
 
476
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
477
  log.info(f"url: {url}")
478
 
 
 
 
 
 
479
  try:
480
+ r = requests.request(
481
+ method="DELETE",
482
+ url=f"{url}/api/delete",
483
+ data=form_data.model_dump_json(exclude_none=True).encode(),
484
+ )
485
  r.raise_for_status()
486
 
487
  log.debug(f"r.text: {r.text}")
 
495
  res = r.json()
496
  if "error" in res:
497
  error_detail = f"Ollama: {res['error']}"
498
+ except:
499
  error_detail = f"Ollama: {e}"
500
 
501
  raise HTTPException(
 
516
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
517
  log.info(f"url: {url}")
518
 
 
 
 
 
 
519
  try:
520
+ r = requests.request(
521
+ method="POST",
522
+ url=f"{url}/api/show",
523
+ data=form_data.model_dump_json(exclude_none=True).encode(),
524
+ )
525
  r.raise_for_status()
526
 
527
  return r.json()
 
533
  res = r.json()
534
  if "error" in res:
535
  error_detail = f"Ollama: {res['error']}"
536
+ except:
537
  error_detail = f"Ollama: {e}"
538
 
539
  raise HTTPException(
 
556
  url_idx: Optional[int] = None,
557
  user=Depends(get_verified_user),
558
  ):
559
+ if url_idx == None:
560
  model = form_data.model
561
 
562
  if ":" not in model:
 
573
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
574
  log.info(f"url: {url}")
575
 
 
 
 
 
 
576
  try:
577
+ r = requests.request(
578
+ method="POST",
579
+ url=f"{url}/api/embeddings",
580
+ data=form_data.model_dump_json(exclude_none=True).encode(),
581
+ )
582
  r.raise_for_status()
583
 
584
  return r.json()
 
590
  res = r.json()
591
  if "error" in res:
592
  error_detail = f"Ollama: {res['error']}"
593
+ except:
594
  error_detail = f"Ollama: {e}"
595
 
596
  raise HTTPException(
 
603
  form_data: GenerateEmbeddingsForm,
604
  url_idx: Optional[int] = None,
605
  ):
606
+
607
  log.info(f"generate_ollama_embeddings {form_data}")
608
 
609
+ if url_idx == None:
610
  model = form_data.model
611
 
612
  if ":" not in model:
 
623
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
624
  log.info(f"url: {url}")
625
 
 
 
 
 
 
626
  try:
627
+ r = requests.request(
628
+ method="POST",
629
+ url=f"{url}/api/embeddings",
630
+ data=form_data.model_dump_json(exclude_none=True).encode(),
631
+ )
632
  r.raise_for_status()
633
 
634
  data = r.json()
 
638
  if "embedding" in data:
639
  return data["embedding"]
640
  else:
641
+ raise "Something went wrong :/"
642
  except Exception as e:
643
  log.exception(e)
644
  error_detail = "Open WebUI: Server Connection Error"
 
647
  res = r.json()
648
  if "error" in res:
649
  error_detail = f"Ollama: {res['error']}"
650
+ except:
651
  error_detail = f"Ollama: {e}"
652
 
653
+ raise error_detail
654
 
655
 
656
  class GenerateCompletionForm(BaseModel):
 
674
  url_idx: Optional[int] = None,
675
  user=Depends(get_verified_user),
676
  ):
677
+
678
+ if url_idx == None:
679
  model = form_data.model
680
 
681
  if ":" not in model:
 
713
  keep_alive: Optional[Union[int, str]] = None
714
 
715
 
 
 
 
 
 
 
 
 
 
 
 
 
716
  @app.post("/api/chat")
717
  @app.post("/api/chat/{url_idx}")
718
  async def generate_chat_completion(
 
720
  url_idx: Optional[int] = None,
721
  user=Depends(get_verified_user),
722
  ):
723
+
724
+ log.debug(
725
+ "form_data.model_dump_json(exclude_none=True).encode(): {0} ".format(
726
+ form_data.model_dump_json(exclude_none=True).encode()
727
+ )
728
+ )
729
 
730
  payload = {
731
  **form_data.model_dump(exclude_none=True, exclude=["metadata"]),
 
740
  if model_info.base_model_id:
741
  payload["model"] = model_info.base_model_id
742
 
743
+ model_info.params = model_info.params.model_dump()
744
 
745
+ if model_info.params:
746
  if payload.get("options") is None:
747
  payload["options"] = {}
748
 
749
+ if (
750
+ model_info.params.get("mirostat", None)
751
+ and payload["options"].get("mirostat") is None
752
+ ):
753
+ payload["options"]["mirostat"] = model_info.params.get("mirostat", None)
754
+
755
+ if (
756
+ model_info.params.get("mirostat_eta", None)
757
+ and payload["options"].get("mirostat_eta") is None
758
+ ):
759
+ payload["options"]["mirostat_eta"] = model_info.params.get(
760
+ "mirostat_eta", None
761
+ )
762
+
763
+ if (
764
+ model_info.params.get("mirostat_tau", None)
765
+ and payload["options"].get("mirostat_tau") is None
766
+ ):
767
+ payload["options"]["mirostat_tau"] = model_info.params.get(
768
+ "mirostat_tau", None
769
+ )
770
+
771
+ if (
772
+ model_info.params.get("num_ctx", None)
773
+ and payload["options"].get("num_ctx") is None
774
+ ):
775
+ payload["options"]["num_ctx"] = model_info.params.get("num_ctx", None)
776
+
777
+ if (
778
+ model_info.params.get("num_batch", None)
779
+ and payload["options"].get("num_batch") is None
780
+ ):
781
+ payload["options"]["num_batch"] = model_info.params.get(
782
+ "num_batch", None
783
+ )
784
+
785
+ if (
786
+ model_info.params.get("num_keep", None)
787
+ and payload["options"].get("num_keep") is None
788
+ ):
789
+ payload["options"]["num_keep"] = model_info.params.get("num_keep", None)
790
+
791
+ if (
792
+ model_info.params.get("repeat_last_n", None)
793
+ and payload["options"].get("repeat_last_n") is None
794
+ ):
795
+ payload["options"]["repeat_last_n"] = model_info.params.get(
796
+ "repeat_last_n", None
797
+ )
798
+
799
+ if (
800
+ model_info.params.get("frequency_penalty", None)
801
+ and payload["options"].get("frequency_penalty") is None
802
+ ):
803
+ payload["options"]["repeat_penalty"] = model_info.params.get(
804
+ "frequency_penalty", None
805
+ )
806
+
807
+ if (
808
+ model_info.params.get("temperature", None) is not None
809
+ and payload["options"].get("temperature") is None
810
+ ):
811
+ payload["options"]["temperature"] = model_info.params.get(
812
+ "temperature", None
813
+ )
814
+
815
+ if (
816
+ model_info.params.get("seed", None) is not None
817
+ and payload["options"].get("seed") is None
818
+ ):
819
+ payload["options"]["seed"] = model_info.params.get("seed", None)
820
+
821
+ if (
822
+ model_info.params.get("stop", None)
823
+ and payload["options"].get("stop") is None
824
+ ):
825
+ payload["options"]["stop"] = (
826
+ [
827
+ bytes(stop, "utf-8").decode("unicode_escape")
828
+ for stop in model_info.params["stop"]
829
+ ]
830
+ if model_info.params.get("stop", None)
831
+ else None
832
+ )
833
+
834
+ if (
835
+ model_info.params.get("tfs_z", None)
836
+ and payload["options"].get("tfs_z") is None
837
+ ):
838
+ payload["options"]["tfs_z"] = model_info.params.get("tfs_z", None)
839
+
840
+ if (
841
+ model_info.params.get("max_tokens", None)
842
+ and payload["options"].get("max_tokens") is None
843
+ ):
844
+ payload["options"]["num_predict"] = model_info.params.get(
845
+ "max_tokens", None
846
+ )
847
+
848
+ if (
849
+ model_info.params.get("top_k", None)
850
+ and payload["options"].get("top_k") is None
851
+ ):
852
+ payload["options"]["top_k"] = model_info.params.get("top_k", None)
853
+
854
+ if (
855
+ model_info.params.get("top_p", None)
856
+ and payload["options"].get("top_p") is None
857
+ ):
858
+ payload["options"]["top_p"] = model_info.params.get("top_p", None)
859
+
860
+ if (
861
+ model_info.params.get("min_p", None)
862
+ and payload["options"].get("min_p") is None
863
+ ):
864
+ payload["options"]["min_p"] = model_info.params.get("min_p", None)
865
+
866
+ if (
867
+ model_info.params.get("use_mmap", None)
868
+ and payload["options"].get("use_mmap") is None
869
+ ):
870
+ payload["options"]["use_mmap"] = model_info.params.get("use_mmap", None)
871
+
872
+ if (
873
+ model_info.params.get("use_mlock", None)
874
+ and payload["options"].get("use_mlock") is None
875
+ ):
876
+ payload["options"]["use_mlock"] = model_info.params.get(
877
+ "use_mlock", None
878
+ )
879
+
880
+ if (
881
+ model_info.params.get("num_thread", None)
882
+ and payload["options"].get("num_thread") is None
883
+ ):
884
+ payload["options"]["num_thread"] = model_info.params.get(
885
+ "num_thread", None
886
+ )
887
+
888
+ system = model_info.params.get("system", None)
889
+ if system:
890
+ system = prompt_template(
891
+ system,
892
+ **(
893
+ {
894
+ "user_name": user.name,
895
+ "user_location": (
896
+ user.info.get("location") if user.info else None
897
+ ),
898
+ }
899
+ if user
900
+ else {}
901
+ ),
902
  )
 
903
 
904
+ if payload.get("messages"):
905
+ payload["messages"] = add_or_update_system_message(
906
+ system, payload["messages"]
907
+ )
908
+
909
+ if url_idx == None:
910
+ if ":" not in payload["model"]:
911
+ payload["model"] = f"{payload['model']}:latest"
912
+
913
+ if payload["model"] in app.state.MODELS:
914
+ url_idx = random.choice(app.state.MODELS[payload["model"]]["urls"])
915
+ else:
916
+ raise HTTPException(
917
+ status_code=400,
918
+ detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
919
+ )
920
 
921
+ url = app.state.config.OLLAMA_BASE_URLS[url_idx]
922
  log.info(f"url: {url}")
923
  log.debug(payload)
924
 
 
952
  url_idx: Optional[int] = None,
953
  user=Depends(get_verified_user),
954
  ):
955
+ form_data = OpenAIChatCompletionForm(**form_data)
956
+ payload = {**form_data.model_dump(exclude_none=True, exclude=["metadata"])}
957
+
958
  if "metadata" in payload:
959
  del payload["metadata"]
960
 
961
+ model_id = form_data.model
962
  model_info = Models.get_model_by_id(model_id)
963
 
964
  if model_info:
965
  if model_info.base_model_id:
966
  payload["model"] = model_info.base_model_id
967
 
968
+ model_info.params = model_info.params.model_dump()
969
 
970
+ if model_info.params:
971
+ payload["temperature"] = model_info.params.get("temperature", None)
972
+ payload["top_p"] = model_info.params.get("top_p", None)
973
+ payload["max_tokens"] = model_info.params.get("max_tokens", None)
974
+ payload["frequency_penalty"] = model_info.params.get(
975
+ "frequency_penalty", None
976
+ )
977
+ payload["seed"] = model_info.params.get("seed", None)
978
+ payload["stop"] = (
979
+ [
980
+ bytes(stop, "utf-8").decode("unicode_escape")
981
+ for stop in model_info.params["stop"]
982
+ ]
983
+ if model_info.params.get("stop", None)
984
+ else None
985
+ )
986
 
987
+ system = model_info.params.get("system", None)
988
+
989
+ if system:
990
+ system = prompt_template(
991
+ system,
992
+ **(
993
+ {
994
+ "user_name": user.name,
995
+ "user_location": (
996
+ user.info.get("location") if user.info else None
997
+ ),
998
+ }
999
+ if user
1000
+ else {}
1001
+ ),
1002
+ )
1003
+ # Check if the payload already has a system message
1004
+ # If not, add a system message to the payload
1005
+ if payload.get("messages"):
1006
+ for message in payload["messages"]:
1007
+ if message.get("role") == "system":
1008
+ message["content"] = system + message["content"]
1009
+ break
1010
+ else:
1011
+ payload["messages"].insert(
1012
+ 0,
1013
+ {
1014
+ "role": "system",
1015
+ "content": system,
1016
+ },
1017
+ )
1018
 
1019
+ if url_idx == None:
1020
+ if ":" not in payload["model"]:
1021
+ payload["model"] = f"{payload['model']}:latest"
1022
+
1023
+ if payload["model"] in app.state.MODELS:
1024
+ url_idx = random.choice(app.state.MODELS[payload["model"]]["urls"])
1025
+ else:
1026
+ raise HTTPException(
1027
+ status_code=400,
1028
+ detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
1029
+ )
1030
+
1031
+ url = app.state.config.OLLAMA_BASE_URLS[url_idx]
1032
  log.info(f"url: {url}")
1033
 
1034
  return await post_streaming_url(
 
1044
  url_idx: Optional[int] = None,
1045
  user=Depends(get_verified_user),
1046
  ):
1047
+ if url_idx == None:
1048
  models = await get_all_models()
1049
 
1050
  if app.state.config.ENABLE_MODEL_FILTER:
 
1099
  res = r.json()
1100
  if "error" in res:
1101
  error_detail = f"Ollama: {res['error']}"
1102
+ except:
1103
  error_detail = f"Ollama: {e}"
1104
 
1105
  raise HTTPException(
 
1125
  path_components = parsed_url.path.split("/")
1126
 
1127
  # Extract the desired output
1128
+ user_repo = "/".join(path_components[1:3])
1129
  model_file = path_components[-1]
1130
 
1131
  return model_file
 
1190
  url_idx: Optional[int] = None,
1191
  user=Depends(get_admin_user),
1192
  ):
1193
+
1194
  allowed_hosts = ["https://huggingface.co/", "https://github.com/"]
1195
 
1196
  if not any(form_data.url.startswith(host) for host in allowed_hosts):
 
1199
  detail="Invalid file_url. Only URLs from allowed hosts are permitted.",
1200
  )
1201
 
1202
+ if url_idx == None:
1203
  url_idx = 0
1204
  url = app.state.config.OLLAMA_BASE_URLS[url_idx]
1205
 
 
1222
  url_idx: Optional[int] = None,
1223
  user=Depends(get_admin_user),
1224
  ):
1225
+ if url_idx == None:
1226
  url_idx = 0
1227
  ollama_url = app.state.config.OLLAMA_BASE_URLS[url_idx]
1228
 
backend/apps/openai/main.py CHANGED
@@ -17,10 +17,7 @@ from utils.utils import (
17
  get_verified_user,
18
  get_admin_user,
19
  )
20
- from utils.misc import (
21
- apply_model_params_to_body_openai,
22
- apply_model_system_prompt_to_body,
23
- )
24
 
25
  from config import (
26
  SRC_LOG_LEVELS,
@@ -371,7 +368,7 @@ async def generate_chat_completion(
371
  payload["model"] = model_info.base_model_id
372
 
373
  params = model_info.params.model_dump()
374
- payload = apply_model_params_to_body_openai(params, payload)
375
  payload = apply_model_system_prompt_to_body(params, payload, user)
376
 
377
  model = app.state.MODELS[payload.get("model")]
 
17
  get_verified_user,
18
  get_admin_user,
19
  )
20
+ from utils.misc import apply_model_params_to_body, apply_model_system_prompt_to_body
 
 
 
21
 
22
  from config import (
23
  SRC_LOG_LEVELS,
 
368
  payload["model"] = model_info.base_model_id
369
 
370
  params = model_info.params.model_dump()
371
+ payload = apply_model_params_to_body(params, payload)
372
  payload = apply_model_system_prompt_to_body(params, payload, user)
373
 
374
  model = app.state.MODELS[payload.get("model")]
backend/apps/webui/main.py CHANGED
@@ -22,7 +22,7 @@ from apps.webui.utils import load_function_module_by_id
22
  from utils.misc import (
23
  openai_chat_chunk_message_template,
24
  openai_chat_completion_message_template,
25
- apply_model_params_to_body_openai,
26
  apply_model_system_prompt_to_body,
27
  )
28
 
@@ -291,7 +291,7 @@ async def generate_function_chat_completion(form_data, user):
291
  form_data["model"] = model_info.base_model_id
292
 
293
  params = model_info.params.model_dump()
294
- form_data = apply_model_params_to_body_openai(params, form_data)
295
  form_data = apply_model_system_prompt_to_body(params, form_data, user)
296
 
297
  pipe_id = get_pipe_id(form_data)
 
22
  from utils.misc import (
23
  openai_chat_chunk_message_template,
24
  openai_chat_completion_message_template,
25
+ apply_model_params_to_body,
26
  apply_model_system_prompt_to_body,
27
  )
28
 
 
291
  form_data["model"] = model_info.base_model_id
292
 
293
  params = model_info.params.model_dump()
294
+ form_data = apply_model_params_to_body(params, form_data)
295
  form_data = apply_model_system_prompt_to_body(params, form_data, user)
296
 
297
  pipe_id = get_pipe_id(form_data)
backend/requirements.txt CHANGED
@@ -11,7 +11,7 @@ python-jose==3.3.0
11
  passlib[bcrypt]==1.7.4
12
 
13
  requests==2.32.3
14
- aiohttp==3.10.2
15
 
16
  sqlalchemy==2.0.31
17
  alembic==1.13.2
@@ -34,12 +34,12 @@ anthropic
34
  google-generativeai==0.7.2
35
  tiktoken
36
 
37
- langchain==0.2.12
38
  langchain-community==0.2.10
39
  langchain-chroma==0.1.2
40
 
41
  fake-useragent==1.5.1
42
- chromadb==0.5.5
43
  sentence-transformers==3.0.1
44
  pypdf==4.3.1
45
  docx2txt==0.8
@@ -62,11 +62,11 @@ rank-bm25==0.2.2
62
 
63
  faster-whisper==1.0.2
64
 
65
- PyJWT[crypto]==2.9.0
66
  authlib==1.3.1
67
 
68
  black==24.8.0
69
- langfuse==2.43.3
70
  youtube-transcript-api==0.6.2
71
  pytube==15.0.0
72
 
@@ -76,5 +76,5 @@ duckduckgo-search~=6.2.1
76
 
77
  ## Tests
78
  docker~=7.1.0
79
- pytest~=8.3.2
80
  pytest-docker~=3.1.1
 
11
  passlib[bcrypt]==1.7.4
12
 
13
  requests==2.32.3
14
+ aiohttp==3.9.5
15
 
16
  sqlalchemy==2.0.31
17
  alembic==1.13.2
 
34
  google-generativeai==0.7.2
35
  tiktoken
36
 
37
+ langchain==0.2.11
38
  langchain-community==0.2.10
39
  langchain-chroma==0.1.2
40
 
41
  fake-useragent==1.5.1
42
+ chromadb==0.5.4
43
  sentence-transformers==3.0.1
44
  pypdf==4.3.1
45
  docx2txt==0.8
 
62
 
63
  faster-whisper==1.0.2
64
 
65
+ PyJWT[crypto]==2.8.0
66
  authlib==1.3.1
67
 
68
  black==24.8.0
69
+ langfuse==2.39.2
70
  youtube-transcript-api==0.6.2
71
  pytube==15.0.0
72
 
 
76
 
77
  ## Tests
78
  docker~=7.1.0
79
+ pytest~=8.2.2
80
  pytest-docker~=3.1.1
backend/utils/misc.py CHANGED
@@ -2,7 +2,7 @@ from pathlib import Path
2
  import hashlib
3
  import re
4
  from datetime import timedelta
5
- from typing import Optional, List, Tuple, Callable
6
  import uuid
7
  import time
8
 
@@ -135,21 +135,10 @@ def apply_model_system_prompt_to_body(params: dict, form_data: dict, user) -> di
135
 
136
 
137
  # inplace function: form_data is modified
138
- def apply_model_params_to_body(
139
- params: dict, form_data: dict, mappings: dict[str, Callable]
140
- ) -> dict:
141
  if not params:
142
  return form_data
143
 
144
- for key, cast_func in mappings.items():
145
- if (value := params.get(key)) is not None:
146
- form_data[key] = cast_func(value)
147
-
148
- return form_data
149
-
150
-
151
- # inplace function: form_data is modified
152
- def apply_model_params_to_body_openai(params: dict, form_data: dict) -> dict:
153
  mappings = {
154
  "temperature": float,
155
  "top_p": int,
@@ -158,40 +147,10 @@ def apply_model_params_to_body_openai(params: dict, form_data: dict) -> dict:
158
  "seed": lambda x: x,
159
  "stop": lambda x: [bytes(s, "utf-8").decode("unicode_escape") for s in x],
160
  }
161
- return apply_model_params_to_body(params, form_data, mappings)
162
-
163
-
164
- def apply_model_params_to_body_ollama(params: dict, form_data: dict) -> dict:
165
- opts = [
166
- "temperature",
167
- "top_p",
168
- "seed",
169
- "mirostat",
170
- "mirostat_eta",
171
- "mirostat_tau",
172
- "num_ctx",
173
- "num_batch",
174
- "num_keep",
175
- "repeat_last_n",
176
- "tfs_z",
177
- "top_k",
178
- "min_p",
179
- "use_mmap",
180
- "use_mlock",
181
- "num_thread",
182
- "num_gpu",
183
- ]
184
- mappings = {i: lambda x: x for i in opts}
185
- form_data = apply_model_params_to_body(params, form_data, mappings)
186
-
187
- name_differences = {
188
- "max_tokens": "num_predict",
189
- "frequency_penalty": "repeat_penalty",
190
- }
191
 
192
- for key, value in name_differences.items():
193
- if (param := params.get(key, None)) is not None:
194
- form_data[value] = param
195
 
196
  return form_data
197
 
 
2
  import hashlib
3
  import re
4
  from datetime import timedelta
5
+ from typing import Optional, List, Tuple
6
  import uuid
7
  import time
8
 
 
135
 
136
 
137
  # inplace function: form_data is modified
138
+ def apply_model_params_to_body(params: dict, form_data: dict) -> dict:
 
 
139
  if not params:
140
  return form_data
141
 
 
 
 
 
 
 
 
 
 
142
  mappings = {
143
  "temperature": float,
144
  "top_p": int,
 
147
  "seed": lambda x: x,
148
  "stop": lambda x: [bytes(s, "utf-8").decode("unicode_escape") for s in x],
149
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
150
 
151
+ for key, cast_func in mappings.items():
152
+ if (value := params.get(key)) is not None:
153
+ form_data[key] = cast_func(value)
154
 
155
  return form_data
156
 
cypress/e2e/chat.cy.ts CHANGED
@@ -38,10 +38,9 @@ describe('Settings', () => {
38
  // User's message should be visible
39
  cy.get('.chat-user').should('exist');
40
  // Wait for the response
41
- // .chat-assistant is created after the first token is received
42
- cy.get('.chat-assistant', { timeout: 10_000 }).should('exist');
43
- // Generation Info is created after the stop token is received
44
- cy.get('div[aria-label="Generation Info"]', { timeout: 120_000 }).should('exist');
45
  });
46
 
47
  it('user can share chat', () => {
@@ -58,24 +57,21 @@ describe('Settings', () => {
58
  // User's message should be visible
59
  cy.get('.chat-user').should('exist');
60
  // Wait for the response
61
- // .chat-assistant is created after the first token is received
62
- cy.get('.chat-assistant', { timeout: 10_000 }).should('exist');
63
- // Generation Info is created after the stop token is received
64
- cy.get('div[aria-label="Generation Info"]', { timeout: 120_000 }).should('exist');
65
  // spy on requests
66
  const spy = cy.spy();
67
- cy.intercept('POST', '/api/v1/chats/**/share', spy);
68
  // Open context menu
69
  cy.get('#chat-context-menu-button').click();
70
  // Click share button
71
  cy.get('#chat-share-button').click();
72
  // Check if the share dialog is visible
73
  cy.get('#copy-and-share-chat-button').should('exist');
74
- // Click the copy button
75
- cy.get('#copy-and-share-chat-button').click();
76
- cy.wrap({}, { timeout: 5_000 }).should(() => {
77
- // Check if the share request was made
78
- expect(spy).to.be.callCount(1);
79
  });
80
  });
81
 
@@ -93,10 +89,9 @@ describe('Settings', () => {
93
  // User's message should be visible
94
  cy.get('.chat-user').should('exist');
95
  // Wait for the response
96
- // .chat-assistant is created after the first token is received
97
- cy.get('.chat-assistant', { timeout: 10_000 }).should('exist');
98
- // Generation Info is created after the stop token is received
99
- cy.get('div[aria-label="Generation Info"]', { timeout: 120_000 }).should('exist');
100
  // Click on the generate image button
101
  cy.get('[aria-label="Generate Image"]').click();
102
  // Wait for image to be visible
 
38
  // User's message should be visible
39
  cy.get('.chat-user').should('exist');
40
  // Wait for the response
41
+ cy.get('.chat-assistant', { timeout: 120_000 }) // .chat-assistant is created after the first token is received
42
+ .find('div[aria-label="Generation Info"]', { timeout: 120_000 }) // Generation Info is created after the stop token is received
43
+ .should('exist');
 
44
  });
45
 
46
  it('user can share chat', () => {
 
57
  // User's message should be visible
58
  cy.get('.chat-user').should('exist');
59
  // Wait for the response
60
+ cy.get('.chat-assistant', { timeout: 120_000 }) // .chat-assistant is created after the first token is received
61
+ .find('div[aria-label="Generation Info"]', { timeout: 120_000 }) // Generation Info is created after the stop token is received
62
+ .should('exist');
 
63
  // spy on requests
64
  const spy = cy.spy();
65
+ cy.intercept('GET', '/api/v1/chats/*', spy);
66
  // Open context menu
67
  cy.get('#chat-context-menu-button').click();
68
  // Click share button
69
  cy.get('#chat-share-button').click();
70
  // Check if the share dialog is visible
71
  cy.get('#copy-and-share-chat-button').should('exist');
72
+ cy.wrap({}, { timeout: 5000 }).should(() => {
73
+ // Check if the request was made twice (once for to replace chat object and once more due to change event)
74
+ expect(spy).to.be.callCount(2);
 
 
75
  });
76
  });
77
 
 
89
  // User's message should be visible
90
  cy.get('.chat-user').should('exist');
91
  // Wait for the response
92
+ cy.get('.chat-assistant', { timeout: 120_000 }) // .chat-assistant is created after the first token is received
93
+ .find('div[aria-label="Generation Info"]', { timeout: 120_000 }) // Generation Info is created after the stop token is received
94
+ .should('exist');
 
95
  // Click on the generate image button
96
  cy.get('[aria-label="Generate Image"]').click();
97
  // Wait for image to be visible
docs/CONTRIBUTING.md CHANGED
@@ -22,6 +22,7 @@ Noticed something off? Have an idea? Check our [Issues tab](https://github.com/o
22
  > [!IMPORTANT]
23
  >
24
  > - **Template Compliance:** Please be aware that failure to follow the provided issue template, or not providing the requested information at all, will likely result in your issue being closed without further consideration. This approach is critical for maintaining the manageability and integrity of issue tracking.
 
25
  > - **Detail is Key:** To ensure your issue is understood and can be effectively addressed, it's imperative to include comprehensive details. Descriptions should be clear, including steps to reproduce, expected outcomes, and actual results. Lack of sufficient detail may hinder our ability to resolve your issue.
26
 
27
  ### 🧭 Scope of Support
 
22
  > [!IMPORTANT]
23
  >
24
  > - **Template Compliance:** Please be aware that failure to follow the provided issue template, or not providing the requested information at all, will likely result in your issue being closed without further consideration. This approach is critical for maintaining the manageability and integrity of issue tracking.
25
+ >
26
  > - **Detail is Key:** To ensure your issue is understood and can be effectively addressed, it's imperative to include comprehensive details. Descriptions should be clear, including steps to reproduce, expected outcomes, and actual results. Lack of sufficient detail may hinder our ability to resolve your issue.
27
 
28
  ### 🧭 Scope of Support
pyproject.toml CHANGED
@@ -19,7 +19,7 @@ dependencies = [
19
  "passlib[bcrypt]==1.7.4",
20
 
21
  "requests==2.32.3",
22
- "aiohttp==3.10.2",
23
 
24
  "sqlalchemy==2.0.31",
25
  "alembic==1.13.2",
 
19
  "passlib[bcrypt]==1.7.4",
20
 
21
  "requests==2.32.3",
22
+ "aiohttp==3.9.5",
23
 
24
  "sqlalchemy==2.0.31",
25
  "alembic==1.13.2",
src/app.html CHANGED
@@ -1,4 +1,4 @@
1
- <!doctype html>
2
  <html lang="en">
3
  <head>
4
  <meta charset="utf-8" />
 
1
+ <!DOCTYPE html>
2
  <html lang="en">
3
  <head>
4
  <meta charset="utf-8" />
src/lib/apis/openai/index.ts CHANGED
@@ -260,7 +260,7 @@ export const getOpenAIModelsDirect = async (
260
  throw error;
261
  }
262
 
263
- const models = Array.isArray(res) ? res : (res?.data ?? null);
264
 
265
  return models
266
  .map((model) => ({ id: model.id, name: model.name ?? model.id, external: true }))
 
260
  throw error;
261
  }
262
 
263
+ const models = Array.isArray(res) ? res : res?.data ?? null;
264
 
265
  return models
266
  .map((model) => ({ id: model.id, name: model.name ?? model.id, external: true }))
src/lib/components/ChangelogModal.svelte CHANGED
@@ -75,12 +75,12 @@
75
  class="font-semibold uppercase text-xs {section === 'added'
76
  ? 'text-white bg-blue-600'
77
  : section === 'fixed'
78
- ? 'text-white bg-green-600'
79
- : section === 'changed'
80
- ? 'text-white bg-yellow-600'
81
- : section === 'removed'
82
- ? 'text-white bg-red-600'
83
- : ''} w-fit px-3 rounded-full my-2.5"
84
  >
85
  {section}
86
  </div>
 
75
  class="font-semibold uppercase text-xs {section === 'added'
76
  ? 'text-white bg-blue-600'
77
  : section === 'fixed'
78
+ ? 'text-white bg-green-600'
79
+ : section === 'changed'
80
+ ? 'text-white bg-yellow-600'
81
+ : section === 'removed'
82
+ ? 'text-white bg-red-600'
83
+ : ''} w-fit px-3 rounded-full my-2.5"
84
  >
85
  {section}
86
  </div>
src/lib/components/admin/Settings/Documents.svelte CHANGED
@@ -112,7 +112,7 @@
112
  url: OpenAIUrl,
113
  batch_size: OpenAIBatchSize
114
  }
115
- }
116
  : {})
117
  }).catch(async (error) => {
118
  toast.error(error);
 
112
  url: OpenAIUrl,
113
  batch_size: OpenAIBatchSize
114
  }
115
+ }
116
  : {})
117
  }).catch(async (error) => {
118
  toast.error(error);
src/lib/components/chat/Chat.svelte CHANGED
@@ -579,8 +579,8 @@
579
  let selectedModelIds = modelId
580
  ? [modelId]
581
  : atSelectedModel !== undefined
582
- ? [atSelectedModel.id]
583
- : selectedModels;
584
 
585
  // Create response messages for each selected model
586
  const responseMessageIds = {};
@@ -739,11 +739,11 @@
739
  ? await getAndUpdateUserLocation(localStorage.token)
740
  : undefined
741
  )}${
742
- (responseMessage?.userContext ?? null)
743
  ? `\n\nUser Context:\n${responseMessage?.userContext ?? ''}`
744
  : ''
745
  }`
746
- }
747
  : undefined,
748
  ...messages
749
  ]
@@ -811,10 +811,10 @@
811
  options: {
812
  ...(params ?? $settings.params ?? {}),
813
  stop:
814
- (params?.stop ?? $settings?.params?.stop ?? undefined)
815
  ? (params?.stop.split(',').map((token) => token.trim()) ?? $settings.params.stop).map(
816
  (str) => decodeURIComponent(JSON.parse('"' + str.replace(/\"/g, '\\"') + '"'))
817
- )
818
  : undefined,
819
  num_predict: params?.max_tokens ?? $settings?.params?.max_tokens ?? undefined,
820
  repeat_penalty:
@@ -1056,10 +1056,10 @@
1056
  stream: true,
1057
  model: model.id,
1058
  stream_options:
1059
- (model.info?.meta?.capabilities?.usage ?? false)
1060
  ? {
1061
  include_usage: true
1062
- }
1063
  : undefined,
1064
  messages: [
1065
  params?.system || $settings.system || (responseMessage?.userContext ?? null)
@@ -1072,11 +1072,11 @@
1072
  ? await getAndUpdateUserLocation(localStorage.token)
1073
  : undefined
1074
  )}${
1075
- (responseMessage?.userContext ?? null)
1076
  ? `\n\nUser Context:\n${responseMessage?.userContext ?? ''}`
1077
  : ''
1078
  }`
1079
- }
1080
  : undefined,
1081
  ...messages
1082
  ]
@@ -1092,7 +1092,7 @@
1092
  text:
1093
  arr.length - 1 !== idx
1094
  ? message.content
1095
- : (message?.raContent ?? message.content)
1096
  },
1097
  ...message.files
1098
  .filter((file) => file.type === 'image')
@@ -1103,20 +1103,20 @@
1103
  }
1104
  }))
1105
  ]
1106
- }
1107
  : {
1108
  content:
1109
  arr.length - 1 !== idx
1110
  ? message.content
1111
- : (message?.raContent ?? message.content)
1112
- })
1113
  })),
1114
  seed: params?.seed ?? $settings?.params?.seed ?? undefined,
1115
  stop:
1116
- (params?.stop ?? $settings?.params?.stop ?? undefined)
1117
  ? (params?.stop.split(',').map((token) => token.trim()) ?? $settings.params.stop).map(
1118
  (str) => decodeURIComponent(JSON.parse('"' + str.replace(/\"/g, '\\"') + '"'))
1119
- )
1120
  : undefined,
1121
  temperature: params?.temperature ?? $settings?.params?.temperature ?? undefined,
1122
  top_p: params?.top_p ?? $settings?.params?.top_p ?? undefined,
 
579
  let selectedModelIds = modelId
580
  ? [modelId]
581
  : atSelectedModel !== undefined
582
+ ? [atSelectedModel.id]
583
+ : selectedModels;
584
 
585
  // Create response messages for each selected model
586
  const responseMessageIds = {};
 
739
  ? await getAndUpdateUserLocation(localStorage.token)
740
  : undefined
741
  )}${
742
+ responseMessage?.userContext ?? null
743
  ? `\n\nUser Context:\n${responseMessage?.userContext ?? ''}`
744
  : ''
745
  }`
746
+ }
747
  : undefined,
748
  ...messages
749
  ]
 
811
  options: {
812
  ...(params ?? $settings.params ?? {}),
813
  stop:
814
+ params?.stop ?? $settings?.params?.stop ?? undefined
815
  ? (params?.stop.split(',').map((token) => token.trim()) ?? $settings.params.stop).map(
816
  (str) => decodeURIComponent(JSON.parse('"' + str.replace(/\"/g, '\\"') + '"'))
817
+ )
818
  : undefined,
819
  num_predict: params?.max_tokens ?? $settings?.params?.max_tokens ?? undefined,
820
  repeat_penalty:
 
1056
  stream: true,
1057
  model: model.id,
1058
  stream_options:
1059
+ model.info?.meta?.capabilities?.usage ?? false
1060
  ? {
1061
  include_usage: true
1062
+ }
1063
  : undefined,
1064
  messages: [
1065
  params?.system || $settings.system || (responseMessage?.userContext ?? null)
 
1072
  ? await getAndUpdateUserLocation(localStorage.token)
1073
  : undefined
1074
  )}${
1075
+ responseMessage?.userContext ?? null
1076
  ? `\n\nUser Context:\n${responseMessage?.userContext ?? ''}`
1077
  : ''
1078
  }`
1079
+ }
1080
  : undefined,
1081
  ...messages
1082
  ]
 
1092
  text:
1093
  arr.length - 1 !== idx
1094
  ? message.content
1095
+ : message?.raContent ?? message.content
1096
  },
1097
  ...message.files
1098
  .filter((file) => file.type === 'image')
 
1103
  }
1104
  }))
1105
  ]
1106
+ }
1107
  : {
1108
  content:
1109
  arr.length - 1 !== idx
1110
  ? message.content
1111
+ : message?.raContent ?? message.content
1112
+ })
1113
  })),
1114
  seed: params?.seed ?? $settings?.params?.seed ?? undefined,
1115
  stop:
1116
+ params?.stop ?? $settings?.params?.stop ?? undefined
1117
  ? (params?.stop.split(',').map((token) => token.trim()) ?? $settings.params.stop).map(
1118
  (str) => decodeURIComponent(JSON.parse('"' + str.replace(/\"/g, '\\"') + '"'))
1119
+ )
1120
  : undefined,
1121
  temperature: params?.temperature ?? $settings?.params?.temperature ?? undefined,
1122
  top_p: params?.top_p ?? $settings?.params?.top_p ?? undefined,
src/lib/components/chat/Controls/Controls.svelte CHANGED
@@ -9,8 +9,6 @@
9
  import FileItem from '$lib/components/common/FileItem.svelte';
10
  import Collapsible from '$lib/components/common/Collapsible.svelte';
11
 
12
- import { user } from '$lib/stores';
13
-
14
  export let models = [];
15
 
16
  export let chatFiles = [];
@@ -80,7 +78,7 @@
80
  <Collapsible title={$i18n.t('Advanced Params')} open={true}>
81
  <div class="text-sm mt-1.5" slot="content">
82
  <div>
83
- <AdvancedParams admin={$user?.role === 'admin'} bind:params />
84
  </div>
85
  </div>
86
  </Collapsible>
 
9
  import FileItem from '$lib/components/common/FileItem.svelte';
10
  import Collapsible from '$lib/components/common/Collapsible.svelte';
11
 
 
 
12
  export let models = [];
13
 
14
  export let chatFiles = [];
 
78
  <Collapsible title={$i18n.t('Advanced Params')} open={true}>
79
  <div class="text-sm mt-1.5" slot="content">
80
  <div>
81
+ <AdvancedParams bind:params />
82
  </div>
83
  </div>
84
  </Collapsible>
src/lib/components/chat/MessageInput/CallOverlay.svelte CHANGED
@@ -609,10 +609,10 @@
609
  style="font-size:{rmsLevel * 100 > 4
610
  ? '4.5'
611
  : rmsLevel * 100 > 2
612
- ? '4.25'
613
- : rmsLevel * 100 > 1
614
- ? '3.75'
615
- : '3.5'}rem;width: 100%; text-align:center;"
616
  >
617
  {emoji}
618
  </div>
@@ -658,10 +658,10 @@
658
  class=" {rmsLevel * 100 > 4
659
  ? ' size-[4.5rem]'
660
  : rmsLevel * 100 > 2
661
- ? ' size-16'
662
- : rmsLevel * 100 > 1
663
- ? 'size-14'
664
- : 'size-12'} transition-all rounded-full {(model?.info?.meta
665
  ?.profile_image_url ?? '/static/favicon.png') !== '/static/favicon.png'
666
  ? ' bg-cover bg-center bg-no-repeat'
667
  : 'bg-black dark:bg-white'} bg-black dark:bg-white"
@@ -691,10 +691,10 @@
691
  style="font-size:{rmsLevel * 100 > 4
692
  ? '13'
693
  : rmsLevel * 100 > 2
694
- ? '12'
695
- : rmsLevel * 100 > 1
696
- ? '11.5'
697
- : '11'}rem;width:100%;text-align:center;"
698
  >
699
  {emoji}
700
  </div>
@@ -740,10 +740,10 @@
740
  class=" {rmsLevel * 100 > 4
741
  ? ' size-52'
742
  : rmsLevel * 100 > 2
743
- ? 'size-48'
744
- : rmsLevel * 100 > 1
745
- ? 'size-[11.5rem]'
746
- : 'size-44'} transition-all rounded-full {(model?.info?.meta
747
  ?.profile_image_url ?? '/static/favicon.png') !== '/static/favicon.png'
748
  ? ' bg-cover bg-center bg-no-repeat'
749
  : 'bg-black dark:bg-white'} "
 
609
  style="font-size:{rmsLevel * 100 > 4
610
  ? '4.5'
611
  : rmsLevel * 100 > 2
612
+ ? '4.25'
613
+ : rmsLevel * 100 > 1
614
+ ? '3.75'
615
+ : '3.5'}rem;width: 100%; text-align:center;"
616
  >
617
  {emoji}
618
  </div>
 
658
  class=" {rmsLevel * 100 > 4
659
  ? ' size-[4.5rem]'
660
  : rmsLevel * 100 > 2
661
+ ? ' size-16'
662
+ : rmsLevel * 100 > 1
663
+ ? 'size-14'
664
+ : 'size-12'} transition-all rounded-full {(model?.info?.meta
665
  ?.profile_image_url ?? '/static/favicon.png') !== '/static/favicon.png'
666
  ? ' bg-cover bg-center bg-no-repeat'
667
  : 'bg-black dark:bg-white'} bg-black dark:bg-white"
 
691
  style="font-size:{rmsLevel * 100 > 4
692
  ? '13'
693
  : rmsLevel * 100 > 2
694
+ ? '12'
695
+ : rmsLevel * 100 > 1
696
+ ? '11.5'
697
+ : '11'}rem;width:100%;text-align:center;"
698
  >
699
  {emoji}
700
  </div>
 
740
  class=" {rmsLevel * 100 > 4
741
  ? ' size-52'
742
  : rmsLevel * 100 > 2
743
+ ? 'size-48'
744
+ : rmsLevel * 100 > 1
745
+ ? 'size-[11.5rem]'
746
+ : 'size-44'} transition-all rounded-full {(model?.info?.meta
747
  ?.profile_image_url ?? '/static/favicon.png') !== '/static/favicon.png'
748
  ? ' bg-cover bg-center bg-no-repeat'
749
  : 'bg-black dark:bg-white'} "
src/lib/components/chat/MessageInput/Documents.svelte CHANGED
@@ -27,7 +27,7 @@
27
  title: $i18n.t('All Documents'),
28
  collection_names: $documents.map((doc) => doc.collection_name)
29
  }
30
- ]
31
  : []),
32
  ...$documents
33
  .reduce((a, e, i, arr) => {
 
27
  title: $i18n.t('All Documents'),
28
  collection_names: $documents.map((doc) => doc.collection_name)
29
  }
30
+ ]
31
  : []),
32
  ...$documents
33
  .reduce((a, e, i, arr) => {
src/lib/components/chat/Messages.svelte CHANGED
@@ -305,7 +305,7 @@
305
  {#each messages as message, messageIdx}
306
  <div class=" w-full {messageIdx === messages.length - 1 ? ' pb-12' : ''}">
307
  <div
308
- class="flex flex-col justify-between px-5 mb-3 {($settings?.widescreenMode ?? null)
309
  ? 'max-w-full'
310
  : 'max-w-5xl'} mx-auto rounded-lg group"
311
  >
@@ -317,10 +317,10 @@
317
  {message}
318
  isFirstMessage={messageIdx === 0}
319
  siblings={message.parentId !== null
320
- ? (history.messages[message.parentId]?.childrenIds ?? [])
321
- : (Object.values(history.messages)
322
  .filter((message) => message.parentId === null)
323
- .map((message) => message.id) ?? [])}
324
  {confirmEditMessage}
325
  {showPreviousMessage}
326
  {showNextMessage}
 
305
  {#each messages as message, messageIdx}
306
  <div class=" w-full {messageIdx === messages.length - 1 ? ' pb-12' : ''}">
307
  <div
308
+ class="flex flex-col justify-between px-5 mb-3 {$settings?.widescreenMode ?? null
309
  ? 'max-w-full'
310
  : 'max-w-5xl'} mx-auto rounded-lg group"
311
  >
 
317
  {message}
318
  isFirstMessage={messageIdx === 0}
319
  siblings={message.parentId !== null
320
+ ? history.messages[message.parentId]?.childrenIds ?? []
321
+ : Object.values(history.messages)
322
  .filter((message) => message.parentId === null)
323
+ .map((message) => message.id) ?? []}
324
  {confirmEditMessage}
325
  {showPreviousMessage}
326
  {showNextMessage}
src/lib/components/chat/Messages/CitationsModal.svelte CHANGED
@@ -60,8 +60,8 @@
60
  href={document?.metadata?.file_id
61
  ? `/api/v1/files/${document?.metadata?.file_id}/content`
62
  : document.source.name.includes('http')
63
- ? document.source.name
64
- : `#`}
65
  target="_blank"
66
  >
67
  {document?.metadata?.name ?? document.source.name}
 
60
  href={document?.metadata?.file_id
61
  ? `/api/v1/files/${document?.metadata?.file_id}/content`
62
  : document.source.name.includes('http')
63
+ ? document.source.name
64
+ : `#`}
65
  target="_blank"
66
  >
67
  {document?.metadata?.name ?? document.source.name}
src/lib/components/chat/Messages/ResponseMessage.svelte CHANGED
@@ -191,7 +191,7 @@
191
  const res = await synthesizeOpenAISpeech(
192
  localStorage.token,
193
  $settings?.audio?.tts?.defaultVoice === $config.audio.tts.voice
194
- ? ($settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice)
195
  : $config?.audio?.tts?.voice,
196
  sentence
197
  ).catch((error) => {
@@ -803,7 +803,7 @@
803
  100
804
  ) / 100
805
  } tokens` ?? 'N/A'
806
- }<br/>
807
  prompt_token/s: ${
808
  Math.round(
809
  ((message.info.prompt_eval_count ?? 0) /
 
191
  const res = await synthesizeOpenAISpeech(
192
  localStorage.token,
193
  $settings?.audio?.tts?.defaultVoice === $config.audio.tts.voice
194
+ ? $settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice
195
  : $config?.audio?.tts?.voice,
196
  sentence
197
  ).catch((error) => {
 
803
  100
804
  ) / 100
805
  } tokens` ?? 'N/A'
806
+ }<br/>
807
  prompt_token/s: ${
808
  Math.round(
809
  ((message.info.prompt_eval_count ?? 0) /
src/lib/components/chat/Messages/UserMessage.svelte CHANGED
@@ -62,8 +62,8 @@
62
  {#if !($settings?.chatBubble ?? true)}
63
  <ProfileImage
64
  src={message.user
65
- ? ($models.find((m) => m.id === message.user)?.info?.meta?.profile_image_url ?? '/user.png')
66
- : (user?.profile_image_url ?? '/user.png')}
67
  />
68
  {/if}
69
  <div class="w-full overflow-hidden pl-1">
@@ -96,7 +96,7 @@
96
  {#if message.files}
97
  <div class="mt-2.5 mb-1 w-full flex flex-col justify-end overflow-x-auto gap-1 flex-wrap">
98
  {#each message.files as file}
99
- <div class={($settings?.chatBubble ?? true) ? 'self-end' : ''}>
100
  {#if file.type === 'image'}
101
  <img src={file.url} alt="input" class=" max-h-96 rounded-lg" draggable="false" />
102
  {:else}
@@ -162,12 +162,12 @@
162
  </div>
163
  {:else}
164
  <div class="w-full">
165
- <div class="flex {($settings?.chatBubble ?? true) ? 'justify-end' : ''} mb-2">
166
  <div
167
- class="rounded-3xl {($settings?.chatBubble ?? true)
168
  ? `max-w-[90%] px-5 py-2 bg-gray-50 dark:bg-gray-850 ${
169
  message.files ? 'rounded-tr-lg' : ''
170
- }`
171
  : ''} "
172
  >
173
  <pre id="user-message">{message.content}</pre>
@@ -175,7 +175,7 @@
175
  </div>
176
 
177
  <div
178
- class=" flex {($settings?.chatBubble ?? true)
179
  ? 'justify-end'
180
  : ''} text-gray-600 dark:text-gray-500"
181
  >
 
62
  {#if !($settings?.chatBubble ?? true)}
63
  <ProfileImage
64
  src={message.user
65
+ ? $models.find((m) => m.id === message.user)?.info?.meta?.profile_image_url ?? '/user.png'
66
+ : user?.profile_image_url ?? '/user.png'}
67
  />
68
  {/if}
69
  <div class="w-full overflow-hidden pl-1">
 
96
  {#if message.files}
97
  <div class="mt-2.5 mb-1 w-full flex flex-col justify-end overflow-x-auto gap-1 flex-wrap">
98
  {#each message.files as file}
99
+ <div class={$settings?.chatBubble ?? true ? 'self-end' : ''}>
100
  {#if file.type === 'image'}
101
  <img src={file.url} alt="input" class=" max-h-96 rounded-lg" draggable="false" />
102
  {:else}
 
162
  </div>
163
  {:else}
164
  <div class="w-full">
165
+ <div class="flex {$settings?.chatBubble ?? true ? 'justify-end' : ''} mb-2">
166
  <div
167
+ class="rounded-3xl {$settings?.chatBubble ?? true
168
  ? `max-w-[90%] px-5 py-2 bg-gray-50 dark:bg-gray-850 ${
169
  message.files ? 'rounded-tr-lg' : ''
170
+ }`
171
  : ''} "
172
  >
173
  <pre id="user-message">{message.content}</pre>
 
175
  </div>
176
 
177
  <div
178
+ class=" flex {$settings?.chatBubble ?? true
179
  ? 'justify-end'
180
  : ''} text-gray-600 dark:text-gray-500"
181
  >
src/lib/components/chat/ModelSelector/Selector.svelte CHANGED
@@ -66,7 +66,7 @@
66
  $: filteredItems = searchValue
67
  ? fuse.search(searchValue).map((e) => {
68
  return e.item;
69
- })
70
  : items.filter((item) => !item.model?.info?.meta?.hidden);
71
 
72
  const pullModelHandler = async () => {
 
66
  $: filteredItems = searchValue
67
  ? fuse.search(searchValue).map((e) => {
68
  return e.item;
69
+ })
70
  : items.filter((item) => !item.model?.info?.meta?.hidden);
71
 
72
  const pullModelHandler = async () => {
src/lib/components/chat/Settings/About.svelte CHANGED
@@ -65,8 +65,8 @@
65
  {updateAvailable === null
66
  ? $i18n.t('Checking for updates...')
67
  : updateAvailable
68
- ? `(v${version.latest} ${$i18n.t('available!')})`
69
- : $i18n.t('(latest)')}
70
  </a>
71
  </div>
72
 
 
65
  {updateAvailable === null
66
  ? $i18n.t('Checking for updates...')
67
  : updateAvailable
68
+ ? `(v${version.latest} ${$i18n.t('available!')})`
69
+ : $i18n.t('(latest)')}
70
  </a>
71
  </div>
72
 
src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte CHANGED
@@ -29,7 +29,6 @@
29
  use_mmap: null,
30
  use_mlock: null,
31
  num_thread: null,
32
- num_gpu: null,
33
  template: null
34
  };
35
 
@@ -865,52 +864,6 @@
865
  {/if}
866
  </div>
867
 
868
- <div class=" py-0.5 w-full justify-between">
869
- <div class="flex w-full justify-between">
870
- <div class=" self-center text-xs font-medium">{$i18n.t('num_gpu (Ollama)')}</div>
871
-
872
- <button
873
- class="p-1 px-3 text-xs flex rounded transition flex-shrink-0 outline-none"
874
- type="button"
875
- on:click={() => {
876
- params.num_gpu = (params?.num_gpu ?? null) === null ? 0 : null;
877
- }}
878
- >
879
- {#if (params?.num_gpu ?? null) === null}
880
- <span class="ml-2 self-center">{$i18n.t('Default')}</span>
881
- {:else}
882
- <span class="ml-2 self-center">{$i18n.t('Custom')}</span>
883
- {/if}
884
- </button>
885
- </div>
886
-
887
- {#if (params?.num_gpu ?? null) !== null}
888
- <div class="flex mt-0.5 space-x-2">
889
- <div class=" flex-1">
890
- <input
891
- id="steps-range"
892
- type="range"
893
- min="0"
894
- max="256"
895
- step="1"
896
- bind:value={params.num_gpu}
897
- class="w-full h-2 rounded-lg appearance-none cursor-pointer dark:bg-gray-700"
898
- />
899
- </div>
900
- <div class="">
901
- <input
902
- bind:value={params.num_gpu}
903
- type="number"
904
- class=" bg-transparent text-center w-14"
905
- min="0"
906
- max="256"
907
- step="1"
908
- />
909
- </div>
910
- </div>
911
- {/if}
912
- </div>
913
-
914
  <!-- <div class=" py-0.5 w-full justify-between">
915
  <div class="flex w-full justify-between">
916
  <div class=" self-center text-xs font-medium">{$i18n.t('Template')}</div>
 
29
  use_mmap: null,
30
  use_mlock: null,
31
  num_thread: null,
 
32
  template: null
33
  };
34
 
 
864
  {/if}
865
  </div>
866
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
867
  <!-- <div class=" py-0.5 w-full justify-between">
868
  <div class="flex w-full justify-between">
869
  <div class=" self-center text-xs font-medium">{$i18n.t('Template')}</div>
src/lib/components/common/Valves.svelte CHANGED
@@ -27,7 +27,7 @@
27
  on:click={() => {
28
  valves[property] =
29
  (valves[property] ?? null) === null
30
- ? (valvesSpec.properties[property]?.default ?? '')
31
  : null;
32
 
33
  dispatch('change');
 
27
  on:click={() => {
28
  valves[property] =
29
  (valves[property] ?? null) === null
30
+ ? valvesSpec.properties[property]?.default ?? ''
31
  : null;
32
 
33
  dispatch('change');
src/lib/components/layout/Sidebar/ChatItem.svelte CHANGED
@@ -83,8 +83,8 @@
83
  class=" w-full flex justify-between rounded-xl px-3 py-2 {chat.id === $chatId || confirmEdit
84
  ? 'bg-gray-200 dark:bg-gray-900'
85
  : selected
86
- ? 'bg-gray-100 dark:bg-gray-950'
87
- : 'group-hover:bg-gray-100 dark:group-hover:bg-gray-950'} whitespace-nowrap text-ellipsis"
88
  >
89
  <input
90
  use:focusEdit
@@ -97,8 +97,8 @@
97
  class=" w-full flex justify-between rounded-xl px-3 py-2 {chat.id === $chatId || confirmEdit
98
  ? 'bg-gray-200 dark:bg-gray-900'
99
  : selected
100
- ? 'bg-gray-100 dark:bg-gray-950'
101
- : ' group-hover:bg-gray-100 dark:group-hover:bg-gray-950'} whitespace-nowrap text-ellipsis"
102
  href="/c/{chat.id}"
103
  on:click={() => {
104
  dispatch('select');
@@ -134,8 +134,8 @@
134
  {chat.id === $chatId || confirmEdit
135
  ? 'from-gray-200 dark:from-gray-900'
136
  : selected
137
- ? 'from-gray-100 dark:from-gray-950'
138
- : 'invisible group-hover:visible from-gray-100 dark:from-gray-950'}
139
  absolute right-[10px] top-[6px] py-1 pr-2 pl-5 bg-gradient-to-l from-80%
140
 
141
  to-transparent"
 
83
  class=" w-full flex justify-between rounded-xl px-3 py-2 {chat.id === $chatId || confirmEdit
84
  ? 'bg-gray-200 dark:bg-gray-900'
85
  : selected
86
+ ? 'bg-gray-100 dark:bg-gray-950'
87
+ : 'group-hover:bg-gray-100 dark:group-hover:bg-gray-950'} whitespace-nowrap text-ellipsis"
88
  >
89
  <input
90
  use:focusEdit
 
97
  class=" w-full flex justify-between rounded-xl px-3 py-2 {chat.id === $chatId || confirmEdit
98
  ? 'bg-gray-200 dark:bg-gray-900'
99
  : selected
100
+ ? 'bg-gray-100 dark:bg-gray-950'
101
+ : ' group-hover:bg-gray-100 dark:group-hover:bg-gray-950'} whitespace-nowrap text-ellipsis"
102
  href="/c/{chat.id}"
103
  on:click={() => {
104
  dispatch('select');
 
134
  {chat.id === $chatId || confirmEdit
135
  ? 'from-gray-200 dark:from-gray-900'
136
  : selected
137
+ ? 'from-gray-100 dark:from-gray-950'
138
+ : 'invisible group-hover:visible from-gray-100 dark:from-gray-950'}
139
  absolute right-[10px] top-[6px] py-1 pr-2 pl-5 bg-gradient-to-l from-80%
140
 
141
  to-transparent"
src/lib/components/playground/Playground.svelte CHANGED
@@ -121,7 +121,7 @@
121
  ? {
122
  role: 'system',
123
  content: system
124
- }
125
  : undefined,
126
  ...messages
127
  ].filter((message) => message)
 
121
  ? {
122
  role: 'system',
123
  content: system
124
+ }
125
  : undefined,
126
  ...messages
127
  ].filter((message) => message)
src/lib/components/workspace/Documents.svelte CHANGED
@@ -88,7 +88,7 @@
88
  tags?.length > 0
89
  ? {
90
  tags: tags
91
- }
92
  : null
93
  ).catch((error) => {
94
  toast.error(error);
 
88
  tags?.length > 0
89
  ? {
90
  tags: tags
91
+ }
92
  : null
93
  ).catch((error) => {
94
  toast.error(error);
src/lib/components/workspace/Models.svelte CHANGED
@@ -292,7 +292,7 @@
292
  >
293
  <div class=" self-start w-8 pt-0.5">
294
  <div
295
- class=" rounded-full bg-stone-700 {(model?.info?.meta?.hidden ?? false)
296
  ? 'brightness-90 dark:brightness-50'
297
  : ''} "
298
  >
@@ -305,7 +305,7 @@
305
  </div>
306
 
307
  <div
308
- class=" flex-1 self-center {(model?.info?.meta?.hidden ?? false) ? 'text-gray-500' : ''}"
309
  >
310
  <div class=" font-semibold line-clamp-1">{model.name}</div>
311
  <div class=" text-xs overflow-hidden text-ellipsis line-clamp-1">
 
292
  >
293
  <div class=" self-start w-8 pt-0.5">
294
  <div
295
+ class=" rounded-full bg-stone-700 {model?.info?.meta?.hidden ?? false
296
  ? 'brightness-90 dark:brightness-50'
297
  : ''} "
298
  >
 
305
  </div>
306
 
307
  <div
308
+ class=" flex-1 self-center {model?.info?.meta?.hidden ?? false ? 'text-gray-500' : ''}"
309
  >
310
  <div class=" font-semibold line-clamp-1">{model.name}</div>
311
  <div class=" text-xs overflow-hidden text-ellipsis line-clamp-1">
src/lib/components/workspace/Models/Knowledge/Selector.svelte CHANGED
@@ -25,7 +25,7 @@
25
  title: $i18n.t('All Documents'),
26
  collection_names: $documents.map((doc) => doc.collection_name)
27
  }
28
- ]
29
  : []),
30
  ...$documents
31
  .reduce((a, e, i, arr) => {
 
25
  title: $i18n.t('All Documents'),
26
  collection_names: $documents.map((doc) => doc.collection_name)
27
  }
28
+ ]
29
  : []),
30
  ...$documents
31
  .reduce((a, e, i, arr) => {
src/lib/i18n/locales/ar-BH/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "ملاحظة: إذا قمت بتعيين الحد الأدنى من النقاط، فلن يؤدي البحث إلا إلى إرجاع المستندات التي لها نقاط أكبر من أو تساوي الحد الأدنى من النقاط.",
419
  "Notifications": "إشعارات",
420
  "November": "نوفمبر",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (أولاما)",
423
  "OAuth ID": "",
424
  "October": "اكتوبر",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "ملاحظة: إذا قمت بتعيين الحد الأدنى من النقاط، فلن يؤدي البحث إلا إلى إرجاع المستندات التي لها نقاط أكبر من أو تساوي الحد الأدنى من النقاط.",
419
  "Notifications": "إشعارات",
420
  "November": "نوفمبر",
 
421
  "num_thread (Ollama)": "num_thread (أولاما)",
422
  "OAuth ID": "",
423
  "October": "اكتوبر",
src/lib/i18n/locales/bg-BG/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Забележка: Ако зададете минимален резултат, търсенето ще върне само документи с резултат, по-голям или равен на минималния резултат.",
419
  "Notifications": "Десктоп Известия",
420
  "November": "Ноември",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (Ollama)",
423
  "OAuth ID": "",
424
  "October": "Октомври",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Забележка: Ако зададете минимален резултат, търсенето ще върне само документи с резултат, по-голям или равен на минималния резултат.",
419
  "Notifications": "Десктоп Известия",
420
  "November": "Ноември",
 
421
  "num_thread (Ollama)": "num_thread (Ollama)",
422
  "OAuth ID": "",
423
  "October": "Октомври",
src/lib/i18n/locales/bn-BD/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "দ্রষ্টব্য: আপনি যদি ন্যূনতম স্কোর সেট করেন তবে অনুসন্ধানটি কেবলমাত্র ন্যূনতম স্কোরের চেয়ে বেশি বা সমান স্কোর সহ নথিগুলি ফেরত দেবে।",
419
  "Notifications": "নোটিফিকেশনসমূহ",
420
  "November": "নভেম্বর",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (ওলামা)",
423
  "OAuth ID": "",
424
  "October": "অক্টোবর",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "দ্রষ্টব্য: আপনি যদি ন্যূনতম স্কোর সেট করেন তবে অনুসন্ধানটি কেবলমাত্র ন্যূনতম স্কোরের চেয়ে বেশি বা সমান স্কোর সহ নথিগুলি ফেরত দেবে।",
419
  "Notifications": "নোটিফিকেশনসমূহ",
420
  "November": "নভেম্বর",
 
421
  "num_thread (Ollama)": "num_thread (ওলামা)",
422
  "OAuth ID": "",
423
  "October": "অক্টোবর",
src/lib/i18n/locales/ca-ES/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Nota: Si s'estableix una puntuació mínima, la cerca només retornarà documents amb una puntuació major o igual a la puntuació mínima.",
419
  "Notifications": "Notificacions",
420
  "November": "Novembre",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (Ollama)",
423
  "OAuth ID": "ID OAuth",
424
  "October": "Octubre",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Nota: Si s'estableix una puntuació mínima, la cerca només retornarà documents amb una puntuació major o igual a la puntuació mínima.",
419
  "Notifications": "Notificacions",
420
  "November": "Novembre",
 
421
  "num_thread (Ollama)": "num_thread (Ollama)",
422
  "OAuth ID": "ID OAuth",
423
  "October": "Octubre",
src/lib/i18n/locales/ceb-PH/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
419
  "Notifications": "Mga pahibalo sa desktop",
420
  "November": "",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "",
423
  "OAuth ID": "",
424
  "October": "",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
419
  "Notifications": "Mga pahibalo sa desktop",
420
  "November": "",
 
421
  "num_thread (Ollama)": "",
422
  "OAuth ID": "",
423
  "October": "",
src/lib/i18n/locales/de-DE/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Hinweis: Wenn Sie eine Mindestpunktzahl festlegen, werden in der Suche nur Dokumente mit einer Punktzahl größer oder gleich der Mindestpunktzahl zurückgegeben.",
419
  "Notifications": "Benachrichtigungen",
420
  "November": "November",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (Ollama)",
423
  "OAuth ID": "OAuth-ID",
424
  "October": "Oktober",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Hinweis: Wenn Sie eine Mindestpunktzahl festlegen, werden in der Suche nur Dokumente mit einer Punktzahl größer oder gleich der Mindestpunktzahl zurückgegeben.",
419
  "Notifications": "Benachrichtigungen",
420
  "November": "November",
 
421
  "num_thread (Ollama)": "num_thread (Ollama)",
422
  "OAuth ID": "OAuth-ID",
423
  "October": "Oktober",
src/lib/i18n/locales/dg-DG/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
419
  "Notifications": "Notifications",
420
  "November": "",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "",
423
  "OAuth ID": "",
424
  "October": "",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
419
  "Notifications": "Notifications",
420
  "November": "",
 
421
  "num_thread (Ollama)": "",
422
  "OAuth ID": "",
423
  "October": "",
src/lib/i18n/locales/en-GB/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
419
  "Notifications": "",
420
  "November": "",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "",
423
  "OAuth ID": "",
424
  "October": "",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
419
  "Notifications": "",
420
  "November": "",
 
421
  "num_thread (Ollama)": "",
422
  "OAuth ID": "",
423
  "October": "",
src/lib/i18n/locales/en-US/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
419
  "Notifications": "",
420
  "November": "",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "",
423
  "OAuth ID": "",
424
  "October": "",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
419
  "Notifications": "",
420
  "November": "",
 
421
  "num_thread (Ollama)": "",
422
  "OAuth ID": "",
423
  "October": "",
src/lib/i18n/locales/es-ES/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Nota: Si estableces una puntuación mínima, la búsqueda sólo devolverá documentos con una puntuación mayor o igual a la puntuación mínima.",
419
  "Notifications": "Notificaciones",
420
  "November": "Noviembre",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (Ollama)",
423
  "OAuth ID": "OAuth ID",
424
  "October": "Octubre",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Nota: Si estableces una puntuación mínima, la búsqueda sólo devolverá documentos con una puntuación mayor o igual a la puntuación mínima.",
419
  "Notifications": "Notificaciones",
420
  "November": "Noviembre",
 
421
  "num_thread (Ollama)": "num_thread (Ollama)",
422
  "OAuth ID": "OAuth ID",
423
  "October": "Octubre",
src/lib/i18n/locales/fa-IR/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "توجه: اگر حداقل نمره را تعیین کنید، جستجو تنها اسنادی را با نمره بیشتر یا برابر با حداقل نمره باز می گرداند.",
419
  "Notifications": "اعلان",
420
  "November": "نوامبر",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (اولاما)",
423
  "OAuth ID": "",
424
  "October": "اکتبر",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "توجه: اگر حداقل نمره را تعیین کنید، جستجو تنها اسنادی را با نمره بیشتر یا برابر با حداقل نمره باز می گرداند.",
419
  "Notifications": "اعلان",
420
  "November": "نوامبر",
 
421
  "num_thread (Ollama)": "num_thread (اولاما)",
422
  "OAuth ID": "",
423
  "October": "اکتبر",
src/lib/i18n/locales/fi-FI/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Huom: Jos asetat vähimmäispisteet, haku palauttaa vain asiakirjat, joiden pisteet ovat suurempia tai yhtä suuria kuin vähimmäispistemäärä.",
419
  "Notifications": "Ilmoitukset",
420
  "November": "marraskuu",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (Ollama)",
423
  "OAuth ID": "",
424
  "October": "lokakuu",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Huom: Jos asetat vähimmäispisteet, haku palauttaa vain asiakirjat, joiden pisteet ovat suurempia tai yhtä suuria kuin vähimmäispistemäärä.",
419
  "Notifications": "Ilmoitukset",
420
  "November": "marraskuu",
 
421
  "num_thread (Ollama)": "num_thread (Ollama)",
422
  "OAuth ID": "",
423
  "October": "lokakuu",
src/lib/i18n/locales/fr-CA/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Note : Si vous définissez un score minimum, seuls les documents ayant un score supérieur ou égal à ce score minimum seront retournés par la recherche.",
419
  "Notifications": "Notifications",
420
  "November": "Novembre",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (Ollama)",
423
  "OAuth ID": "ID OAuth",
424
  "October": "Octobre",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Note : Si vous définissez un score minimum, seuls les documents ayant un score supérieur ou égal à ce score minimum seront retournés par la recherche.",
419
  "Notifications": "Notifications",
420
  "November": "Novembre",
 
421
  "num_thread (Ollama)": "num_thread (Ollama)",
422
  "OAuth ID": "ID OAuth",
423
  "October": "Octobre",
src/lib/i18n/locales/fr-FR/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Note : Si vous définissez un score minimum, seuls les documents ayant un score supérieur ou égal à ce score minimum seront retournés par la recherche.",
419
  "Notifications": "Notifications",
420
  "November": "Novembre",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (Ollama)",
423
  "OAuth ID": "ID OAuth",
424
  "October": "Octobre",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Note : Si vous définissez un score minimum, seuls les documents ayant un score supérieur ou égal à ce score minimum seront retournés par la recherche.",
419
  "Notifications": "Notifications",
420
  "November": "Novembre",
 
421
  "num_thread (Ollama)": "num_thread (Ollama)",
422
  "OAuth ID": "ID OAuth",
423
  "October": "Octobre",
src/lib/i18n/locales/he-IL/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "הערה: אם תקבע ציון מינימלי, החיפוש יחזיר רק מסמכים עם ציון שגבוה או שווה לציון המינימלי.",
419
  "Notifications": "התראות",
420
  "November": "נובמבר",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (Ollama)",
423
  "OAuth ID": "",
424
  "October": "אוקטובר",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "הערה: אם תקבע ציון מינימלי, החיפוש יחזיר רק מסמכים עם ציון שגבוה או שווה לציון המינימלי.",
419
  "Notifications": "התראות",
420
  "November": "נובמבר",
 
421
  "num_thread (Ollama)": "num_thread (Ollama)",
422
  "OAuth ID": "",
423
  "October": "אוקטובר",
src/lib/i18n/locales/hi-IN/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "ध्यान दें: यदि आप न्यूनतम स्कोर निर्धारित करते हैं, तो खोज केवल न्यूनतम स्कोर से अधिक या उसके बराबर स्कोर वाले दस्तावेज़ वापस लाएगी।",
419
  "Notifications": "सूचनाएं",
420
  "November": "नवंबर",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (ओलामा)",
423
  "OAuth ID": "",
424
  "October": "अक्टूबर",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "ध्यान दें: यदि आप न्यूनतम स्कोर निर्धारित करते हैं, तो खोज केवल न्यूनतम स्कोर से अधिक या उसके बराबर स्कोर वाले दस्तावेज़ वापस लाएगी।",
419
  "Notifications": "सूचनाएं",
420
  "November": "नवंबर",
 
421
  "num_thread (Ollama)": "num_thread (ओलामा)",
422
  "OAuth ID": "",
423
  "October": "अक्टूबर",
src/lib/i18n/locales/hr-HR/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Napomena: Ako postavite minimalnu ocjenu, pretraga će vratiti samo dokumente s ocjenom većom ili jednakom minimalnoj ocjeni.",
419
  "Notifications": "Obavijesti",
420
  "November": "Studeni",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (Ollama)",
423
  "OAuth ID": "",
424
  "October": "Listopad",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Napomena: Ako postavite minimalnu ocjenu, pretraga će vratiti samo dokumente s ocjenom većom ili jednakom minimalnoj ocjeni.",
419
  "Notifications": "Obavijesti",
420
  "November": "Studeni",
 
421
  "num_thread (Ollama)": "num_thread (Ollama)",
422
  "OAuth ID": "",
423
  "October": "Listopad",
src/lib/i18n/locales/id-ID/translation.json CHANGED
@@ -418,7 +418,6 @@
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Catatan: Jika Anda menetapkan skor minimum, pencarian hanya akan mengembalikan dokumen dengan skor yang lebih besar atau sama dengan skor minimum.",
419
  "Notifications": "Pemberitahuan",
420
  "November": "November",
421
- "num_gpu (Ollama)": "",
422
  "num_thread (Ollama)": "num_thread (Ollama)",
423
  "OAuth ID": "ID OAuth",
424
  "October": "Oktober",
 
418
  "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Catatan: Jika Anda menetapkan skor minimum, pencarian hanya akan mengembalikan dokumen dengan skor yang lebih besar atau sama dengan skor minimum.",
419
  "Notifications": "Pemberitahuan",
420
  "November": "November",
 
421
  "num_thread (Ollama)": "num_thread (Ollama)",
422
  "OAuth ID": "ID OAuth",
423
  "October": "Oktober",