terry-li-hm committed on
Commit a55f0c0 · 1 Parent(s): ddc34cd

Add `LiteLLM`

Files changed (1)
  1. app.py +4 -2
app.py CHANGED
@@ -16,7 +16,7 @@ from llama_index import (
 from llama_index.callbacks.base import CallbackManager
 from llama_index.chat_engine import CondenseQuestionChatEngine
 from llama_index.embeddings import HuggingFaceEmbedding
-from llama_index.llms import ChatMessage, HuggingFaceLLM, MessageRole, OpenAI
+from llama_index.llms import ChatMessage, HuggingFaceLLM, LiteLLM, MessageRole, OpenAI
 from llama_index.prompts import PromptTemplate
 from llama_index.query_engine import SubQuestionQueryEngine
 from llama_index.tools import QueryEngineTool, ToolMetadata
@@ -73,7 +73,7 @@ async def start():
             Select(
                 id="Model",
                 label="Model",
-                values=["gpt-3.5-turbo", "gpt-4", "zephyr"],
+                values=["gpt-3.5-turbo", "gpt-4", "zephyr", "litellm-gpt-3.5-turbo"],
                 initial_index=1,
             ),
             Slider(
@@ -134,6 +134,8 @@ async def setup_query_engine(settings):
             messages_to_prompt=messages_to_prompt,
             device_map="auto",
         )
+    elif settings["Model"] == "litellm-gpt-3.5-turbo":
+        llm = LiteLLM("gpt-3.5-turbo")
     else:
         llm = OpenAI(model=settings["Model"], temperature=settings["Temperature"])
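For context, a minimal sketch of how the newly imported `LiteLLM` wrapper can be used on its own, assuming a llama_index release that exposes `LiteLLM` under `llama_index.llms` as the import in this diff suggests; the prompt text and temperature value below are illustrative, not part of the commit:

```python
# Sketch only, not part of the commit: standalone use of the LiteLLM wrapper.
# Assumes a llama_index version where `LiteLLM` lives in `llama_index.llms`,
# matching the import added in this diff.
from llama_index.llms import LiteLLM

# The first argument is the model string handed to litellm's routing layer.
llm = LiteLLM("gpt-3.5-turbo", temperature=0.7)

# complete() returns a completion response; .text holds the generated string.
response = llm.complete("Summarize what LiteLLM does in one sentence.")
print(response.text)
```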