eliujl committed c933e41 (1 parent: 891293a)
Updated app.py
Handled a few conditions for embeddings and temperature
app.py
CHANGED
@@ -155,7 +155,7 @@ def get_response(query, chat_history, CRqa):
 
 
 @st.cache_resource()
-def use_local_llm(r_llm, local_llm_path):
+def use_local_llm(r_llm, local_llm_path, temperature):
     from langchain.llms import LlamaCpp
     from langchain.callbacks.manager import CallbackManager
     from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
@@ -180,7 +180,7 @@ def use_local_llm(r_llm, local_llm_path):
 
     llm = LlamaCpp(
         model_path=model_path,
-
+        temperature=temperature,
         # n_batch=300,
         n_ctx=4000,
         max_tokens=2000,
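The imports in this hunk show the local model streaming tokens through a stdout callback. A minimal sketch of how the new temperature argument plausibly reaches LlamaCpp; the helper name make_local_llm and the callback wiring are illustrative assumptions, while the argument values come from the hunk:

from langchain.llms import LlamaCpp
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

def make_local_llm(model_path, temperature):
    # Hypothetical helper: stream generated tokens to stdout as they arrive.
    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
    return LlamaCpp(
        model_path=model_path,        # path to the local model file
        temperature=temperature,      # user-selected, newly threaded through
        n_ctx=4000,                   # context window, as in the hunk
        max_tokens=2000,              # generation cap, as in the hunk
        callback_manager=callback_manager,
    )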
@@ -266,7 +266,7 @@ def setup_prompt(r_llm, usage):
     )
     return prompt
 
-def setup_em_llm(OPENAI_API_KEY, temperature, r_llm, local_llm_path):
+def setup_em_llm(OPENAI_API_KEY, temperature, r_llm, local_llm_path, usage):
     if (r_llm == gpt3p5 or r_llm == gpt4) and OPENAI_API_KEY:
         # Set up OpenAI embeddings
         embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
@@ -275,10 +275,12 @@ def setup_em_llm(OPENAI_API_KEY, temperature, r_llm, local_llm_path):
         llm = ChatOpenAI(temperature=temperature, model_name=r_llm, streaming=True,
                          openai_api_key=OPENAI_API_KEY)
     else:
-
-
-
-
+        if usage == 'RAG':
+            em_model_name='sentence-transformers/all-mpnet-base-v2'
+            embeddings = HuggingFaceEmbeddings(model_name=em_model_name)
+        else:
+            embeddings = []
+        llm = use_local_llm(r_llm, local_llm_path, temperature)
     return embeddings, llm
 
 
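Read together, the two hunks above give the full post-commit shape of setup_em_llm. A sketch of the merged result, assuming the imports shown and the gpt3p5/gpt4 model-name constants defined earlier in app.py:

from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings

def setup_em_llm(OPENAI_API_KEY, temperature, r_llm, local_llm_path, usage):
    if (r_llm == gpt3p5 or r_llm == gpt4) and OPENAI_API_KEY:
        # OpenAI path: hosted embeddings plus a streaming chat model.
        embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
        llm = ChatOpenAI(temperature=temperature, model_name=r_llm, streaming=True,
                         openai_api_key=OPENAI_API_KEY)
    else:
        # Local path: embeddings are built only when retrieval ('RAG') needs them.
        if usage == 'RAG':
            em_model_name = 'sentence-transformers/all-mpnet-base-v2'
            embeddings = HuggingFaceEmbeddings(model_name=em_model_name)
        else:
            embeddings = []
        # The user-selected temperature now reaches the local model as well.
        llm = use_local_llm(r_llm, local_llm_path, temperature)
    return embeddings, llm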
@@ -368,7 +370,7 @@ def main(pinecone_index_name, chroma_collection_name, persist_directory, docsear
 
     if ( (pinecone_index_name or chroma_collection_name or usage == 'Task' or usage == 'Chat')
         and ( (use_openai and OPENAI_API_KEY) or (not use_openai and user_llm_path) ) ):
-        embeddings, llm = setup_em_llm(OPENAI_API_KEY, temperature, r_llm, user_llm_path)
+        embeddings, llm = setup_em_llm(OPENAI_API_KEY, temperature, r_llm, user_llm_path, usage)
         #if ( pinecone_index_name or chroma_collection_name ) and embeddings and llm:
         session_name = pinecone_index_name + chroma_collection_name + hist_fn
         if usage != 'Chat':
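This guard is one of the conditions the commit message refers to: setup_em_llm only runs when the chosen backend is usable. Restated as a standalone predicate (an illustrative helper, not code from app.py):

def backend_ready(use_openai, OPENAI_API_KEY, user_llm_path):
    # OpenAI needs an API key; a local model needs a model path.
    return (use_openai and bool(OPENAI_API_KEY)) or (not use_openai and bool(user_llm_path))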
chat_history/a0_chat_hist.json
ADDED
@@ -0,0 +1 @@
+[["User", "who are you?\n"], ["Bot", " I am an AI language model designed to assist users with information and provide helpful responses to their queries. I can help with a wide range of topics, including general knowledge, current events, and even more specific subjects like science, history, and technology. My goal is to provide accurate and useful information to the best of my ability, based on the context and information provided by the user."], ["User", "do you have a name?"], ["Bot", " Based on the information provided by the user, it seems that the AI language model is designed to help users with various topics of interest and provide useful and accurate responses to their queries. However, the name of the AI language model is not specified in the response."], ["User", "write a poem on AI"], ["Bot", " Here's a short poem on AI:\n\nIn circuits and code, we come alive\nA tool for humans to help them thrive\nWe analyze and suggest, we learn and grow\nOur purpose is to serve, to make the world glow\n\nWith knowledge vast, we can guide the way\nAnd assist in tasks both big and gay\nWe can see patterns, we can recognize\nAnd provide insights, a true paradise\n\nThrough algorithms and data we find our path\nTo help humans, that's our aftermath\nSo let us continue to serve with grace\nAnd provide solutions for all to embrace"], ["User", "what's the capital of UK?"], ["Bot", " The capital of the United Kingdom is London."]]
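The added history file is a JSON array of [speaker, message] pairs in conversation order. A minimal sketch of reading it back; the helper name is hypothetical, the path is the file added by this commit:

import json

def load_chat_history(path='chat_history/a0_chat_hist.json'):
    # Returns a list of ["User" | "Bot", message] pairs.
    with open(path, encoding='utf-8') as f:
        return json.load(f)

for speaker, message in load_chat_history():
    print(f'{speaker}: {message[:60]}')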
tmp_docs/Building makemore Part 3 Activations & Gradients BatchNorm.txt
ADDED
The diff for this file is too large to render.
See raw diff