Update app.py
app.py CHANGED
@@ -47,14 +47,14 @@ def create_db(splits):
         st.error(f"Error creating vector database: {e}")
         return None
 
-def initialize_llmchain(llm_model,
+def initialize_llmchain(llm_model, vector_db):
     try:
         llm = HuggingFaceEndpoint(
             repo_id=llm_model,
             huggingfacehub_api_token=api_token,
-            temperature=
-            max_new_tokens=
-            top_k=
+            temperature=0.5,
+            max_new_tokens=4096,
+            top_k=3,
         )
         memory = ConversationBufferMemory(
             memory_key="chat_history",
@@ -89,10 +89,10 @@ def initialize_database(uploaded_files):
         st.error(f"Error initializing database: {e}")
         return None, "Failed to initialize database."
 
-def initialize_LLM(llm_option,
+def initialize_LLM(llm_option, vector_db):
     try:
         llm_name = list_llm[llm_option]
-        qa_chain = initialize_llmchain(llm_name,
+        qa_chain = initialize_llmchain(llm_name, vector_db)
         if qa_chain is None:
             return None, "Failed to initialize QA chain."
         return qa_chain, "QA chain initialized. Chatbot is ready!"
@@ -148,13 +148,10 @@ def main():
 
     st.markdown("### Select Large Language Model (LLM) and input parameters")
     llm_option = st.radio("Available LLMs", list_llm_simple)
-    temperature = st.slider("Temperature", 0.01, 1.0, 0.5, 0.1)
-    max_tokens = st.slider("Max New Tokens", 128, 9192, 4096, 128)
-    top_k = st.slider("top-k", 1, 10, 3, 1)
 
     if st.button("Initialize Question Answering Chatbot"):
         with st.spinner("Initializing QA chatbot..."):
-            qa_chain, llm_message = initialize_LLM(list_llm_simple.index(llm_option),
+            qa_chain, llm_message = initialize_LLM(list_llm_simple.index(llm_option), st.session_state['vector_db'])
             st.session_state['qa_chain'] = qa_chain
             st.success(llm_message)
 
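Taken together, the hunks drop the Temperature, Max New Tokens, and top-k sliders and hard-code those values inside initialize_llmchain, so main() now only passes the selected model and the vector store. A minimal sketch of how the updated function could fit together is below; the diff ends at the ConversationBufferMemory call, so the import paths, the return_messages flag, the env-var token lookup, and the ConversationalRetrievalChain wiring are assumptions rather than the Space's actual code.

import os
import streamlit as st
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain_huggingface import HuggingFaceEndpoint  # import path assumed; app.py may import from elsewhere

api_token = os.environ.get("HF_TOKEN")  # assumption: the Space reads its HF token from an env var

def initialize_llmchain(llm_model, vector_db):
    try:
        # Generation parameters are now fixed (they previously came from sliders in main()).
        llm = HuggingFaceEndpoint(
            repo_id=llm_model,
            huggingfacehub_api_token=api_token,
            temperature=0.5,
            max_new_tokens=4096,
            top_k=3,
        )
        memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=True,  # assumed; not visible in the diff
        )
        # Assumed continuation: combine the LLM, the vector store's retriever,
        # and the memory into a conversational retrieval chain.
        qa_chain = ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=vector_db.as_retriever(),
            memory=memory,
        )
        return qa_chain
    except Exception as e:
        st.error(f"Error initializing LLM chain: {e}")
        return None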