Update app.py
Browse files
app.py
CHANGED
@@ -95,8 +95,8 @@ def create_groq_llm(model):
|
|
95 |
|
96 |
def create_aws_ollama_llm():
|
97 |
return ChatOllama(
|
98 |
-
model="
|
99 |
-
base_url="
|
100 |
temperature=0.1,
|
101 |
max_tokens=None,
|
102 |
timeout=None,
|
@@ -226,7 +226,7 @@ def setup_rag_tab(llm):
|
|
226 |
return chain
|
227 |
|
228 |
def setup_fine_tuned_tab():
|
229 |
-
st.header("Fine-tuned
|
230 |
|
231 |
llm = create_aws_ollama_llm()
|
232 |
prompt = ChatPromptTemplate.from_messages(
|
@@ -277,7 +277,7 @@ with main_col:
|
|
277 |
st.info("Retrieval-augmented generation : the prompt used to interact with the model contains the relevant context from an archival collection of EAD files. The LLM used is the one selected in the sidebar list.",icon="ℹ️")
|
278 |
|
279 |
with tab4:
|
280 |
-
st.info("
|
281 |
|
282 |
# Process query for all tabs when submitted
|
283 |
if query:
|
@@ -319,7 +319,11 @@ with main_col:
|
|
319 |
|
320 |
# Process for Tab 4 - Fine-tuned model
|
321 |
with tab4:
|
322 |
-
|
|
|
|
|
|
|
|
|
323 |
|
324 |
# Display query history in the sidebar column
|
325 |
with history_col:
|
|
|
95 |
|
96 |
def create_aws_ollama_llm():
|
97 |
return ChatOllama(
|
98 |
+
model="hf.co/Geraldine/FineLlama-3.2-3B-Instruct-ead-GGUF:Q5_K_M",
|
99 |
+
base_url="http://129.80.86.176",
|
100 |
temperature=0.1,
|
101 |
max_tokens=None,
|
102 |
timeout=None,
|
|
|
226 |
return chain
|
227 |
|
228 |
def setup_fine_tuned_tab():
|
229 |
+
st.header("Fine-tuned FineLlama-3.2-3b-Instruct-ead model")
|
230 |
|
231 |
llm = create_aws_ollama_llm()
|
232 |
prompt = ChatPromptTemplate.from_messages(
|
|
|
277 |
st.info("Retrieval-augmented generation : the prompt used to interact with the model contains the relevant context from an archival collection of EAD files. The LLM used is the one selected in the sidebar list.",icon="ℹ️")
|
278 |
|
279 |
with tab4:
|
280 |
+
st.info("FineLlama-3.2-3b-Instruct-ead model : this is a custom fine-tuned adaptation of llama-3.2-3b-instruct post-trained on a dataset of archival descriptions in the EAD format",icon="ℹ️")
|
281 |
|
282 |
# Process query for all tabs when submitted
|
283 |
if query:
|
|
|
319 |
|
320 |
# Process for Tab 4 - Fine-tuned model
|
321 |
with tab4:
|
322 |
+
llm = create_aws_ollama_llm()
|
323 |
+
fine_tuned_chain = setup_fine_tuned_tab(llm)
|
324 |
+
st.session_state.response_fine_tuned = fine_tuned_chain.invoke(query)
|
325 |
+
with st.chat_message("assistant"):
|
326 |
+
st.markdown(st.session_state.response_fine_tuned)
|
327 |
|
328 |
# Display query history in the sidebar column
|
329 |
with history_col:
|