Geraldine committed on
Commit
5ac262d
·
verified ·
1 Parent(s): 734c13e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -6
app.py CHANGED
@@ -11,8 +11,6 @@ from langchain_core.runnables import RunnablePassthrough
11
  from langchain_core.output_parsers import StrOutputParser
12
  from transformers import AutoModel, AutoTokenizer, pipeline
13
  import joblib
14
- from huggingface_hub import hf_hub_download
15
- from llama_cpp import Llama
16
  from typing import List
17
 
18
  def setup_page():
@@ -195,7 +193,7 @@ def setup_rag_tab(llm):
195
 
196
  return chain
197
 
198
- def set_up_local_fine_tuned_tab(query):
199
  st.header("Fine-tuned Zephir model")
200
 
201
  llm = Llama(
@@ -212,9 +210,9 @@ def set_up_local_fine_tuned_tab(query):
212
  }
213
  ]
214
  )
215
- return output["choices"][0]["message"]["content"]
216
 
217
- def setup_finetuned_tab():
218
  st.header("Fine-tuned Zephir model")
219
 
220
  llm = create_aws_ollama_llm()
@@ -314,7 +312,7 @@ with main_col:
314
  st.code(soup.prettify(), language="xml-doc")"""
315
  """with tab4:
316
  llm = create_aws_ollama_llm()
317
- fine_tuned_chain = setup_finetuned_tab(llm)
318
  st.session_state.response_fine_tuned = fine_tuned_chain.invoke(query)
319
  with st.chat_message("assistant"):
320
  st.markdown(st.session_state.response_fine_tuned)"""
 
11
  from langchain_core.output_parsers import StrOutputParser
12
  from transformers import AutoModel, AutoTokenizer, pipeline
13
  import joblib
 
 
14
  from typing import List
15
 
16
  def setup_page():
 
193
 
194
  return chain
195
 
196
+ """def setup_local_fine_tuned_tab(query):
197
  st.header("Fine-tuned Zephir model")
198
 
199
  llm = Llama(
 
210
  }
211
  ]
212
  )
213
+ return output["choices"][0]["message"]["content"]"""
214
 
215
+ def setup_fine_tuned_tab():
216
  st.header("Fine-tuned Zephir model")
217
 
218
  llm = create_aws_ollama_llm()
 
312
  st.code(soup.prettify(), language="xml-doc")"""
313
  """with tab4:
314
  llm = create_aws_ollama_llm()
315
+ fine_tuned_chain = setup_fine_tuned_tab(llm)
316
  st.session_state.response_fine_tuned = fine_tuned_chain.invoke(query)
317
  with st.chat_message("assistant"):
318
  st.markdown(st.session_state.response_fine_tuned)"""