isayahc committed · verified
Commit 7cedde1 · 1 Parent(s): 2ee991b

added more configurations to the top of app.py

Files changed (1)
  1. app.py +8 -10
app.py CHANGED
@@ -58,6 +58,14 @@ AWS_S3_LOCATION=os.getenv('AWS_S3_LOCATION')
 AWS_S3_FILE=os.getenv('AWS_S3_FILE')
 VS_DESTINATION=os.getenv('VS_DESTINATION')
 
+# remove old vectorstore
+if os.path.exists(VS_DESTINATION):
+    os.remove(VS_DESTINATION)
+
+# remove old sqlite cache
+if os.path.exists('.langchain.sqlite'):
+    os.remove('.langchain.sqlite')
+
 # initialize Model config
 llm_model_name = "mistralai/Mistral-7B-Instruct-v0.1"
 
@@ -74,14 +82,6 @@ llm = HuggingFaceHub(repo_id=llm_model_name, model_kwargs={
 embedding_model_name = "sentence-transformers/all-mpnet-base-v2"
 embeddings = HuggingFaceHubEmbeddings(repo_id=embedding_model_name)
 
-# remove old vectorstore
-if os.path.exists(VS_DESTINATION):
-    os.remove(VS_DESTINATION)
-
-# remove old sqlite cache
-if os.path.exists('.langchain.sqlite'):
-    os.remove('.langchain.sqlite')
-
 set_llm_cache(SQLiteCache(database_path=".langchain.sqlite"))
 
 # retrieve vectorsrore
@@ -97,8 +97,6 @@ db.get()
 retriever = db.as_retriever(search_type="mmr")#, search_kwargs={'k': 3, 'lambda_mult': 0.25})
 
 # asks LLM to create 3 alternatives baed on user query
-
-
 # asks LLM to extract relevant parts from retrieved documents
 
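For context, the net effect of the commit is to move the stale-file cleanup (the old vectorstore at VS_DESTINATION and the old .langchain.sqlite cache) from below the embeddings setup to just after the environment configuration, so it runs before any model, embedding, or cache object is created. A minimal sketch of that ordering follows; the import paths are assumptions not shown in the diff, and the `VS_DESTINATION and` guard is an illustrative addition, not part of the committed code.

import os

# Import paths assumed for a recent langchain release; app.py may import these differently.
from langchain.cache import SQLiteCache
from langchain.globals import set_llm_cache

VS_DESTINATION = os.getenv('VS_DESTINATION')

# Clear stale artifacts first, before anything below recreates or reads them.
# (The `VS_DESTINATION and` check is an added guard in case the env var is unset.)
if VS_DESTINATION and os.path.exists(VS_DESTINATION):
    os.remove(VS_DESTINATION)
if os.path.exists('.langchain.sqlite'):
    os.remove('.langchain.sqlite')

# The SQLite LLM cache file is only (re)created after the old one has been removed.
set_llm_cache(SQLiteCache(database_path=".langchain.sqlite"))

Doing the cleanup at configuration time means set_llm_cache and the later vectorstore retrieval never see a stale file left over from a previous run.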