Update app.py
app.py CHANGED
@@ -22,25 +22,30 @@ from llm import get_groq_llm
 load_dotenv()
 
 # Load configuration from JSON file
-with open('config.json') as config_file:
-    config = json.load(config_file)
 
-with open("
+with open("config.json", "r") as file:
     config2 = json.load(file)
 
-VECTORSTORE_DIRECTORY = config["vectorstore_directory"]
-CHUNK_SIZE = config["chunk_size"]
-CHUNK_OVERLAP = config["chunk_overlap"]
-EMBEDDING_MODEL_NAME = config["embedding_model"]
-LLM_MODEL_NAME = config["llm_model"]
-LLM_TEMPERATURE = config["llm_temperature"]
-HF_SPACE_NAME = config["hf_space_name"]
-DATA_DIR = config["data_dir"]
-
 GROQ_API_KEY = os.environ["GROQ_API_KEY"]
 HF_TOKEN = os.environ["HF_Token"]
 
 
+VECTORSTORE_DIRECTORY = config.get("vectorstore_directory")
+CHUNK_SIZE = config.get("chunking", "chunk_size")
+CHUNK_OVERLAP = config.get("chunking", "chunk_overlap")
+EMBEDDING_MODEL_NAME = config.get("embedding_model", "name")
+LLM_MODEL_NAME = config.get("llm_model", "name")
+LLM_MODEL_TEMPERATURE = config.get("llm_model", "temperature")
+GITLAB_API_URL = config.get("gitlab", "api_url")
+GITLAB_PROJECT_ID = config.get("gitlab", "project", "id")
+GITLAB_PROJECT_VERSION = config.get("gitlab", "project", "version")
+DATA_DIR = config.get("data_dir")
+HF_SPACE_NAME = config.get("hf_space_name")
+DOCS_FOLDER = config.get("usage", "docs", "folder")
+DOCS_FILE = config.get("usage", "docs", "file")
+KADI_APY_FOLDER = config.get("usage", "kadi_apy", "folder")
+KADI_APY_FILE = config.get("usage", "kadi_apy", "file")
+
 
 login(HF_TOKEN)
 api = HfApi()

@@ -166,7 +171,7 @@ def initialize():
     print(f"Total number of doc_chunks: {len(doc_chunks)}")
 
     vector_store = embed_documents_into_vectorstore(doc_chunks + code_chunks, EMBEDDING_MODEL_NAME, VECTORSTORE_DIRECTORY)
-    llm = get_groq_llm(LLM_MODEL_NAME,
+    llm = get_groq_llm(LLM_MODEL_NAME, LLM_MODEL_TEMPERATURE, GROQ_API_KEY)
 
     from langchain_community.document_loaders import TextLoader
 
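Note on the new config access pattern: Python's dict.get only accepts a key and an optional default, so a call such as config.get("chunking", "chunk_size") returns the whole "chunking" value (or the literal string "chunk_size" when the key is absent) rather than the nested chunk size. Below is a minimal sketch of a nested lookup, assuming config.json has the nested shape implied by the key paths in this commit; the helper get_nested is hypothetical and not part of the change.

import json

# Hypothetical helper, not part of this commit: walk a sequence of keys
# through nested dicts, returning None if any level is missing.
def get_nested(cfg, *keys):
    value = cfg
    for key in keys:
        if not isinstance(value, dict):
            return None
        value = value.get(key)
        if value is None:
            return None
    return value

with open("config.json", "r") as file:
    config = json.load(file)

# Example lookups mirroring the key paths used above (illustrative only)
CHUNK_SIZE = get_nested(config, "chunking", "chunk_size")
LLM_MODEL_NAME = get_nested(config, "llm_model", "name")
GITLAB_PROJECT_ID = get_nested(config, "gitlab", "project", "id")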