Update app.py
app.py CHANGED
@@ -46,7 +46,7 @@ hf_api = HfApi()
 
 
 def initialize():
-    global
+    global kadiAPY_Bot
 
 
     download_gitlab_repo_to_hfspace(GITLAB_API_URL, GITLAB_PROJECT_ID, GITLAB_PROJECT_VERSION, DATA_DIR, hf_api, HF_SPACE_NAME)
@@ -66,59 +66,13 @@ def initialize():
     vectorstore = setup_vectorstore(doc_chunks + code_chunks, EMBEDDING_MODEL_NAME, VECTORSTORE_DIRECTORY)
     llm = get_groq_llm(LLM_MODEL_NAME, LLM_MODEL_TEMPERATURE, GROQ_API_KEY)
 
+    kadiAPY_Bot = KadiAPYBot(llm, vectorstore)
 
 initialize()
-
-def rag_workflow(query):
-    """
-    RAGChain class to perform the complete RAG workflow.
-    """
-    # Assume 'llm' and 'vectorstore' are already initialized instances
-    rag_chain = RAGChain(llm, vectorstore)
-
-
-    """
-    Pre-Retrieval-Stage
-    """
-    # predict which python library to search in: (standard) kadiAPY-library or kadiAPY-cli-library
-    code_library_usage_prediction = rag_chain.predict_library_usage(query)
-    print(f"Predicted library usage: {code_library_usage_prediction}")
-
-    rewritten_query = rag_chain.rewrite_query(query)
-    print(f"\n\n Rewritten query: {rewritten_query}\n\n")
-
-    """
-    Retrieval-Stage
-    """
-    kadiAPY_doc_documents = rag_chain.retrieve_contexts(query, k=5, filter={"usage": "doc"})
-    kadiAPY_code_documents = rag_chain.retrieve_contexts(str(rewritten_query.content), k=3, filter={"usage": code_library_usage_prediction})
-
-    print("Retrieved Document Contexts:", kadiAPY_doc_documents)
-    print("Retrieved Code Contexts:", kadiAPY_code_documents)
-
-
-
-    """
-    Pre-Generation-Stage
-    Adding each doc's metadata to the retrieved content (docs & code snippets)
-    """
-    formatted_doc_snippets = rag_chain.format_documents(kadiAPY_doc_documents)
-    formatted_code_snippets = rag_chain.format_documents(kadiAPY_code_documents)
-    #print("FORMATTED Retrieved Document Contexts:", formatted_doc_snippets)
-    #print("FORMATTED Retrieved Code Contexts:", formatted_code_snippets)
-
-    """
-    Generation-Stage
-    """
-    response = rag_chain.generate_response(query, formatted_doc_snippets, formatted_code_snippets)
-    print("Generated Response:", response)
-
-    return response
-
 
 def bot_kadi(history):
-    user_query = history[-1][0]
-    response = rag_workflow(user_query)
+    user_query = history[-1][0]
+    response = kadiAPY_Bot.process_query(user_query)
     history[-1] = (user_query, response)
 
     yield history
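The commit replaces the whole inline rag_workflow function with a single call, kadiAPY_Bot.process_query(user_query), on an instance created inside initialize(). The KadiAPYBot definition itself is not part of this diff, so the following is only a hypothetical sketch of what it could look like, assuming it wraps the same RAGChain helpers (predict_library_usage, rewrite_query, retrieve_contexts, format_documents, generate_response) that the removed function called:

# Hypothetical sketch: KadiAPYBot is not defined in this diff.
# RAGChain is the helper class the removed rag_workflow already used.
class KadiAPYBot:
    """Bundles the pre-retrieval, retrieval, and generation stages."""

    def __init__(self, llm, vectorstore):
        self.rag_chain = RAGChain(llm, vectorstore)

    def process_query(self, query):
        # Pre-retrieval: decide whether to search the (standard) kadiAPY
        # library or the kadiAPY CLI library, then rewrite the query.
        library = self.rag_chain.predict_library_usage(query)
        rewritten = self.rag_chain.rewrite_query(query)

        # Retrieval: fetch documentation and code contexts, filtered by metadata.
        docs = self.rag_chain.retrieve_contexts(query, k=5, filter={"usage": "doc"})
        code = self.rag_chain.retrieve_contexts(str(rewritten.content), k=3, filter={"usage": library})

        # Pre-generation: attach each document's metadata to its content.
        doc_snippets = self.rag_chain.format_documents(docs)
        code_snippets = self.rag_chain.format_documents(code)

        # Generation: answer from both context sets.
        return self.rag_chain.generate_response(query, doc_snippets, code_snippets)

Moving the chain behind one object also removes rag_workflow's implicit dependence on the module-level llm and vectorstore; bot_kadi now only needs the shared kadiAPY_Bot instance.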
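The shape of bot_kadi (it rewrites history[-1] and yields the updated history) is the usual Gradio Chatbot callback pattern for an app.py running in a Space. The Blocks layout is outside this diff, so the wiring below is illustrative only; the component names and the add_user_message helper are assumptions:

# Illustrative wiring only: the real Gradio layout is not part of this diff.
import gradio as gr

def add_user_message(user_message, history):
    # Append the user's turn with an empty bot slot for bot_kadi to fill.
    return "", history + [(user_message, None)]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    msg.submit(add_user_message, [msg, chatbot], [msg, chatbot]).then(
        bot_kadi, chatbot, chatbot
    )

demo.launch()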