Update app.py
app.py CHANGED
@@ -103,13 +103,20 @@ def rag_workflow(query):
     # Assume 'llm' and 'vector_store' are already initialized instances
     rag_chain = RAGChain(llm, vector_store)
 
-    
+
+    """
+    Pre-Retrieval-Stage
+    """
     # predict which python library to search in: (standard) kadiAPY-library or kadiAPY-cli-library
     code_library_usage_prediction = rag_chain.predict_library_usage(query)
     print(f"Predicted library usage: {code_library_usage_prediction}")
-    
 
-    
+    rewritten_query = rag_chain.rewrite_query(query)
+    print(f"Rewritten query: {rewritten_query}")
+
+    """
+    Retrieval-Stage
+    """
     kadiAPY_doc_documents = rag_chain.retrieve_contexts(query, k=3, {"usage": "doc"})
     kadiAPY_code_documents = rag_chain.retrieve_contexts(rewritten_query, k=3, {"usage": code_library_usage_prediction})
 
@@ -118,12 +125,18 @@ def rag_workflow(query):
 
 
 
-    
+    """
+    Pre-Generation-Stage
+    Adding each doc's metadata to the retrieved content (docs & code snippets)
+    """
     formatted_doc_snippets = rag_chain.format_documents(kadiAPY_doc_documents)
     formatted_code_snippets = rag_chain.format_documents(kadiAPY_code_documents)
     #print("FORMATTED Retrieved Document Contexts:", formatted_doc_snippets)
     #print("FORMATTED Retrieved Code Contexts:", formatted_code_snippets)
-    
+
+    """
+    Generation-Stage
+    """
     response = rag_chain.generate_response(query, formatted_doc_snippets, formatted_code_snippets)
     print("Generated Response:", response)
 
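Combined, the two hunks give the updated rag_workflow roughly the shape below. This is a consolidated sketch rather than the verbatim file: the stage banners are written as comments, 'llm' and 'vector_store' are assumed to be initialized elsewhere in app.py, and the keyword name 'filter' on retrieve_contexts is an assumption (the committed context lines pass the metadata dict after the keyword argument k=3, which Python does not accept, so some filter parameter of the author's RAGChain is implied).

def rag_workflow(query):
    # Assume 'llm' and 'vector_store' are already initialized instances
    rag_chain = RAGChain(llm, vector_store)

    # Pre-Retrieval-Stage: predict which python library to search in,
    # (standard) kadiAPY-library or kadiAPY-cli-library, then rewrite the query
    code_library_usage_prediction = rag_chain.predict_library_usage(query)
    print(f"Predicted library usage: {code_library_usage_prediction}")

    rewritten_query = rag_chain.rewrite_query(query)
    print(f"Rewritten query: {rewritten_query}")

    # Retrieval-Stage: fetch k=3 documentation chunks for the original query and
    # k=3 code chunks for the rewritten query ('filter' is an assumed keyword name)
    kadiAPY_doc_documents = rag_chain.retrieve_contexts(query, k=3, filter={"usage": "doc"})
    kadiAPY_code_documents = rag_chain.retrieve_contexts(rewritten_query, k=3, filter={"usage": code_library_usage_prediction})

    # Pre-Generation-Stage: add each doc's metadata to the retrieved content
    formatted_doc_snippets = rag_chain.format_documents(kadiAPY_doc_documents)
    formatted_code_snippets = rag_chain.format_documents(kadiAPY_code_documents)

    # Generation-Stage: answer the query from both context sets
    response = rag_chain.generate_response(query, formatted_doc_snippets, formatted_code_snippets)
    print("Generated Response:", response)

Note that only the code retrieval uses the rewritten query; documentation retrieval still uses the original query, so the rewrite step can specialize the query toward the predicted library without affecting documentation lookup.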