Update architecture-doc.md
docs/architecture-doc.md  +4 -4
````diff
--- a/docs/architecture-doc.md
+++ b/docs/architecture-doc.md
@@ -78,7 +78,7 @@ The backend layer handles the core functionality of processing queries, retrieving
 
 ```python
 @st.cache_data(ttl=3600, show_spinner=False)
-def cached_process_query(query, top_k=5, word_limit=100):
+def cached_process_query(query, top_k=5, word_limit=200):
     """
     Process a user query with caching to avoid redundant computation.
 
@@ -123,7 +123,7 @@ def cached_process_query(query, top_k=5, word_limit=100):
     # Return the complete response package
     return {"query": query, "answer_with_rag": llm_answer_with_rag, "citations": sources}
 
-def process_query(query, top_k=5, word_limit=100):
+def process_query(query, top_k=5, word_limit=200):
     """
     Process a query through the RAG pipeline with proper formatting.
 
@@ -208,7 +208,7 @@ def retrieve_passages(query, faiss_index, text_chunks, metadata_dict, top_k=5,
 - Formats the output with proper citations
 
 ```python
-def answer_with_llm(query, context=None, word_limit=100):
+def answer_with_llm(query, context=None, word_limit=200):
     """
     Generate an answer using the OpenAI GPT model with formatted citations.
 
@@ -390,7 +390,7 @@ def cached_load_data_files():
 
 ```python
 @st.cache_data(ttl=3600, show_spinner=False)
-def cached_process_query(query, top_k=5, word_limit=100):
+def cached_process_query(query, top_k=5, word_limit=200):
     # Cache query results for an hour
 ```
````
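All four hunks make the same edit: the default `word_limit` rises from 100 to 200 words, while the hour-long `st.cache_data` cache is left as is. Note that `st.cache_data` keys its cache on the function arguments, so calls that now default to `word_limit=200` will not reuse entries cached under the old default. A minimal sketch of the caching pattern follows; the delegation to `process_query` and the stub body are illustrative assumptions, not the document's actual implementation:

```python
import streamlit as st

def process_query(query, top_k=5, word_limit=200):
    # Stub for illustration only: the document's version runs the full
    # RAG pipeline (FAISS retrieval, then LLM generation with citations).
    return {"query": query, "answer_with_rag": "...", "citations": []}

@st.cache_data(ttl=3600, show_spinner=False)  # cache query results for an hour
def cached_process_query(query, top_k=5, word_limit=200):
    """Process a user query with caching to avoid redundant computation.

    st.cache_data hashes the arguments (query, top_k, word_limit) to form
    the cache key, so calls under the new 200-word default create fresh
    entries rather than reusing answers cached under word_limit=100.
    """
    return process_query(query, top_k=top_k, word_limit=word_limit)
```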
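The `answer_with_llm` hunk shows only the signature and docstring. The body below is a hedged reconstruction assuming the word limit is enforced through the prompt, using the current OpenAI chat-completions client; the model name is a placeholder, since the diff does not specify one:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def answer_with_llm(query, context=None, word_limit=200):
    """Generate an answer with formatted citations (hedged reconstruction)."""
    # Enforce the word budget through the prompt; 200 words doubles the
    # previous default of 100.
    system_msg = (
        f"Answer in at most {word_limit} words. "
        "Cite the provided sources in your answer."
    )
    user_msg = f"Context:\n{context or '(none)'}\n\nQuestion: {query}"
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model; the diff does not name one
        messages=[
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ],
    )
    return response.choices[0].message.content
```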