bupa1018 committed on
Commit
99bb0aa
·
1 Parent(s): 60544d9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -0
app.py CHANGED
@@ -194,6 +194,25 @@ def rag_workflow(query):
194
  print(f"References for the query:\n{references}\n")
195
  prompt = f"You are an intelligent AI assistant who is very good in giving answers for anything asked or instructed by the user. Provide a clear and concise answer based only on the pieces of retrieved context. You must follow this very strictly, do not use anything else other than the retrieved context. If no related Information is found from the context, reply that you do not know. \n\nContext:\n{context}\n\nQuery: {query}"
196
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
197
 
198
  response = llm.invoke(prompt)
199
  return response.content, references
 
194
  print(f"References for the query:\n{references}\n")
195
  prompt = f"You are an intelligent AI assistant who is very good in giving answers for anything asked or instructed by the user. Provide a clear and concise answer based only on the pieces of retrieved context. You must follow this very strictly, do not use anything else other than the retrieved context. If no related Information is found from the context, reply that you do not know. \n\nContext:\n{context}\n\nQuery: {query}"
196
 
197
def rag_workflow(query):
    """Answer *query* with retrieval-augmented generation.

    Retrieves the top-10 matching documents from the Chroma vector store,
    concatenates their text into a context block, builds a numbered
    reference list, prompts the LLM with context + query, and returns the
    answer text together with the references.

    Parameters:
        query: the user's question (plain string).

    Returns:
        tuple(str, str): (LLM answer text, newline-separated numbered
        references).
    """
    # query_chroma yields (document_text, reference) pairs -- assumed from
    # the unpacking below; confirm against query_chroma's definition.
    docs = query_chroma(vectorstore, query, k=10)

    # Join document texts with blank lines so the LLM sees clear boundaries.
    context = "\n\n".join(doc for doc, _ in docs)
    # Numbered reference list, e.g. "[1] somefile.py".
    references = "\n".join(f"[{i + 1}] {ref}" for i, (_, ref) in enumerate(docs))

    print(f"Context for the query:\n{context}\n")
    print(f"References for the query:\n{references}\n")

    # Fixed prompt typos: "documenation" -> "documentation",
    # "consice" -> "concise".
    prompt = f"""You are an intelligent Coding AI assistant who is very good in giving answers for anything asked or instructed by the user.
The retrieved context contains source code and documentation of an api library. Provide a clear and concise answer based on the information in the retrieved context.
If no related Information is found from the context, reply that you do not know.

Context:
{context}

Query:
{query}
"""

    response = llm.invoke(prompt)
    return response.content, references