angelesteban00 committed
Commit 2de5f29 · 1 Parent(s): 42c08ad
- README.md +1 -1
- app.py +8 -8
- requirements.txt +0 -2
README.md
CHANGED
@@ -7,7 +7,7 @@ sdk: gradio
 sdk_version: 4.9.1
 app_file: app.py
 pinned: false
----
+--- for local tests, create .gitignore file and load env:
 python3 -m venv venv
 source venv/bin/activate
 pip3 install -r requirements.txt
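The new README line mentions creating a .gitignore and loading env for local tests, but does not show how the env is loaded. One common pattern is a git-ignored .env file read at startup; a minimal sketch, assuming the python-dotenv package and the variable names below (neither appears in this commit):

# Local-test sketch only; python-dotenv and these variable names are assumptions, not part of this repo.
import os
from dotenv import load_dotenv

load_dotenv()  # read key/value pairs from a git-ignored .env file in the working directory
openai_api_key = os.environ.get("OPENAI_API_KEY")  # hypothetical variable name
mongo_uri = os.environ.get("MONGO_URI")            # hypothetical variable name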
app.py
CHANGED
@@ -41,7 +41,7 @@ def query_data(query,openai_api_key,mongo_uri):
     # If it's not specified (for example like in the code below),
     # then the default OpenAI model used in LangChain is OpenAI GPT-3.5-turbo, as of August 30, 2023
 
-    llm = OpenAI(openai_api_key=openai_api_key, temperature=0
+    llm = OpenAI(openai_api_key=openai_api_key, temperature=0)
 
 
     # Get VectorStoreRetriever: Specifically, Retriever for MongoDB VectorStore.
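The trailing context line refers to getting a retriever for the MongoDB vector store, but that code sits outside the hunk. A minimal sketch of what such a step can look like with the classic langchain package; the namespace, index name, and variable names here are assumptions, not taken from app.py:

# Hypothetical sketch (not the file's actual code): build a MongoDB Atlas Vector Search
# retriever using the classic imports that ship in the monolithic langchain package.
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import MongoDBAtlasVectorSearch

vector_search = MongoDBAtlasVectorSearch.from_connection_string(
    mongo_uri,                                        # Atlas connection string entered in the UI
    "sample_db.sample_collection",                    # assumed "database.collection" namespace
    OpenAIEmbeddings(openai_api_key=openai_api_key),  # embeddings used for the query vector
    index_name="default",                             # assumed Atlas Search index name
)
retriever = vector_search.as_retriever()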
@@ -51,7 +51,7 @@ def query_data(query,openai_api_key,mongo_uri):
     # Load "stuff" documents chain. Stuff documents chain takes a list of documents,
     # inserts them all into a prompt and passes that prompt to an LLM.
 
-    qa = RetrievalQA.from_chain_type(llm, chain_type="stuff", retriever=retriever
+    qa = RetrievalQA.from_chain_type(llm, chain_type="stuff", retriever=retriever)
 
     # Execute the chain
 
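The "# Execute the chain" context line precedes code that the hunk does not include. With the RetrievalQA API used above, execution typically looks like the sketch below; returning both the raw retriever hit and the LLM answer would match the two output boxes in the UI, but the variable names are assumptions:

# Hypothetical sketch of the execution step (not shown in this diff).
docs = retriever.get_relevant_documents(query)    # raw Atlas Vector Search result(s)
as_output = docs[0].page_content if docs else ""  # document field "as is"
retriever_output = qa.run(query)                  # answer produced by RetrievalQA + OpenAI LLM
return as_output, retriever_output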
@@ -63,19 +63,19 @@ def query_data(query,openai_api_key,mongo_uri):
 
 # Create a web interface for the app, using Gradio
 
-with gr.Blocks(theme=Base(), title="
+with gr.Blocks(theme=Base(), title="MongoDB Atlas Vector Search + RAG Architecture") as demo:
     gr.Markdown(
         """
-        #
+        # MongoDB Atlas Vector Search + RAG Architecture
         """)
-    openai_api_key = gr.Textbox(label = "OpenAI
-    mongo_uri = gr.Textbox(label = "Mongo URI", value = "mongodb+srv://", lines = 1)
+    openai_api_key = gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1)
+    mongo_uri = gr.Textbox(label = "Mongo Atlas URI", value = "mongodb+srv://", lines = 1)
     textbox = gr.Textbox(label="Enter your Question:")
     with gr.Row():
         button = gr.Button("Submit", variant="primary")
     with gr.Column():
-        output1 = gr.Textbox(lines=1, max_lines=10, label="
-        output2 = gr.Textbox(lines=1, max_lines=10, label="
+        output1 = gr.Textbox(lines=1, max_lines=10, label="Atlas Vector Search output (document field as is):")
+        output2 = gr.Textbox(lines=1, max_lines=10, label="Atlas Vector Search output + Langchain's RetrieverQA + OpenAI LLM:")
 
 # Call query_data function upon clicking the Submit button
 
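The hunk stops at the comment about calling query_data on Submit; in Gradio Blocks that wiring is a click handler plus a launch call. A minimal sketch consistent with the component names above; the input order is an assumption, not taken from this commit:

# Hypothetical sketch of the wiring implied by the final comment (this line sits inside the
# `with gr.Blocks(...) as demo:` block in app.py).
button.click(query_data, inputs=[textbox, openai_api_key, mongo_uri], outputs=[output1, output2])

# After the block, start the Gradio app.
demo.launch()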
requirements.txt
CHANGED
@@ -1,6 +1,4 @@
 langchain
-langchain-community
-langchain-openai
 pymongo[srv]==4.1.1
 bs4
 openai
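Dropping langchain-community and langchain-openai suggests the app relies only on import paths that still ship inside the monolithic langchain package; a sketch of those classic imports, inferred from the dependency change rather than shown in this commit:

# Classic monolithic-langchain import style (sketch inferred from requirements.txt, not from the diff).
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.vectorstores import MongoDBAtlasVectorSearch
from langchain.embeddings.openai import OpenAIEmbeddings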