Update app.py
Browse files
app.py
CHANGED
@@ -33,16 +33,21 @@ def create_web_search_vectors(search_results):
|
|
33 |
documents.append(Document(page_content=content, metadata={"source": result['href']}))
|
34 |
return FAISS.from_documents(documents, embed)
|
35 |
|
36 |
-
def get_response_with_search(query, model, num_calls=3, temperature=0.2):
|
37 |
search_results = duckduckgo_search(query)
|
38 |
-
|
39 |
-
if not search_results:
|
40 |
yield "No web search results available. Please try again.", ""
|
41 |
return
|
42 |
|
43 |
-
|
44 |
-
|
45 |
-
|
|
|
|
|
|
|
|
|
|
|
46 |
prompt = f"""Using the following context from web search results:
|
47 |
{context}
|
48 |
Write a detailed and complete research document that fulfills the following user request: '{query}'
|
@@ -63,14 +68,15 @@ After writing the document, please provide a list of sources used in your respon
|
|
63 |
main_content += chunk
|
64 |
yield main_content, ""
|
65 |
|
66 |
-
def respond(message, history, model, temperature, num_calls):
|
67 |
logging.info(f"User Query: {message}")
|
68 |
logging.info(f"Model Used: {model}")
|
69 |
logging.info(f"Temperature: {temperature}")
|
70 |
logging.info(f"Number of API Calls: {num_calls}")
|
|
|
71 |
|
72 |
try:
|
73 |
-
for main_content, sources in get_response_with_search(message, model, num_calls=num_calls, temperature=temperature):
|
74 |
response = f"{main_content}\n\n{sources}"
|
75 |
yield response
|
76 |
except Exception as e:
|
@@ -99,6 +105,7 @@ def create_gradio_interface():
|
|
99 |
gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[2]),
|
100 |
gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
|
101 |
gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
|
|
|
102 |
],
|
103 |
title="AI-powered Web Search Assistant",
|
104 |
description="Use web search to answer questions or generate summaries.",
|
@@ -127,8 +134,9 @@ def create_gradio_interface():
|
|
127 |
2. Select the model you want to use from the dropdown.
|
128 |
3. Adjust the Temperature to control the randomness of the response.
|
129 |
4. Set the Number of API Calls to determine how many times the model will be queried.
|
130 |
-
5.
|
131 |
-
6.
|
|
|
132 |
""")
|
133 |
|
134 |
return demo
|
|
|
33 |
documents.append(Document(page_content=content, metadata={"source": result['href']}))
|
34 |
return FAISS.from_documents(documents, embed)
|
35 |
|
36 |
+
def get_response_with_search(query, model, use_embeddings, num_calls=3, temperature=0.2):
|
37 |
search_results = duckduckgo_search(query)
|
38 |
+
|
39 |
+
if not search_results:
|
40 |
yield "No web search results available. Please try again.", ""
|
41 |
return
|
42 |
|
43 |
+
if use_embeddings:
|
44 |
+
web_search_database = create_web_search_vectors(search_results)
|
45 |
+
retriever = web_search_database.as_retriever(search_kwargs={"k": 5})
|
46 |
+
relevant_docs = retriever.get_relevant_documents(query)
|
47 |
+
context = "\n".join([doc.page_content for doc in relevant_docs])
|
48 |
+
else:
|
49 |
+
context = "\n".join([f"{result['title']}\n{result['body']}\nSource: {result['href']}" for result in search_results])
|
50 |
+
|
51 |
prompt = f"""Using the following context from web search results:
|
52 |
{context}
|
53 |
Write a detailed and complete research document that fulfills the following user request: '{query}'
|
|
|
68 |
main_content += chunk
|
69 |
yield main_content, ""
|
70 |
|
71 |
+
def respond(message, history, model, temperature, num_calls, use_embeddings):
|
72 |
logging.info(f"User Query: {message}")
|
73 |
logging.info(f"Model Used: {model}")
|
74 |
logging.info(f"Temperature: {temperature}")
|
75 |
logging.info(f"Number of API Calls: {num_calls}")
|
76 |
+
logging.info(f"Use Embeddings: {use_embeddings}")
|
77 |
|
78 |
try:
|
79 |
+
for main_content, sources in get_response_with_search(message, model, use_embeddings, num_calls=num_calls, temperature=temperature):
|
80 |
response = f"{main_content}\n\n{sources}"
|
81 |
yield response
|
82 |
except Exception as e:
|
|
|
105 |
gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[2]),
|
106 |
gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
|
107 |
gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
|
108 |
+
gr.Checkbox(label="Use Embeddings", value=True),
|
109 |
],
|
110 |
title="AI-powered Web Search Assistant",
|
111 |
description="Use web search to answer questions or generate summaries.",
|
|
|
134 |
2. Select the model you want to use from the dropdown.
|
135 |
3. Adjust the Temperature to control the randomness of the response.
|
136 |
4. Set the Number of API Calls to determine how many times the model will be queried.
|
137 |
+
5. Check or uncheck the "Use Embeddings" box to toggle between using embeddings or direct text summarization.
|
138 |
+
6. Press Enter or click the submit button to get your answer.
|
139 |
+
7. Use the provided examples or ask your own questions.
|
140 |
""")
|
141 |
|
142 |
return demo
|