Update app.py
app.py CHANGED

@@ -339,18 +339,18 @@ def web_search_and_extract_threading(
 @app.get("/api/adv_web_search")
 async def adv_web_search(
     q: str,
-    model: str = "
-    max_results: int =
+    model: str = "llama3-8b",
+    max_results: int = 5,
     timelimit: Optional[str] = None,
     safesearch: str = "moderate",
     region: str = "wt-wt",
     backend: str = "html",
     max_chars: int = 6000,
-    system_prompt: str = "You are
+    system_prompt: str = "You are an advanced AI chatbot. Provide the best answer to the user based on Google search results.",
     proxy: Optional[str] = None
 ):
     """
-    Combines web search, web extraction, and
+    Combines web search, web extraction, and FastAI chat for advanced search.
     """
     try:
         with WEBS(proxy=proxy) as webs:
@@ -368,19 +368,17 @@ async def adv_web_search(
             if result['text']:
                 extracted_text += f"## Content from: {result['link']}\n\n{result['text']}\n\n"
 
-            # 3. Construct the prompt for
-
-
-            # 4. Get the LLM's response using LLM class (similar to /api/llm)
-            messages = [{"role": "user", "content": llm_prompt}]
-            if system_prompt:
-                messages.insert(0, {"role": "system", "content": system_prompt})
+            # 3. Construct the prompt for FastAI
+            ai_prompt = f"Query by user: {q}. Answer the query asked by the user in detail. Search results: {extracted_text}"
 
-
-
+            # 4. Get the FastAI response using the FastAI service
+            try:
+                response = await asyncio.to_thread(FastAI(model=model, system_prompt=system_prompt).get_response, ai_prompt)
+            except Exception as e:
+                raise HTTPException(status_code=500, detail=f"Error during FastAI request: {e}")
 
             # 5. Return the results
-            return JSONResponse(content=
+            return JSONResponse(content={"response": response})
 
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during advanced search: {e}")
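The new code delegates generation to a `FastAI` helper that is constructed per request and is not shown in this diff. A minimal sketch of the interface the endpoint assumes — only the constructor arguments and the blocking `get_response()` method are implied by the diff; the class body here is hypothetical:

```python
# Hypothetical stand-in for the FastAI helper used by /api/adv_web_search.
# Only __init__(model, system_prompt) and get_response(prompt) are implied
# by the diff; everything inside is illustrative.
class FastAI:
    def __init__(self, model: str, system_prompt: str):
        self.model = model
        self.system_prompt = system_prompt

    def get_response(self, prompt: str) -> str:
        # A real implementation would call the underlying chat backend
        # synchronously; the endpoint wraps this call in asyncio.to_thread()
        # so the blocking request does not stall FastAPI's event loop.
        raise NotImplementedError
```

Because `get_response` is synchronous, running it through `asyncio.to_thread` keeps the async route handler from blocking; this assumes `asyncio` is imported at the top of app.py.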
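With the change applied, the endpoint can be exercised like any other GET route. A quick smoke test, assuming the app is served locally on port 8000 (adjust the base URL to your deployment):

```python
# Example request against a local run of app.py (host/port assumed).
import requests

resp = requests.get(
    "http://localhost:8000/api/adv_web_search",
    params={"q": "what is retrieval-augmented generation", "max_results": 3},
    timeout=120,  # search + extraction + the model call can be slow
)
resp.raise_for_status()
print(resp.json()["response"])  # the endpoint returns {"response": ...}
```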