Cleaned up the README and the readability of app.py and helpers.py
Files changed:
- README.md +1 -26
- app.py +18 -15
- helpers.py +1 -2
README.md
CHANGED
@@ -10,23 +10,7 @@ pinned: false
 ---
 # Search Arena
 
-**Search Arena** is a comprehensive platform designed to
-
-## Key Features
-
-- **Output Evaluations:** Analyze the quality and relevance of search results.
-- **Perplexity:** Measure the predictive uncertainty of language models used by the agents.
-- **Exa (Exhaustiveness Analysis):** Assess the breadth and depth of search coverage.
-- **Multi-Agent Comparison:** Compare multiple agents side-by-side.
-- **Customizable Benchmarks:** Define specific benchmarks and criteria for evaluation.
-- **User Feedback Integration:** Incorporate user feedback to improve agent performance.
-- **Performance Metrics:** Detailed reports on response time, precision, recall, and F1 score.
-
-## Benefits
-
-- **Enhanced Decision-Making:** Make informed decisions with clear, data-driven evaluations.
-- **Optimization:** Help developers optimize their search agents.
-- **Innovation:** Foster innovation by promoting the best-performing search technologies.
+**Search Arena** is a comprehensive platform designed to evaluate and compare search-based web agents. Leveraging a variety of metrics, Search Arena ensures that users can identify the most effective solutions for their needs.
 
 ## Getting Started
 
@@ -93,17 +77,8 @@ Follow these steps to set up the project on your local machine.
 ### Usage
 
 - Follow the on-screen instructions to evaluate and compare search-based web agents.
-- Customize benchmarks and criteria as needed.
-- Analyze detailed reports and visualizations to make informed decisions.
 
-### Contributing
-
-We welcome contributions! Please read our [Contributing Guide](CONTRIBUTING.md) to get started.
 
 ### License
 
 This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for more details.
-
-### Contact
-
-If you have any questions, feel free to open an issue or contact us at [[email protected]](mailto:[email protected]).
app.py
CHANGED
@@ -2,7 +2,7 @@ import streamlit as st
 import random
 from helpers import query_you_com, query_tavily, query_perplexity, query_brave
 from provider_info import search_providers
-
+from mongod_db import MongoDBHandler
 # from swarms.utils.loguru_logger import logger
 import time
 
@@ -47,20 +47,23 @@ def ProcessQuestion(question):
     answer_b = selected_functions[1](question)
 
     # Log into mongodb
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    mongo = MongoDBHandler()
+
+    try:
+        # logger.info(f"Logging question: {question}")
+        mongo.add(
+            {
+                "question": question,
+                "answer_a": answer_a,
+                "answer_b": answer_b,
+                "selected_functions": [f.__name__ for f in selected_functions],
+                "query_time": time.time(),
+            }
+        )
+        # logger.info("Successfully logged into mongodb")
+    except Exception as e:
+        # logger.error(f"Error logging into mongodb: {e}")
+        print(f"Error logging into mongodb: {e}")
 
     return answer_a, answer_b, selected_functions
 
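The commit imports `MongoDBHandler` from the local `mongod_db` module, which is not part of this diff; the only interface it relies on is the constructor and an `add(document)` method. For reference, here is a minimal sketch of what such a handler could look like, assuming `pymongo` plus an illustrative `MONGODB_URI` environment variable and `search_arena`/`logs` database and collection names (none of these names come from the commit):

```python
# Hypothetical sketch of mongod_db.MongoDBHandler -- the real module is not
# shown in this commit. Assumes pymongo and a MONGODB_URI environment variable.
import os
from typing import Any, Dict

from pymongo import MongoClient


class MongoDBHandler:
    def __init__(self, collection_name: str = "logs") -> None:
        # Database and collection names here are illustrative placeholders.
        uri = os.getenv("MONGODB_URI", "mongodb://localhost:27017")
        self.client = MongoClient(uri)
        self.collection = self.client["search_arena"][collection_name]

    def add(self, document: Dict[str, Any]) -> None:
        # Mirrors the mongo.add({...}) call in app.py's ProcessQuestion.
        self.collection.insert_one(document)
```

Note that `ProcessQuestion` wraps the logging call in `try`/`except`, so a failed insert only prints an error and never blocks the two answers from being returned.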
helpers.py
CHANGED
@@ -1,6 +1,7 @@
 import requests
 from dotenv import load_dotenv
 from typing import Optional
+from brave_ai import BraveAIWrapper
 import os
 
 # Load environment variables from .env file
@@ -89,8 +90,6 @@ def ProcessQuestion(question, model):
     return "Model not supported"
 
 
-from brave_ai import BraveAIWrapper
-
 def query_brave(query: str) -> Optional[str]:
     """
     Get a summary for the given query using BraveAIWrapper.