import os
from typing import Optional

import anthropic
from duckduckgo_search import DDGS
from openai import OpenAI
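
# Expected environment variables, depending on the chosen base_model:
#   OPENAI_API_KEY     - for gpt-* models
#   ANTHROPIC_API_KEY  - for claude-* models
#   PERPLEXITY_API_KEY - for sonar-* models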


class Agent:
    def __init__(self, base_model: str = "gpt-3.5-turbo", search_engine: str = "duckduckgo"):
        self.base_model = base_model
        self.search_engine = search_engine
        # Initialize the API client that matches the selected model family
        if "gpt" in base_model:
            self.openai = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        elif "claude" in base_model:
            self.claude = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
        elif "sonar" in base_model:
            # Perplexity serves an OpenAI-compatible API, so the OpenAI SDK
            # can be pointed at it via base_url
            self.perplexity = OpenAI(
                api_key=os.getenv("PERPLEXITY_API_KEY"),
                base_url="https://api.perplexity.ai",
            )

    def execute(self, message: str, project_name: str) -> Optional[str]:
        try:
            # Process the message based on the selected model
            if "gpt" in self.base_model:
                response = self._process_with_gpt(message)
            elif "claude" in self.base_model:
                response = self._process_with_claude(message)
            elif "sonar" in self.base_model:
                response = self._process_with_perplexity(message)
            else:
                response = "Unsupported model selected"
            return response
        except Exception as e:
            return f"Error processing message: {str(e)}"

    def subsequent_execute(self, message: str, project_name: str) -> Optional[str]:
        # Follow-up turns are currently handled the same way as the first turn
        return self.execute(message, project_name)

    def _process_with_gpt(self, message: str) -> str:
        response = self.openai.chat.completions.create(
            model=self.base_model,
            messages=[{"role": "user", "content": message}],
        )
        return response.choices[0].message.content

    def _process_with_claude(self, message: str) -> str:
        # The Messages API takes plain dicts and requires max_tokens;
        # 1024 is an arbitrary output cap
        response = self.claude.messages.create(
            model="claude-3-opus-20240229",
            max_tokens=1024,
            messages=[{"role": "user", "content": message}],
        )
        return response.content[0].text

    def _process_with_perplexity(self, message: str) -> str:
        # Map model names to Perplexity models
        model_mapping = {
            # Current models
            "sonar-reasoning-pro": "sonar-reasoning-pro",  # 127k context, 8k output
            "sonar-reasoning": "sonar-reasoning",          # 127k context
            "sonar-pro": "sonar-pro",                      # 200k context, 8k output
            "sonar": "sonar",                              # 127k context
            # Legacy models (deprecated after 2025-02-22)
            "llama-3.1-sonar-small-128k-online": "llama-3.1-sonar-small-128k-online",
            "llama-3.1-sonar-large-128k-online": "llama-3.1-sonar-large-128k-online",
            "llama-3.1-sonar-huge-128k-online": "llama-3.1-sonar-huge-128k-online",
        }
        # Use the mapped model, or fall back to sonar
        model = model_mapping.get(self.base_model, "sonar")
        response = self.perplexity.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": message}],
        )
        return response.choices[0].message.content

    def _search_web(self, query: str, num_results: int = 5) -> list:
        if self.search_engine == "duckduckgo":
            with DDGS() as ddgs:
                return list(ddgs.text(query, max_results=num_results))
        # Add support for other search engines as needed
        return []
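

if __name__ == "__main__":
    # Minimal usage sketch, assuming OPENAI_API_KEY is set in the environment.
    # "demo" is a placeholder project name; the class accepts project_name for
    # interface parity but does not use it yet.
    agent = Agent(base_model="gpt-3.5-turbo")
    print(agent.execute("Summarize what this agent does.", project_name="demo"))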