πŸŒπŸ’
Add: Perplexity AI support with multiple models
02031df
raw
history blame
5.25 kB
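"""Gradio front end for the Devika AI coding assistant.

Exposes a simple chat-style UI backed by Devika's Agent, with selectable
base models (OpenAI, Anthropic, and Perplexity) and search engines.
"""
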
import os
from dotenv import load_dotenv
load_dotenv() # Load environment variables from .env file
import gradio as gr
from threading import Thread
import tiktoken
import logging
from pathlib import Path
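
# Devika's project-local modules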
from src.config import Config
from src.logger import Logger
from src.project import ProjectManager
from src.state import AgentState
from src.agents import Agent

# Create necessary directories
base_dir = Path("/code")
for dir_name in ["db", "logs", "projects", "screenshots", "pdfs", ".gradio"]:
    dir_path = base_dir / dir_name
    dir_path.mkdir(exist_ok=True)
    os.chmod(dir_path, 0o755)

# Initialize core components
manager = ProjectManager()
agent_state = AgentState()
config = Config()
devika_logger = Logger()  # Devika's project logger from src.logger
TIKTOKEN_ENC = tiktoken.get_encoding("cl100k_base")  # tokenizer used for token counting

# Configure stdlib logging for this module
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def process_message(message, base_model="gpt-3.5-turbo", project_name="default", search_engine="duckduckgo"):
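    """Run one Devika agent turn and return the latest response.

    A fresh Agent is created per call. If the project already has state and
    the previous run completed, the message is treated as a follow-up
    (subsequent_execute); otherwise it starts a full agent run (execute).
    """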
    try:
        agent = Agent(base_model=base_model, search_engine=search_engine.lower())
        state = agent_state.get_latest_state(project_name)

        # Completed projects treat new messages as follow-ups; everything
        # else starts (or restarts) a full agent run.
        if state and agent_state.is_agent_completed(project_name):
            agent.subsequent_execute(message, project_name)
        else:
            agent.execute(message, project_name)

        # Return the newest message recorded for this project
        messages = manager.get_messages(project_name)
        return messages[-1]["message"] if messages else "No response generated"
    except Exception as e:
        logger.error(f"Error processing message: {e}")
        return f"An error occurred: {e}"
def create_gradio_interface():
    # Note: cache_examples is an option on gr.Examples, not gr.Blocks;
    # example caching is disabled below to avoid permission issues.
    with gr.Blocks(
        title="Devika AI Assistant",
        theme=gr.themes.Soft(),
        analytics_enabled=False
    ) as interface:
        gr.Markdown("""
        # πŸ€– Devika AI Assistant

        Devika is an advanced AI coding assistant that helps you with:
        - Writing and debugging code
        - Creating new projects
        - Answering programming questions
        - And much more!

        Simply type your request below and Devika will help you out.
        """)
        with gr.Row():
            with gr.Column(scale=2):
                message_input = gr.Textbox(
                    label="Your Message",
                    placeholder="Type your coding request here...",
                    lines=3
                )
                with gr.Row():
                    model_dropdown = gr.Dropdown(
                        choices=[
                            # OpenAI models
                            "gpt-3.5-turbo",
                            "gpt-4",
                            # Anthropic models
                            "claude-3-opus",
                            # Perplexity current models
                            "sonar-reasoning-pro",
                            "sonar-reasoning",
                            "sonar-pro",
                            "sonar",
                            # Perplexity legacy models
                            "llama-3.1-sonar-small-128k-online",
                            "llama-3.1-sonar-large-128k-online",
                            "llama-3.1-sonar-huge-128k-online"
                        ],
                        value="gpt-3.5-turbo",
                        label="Model"
                    )
                    search_engine_dropdown = gr.Dropdown(
                        choices=["DuckDuckGo", "Bing", "Google"],
                        value="DuckDuckGo",
                        label="Search Engine"
                    )
                # Hidden, fixed project name passed through to process_message
                project_name_box = gr.Textbox(value="default", visible=False)
                submit_btn = gr.Button("Send Message", variant="primary")
            with gr.Column(scale=3):
                output_box = gr.Markdown(label="Devika's Response")

        # Add examples without caching (avoids filesystem permission issues)
        gr.Examples(
            examples=[
                ["Create a React component for a todo list", "gpt-3.5-turbo", "DuckDuckGo"],
                ["Help me understand how to use Python decorators", "gpt-3.5-turbo", "DuckDuckGo"],
                ["Write a Node.js API endpoint for user authentication", "gpt-3.5-turbo", "DuckDuckGo"]
            ],
            inputs=[message_input, model_dropdown, search_engine_dropdown],
            outputs=output_box,
            fn=lambda msg, model, engine: process_message(msg, model, "default", engine),
            cache_examples=False
        )
        submit_btn.click(
            fn=process_message,
            inputs=[message_input, model_dropdown, project_name_box, search_engine_dropdown],
            outputs=output_box
        )

    return interface

# Create and launch the Gradio interface
interface = create_gradio_interface()

if __name__ == "__main__":
    interface.launch(
        server_name="0.0.0.0",  # listen on all interfaces (container-friendly)
        server_port=7860,
        share=False,
        debug=False,
        show_error=True
    )