import os
from dotenv import load_dotenv
load_dotenv()  # Load environment variables from .env file

import gradio as gr
import tiktoken
import logging
from pathlib import Path

from src.config import Config
from src.logger import Logger
from src.project import ProjectManager
from src.state import AgentState
from src.agents import Agent

# Create necessary directories with owner-writable, world-readable permissions
base_dir = Path("/code")
for dir_name in ["db", "logs", "projects", "screenshots", "pdfs", ".gradio"]:
    dir_path = base_dir / dir_name
    dir_path.mkdir(parents=True, exist_ok=True)
    os.chmod(dir_path, 0o755)  # rwxr-xr-x

# Initialize core components
manager = ProjectManager()
agent_state = AgentState()  # instance, named so it does not shadow the AgentState class
config = Config()
devika_logger = Logger()    # project logger from src.logger
TIKTOKEN_ENC = tiktoken.get_encoding("cl100k_base")

# Configure stdlib logging for this module
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
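
# TIKTOKEN_ENC is defined above but not used elsewhere in this file. A minimal
# sketch of one plausible use, counting prompt tokens before dispatch; the
# helper below is illustrative, not part of the original application:
def count_tokens(text: str) -> int:
    """Return the cl100k_base token count for `text`."""
    return len(TIKTOKEN_ENC.encode(text))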

def process_message(message, base_model="gpt-3.5-turbo", project_name="default", search_engine="duckduckgo"):
    """Run one agent turn for `project_name` and return the latest reply."""
    try:
        agent = Agent(base_model=base_model, search_engine=search_engine.lower())

        state = agent_state.get_latest_state(project_name)
        if not state:
            # No prior state for this project: start a fresh run
            agent.execute(message, project_name)
        elif agent_state.is_agent_completed(project_name):
            # Previous run finished: handle this message as a follow-up
            agent.subsequent_execute(message, project_name)
        else:
            # Previous run did not complete: execute again with the new message
            agent.execute(message, project_name)

        # Return the latest message for this project
        messages = manager.get_messages(project_name)
        return messages[-1]["message"] if messages else "No response generated"
    except Exception as e:
        logger.error(f"Error processing message: {e}")
        return f"An error occurred: {e}"

def create_gradio_interface():
    with gr.Blocks(
        title="Devika AI Assistant",
        theme=gr.themes.Soft(),
        analytics_enabled=False
    ) as interface:
        gr.Markdown("""
        # 🤖 Devika AI Assistant
        
        Devika is an advanced AI coding assistant that helps you with:
        - Writing and debugging code
        - Creating new projects
        - Answering programming questions
        - And much more!
        
        Simply type your request below and Devika will help you out.
        """)
        
        with gr.Row():
            with gr.Column(scale=2):
                message_input = gr.Textbox(
                    label="Your Message",
                    placeholder="Type your coding request here...",
                    lines=3
                )
                
                with gr.Row():
                    model_dropdown = gr.Dropdown(
                        choices=[
                            # OpenAI Models
                            "gpt-3.5-turbo",
                            "gpt-4",
                            # Anthropic Models
                            "claude-3-opus",
                            # Perplexity Current Models
                            "sonar-reasoning-pro",
                            "sonar-reasoning",
                            "sonar-pro",
                            "sonar",
                            # Perplexity Legacy Models
                            "llama-3.1-sonar-small-128k-online",
                            "llama-3.1-sonar-large-128k-online",
                            "llama-3.1-sonar-huge-128k-online"
                        ],
                        value="gpt-3.5-turbo",
                        label="Model"
                    )
                    search_engine_dropdown = gr.Dropdown(
                        choices=["DuckDuckGo", "Bing", "Google"],
                        value="DuckDuckGo",
                        label="Search Engine"
                    )
                
                submit_btn = gr.Button("Send Message", variant="primary")
            
            with gr.Column(scale=3):
                output_box = gr.Markdown(label="Devika's Response")
        
        # Example prompts (uncached to avoid filesystem permission issues)
        gr.Examples(
            examples=[
                ["Create a React component for a todo list", "gpt-3.5-turbo", "DuckDuckGo"],
                ["Help me understand how to use Python decorators", "gpt-3.5-turbo", "DuckDuckGo"],
                ["Write a Node.js API endpoint for user authentication", "gpt-3.5-turbo", "DuckDuckGo"]
            ],
            inputs=[message_input, model_dropdown, search_engine_dropdown],
            outputs=output_box,
            fn=lambda msg, model, engine: process_message(msg, model, "default", engine),
            cache_examples=False
        )
        
        submit_btn.click(
            fn=lambda msg, model, engine: process_message(msg, model, "default", engine),
            inputs=[message_input, model_dropdown, search_engine_dropdown],
            outputs=output_box
        )
    
    return interface

# Create and launch the Gradio interface
interface = create_gradio_interface()

if __name__ == "__main__":
    interface.launch(
        server_name="0.0.0.0",  # listen on all interfaces (needed inside containers)
        server_port=7860,
        share=False,
        debug=False,
        show_error=True
    )