acecalisto3 committed
Commit 187aba8
1 Parent(s): 7600d4c

Update app.py

Files changed (1)
app.py +244 -191
app.py CHANGED
@@ -1,205 +1,258 @@
 import os
-import subprocess
-import random
-import json
-from datetime import datetime
-import gradio as gr # Corrected import for gradio
-import requests
-from bs4 import BeautifulSoup
-
-class App(gr.Blocks): # Corrected class inheritance
     def __init__(self):
-        super().__init__()
-        self.app_state = {"components": []}
-        self.terminal_history = ""
-        self.components_registry = {
-            "Button": {
-                "properties": {
-                    "label": "Click Me",
-                    "onclick": ""
-                },
-                "description": "A clickable button",
-                "code_snippet": "gr.Button(value='{{label}}', variant='primary')"
-            },
-            "Textbox": {
-                "properties": {
-                    "label": "Enter text here...",
-                    "lines": 1
-                },
-                "description": "A simple textbox for user input.",
-                "code_snippet": "gr.Textbox(label='{{label}}', lines={{lines}})"
-            },
-            "Slider": {
-                "properties": {
-                    "label": "Adjust the slider:",
-                    "minimum": 0,
-                    "maximum": 100,
-                    "value": 50
-                },
-                "description": "A slider for selecting a value within a range.",
-                "code_snippet": "gr.Slider(label='{{label}}', minimum={{minimum}}, maximum={{maximum}}, value={{value}})"
-            },
-            "Dropdown": {
-                "properties": {
-                    "label": "Select an option:",
-                    "choices": ["Option 1", "Option 2", "Option 3"],
-                    "value": "Option 1"
-                },
-                "description": "A dropdown menu for selecting from a list of options.",
-                "code_snippet": "gr.Dropdown(label='{{label}}', choices={{choices}}, value='{{value}}')"
-            },
-            "Image": {
-                "properties": {
-                    "label": "Upload an image:",
-                    "type": "file"
-                },
-                "description": "An image component for displaying images.",
-                "code_snippet": "gr.Image(label='{{label}}', type='{{type}}')"
-            }
-            # ... Other component definitions
-        }
-        self.nlp_model_names = [
-            "facebook/bart-large-cnn", # Summarization
-            "gpt2", # Text Generation
-            "CodeGenForCausalLM" # Sentiment Analysis
-            # ... Other NLP model names from Hugging Face
-        ]
-        self.nlp_models = []
-        # self.initialize_nlp_models() # Moved to run() for Gradio
-        self.exited = False # Add the missing attribute
-
-    def initialize_nlp_models(self):
-        for nlp_model_name in self.nlp_model_names:
-            try:
-                # Assuming the use of transformers library for NLP models
-                from transformers import pipeline
-                model = pipeline('text-generation', model=nlp_model_name) # Adjust pipeline task if needed
-                self.nlp_models.append(model)
-            except Exception as e:
-                print(f"Failed to load model {nlp_model_name}: {e}")
-                self.nlp_models.append(None)
-
-    def get_nlp_response(self, input_text, model_index):
-        if self.nlp_models[model_index]:
-            response = self.nlp_models[model_index](input_text)
-            return response[0]['generated_text'] # Adjust response extraction if needed
-        else:
-            return "NLP model not available."
-
-    class Component:
-        def __init__(self, type, properties=None, id=None):
-            self.type = type
-            self.properties = properties or {}
-            self.id = id or f"{type}_{random.randint(1000, 9999)}"
-
-        def to_dict(self):
-            return {
-                "type": self.type,
-                "properties": self.properties,
-                "id": self.id
-            }
-
-        def render(self):
-            code_snippet = self.properties.get("code_snippet", "")
-            for key, value in self.properties.items():
-                code_snippet = code_snippet.replace(f"{{{{{key}}}}}", str(value))
-            return code_snippet
-
-    def update_app_canvas(self):
-        code = ""
-        for component in self.app_state["components"]:
-            code += component.render() + "\n"
-        return code
-
-    def add_component(self, component_type):
-        if component_type in self.components_registry:
-            component = self.Component(
-                type=component_type,
-                properties=self.components_registry[component_type]["properties"]
-            )
-            self.app_state["components"].append(component)
-            # self.update_app_canvas() # Updated to return code
-        else:
-            print(f"Component type {component_type} not found in registry.")

-    def run_terminal_command(self, command, history):
         try:
-            result = subprocess.run(command, shell=True, capture_output=True, text=True)
-            history += result.stdout + result.stderr
-            return history
         except Exception as e:
-            return str(e)
-
-    def compress_history(self, history):
-        lines = history.split('\\n')
-        compressed_lines = [line for line in lines if not line.strip().startswith('#')]
-        return '\\n'.join(compressed_lines)
-
-    def understand_test_results(self, test_results):
-        # Placeholder for understanding test results
-        return "Test results understood."
-
-    def get_help_message(self):
-        return "Available commands: add_component, run_terminal_command, compress_history, understand_test_results, get_help_message"
-
-    def process_input(self, input_text):
-        if input_text.startswith("add_component"):
-            _, component_type = input_text.split()
-            self.add_component(component_type)
-            return self.update_app_canvas() # Return updated code
-        elif input_text.startswith("run_terminal_command"):
-            _, command = input_text.split(maxsplit=1)
-            self.terminal_history = self.run_terminal_command(command, self.terminal_history)
-            return self.terminal_history
-        elif input_text.startswith("compress_history"):
-            self.terminal_history = self.compress_history(self.terminal_history)
-            return self.terminal_history
-        elif input_text.startswith("understand_test_results"):
-            _, test_results = input_text.split(maxsplit=1)
-            return self.understand_test_results(test_results)
-        elif input_text == "get_help_message":
-            return self.get_help_message()
-        else:
-            return "Unknown command. Type 'get_help_message' for available commands."
-    def execute_code(self, code):
         try:
-            exec(code)
         except Exception as e:
-            return f"Error executing code: {e}"
-        return "Code executed successfully"
-
-    def read_file(self, file_path):
         try:
-            with open(file_path, 'r') as file:
-                content = file.read()
-                return content
         except Exception as e:
-            return f"Error reading file: {e}"

-    def write_file(self, file_path, content):
         try:
-            with open(file_path, 'w') as file:
-                file.write(content)
-                return "File written successfully"
         except Exception as e:
-            return f"Error writing to file: {e}"
-
-    def fetch_web_content(self, url):
-        response = requests.get(url)
-        if response.status_code == 200:
-            soup = BeautifulSoup(response.content, 'html.parser')
-            return soup.prettify()
-        else:
-            return f"Failed to retrieve content. Status code: {response.status_code}"
-    def run(self):
-        self.initialize_nlp_models() # Initialize NLP models here
-        with gr.Blocks() as demo:
-            input_text = gr.Textbox(label="Enter your command:")
-            output_text = gr.Textbox(label="Output:")
-            btn = gr.Button("Run")
-            btn.click(self.process_input, inputs=[input_text], outputs=[output_text])
-            demo.launch()

 if __name__ == "__main__":
-    app = App()
-    app.run()

 import os
+import logging
+import asyncio
+import yaml
+from typing import Dict, List, Any, Tuple, Optional
+from abc import ABC, abstractmethod
+
+import gradio as gr
+from dotenv import load_dotenv
+from langchain.llms import HuggingFaceHub
+from langchain.agents import initialize_agent, AgentType
+from langchain.chains import LLMChain
+from langchain.prompts import PromptTemplate
+
+# Load environment variables
+load_dotenv()
+
+# Custom Exceptions
+class CodeFusionError(Exception):
+    """Base exception class for CodeFusion."""
+    pass
+
+class AgentInitializationError(CodeFusionError):
+    """Raised when there's an error initializing the agent."""
+    pass
+
+class ToolExecutionError(CodeFusionError):
+    """Raised when there's an error executing a tool."""
+    pass
+
+# Utility Functions
+def load_config() -> Dict:
+    """Load configuration from config.yaml file or use default values."""
+    config_path = 'config.yaml'
+    default_config = {
+        'model_name': "google/flan-t5-xl",
+        'api_key': "your_default_api_key_here",
+        'temperature': 0.5,
+        'verbose': True
+    }
+
+    try:
+        with open(config_path, 'r') as config_file:
+            config = yaml.safe_load(config_file)
+    except FileNotFoundError:
+        print(f"Config file not found at {config_path}. Using default configuration.")
+        config = default_config
+
+    # Override with environment variables if set
+    config['api_key'] = os.getenv('HUGGINGFACE_API_KEY', config['api_key'])
+    return config
+
+def setup_logging() -> logging.Logger:
+    """Set up logging configuration."""
+    logging.basicConfig(
+        level=logging.INFO,
+        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+        filename='codefusion.log'
+    )
+    return logging.getLogger(__name__)
+
+# Load configuration and set up logging
+config = load_config()
+logger = setup_logging()
+
+# Tool Classes
+class Tool(ABC):
+    """Abstract base class for all tools used by the agent."""
+
+    def __init__(self, name: str, description: str):
+        self.name = name
+        self.description = description
+        self.llm = HuggingFaceHub(
+            repo_id=config['model_name'],
+            model_kwargs={"temperature": config['temperature']},
+            huggingfacehub_api_token=config['api_key']
+        )
+
+    @abstractmethod
+    async def run(self, arguments: Dict[str, Any]) -> Dict[str, str]:
+        """Execute the tool's functionality."""
+        pass
+
+class CodeGenerationTool(Tool):
+    """Tool for generating code snippets in various languages."""
+
     def __init__(self):
+        super().__init__("Code Generation", "Generates code snippets in various languages.")
+        self.prompt_template = PromptTemplate(
+            input_variables=["language", "code_description"],
+            template="Generate {language} code for: {code_description}"
+        )
+        self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)

+    async def run(self, arguments: Dict[str, str]) -> Dict[str, str]:
+        language = arguments.get("language", "python")
+        code_description = arguments.get("code_description", "print('Hello, World!')")
         try:
+            code = await self.chain.arun(language=language, code_description=code_description)
+            return {"output": code}
         except Exception as e:
+            logger.error(f"Error in CodeGenerationTool: {e}")
+            raise ToolExecutionError(f"Failed to generate code: {e}")
+
+class CodeExplanationTool(Tool):
+    """Tool for explaining code snippets."""
+
+    def __init__(self):
+        super().__init__("Code Explanation", "Explains code snippets in simple terms.")
+        self.prompt_template = PromptTemplate(
+            input_variables=["code"],
+            template="Explain the following code in simple terms:\n\n{code}"
+        )
+        self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
+
+    async def run(self, arguments: Dict[str, str]) -> Dict[str, str]:
+        code = arguments.get("code", "print('Hello, World!')")
         try:
+            explanation = await self.chain.arun(code=code)
+            return {"output": explanation}
         except Exception as e:
+            logger.error(f"Error in CodeExplanationTool: {e}")
+            raise ToolExecutionError(f"Failed to explain code: {e}")
+
+class DebuggingTool(Tool):
+    """Tool for debugging code snippets."""
+
+    def __init__(self):
+        super().__init__("Debugging", "Helps identify and fix issues in code snippets.")
+        self.prompt_template = PromptTemplate(
+            input_variables=["code", "error_message"],
+            template="Debug the following code:\n\n{code}\n\nError message: {error_message}"
+        )
+        self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
+
+    async def run(self, arguments: Dict[str, str]) -> Dict[str, str]:
+        code = arguments.get("code", "")
+        error_message = arguments.get("error_message", "")
         try:
+            debug_result = await self.chain.arun(code=code, error_message=error_message)
+            return {"output": debug_result}
         except Exception as e:
+            logger.error(f"Error in DebuggingTool: {e}")
+            raise ToolExecutionError(f"Failed to debug code: {e}")
+
+# Agent Class
+class Agent:
+    """Represents an AI agent with specific tools and capabilities."""

+    def __init__(self, name: str, role: str, tools: List[Tool]):
+        self.name = name
+        self.role = role
+        self.tools = tools
+        self.memory: List[tuple] = []
+
         try:
+            self.llm = HuggingFaceHub(
+                repo_id=config['model_name'],
+                model_kwargs={"temperature": config['temperature']},
+                huggingfacehub_api_token=config['api_key']
+            )
+            self.agent = initialize_agent(
+                llm=self.llm,
+                tools=self.tools,
+                agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+                verbose=config['verbose']
+            )
         except Exception as e:
+            logger.error(f"Error initializing agent: {e}")
+            raise AgentInitializationError(f"Failed to initialize agent: {e}")
+
+    async def act(self, prompt: str, context: str) -> str:
+        """Perform an action based on the given prompt and context."""
+        self.memory.append((prompt, context))
+        try:
+            action = await self.agent.arun(prompt, context)
+            return action
+        except Exception as e:
+            logger.error(f"Error during agent action: {e}")
+            raise
+
+    def __str__(self) -> str:
+        return f"Agent: {self.name} (Role: {self.role})"
+
+# Main application functions
+async def run(message: str, history: List[Tuple[str, str]]) -> str:
+    """Process user input and generate a response using the agent system."""
+    agent = Agent(
+        name="CodeFusion",
+        role="AI Coding Assistant",
+        tools=[CodeGenerationTool(), CodeExplanationTool(), DebuggingTool()]
+    )
+    context = "\n".join([f"Human: {h[0]}\nAI: {h[1]}" for h in history])
+    try:
+        response = await agent.act(message, context)
+        return response
+    except Exception as e:
+        logger.error(f"Error processing request: {e}")
+        return "I apologize, but an error occurred while processing your request. Please try again."
+
+async def main():
+    """Main function to run the Gradio interface."""
+    examples = [
+        ["What is the purpose of this AI agent?", "I am an AI coding assistant designed to help with various programming tasks."],
+        ["Can you help me generate a Python function to calculate the factorial of a number?", "Certainly! Here's a Python function to calculate the factorial of a number:"],
+        ["Explain the concept of recursion in programming.", "Recursion is a programming concept where a function calls itself to solve a problem by breaking it down into smaller, similar subproblems."],
+    ]
+
+    gr.ChatInterface(
+        fn=run,
+        title="CodeFusion: Your AI Coding Assistant",
+        description="Ask me about code generation, explanation, debugging, or any other coding task!",
+        examples=examples,
+        theme="default"
+    ).launch()
+
+# Simple testing framework
+def run_tests():
+    """Run basic tests for the CodeFusion components."""
+
+    async def test_code_generation():
+        tool = CodeGenerationTool()
+        result = await tool.run({"language": "python", "code_description": "function to add two numbers"})
+        assert "def" in result["output"], "Code generation failed to produce a function"
+        print("Code Generation Test: Passed")
+
+    async def test_code_explanation():
+        tool = CodeExplanationTool()
+        result = await tool.run({"code": "def factorial(n):\n return 1 if n == 0 else n * factorial(n-1)"})
+        assert "recursive" in result["output"].lower(), "Code explanation failed to mention recursion"
+        print("Code Explanation Test: Passed")
+
+    async def test_debugging():
+        tool = DebuggingTool()
+        result = await tool.run({"code": "def divide(a, b):\n return a / b", "error_message": "ZeroDivisionError"})
+        assert "zero" in result["output"].lower(), "Debugging failed to address division by zero"
+        print("Debugging Test: Passed")
+
+    async def test_agent():
+        agent = Agent("TestAgent", "Tester", [CodeGenerationTool(), CodeExplanationTool(), DebuggingTool()])
+        result = await agent.act("Generate a Python function to calculate the square of a number", "")
+        assert "def" in result and "return" in result, "Agent failed to generate a proper function"
+        print("Agent Test: Passed")
+
+    async def run_all_tests():
+        await test_code_generation()
+        await test_code_explanation()
+        await test_debugging()
+        await test_agent()
+
+    asyncio.run(run_all_tests())

 if __name__ == "__main__":
+    import sys
+    if len(sys.argv) > 1 and sys.argv[1] == "--test":
+        run_tests()
+    else:
+        asyncio.run(main())
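
For orientation: python app.py launches the Gradio ChatInterface, while python app.py --test runs the checks in run_tests(); load_config() reads an optional config.yaml whose keys mirror default_config (model_name, api_key, temperature, verbose), with the HUGGINGFACE_API_KEY environment variable overriding the api_key entry. Below is a minimal sketch of driving one tool outside the chat UI; it assumes the dependencies imported in app.py are installed, a valid Hugging Face token is configured, app.py is importable as a module named app, and the LangChain calls behave as the code above expects.

# Minimal usage sketch (assumptions noted above), not part of the commit itself.
import asyncio

from app import CodeGenerationTool  # assumes app.py from this commit is on the import path

async def demo() -> None:
    tool = CodeGenerationTool()  # builds an LLMChain over the configured HuggingFaceHub model
    # Tool.run takes a dict of arguments and returns {"output": <model text>}
    result = await tool.run({"language": "python", "code_description": "reverse a string"})
    print(result["output"])

if __name__ == "__main__":
    asyncio.run(demo())

If the underlying model call fails, the tool raises ToolExecutionError and logs the error to codefusion.log via the module-level logger.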