Upload 2 files
- agents/software_engineer_agent.py +68 -66
- agents/ui_designer_agent.py +53 -17
agents/software_engineer_agent.py
CHANGED
@@ -1,6 +1,8 @@
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 from langchain_core.messages import AIMessage
+import asyncio
+from typing import Generator, Dict, Any
 
 MODEL_REPO = "Rahul-8799/software_engineer_mellum"
 
@@ -11,79 +13,79 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto"
 )
 
-def
-    """
+async def stream_inference(prompt: str) -> Generator[str, None, None]:
+    """Stream the model's output token by token"""
+    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
+
+    for _ in range(100):
+        output_ids = model.generate(
+            input_ids,
+            max_new_tokens=1,
+            pad_token_id=tokenizer.eos_token_id
+        )
+
+        new_token = output_ids[0][-1]
+        if new_token == tokenizer.eos_token_id:
+            break
+
+        token_text = tokenizer.decode([new_token])
+        yield token_text
+
+        input_ids = output_ids
+        await asyncio.sleep(0.05)
+
+async def run(state: Dict[str, Any]) -> Dict[str, Any]:
+    """Software Engineer generates responsive and interactive UI code"""
     messages = state["messages"]
     prompt = messages[-1].content
 
-    # Enhance the prompt with
+    # Enhance the prompt with modern web development requirements
     enhanced_prompt = f"""
-
-
-
-
-
-
-
-
-• Use Tailwind’s built-in responsive breakpoints (sm, md, lg, xl, 2xl) to adapt layouts for different screen sizes.
-
-2. Layout Techniques
-• Use CSS Grid for complex, multi-column or two-dimensional layouts.
-• Use Flexbox for flexible alignment of components like navigation bars, cards, buttons, and modals.
-• Maintain consistent spacing with utility classes such as gap, space-x, space-y, p-*, and m-*.
-
-3. Semantic HTML
-• Use semantic HTML tags appropriately: <header>, <nav>, <main>, <section>, <article>, <footer>, etc.
-• Avoid unnecessary <div> elements to prevent cluttered and unstructured markup.
-• Ensure proper nesting and hierarchy of elements.
-
-4. Accessibility
-• Add ARIA labels, role attributes, and alt text where needed for screen reader support.
-• Ensure keyboard accessibility with tabindex, proper focus states, and interactive elements being navigable.
-• Use <label> elements properly linked to form fields via the for attribute.
-
-5. Responsive Design
-• Use Tailwind’s responsive utilities to adjust layouts across various screen sizes.
-• Design components to be fully usable on both desktop and mobile devices.
-• Use collapsible or toggleable UI patterns (e.g., hamburger menus) for smaller viewports.
-
-6. Theming and Styling Consistency
-• Define and use CSS variables (--primary-color, --font-family, etc.) for theme consistency across components.
-• Maintain a clear visual hierarchy with consistent font sizes, weights, and colors.
-• Customize Tailwind’s theme configuration if needed for project-specific design tokens.
-
-7. JavaScript and Interactivity
-• Add interactivity using plain JavaScript, Alpine.js, or React if specified.
-• Implement common UI components such as modals, dropdowns, tooltips, accordions with appropriate open/close behavior.
-• Provide user feedback through form validations, dynamic updates, and transitions.
-
-8. Loading and Error States
-• Implement loading states using spinners, skeleton screens, or placeholders while data is being fetched or actions are processing.
-• Show error states using alerts, banners, or toast messages when applicable.
-• Use conditional rendering or state flags to handle visibility and transitions between states.
-
-9. Component Structure and Reusability
-• Break down the UI into modular, reusable components (e.g., Button, Card, Modal, Form).
-• Each component should:
-• Be self-contained with a clear purpose.
-• Accept inputs or props when necessary.
-• Maintain responsive and accessible markup by default.
-
-10. Code Quality Standards
-• Write clean, readable, and maintainable code.
-• Remove unused classes, scripts, or markup.
-• Follow consistent naming conventions and indentation rules.
-• Add comments only when necessary for clarity.
-
-
+Generate modern, responsive, and interactive UI code following these requirements:
+1. Use Tailwind CSS for responsive design
+2. Implement JavaScript for interactivity
+3. Add smooth animations and transitions
+4. Ensure mobile-first approach
+5. Include proper error handling
+6. Add loading states and feedback
+
 Original requirements: {prompt}
+
+Generate the following files:
+
+1. index.html - Main HTML structure
+2. styles.css - Custom styles (if needed beyond Tailwind)
+3. script.js - Interactive features
+4. tailwind.config.js - Tailwind configuration
+
+Format the output as:
+
+## HTML Structure
+```html
+[HTML code]
+```
+
+## CSS Styles
+```css
+[CSS code]
+```
+
+## JavaScript
+```javascript
+[JavaScript code]
+```
+
+## Tailwind Config
+```javascript
+[Tailwind configuration]
+```
 """
 
-
-
-
-
+    # Stream the output
+    output = ""
+    async for token in stream_inference(enhanced_prompt):
+        output += token
+
     return {
         "messages": [AIMessage(content=output)],
         "chat_log": state["chat_log"] + [{"role": "Software Engineer", "content": output}],
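Not part of the commit, for context only: a minimal sketch of how the new async `run` entry point could be exercised on its own. The state shape (a dict carrying `messages` and `chat_log`) is taken from the diff above; the `HumanMessage` input, the import path, and the `asyncio.run` driver are illustrative assumptions.

```python
# Hypothetical driver; state keys mirror what run() reads and returns in the diff above.
import asyncio

from langchain_core.messages import HumanMessage

from agents import software_engineer_agent  # assumes the repo layout shown in this commit


async def main() -> None:
    state = {
        "messages": [HumanMessage(content="Build a landing page with a pricing table.")],
        "chat_log": [],
    }
    result = await software_engineer_agent.run(state)
    # run() collects the streamed tokens into a single AIMessage.
    print(result["messages"][-1].content)


if __name__ == "__main__":
    asyncio.run(main())
```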
agents/ui_designer_agent.py
CHANGED
@@ -1,8 +1,10 @@
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 from langchain_core.messages import AIMessage
+import asyncio
+from typing import Generator, Dict, Any
 
-MODEL_REPO = "Rahul-8799/ui_designer_mistral"
+MODEL_REPO = "Rahul-8799/ui_designer_mistral"
 
 tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO, trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained(
@@ -11,30 +13,64 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto"
 )
 
-def
-    """
+async def stream_inference(prompt: str) -> Generator[str, None, None]:
+    """Stream the model's output token by token"""
+    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
+
+    for _ in range(100):
+        output_ids = model.generate(
+            input_ids,
+            max_new_tokens=1,
+            pad_token_id=tokenizer.eos_token_id
+        )
+
+        new_token = output_ids[0][-1]
+        if new_token == tokenizer.eos_token_id:
+            break
+
+        token_text = tokenizer.decode([new_token])
+        yield token_text
+
+        input_ids = output_ids
+        await asyncio.sleep(0.05)
+
+async def run(state: Dict[str, Any]) -> Dict[str, Any]:
+    """UI Designer creates responsive and interactive UI designs"""
     messages = state["messages"]
     prompt = messages[-1].content
 
-    # Enhance the prompt with
+    # Enhance the prompt with responsive design requirements
     enhanced_prompt = f"""
-Create a
-1.
-2.
-3.
-4.
-5.
-6.
-7. Ensure proper contrast and readability
-8. Use modern UI components and patterns
+Create a modern, responsive UI design following these requirements:
+1. Mobile-first approach with responsive breakpoints
+2. Modern CSS features (Flexbox, Grid, CSS Variables)
+3. Interactive elements with JavaScript
+4. Smooth animations and transitions
+5. Accessibility features
+6. Cross-browser compatibility
 
 Original requirements: {prompt}
+
+Provide the design in this format:
+
+## Responsive Layout
+[Describe the responsive layout structure]
+
+## CSS Framework
+[Specify CSS framework and custom styles]
+
+## JavaScript Features
+[List interactive features and animations]
+
+## Component Structure
+[Describe component hierarchy and relationships]
 """
 
-
-
-
-
+    # Stream the output
+    output = ""
+    async for token in stream_inference(enhanced_prompt):
+        output += token
+
     return {
         "messages": [AIMessage(content=output)],
         "chat_log": state["chat_log"] + [{"role": "UI Designer", "content": output}],
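Both agents stream by calling `model.generate` once per token and re-feeding the whole sequence, which keeps the loop simple but repeats the forward pass over a growing prompt each step. As a reference point only, and not part of this commit, `transformers` also provides `TextIteratorStreamer`, which yields decoded text from a single `generate` call running in a background thread; a sketch under the assumption that `model` and `tokenizer` are loaded at module level as in the agents above:

```python
# Alternative streaming sketch using transformers' TextIteratorStreamer.
# Not part of this commit; `model` and `tokenizer` are assumed to exist as in the agents above.
import asyncio
from threading import Thread
from typing import AsyncGenerator

from transformers import TextIteratorStreamer


async def stream_inference(prompt: str) -> AsyncGenerator[str, None]:
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks, so run it in a worker thread and consume the streamer here.
    thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=512),
    )
    thread.start()

    for text in streamer:       # yields decoded chunks as they are produced
        yield text
        await asyncio.sleep(0)  # hand control back to the event loop between chunks
    thread.join()
```

Waiting on the streamer still blocks the event loop between chunks; wrapping the iteration with `asyncio.to_thread` or a queue would be needed for fully non-blocking use.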