Spaces: Update app.py

app.py (CHANGED)
@@ -17,7 +17,7 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 import threading
 from http.server import HTTPServer, BaseHTTPRequestHandler
 import speech_recognition as sr
-from code_editor import code_editor
 from functools import lru_cache
 import hashlib
 import markdown2
@@ -25,7 +25,8 @@ from concurrent.futures import ThreadPoolExecutor
 from hdbscan import HDBSCAN
 import websockets
 from websockets.exceptions import ConnectionClosed
-from code_editor import code_editor
 # ========== Configuration ==========
 WORKSPACE = Path("/tmp/issue_workspace")
 WORKSPACE.mkdir(exist_ok=True)
@@ -47,41 +48,67 @@ HF_MODELS = {
 DEFAULT_MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"

 # ========== Modern Theme ==========
 theme = gr.themes.Soft(
     primary_hue="violet",
     secondary_hue="emerald",
     radius_size="lg",
     font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui"]
 ).set(
     button_primary_background_fill="linear-gradient(90deg, #8B5CF6 0%, #EC4899 100%)",
     button_primary_text_color="white",
-    button_primary_border_radius="12px",
     block_label_text_size="lg",
     block_label_text_weight="600",
     block_title_text_size="lg",
     block_title_text_weight="800",
     panel_background_fill="white",
-    panel_border_radius="16px",
     block_shadow="*shadow_drop_lg",
 )

 # ========== Enhanced Webhook Handler ==========
 class WebhookHandler(BaseHTTPRequestHandler):
     def do_POST(self):
         content_length = int(self.headers['Content-Length'])
         event = self.headers.get('X-GitHub-Event')
         action = payload.get('action')
         if action in ['opened', 'reopened', 'closed', 'assigned']:
             asyncio.run_coroutine_threadsafe(
             )
         self.send_response(200)
         self.end_headers()

 # ========== AI-Powered Issue Manager ==========
 class IssueManager:
@@ -92,315 +119,1054 @@ class IssueManager:
[Most removed lines in this hunk did not survive extraction; the legible ones are listed below, followed by the updated file content.]
-        self.collaborators: Dict[str, dict] = {}
-            "Critical": ["critical", "urgent", "security", "crash"],
-            "High": ["high", "important", "error", "regression"],
-            "Medium": ["medium", "bug", "performance"],
-            "Low": ["low", "documentation", "enhancement"]
-        self.issue_clusters: Dict[int, List[int]] = {} # Store clusters
-    async def cached_suggestion(self, issue_hash: str, model: str):
-    async def crawl_issues(self, repo_url: str, github_token: str, hf_token: str) -> Tuple[bool, str]:
-            self.repo_url = repo_url
-            self.github_token = github_token
-            self.hf_token = hf_token
-            self.repo = Repo.clone_from(repo_url, WORKSPACE / "repo")
-            headers = {"Authorization": f"token {github_token}"}
-    async def generate_code_patch(self, issue_number: int) -> dict:
-    async def suggest_resolution(self, issue_hash: str, model: str) -> str:
-        issue = self.issues[int(issue_hash)]
-        prompt = f"""
-        ## Issue: {issue['title']}
-            with gr.Row():
-                gr.Markdown("### 🏆 Achievement System")
-                badges = gr.HTML("<div class='badges'></div>")
-        # Enhanced Event Handlers
-        async def generate_patch(issue_num):
-            patch = await manager.generate_code_patch(issue_num)
-            return gr.JSON(value=patch)
-        def update_code_editor(files):
-            return code_editor(value=files, language=language_select.value)
-        app.load(
-            fn=init_collaboration,
-            inputs=[],
-            outputs=collab_status,
-            _js=web_socket_js()
-        )
-            outputs=[issue_list, stats_plot]
-            outputs=code_edit
-        collabWs.onmessage = function(event) {
-            const data = JSON.parse(event.data);
-            if (data.type === 'code_update') {
-                const issueNum = data.issue_num;
-                const delta = data.delta;
-                const codeEditor = document.getElementById(`code-editor-${issueNum}`);
-                if (codeEditor) {
-                    codeEditor.applyDelta(delta);
-                }
-            } else if (data.type === 'collaboration_status') {
-                document.getElementById('collab-list').innerHTML =
-                    data.map(u => `<div class="collab-item">${u.name}: ${u.status}</div>`).join('');
-            }
-        };
-        </script>
-        """
-        async with websockets.serve(handle_ws_connection, "localhost", WS_PORT):
-            await asyncio.Future()
-        share=True,

app.py after the update (new side of the diff):
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
import speech_recognition as sr
# Removed duplicate import: from code_editor import code_editor
from functools import lru_cache
import hashlib
import markdown2
# ... (unchanged lines omitted) ...
from hdbscan import HDBSCAN
import websockets
from websockets.exceptions import ConnectionClosed
from code_editor import code_editor, OTCodeEditor  # Assuming OTCodeEditor is also in code_editor

# ========== Configuration ==========
WORKSPACE = Path("/tmp/issue_workspace")
WORKSPACE.mkdir(exist_ok=True)
# ... (unchanged lines omitted) ...
DEFAULT_MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"

# ========== Modern Theme ==========
# Define the base theme
theme = gr.themes.Soft(
    primary_hue="violet",
    secondary_hue="emerald",
    radius_size="lg",
    font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui"]
).set(
    # Apply custom settings using .set()
    button_primary_background_fill="linear-gradient(90deg, #8B5CF6 0%, #EC4899 100%)",
    button_primary_text_color="white",
    # button_primary_border_radius="12px", # <-- FIX: Removed this line causing the TypeError
    block_label_text_size="lg",
    block_label_text_weight="600",
    block_title_text_size="lg",
    block_title_text_weight="800",
    panel_background_fill="white",
    # panel_border_radius="16px", # Assuming this might also cause issues if not supported; commented out as a precaution. Uncomment if needed and supported.
    block_shadow="*shadow_drop_lg",
)

# ========== Enhanced Webhook Handler ==========
class WebhookHandler(BaseHTTPRequestHandler):
    # Keep a reference to the manager instance
    manager_instance = None

    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        try:
            payload = json.loads(self.rfile.read(content_length).decode('utf-8'))
        except json.JSONDecodeError:
            self.send_response(400)
            self.end_headers()
            self.wfile.write(b"Invalid JSON payload")
            return
        except Exception as e:
            logger.error(f"Error reading webhook payload: {e}")
            self.send_response(500)
            self.end_headers()
            return

        event = self.headers.get('X-GitHub-Event')
        logger.info(f"Received GitHub webhook event: {event}")

        if event == 'issues' and WebhookHandler.manager_instance:
            action = payload.get('action')
            logger.info(f"Issue action: {action}")
            if action in ['opened', 'reopened', 'closed', 'assigned']:
                # Ensure the event loop is running in the webhook thread if needed
                loop = asyncio.get_event_loop()
                asyncio.run_coroutine_threadsafe(
                    WebhookHandler.manager_instance.handle_webhook_event(event, action, payload),
                    loop
                )
        elif event == 'ping':
            logger.info("Received GitHub webhook ping.")
        else:
            logger.warning(f"Unhandled event type: {event} or manager not initialized.")

        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"OK")

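# Editor's sketch (not part of the original commit): the handler above relies on
# WebhookHandler.manager_instance and on a running asyncio loop being reachable from the
# server thread. A minimal way to start it on a background thread might look like the
# commented code below; the port value and the loop handling are assumptions.
#
#     def start_webhook_server(manager, main_loop, port=8000):
#         WebhookHandler.manager_instance = manager
#         def _serve():
#             # make the app's loop the "current" loop in the server thread so that
#             # do_POST's asyncio.get_event_loop() finds it for run_coroutine_threadsafe
#             asyncio.set_event_loop(main_loop)
#             HTTPServer(("0.0.0.0", port), WebhookHandler).serve_forever()
#         threading.Thread(target=_serve, daemon=True).start()
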
# ========== AI-Powered Issue Manager ==========
class IssueManager:
    # ... (unchanged lines omitted) ...
        self.current_issue: Optional[int] = None
        self.github_token: Optional[str] = None
        self.hf_token: Optional[str] = None
        self.collaborators: Dict[str, dict] = {}  # Example: {"user1": {"status": "editing file.py"}}
        self.points: int = 0
        self.severity_rules: Dict[str, List[str]] = {
            "Critical": ["critical", "urgent", "security", "crash", "blocker"],
            "High": ["high", "important", "error", "regression", "major"],
            "Medium": ["medium", "bug", "performance", "minor"],
            "Low": ["low", "documentation", "enhancement", "trivial", "feature"]
        }
        self.issue_clusters: Dict[int, List[int]] = {}  # Store clusters: {cluster_id: [issue_index1, issue_index2]}
        self.issue_list_for_clustering: List[dict] = []  # Store issues in list order for clustering index mapping
        # self._init_local_models() # Consider lazy loading or conditional loading
        self.ws_clients: List[websockets.WebSocketClientProtocol] = []
        self.code_editors: Dict[int, OTCodeEditor] = {}  # Store code editors for each issue

    # Placeholder for local model initialization - implement actual loading if needed
    def _init_local_models(self):
        logger.info("Initializing local models (placeholder)...")
        # self.code_model = pipeline(...)
        # self.summarizer = pipeline(...)
        logger.info("Local models initialized (placeholder).")

    # Simple hash for caching based on issue content
    def _get_issue_hash(self, issue_data: dict) -> str:
        content = f"{issue_data.get('title', '')}{issue_data.get('body', '')}"
        return hashlib.md5(content.encode()).hexdigest()

    @lru_cache(maxsize=100)
    async def cached_suggestion(self, issue_hash: str, model: str) -> str:
        # Find the issue corresponding to the hash (inefficient, improve if needed)
        found_issue = None
        for issue in self.issues.values():
            if self._get_issue_hash(issue) == issue_hash:
                found_issue = issue
                break
        if not found_issue:
            return "Error: Issue not found for the given hash."

        logger.info(f"Cache miss or first request for issue hash {issue_hash}. Requesting suggestion from {model}.")
        return await self.suggest_resolution(found_issue, model)

    async def handle_webhook_event(self, event: str, action: str, payload: dict):
        logger.info(f"Processing webhook event: {event}, action: {action}")
        issue_data = payload.get('issue')
        if not issue_data:
            logger.warning("Webhook payload missing 'issue' data.")
            return

        issue_number = issue_data.get('number')
        if not issue_number:
            logger.warning("Webhook issue data missing 'number'.")
            return

        if action == 'closed':
            logger.info(f"Removing closed issue {issue_number} from active list.")
            self.issues.pop(issue_number, None)
            # Optionally remove associated editor, etc.
            self.code_editors.pop(issue_number, None)
        elif action in ['opened', 'reopened', 'edited']:  # Handle edited issues too
            logger.info(f"Adding/Updating issue {issue_number} from webhook.")
            self.issues[issue_number] = issue_data
            # Potentially trigger re-clustering or update specific issue details
        elif action == 'assigned':
            logger.info(f"Issue {issue_number} assigned to {payload.get('assignee', {}).get('login', 'N/A')}")
            self.issues[issue_number] = issue_data  # Update issue data
        else:
            logger.info(f"Ignoring action '{action}' for issue {issue_number}.")

        # Consider triggering a UI update after handling the webhook
        # This might involve re-crawling or just updating the specific issue
        await self.broadcast_issue_update()  # Example function to notify clients

    async def crawl_issues(self, repo_url: str, github_token: str, hf_token: str) -> Tuple[List[List], go.Figure, str]:
        """
        Crawls issues, updates internal state, performs clustering, and returns data for UI update.
        """
        if not repo_url or not github_token or not hf_token:
            return [], go.Figure(), "Error: Repository URL, GitHub Token, and HF Token are required."

        logger.info(f"Starting issue crawl for {repo_url}")
        self.repo_url = repo_url
        self.github_token = github_token
        self.hf_token = hf_token
        self.issues = {}  # Reset issues before crawl

        # Extract owner/repo from URL
        match = re.match(r"https?://github\.com/([^/]+)/([^/]+)", repo_url)
        if not match:
            logger.error(f"Invalid GitHub URL format: {repo_url}")
            return [], go.Figure(), "Error: Invalid GitHub URL format. Use https://github.com/owner/repo"
        owner, repo_name = match.groups()
        api_url = f"{GITHUB_API}/{owner}/{repo_name}/issues?state=open"  # Fetch only open issues

        headers = {
            "Authorization": f"token {github_token}",
            "Accept": "application/vnd.github.v3+json"
        }

        try:
            async with aiohttp.ClientSession(headers=headers) as session:
                async with session.get(api_url) as response:
                    response.raise_for_status()  # Raise exception for bad status codes
                    issues_data = await response.json()
                    logger.info(f"Fetched {len(issues_data)} open issues.")
                    for issue in issues_data:
                        issue_number = issue['number']
                        self.issues[issue_number] = {
                            "id": issue_number,
                            "title": issue.get('title', 'No Title'),
                            "body": issue.get('body', ''),
                            "state": issue.get('state', 'unknown'),
                            "labels": [label['name'] for label in issue.get('labels', [])],
                            "assignee": issue.get('assignee', {}).get('login') if issue.get('assignee') else None,
                            "url": issue.get('html_url', '#')
                            # Add other relevant fields if needed
                        }

            if not self.issues:
                logger.info("No open issues found.")
                return [], go.Figure(), "No open issues found in the repository."

            # Prepare data for clustering
            self.issue_list_for_clustering = list(self.issues.values())
            logger.info("Clustering issues...")
            await self._cluster_similar_issues()  # Update self.issue_clusters

            # Prepare data for Gradio Dataframe
            dataframe_data = []
            severity_counts = {"Critical": 0, "High": 0, "Medium": 0, "Low": 0, "Unknown": 0}

            # Map clustered indices back to issue numbers and determine severity
            cluster_map = {}  # {issue_index: cluster_id}
            for cluster_id, indices in self.issue_clusters.items():
                for index in indices:
                    cluster_map[index] = cluster_id

            for i, issue in enumerate(self.issue_list_for_clustering):
                severity = self._determine_severity(issue['labels'])
                severity_counts[severity] += 1
                cluster_id = cluster_map.get(i, -1)  # -1 for noise/unclustered
                dataframe_data.append([
                    issue['id'],
                    issue['title'],
                    severity,
                    cluster_id if cluster_id != -1 else "N/A"  # Display N/A for noise
                ])

            logger.info("Generating statistics plot...")
            stats_fig = self._generate_stats_plot(severity_counts)

            success_msg = f"Found {len(self.issues)} open issues. Clustered into {len(self.issue_clusters)} groups (excluding noise)."
            logger.info(success_msg)
            return dataframe_data, stats_fig, success_msg

        except aiohttp.ClientResponseError as e:
            logger.error(f"GitHub API request failed: {e.status} {e.message}")
            error_msg = f"Error fetching issues: {e.status} - {e.message}. Check token permissions and repo URL."
            if e.status == 404:
                error_msg = f"Error: Repository not found at {repo_url}. Check the URL."
            elif e.status == 401:
                error_msg = "Error: Invalid GitHub token or insufficient permissions."
            return [], go.Figure(), error_msg
        except GitCommandError as e:
            logger.error(f"Git clone error: {e}")
            return [], go.Figure(), f"Error cloning repository: {e}"
        except Exception as e:
            logger.exception(f"An unexpected error occurred during issue crawl: {e}")  # Log full traceback
            return [], go.Figure(), f"An unexpected error occurred: {e}"

    def _determine_severity(self, labels: List[str]) -> str:
        """Determines issue severity based on labels."""
        labels_lower = [label.lower() for label in labels]
        for severity, keywords in self.severity_rules.items():
            if any(keyword in label for keyword in keywords for label in labels_lower):
                return severity
        return "Unknown"  # Default if no matching label found

    def _generate_stats_plot(self, severity_counts: Dict[str, int]) -> go.Figure:
        """Generates a Plotly bar chart for issue severity distribution."""
        severities = list(severity_counts.keys())
        counts = list(severity_counts.values())

        fig = px.bar(
            x=severities,
            y=counts,
            title="Issue Severity Distribution",
            labels={'x': 'Severity', 'y': 'Number of Issues'},
            color=severities,  # Color bars by severity
            color_discrete_map={  # Define colors
                'Critical': '#DC2626',  # Red
                'High': '#F97316',  # Orange
                'Medium': '#FACC15',  # Yellow
                'Low': '#84CC16',  # Lime
                'Unknown': '#6B7280'  # Gray
            }
        )
        fig.update_layout(
            showlegend=False,  # Hide legend if coloring by severity directly
            yaxis_title="Number of Issues",
            xaxis_title="Severity Level",
            plot_bgcolor='rgba(0,0,0,0)',  # Transparent background
            paper_bgcolor='rgba(0,0,0,0)'
        )
        return fig

    async def _cluster_similar_issues(self):
        """Generates embeddings and clusters issues using HDBSCAN."""
        if not self.issue_list_for_clustering or not self.hf_token:
            logger.warning("Cannot cluster issues: No issues loaded or HF token missing.")
            self.issue_clusters = {}
            return

        logger.info("Generating embeddings for clustering...")
        try:
            embeddings = await self._generate_embeddings([f"{i.get('title','')} {i.get('body','')}" for i in self.issue_list_for_clustering])
            if not embeddings or len(embeddings) != len(self.issue_list_for_clustering):
                logger.error("Failed to generate valid embeddings for all issues.")
                self.issue_clusters = {}
                return

            logger.info(f"Generated {len(embeddings)} embeddings. Running HDBSCAN...")
            # Use HDBSCAN for density-based clustering
            # min_cluster_size: minimum number of samples in a cluster
            # metric: distance metric used
            # allow_single_cluster: If True, allows forming a single large cluster
            clusterer = HDBSCAN(min_cluster_size=2, metric='cosine', allow_single_cluster=True, gen_min_span_tree=True)
            clusters = clusterer.fit_predict(embeddings)

            self.issue_clusters = {}
            for i, cluster_id in enumerate(clusters):
                if cluster_id == -1:  # HDBSCAN uses -1 for noise points
                    continue  # Skip noise points
                if cluster_id not in self.issue_clusters:
                    self.issue_clusters[cluster_id] = []
                self.issue_clusters[cluster_id].append(i)  # Store original index

            logger.info(f"Clustering complete. Found {len(self.issue_clusters)} clusters (excluding noise).")

        except Exception as e:
            logger.exception(f"Error during issue clustering: {e}")
            self.issue_clusters = {}

    async def _generate_embeddings(self, texts: List[str]):
        """Generates sentence embeddings using Hugging Face Inference API."""
        if not self.hf_token:
            logger.error("Hugging Face token is not set. Cannot generate embeddings.")
            return None

        # Recommended embedding model (check HF for alternatives if needed)
        model_id = "sentence-transformers/all-mpnet-base-v2"
        api_url = f"{HF_INFERENCE_API}/{model_id}"
        headers = {"Authorization": f"Bearer {self.hf_token}"}

        logger.info(f"Requesting embeddings from {api_url} for {len(texts)} texts.")
        async with aiohttp.ClientSession(headers=headers) as session:
            try:
                response = await session.post(api_url, json={"inputs": texts})
                response.raise_for_status()
                result = await response.json()
                # Check if the result is a list of embeddings (floats)
                if isinstance(result, list) and all(isinstance(emb, list) for emb in result):
                    logger.info(f"Successfully received {len(result)} embeddings.")
                    return result
                else:
                    logger.error(f"Unexpected embedding format received: {type(result)}. Full response: {result}")
                    return None
            except aiohttp.ClientResponseError as e:
                logger.error(f"HF Inference API request failed: {e.status} {e.message}")
                logger.error(f"Response body: {await e.response.text()}")
                return None
            except Exception as e:
                logger.exception(f"An unexpected error occurred during embedding generation: {e}")
                return None

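    # Editor's sketch (not part of the original commit): some hdbscan builds reject
    # metric='cosine'. An equivalent fallback, stated here as an assumption rather than a
    # required change, is to L2-normalise the embeddings and cluster with the euclidean
    # metric; on unit vectors, euclidean distance is a monotonic function of cosine distance.
    #
    #     import numpy as np
    #     X = np.asarray(embeddings, dtype=np.float32)
    #     X /= np.linalg.norm(X, axis=1, keepdims=True) + 1e-12   # unit-normalise rows
    #     clusterer = HDBSCAN(min_cluster_size=2, metric='euclidean')
    #     clusters = clusterer.fit_predict(X)
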
    async def generate_code_patch(self, issue_number: int, model_key: str) -> dict:
        """Generates a code patch suggestion using a selected AI model."""
        if issue_number not in self.issues:
            return {"error": f"Issue {issue_number} not found."}
        if not self.hf_token:
            return {"error": "Hugging Face token not set."}
        if model_key not in HF_MODELS:
            return {"error": f"Invalid model key: {model_key}"}

        issue = self.issues[issue_number]
        model_id = HF_MODELS[model_key]
        logger.info(f"Generating patch for issue {issue_number} using model {model_id}")

        # --- Context Gathering (Simplified) ---
        # In a real scenario, this needs to be much smarter:
        # - Identify relevant files based on issue text, stack traces, etc.
        # - Potentially use git history or blame to find relevant code sections.
        # For now, we'll use a placeholder or skip context if too complex.
        context = "Context gathering not implemented. Provide code snippets in the issue description."
        # context = await self._get_code_context(issue_number) # Uncomment if implemented

        # --- Prompt Engineering ---
        prompt = f"""You are an expert programmer tasked with fixing a bug described in a GitHub issue.
Analyze the following issue and provide a code patch in standard `diff` format.
Focus only on the necessary changes to resolve the problem described.
Explain your reasoning briefly before the patch.

## Issue Title: {issue.get('title', 'N/A')}
## Issue Body:
{issue.get('body', 'N/A')}

## Relevant Code Context (if available):
{context}

## Instructions:
1. Analyze the issue and the context.
2. Determine the code changes needed.
3. Provide the changes as a Git diff block (```diff ... ```).
4. If you cannot determine a patch, explain why.

## Patch Suggestion:
"""

        # --- Call Inference API ---
        api_url = f"{HF_INFERENCE_API}/{model_id}"
        headers = {"Authorization": f"Bearer {self.hf_token}"}
        payload = {
            "inputs": prompt,
            "parameters": {  # Adjust parameters as needed
                "max_new_tokens": 1024,  # Max length of the generated patch + explanation
                "temperature": 0.3,  # Lower temperature for more deterministic code
                "return_full_text": False,  # Only get the generated part
                "do_sample": True,
                "top_p": 0.9,
            }
        }

        try:
            async with aiohttp.ClientSession(headers=headers) as session:
                async with session.post(api_url, json=payload) as response:
                    response.raise_for_status()
                    result = await response.json()
                    if result and isinstance(result, list):
                        generated_text = result[0].get('generated_text', '')
                        logger.info(f"Received patch suggestion from {model_id}")
                        # Basic extraction of diff block (improve if needed)
                        diff_match = re.search(r"```diff\n(.*?)```", generated_text, re.DOTALL)
                        explanation = generated_text.split("```diff")[0].strip()
                        patch = diff_match.group(1).strip() if diff_match else "No diff block found in response."

                        return {
                            "explanation": explanation,
                            "patch": patch,
                            "model_used": model_id
                        }
                    else:
                        logger.error(f"Unexpected response format from {model_id}: {result}")
                        return {"error": "Received unexpected response format from AI model."}
        except aiohttp.ClientResponseError as e:
            logger.error(f"HF Inference API request failed for patch generation: {e.status} {e.message}")
            logger.error(f"Response body: {await e.response.text()}")
            return {"error": f"AI model request failed ({e.status}). Check model availability and HF token."}
        except Exception as e:
            logger.exception(f"Error generating code patch: {e}")
            return {"error": f"An unexpected error occurred: {e}"}

    async def _get_code_context(self, issue_number: int) -> str:
        """Placeholder for retrieving relevant code context for an issue."""
        # This needs a proper implementation based on how the repo is managed
        # - Clone/pull the repo if not present/up-to-date
        # - Identify relevant files (e.g., using file paths mentioned in the issue, heuristics)
        # - Read relevant parts of the files
        logger.warning(f"Code context retrieval for issue {issue_number} is not fully implemented.")
        # Example: Look for file paths in the issue body
        # issue_body = self.issues.get(issue_number, {}).get('body', '')
        # Find potential file paths (very basic example)
        # potential_files = re.findall(r'[\w/.-]+\.(?:py|js|java|cpp|c|ts|html|css)', issue_body)
        # Read content from these files if they exist in the workspace repo
        return "Code context retrieval is currently a placeholder."

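    # Editor's sketch (not part of the original commit) of the context gathering described
    # in the placeholder above, assuming the repository has been cloned to WORKSPACE / "repo"
    # as in the previous version of this file; file-extension filter and limits are assumptions.
    #
    #     async def _get_code_context(self, issue_number: int) -> str:
    #         body = self.issues.get(issue_number, {}).get('body', '') or ''
    #         candidates = re.findall(r'[\w./-]+\.(?:py|js|ts|java|cpp|c|html|css)', body)
    #         snippets = []
    #         for rel_path in candidates[:5]:                      # cap the amount of context
    #             path = WORKSPACE / "repo" / rel_path
    #             if path.is_file():
    #                 snippets.append(f"### {rel_path}\n{path.read_text(errors='ignore')[:2000]}")
    #         return "\n\n".join(snippets) or "No referenced files found in the cloned repository."
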
    async def suggest_resolution(self, issue: dict, model_key: str) -> str:
        """Suggests a resolution description using a selected AI model."""
        if not self.hf_token:
            return "Error: Hugging Face token not set."
        if model_key not in HF_MODELS:
            return f"Error: Invalid model key: {model_key}"

        model_id = HF_MODELS[model_key]
        logger.info(f"Requesting resolution suggestion for issue {issue.get('id','N/A')} using {model_id}")

        prompt = f"""Analyze the following GitHub issue and provide a concise, step-by-step suggestion on how to resolve it. Focus on the technical steps required.

## Issue Title: {issue.get('title', 'N/A')}
## Issue Body:
{issue.get('body', 'N/A')}
## Labels: {', '.join(issue.get('labels', []))}

## Suggested Resolution Steps:
"""
        api_url = f"{HF_INFERENCE_API}/{model_id}"
        headers = {"Authorization": f"Bearer {self.hf_token}"}
        payload = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": 512,
                "temperature": 0.7,  # Higher temp for more creative suggestions
                "return_full_text": False,
                "do_sample": True,
                "top_p": 0.95,
            }
        }

        try:
            async with aiohttp.ClientSession(headers=headers) as session:
                async with session.post(api_url, json=payload) as response:
                    response.raise_for_status()
                    result = await response.json()
                    if result and isinstance(result, list):
                        suggestion = result[0].get('generated_text', 'No suggestion generated.')
                        logger.info(f"Received suggestion from {model_id}")
                        return suggestion.strip()
                    else:
                        logger.error(f"Unexpected response format from {model_id} for suggestion: {result}")
                        return "Error: Received unexpected response format from AI model."
        except aiohttp.ClientResponseError as e:
            logger.error(f"HF Inference API request failed for suggestion: {e.status} {e.message}")
            logger.error(f"Response body: {await e.response.text()}")
            return f"Error: AI model request failed ({e.status}). Check model availability and HF token."
        except Exception as e:
            logger.exception(f"Error suggesting resolution: {e}")
            return f"An unexpected error occurred: {e}"

    # --- WebSocket Methods ---

    async def broadcast_collaboration_status(self):
        """Periodically sends collaborator status to all connected clients."""
        while True:
            await asyncio.sleep(5)  # Send updates every 5 seconds
            if not self.ws_clients:
                continue

            status_payload = json.dumps({
                "type": "collaboration_status",
                "collaborators": self.collaborators
            })
            # Use asyncio.gather to send concurrently, handling potential errors
            results = await asyncio.gather(
                *[client.send(status_payload) for client in self.ws_clients],
                return_exceptions=True  # Don't let one failed send stop others
            )
            # Log any errors that occurred during broadcast
            for i, result in enumerate(results):
                if isinstance(result, Exception):
                    logger.warning(f"Failed to send status to client {i}: {result}")

    async def handle_code_editor_update(self, issue_num: int, delta: str, client_id: str):
        """Applies a delta from one client and broadcasts it to others."""
        if issue_num not in self.code_editors:
            logger.warning(f"Received code update for non-existent editor for issue {issue_num}")
            return  # Or initialize editor: self.code_editors[issue_num] = OTCodeEditor(...)

        try:
            # Apply the delta to the server-side authoritative state
            self.code_editors[issue_num].apply_delta(json.loads(delta))
            logger.info(f"Applied delta for issue {issue_num} from client {client_id}")

            # Broadcast the delta to all *other* connected clients
            update_payload = json.dumps({
                "type": "code_update",
                "issue_num": issue_num,
                "delta": delta  # Send the original delta
            })

            tasks = []
            for client in self.ws_clients:
                # Check if the client has an associated ID and avoid sending back to originator
                client_ws_id = getattr(client, 'client_id', None)
                if client_ws_id != client_id:
                    tasks.append(client.send(update_payload))

            if tasks:
                results = await asyncio.gather(*tasks, return_exceptions=True)
                # Log errors during broadcast
                for i, result in enumerate(results):
                    if isinstance(result, Exception):
                        logger.warning(f"Failed to broadcast code update to client {i}: {result}")

        except json.JSONDecodeError:
            logger.error(f"Received invalid JSON delta for issue {issue_num}: {delta}")
        except Exception as e:
            logger.exception(f"Error handling code editor update for issue {issue_num}: {e}")

    async def broadcast_issue_update(self):
        """Notifies clients that the issue list/data has changed."""
        if not self.ws_clients:
            return

        logger.info("Broadcasting issue update notification to clients.")
        update_payload = json.dumps({"type": "issues_updated"})
        results = await asyncio.gather(
            *[client.send(update_payload) for client in self.ws_clients],
            return_exceptions=True
        )
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                logger.warning(f"Failed to send issue update notification to client {i}: {result}")

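# Editor's sketch (not part of the original commit): a handle_ws_connection coroutine of the
# shape referenced later in this file, registering clients and dispatching 'code_update'
# messages to the manager; the exact message fields mirror the JavaScript below but the
# protocol details are assumptions.
#
#     async def handle_ws_connection(websocket, path, manager):
#         manager.ws_clients.append(websocket)
#         try:
#             async for raw in websocket:
#                 msg = json.loads(raw)
#                 if msg.get("type") == "join":
#                     websocket.client_id = msg.get("clientId")      # tag the socket with its client ID
#                 elif msg.get("type") == "code_update":
#                     await manager.handle_code_editor_update(
#                         int(msg["issue_num"]), msg["delta"], msg.get("clientId", ""))
#         except ConnectionClosed:
#             pass
#         finally:
#             manager.ws_clients.remove(websocket)
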
# ========== Gradio UI Definition ==========
|
627 |
+
def create_ui(manager: IssueManager):
|
628 |
+
"""Creates the Gradio interface."""
|
629 |
+
|
630 |
+
# --- Helper Functions for UI ---
|
631 |
+
def generate_issue_preview(issue_num: Optional[int]) -> str:
|
632 |
+
"""Generates HTML preview for a selected issue."""
|
633 |
+
if issue_num is None or issue_num not in manager.issues:
|
634 |
+
return "<p>Select an issue from the board to see details.</p>"
|
635 |
+
issue = manager.issues[issue_num]
|
636 |
+
# Convert markdown body to HTML
|
637 |
+
html_body = markdown2.markdown(issue.get('body', '*No description provided.*'))
|
638 |
+
# Basic styling
|
639 |
+
preview_html = f"""
|
640 |
+
<div style="border: 1px solid #e5e7eb; padding: 15px; border-radius: 8px; background-color: #f9fafb;">
|
641 |
+
<h4><a href='{issue.get('url', '#')}' target='_blank' style='color: #6d28d9; text-decoration: none;'>#{issue['id']} - {issue.get('title', 'N/A')}</a></h4>
|
642 |
+
<hr style='margin: 10px 0; border-top: 1px solid #e5e7eb;'>
|
643 |
+
<p><strong>State:</strong> {issue.get('state', 'N/A')} | <strong>Assignee:</strong> {issue.get('assignee', 'None')}</p>
|
644 |
+
<p><strong>Labels:</strong> {' | '.join(f'<span style=\'background-color: #eee; padding: 2px 5px; border-radius: 4px; font-size: 0.9em;\'>{l}</span>' for l in issue.get('labels', [])) or 'None'}</p>
|
645 |
+
<div style="margin-top: 10px; max-height: 300px; overflow-y: auto; border-top: 1px dashed #ccc; padding-top: 10px;">
|
646 |
+
{html_body}
|
647 |
+
</div>
|
648 |
+
</div>
|
649 |
+
"""
|
650 |
+
return preview_html
|
651 |
+
|
652 |
+
async def get_ai_suggestion(issue_num: Optional[int], model_key: str) -> str:
|
653 |
+
"""Wrapper to get AI suggestion for the chat."""
|
654 |
+
if issue_num is None or issue_num not in manager.issues:
|
655 |
+
return "Please select a valid issue first."
|
656 |
+
issue = manager.issues[issue_num]
|
657 |
+
issue_hash = manager._get_issue_hash(issue) # Use hash for caching
|
658 |
+
# Use cached_suggestion which handles the actual API call via lru_cache
|
659 |
+
suggestion = await manager.cached_suggestion(issue_hash, HF_MODELS[model_key])
|
660 |
+
# Format for chat
|
661 |
+
return f"**Suggestion based on {model_key}:**\n\n{suggestion}"
|
662 |
+
|
663 |
+
async def get_ai_patch(issue_num: Optional[int], model_key: str) -> str:
|
664 |
+
"""Wrapper to get AI patch for the chat."""
|
665 |
+
if issue_num is None or issue_num not in manager.issues:
|
666 |
+
return "Please select a valid issue first."
|
667 |
+
result = await manager.generate_code_patch(issue_num, model_key)
|
668 |
+
if "error" in result:
|
669 |
+
return f"**Error generating patch:** {result['error']}"
|
670 |
+
else:
|
671 |
+
# Format for chat display
|
672 |
+
return f"""**Patch Suggestion from {result.get('model_used', model_key)}:**
|
673 |
+
|
674 |
+
**Explanation:**
|
675 |
+
{result.get('explanation', 'N/A')}
|
676 |
+
|
677 |
+
**Patch:**
|
678 |
+
```diff
|
679 |
+
{result.get('patch', 'N/A')}
|
680 |
+
```"""
|
681 |
+
|
682 |
+
# --- Gradio Blocks ---
|
683 |
+
with gr.Blocks(theme=theme, title="🤖 AI Issue Resolver Pro", css=".gradio-container {max-width: 1400px !important;}") as app:
|
684 |
gr.Markdown("""
|
685 |
+
<div style="text-align: center; margin-bottom: 20px;">
|
686 |
+
<h1 style="color: #6d28d9; font-weight: 800;">🚀 AI Issue Resolver Pro</h1>
|
687 |
+
<p style="color: #4b5563; font-size: 1.1em;">Next-generation issue resolution powered by AI collaboration</p>
|
688 |
+
</div>
|
689 |
""")
|
690 |
+
|
691 |
+
# --- Configuration Row ---
|
692 |
+
with gr.Row(variant="panel", elem_id="config-panel"):
|
693 |
+
with gr.Column(scale=3):
|
694 |
+
repo_url = gr.Textbox(label="GitHub Repository URL", placeholder="[https://github.com/owner/repo](https://github.com/owner/repo)", info="Enter the full URL of the public GitHub repository.", elem_id="repo_url")
|
695 |
+
with gr.Row():
|
696 |
+
github_token = gr.Textbox(label="GitHub Token (Optional)", type="password", info="Required for private repos or higher rate limits.", elem_id="github_token")
|
697 |
+
hf_token = gr.Textbox(label="Hugging Face Token", type="password", info="Required for AI model interactions.", elem_id="hf_token")
|
698 |
with gr.Column(scale=1):
|
699 |
+
# Removed language select as code editor handles it
|
700 |
+
model_select = gr.Dropdown(choices=list(HF_MODELS.keys()), value="Mistral-8x7B",
|
701 |
+
label="🤖 Select AI Model", info="Choose the AI for suggestions and patches.", elem_id="model_select")
|
702 |
+
crawl_btn = gr.Button("🛰️ Scan Repository Issues", variant="primary", icon="🔍", elem_id="crawl_btn")
|
703 |
+
status_output = gr.Textbox(label="Status", interactive=False, lines=1, placeholder="Status updates will appear here...", elem_id="status_output")
|
704 |
+
|
705 |
+
|
706 |
+
# --- Main Tabs ---
|
707 |
+
with gr.Tabs(elem_id="main-tabs"):
|
708 |
+
# --- Issue Board Tab ---
|
709 |
+
with gr.Tab("📋 Issue Board", id="board", elem_id="tab-board"):
|
710 |
with gr.Row():
|
711 |
+
with gr.Column(scale=3):
|
712 |
+
gr.Markdown("### Open Issues")
|
713 |
+
issue_list = gr.Dataframe(
|
714 |
+
headers=["ID", "Title", "Severity", "Cluster"],
|
715 |
+
datatype=["number", "str", "str", "str"], # Cluster ID shown as str
|
716 |
+
interactive=True,
|
717 |
+
height=500,
|
718 |
+
wrap=True, # Wrap long titles
|
719 |
+
elem_id="issue_list_df"
|
720 |
+
)
|
721 |
+
with gr.Column(scale=2):
|
722 |
+
gr.Markdown("### Issue Severity")
|
723 |
+
stats_plot = gr.Plot(elem_id="stats_plot")
|
724 |
+
# Placeholder for collaborators - updated via JS
|
725 |
+
collab_status = gr.HTML("""
|
726 |
+
<div style="margin-top: 20px; border: 1px solid #e5e7eb; padding: 10px; border-radius: 8px;">
|
727 |
+
<h4 style="margin-bottom: 5px; color: #374151;">👥 Active Collaborators</h4>
|
728 |
+
<div id="collab-list" style="font-size: 0.9em; max-height: 100px; overflow-y: auto;">
|
729 |
+
Connecting...
|
730 |
+
</div>
|
731 |
+
</div>
|
732 |
+
""", elem_id="collab_status_html")
|
733 |
|
734 |
+
# --- Resolution Studio Tab ---
|
735 |
+
with gr.Tab("💻 Resolution Studio", id="studio", elem_id="tab-studio"):
|
736 |
with gr.Row():
|
737 |
+
# Left Column: Issue Details & AI Tools
|
738 |
with gr.Column(scale=1):
|
739 |
+
gr.Markdown("### Selected Issue")
|
740 |
+
# Hidden number input to store selected issue ID
|
741 |
+
selected_issue_id = gr.Number(label="Selected Issue ID", visible=False, precision=0, elem_id="selected_issue_id")
|
742 |
+
issue_preview_html = gr.HTML(
|
743 |
+
"<p style='color: #6b7280;'>Select an issue from the 'Issue Board' tab.</p>",
|
744 |
+
elem_id="issue_preview"
|
745 |
+
)
|
746 |
+
|
747 |
+
with gr.Accordion("🛠️ AI Assistance Tools", open=True, elem_id="ai_tools_accordion"):
|
748 |
+
suggest_btn = gr.Button("🧠 Suggest Resolution Steps", icon="💡", elem_id="suggest_btn")
|
749 |
+
patch_btn = gr.Button("📝 Generate Code Patch", icon="🩹", elem_id="patch_btn")
|
750 |
+
# Add placeholders for other buttons if needed
|
751 |
+
# test_btn = gr.Button("🧪 Create Tests (Future)", icon="🔬", interactive=False)
|
752 |
+
# impact_btn = gr.Button("📊 Impact Analysis (Future)", icon="📈", interactive=False)
|
753 |
+
chat_output_display = gr.Textbox(label="AI Output", lines=10, interactive=False, placeholder="AI suggestions and patches will appear here...", elem_id="ai_output_display")
|
754 |
+
|
755 |
+
|
756 |
+
# Right Column: Code Editor & Chat (removed chat interface)
|
757 |
with gr.Column(scale=2):
|
758 |
+
gr.Markdown("### Collaborative Code Editor")
|
759 |
+
# Use the imported code_editor component
|
760 |
+
# We'll update its value dynamically when an issue is selected
|
761 |
+
code_edit_component = code_editor(
|
762 |
+
label="Code Editor",
|
763 |
+
# Initial value can be empty or a placeholder message
|
764 |
+
value={"main.py": "# Select an issue to load relevant code (placeholder)"},
|
765 |
+
# Language is set dynamically if needed, or defaults
|
766 |
+
language="python", # Default language
|
767 |
+
elem_id="code_editor_component"
|
768 |
+
)
|
769 |
+
# Hidden input to trigger code editor updates from server->client WS messages
|
770 |
+
code_editor_update_trigger = gr.Textbox(visible=False, elem_id="code-editor-update-trigger")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
771 |
|
|
|
|
|
772 |
|
773 |
+
# --- Analytics Tab (Placeholder) ---
|
774 |
+
with gr.Tab("📈 Analytics", id="analytics", elem_id="tab-analytics"):
|
775 |
+
gr.Markdown("### Analytics Dashboard (Placeholder)")
|
776 |
+
gr.Markdown("Future home for resolution timelines, achievement badges, and more detailed metrics.")
|
777 |
+
# with gr.Row():
|
778 |
+
# gr.Markdown("#### 📅 Resolution Timeline")
|
779 |
+
# timeline = gr.Timeline() # Requires specific data format
|
780 |
+
# with gr.Row():
|
781 |
+
# gr.Markdown("#### 🏆 Achievement System")
|
782 |
+
# badges = gr.HTML("<div class='badges'>Coming Soon!</div>")
|
783 |
|
|
|
|
|
|
|
|
|
|
|
|
|
784 |
|
785 |
+
# --- Event Handlers ---
|
786 |
+
|
787 |
+
# 1. Crawl Button Click
|
788 |
crawl_btn.click(
|
789 |
+
fn=manager.crawl_issues,
|
790 |
inputs=[repo_url, github_token, hf_token],
|
791 |
+
outputs=[issue_list, stats_plot, status_output],
|
792 |
+
api_name="crawl_issues" # For API access if needed
|
793 |
)
|
794 |
|
795 |
+
# 2. Issue Selection in Dataframe
|
796 |
+
async def handle_issue_select(evt: gr.SelectData):
|
797 |
+
"""Handles issue selection: updates preview, loads code (placeholder)."""
|
798 |
+
if evt.index[0] is None: # No row selected
|
799 |
+
return {
|
800 |
+
selected_issue_id: None,
|
801 |
+
issue_preview_html: "<p style='color: #6b7280;'>Select an issue from the table.</p>",
|
802 |
+
# Reset code editor or show placeholder
|
803 |
+
code_edit_component: gr.update(value={"placeholder.txt": "# Select an issue to load code."})
|
804 |
+
}
|
805 |
+
|
806 |
+
selected_id = int(evt.value[0]) # Get ID from the first column ('ID') of the selected row
|
807 |
+
logger.info(f"Issue selected: ID {selected_id}")
|
808 |
+
|
809 |
+
# Update the hidden ID field
|
810 |
+
updates = {selected_issue_id: selected_id}
|
811 |
+
|
812 |
+
# Generate and update the HTML preview
|
813 |
+
preview_html = generate_issue_preview(selected_id)
|
814 |
+
updates[issue_preview_html] = preview_html
|
815 |
+
|
816 |
+
# --- Code Loading Logic (Placeholder) ---
|
817 |
+
# This needs real implementation: Find relevant files for the issue
|
818 |
+
# and load their content into the editor component's value format.
|
819 |
+
# Example: Fetch files related to the issue (needs implementation)
|
820 |
+
# files_content = await fetch_relevant_code_for_issue(selected_id)
|
821 |
+
files_content = {
|
822 |
+
f"issue_{selected_id}_code.py": f"# Code related to issue {selected_id}\n# (Replace with actual file content)\n\nprint('Hello from issue {selected_id}')",
|
823 |
+
"README.md": f"# Issue {selected_id}\n\nDetails about the issue..."
|
824 |
+
}
|
825 |
+
updates[code_edit_component] = gr.update(value=files_content)
|
826 |
+
# --- End Placeholder ---
|
827 |
+
|
828 |
+
return updates
|
829 |
+
|
830 |
+
issue_list.select(
|
831 |
+
fn=handle_issue_select,
|
832 |
+
inputs=[], # Event data is passed automatically
|
833 |
+
outputs=[selected_issue_id, issue_preview_html, code_edit_component],
|
834 |
+
show_progress="minimal"
|
835 |
+
)
|
836 |
+
|
837 |
+
# 3. Suggest Resolution Button Click
|
838 |
suggest_btn.click(
|
839 |
+
fn=get_ai_suggestion,
|
840 |
+
inputs=[selected_issue_id, model_select],
|
841 |
+
outputs=[chat_output_display],
|
842 |
+
api_name="suggest_resolution"
|
843 |
)
|
844 |
|
845 |
+
# 4. Generate Patch Button Click
|
846 |
patch_btn.click(
|
847 |
+
fn=get_ai_patch,
|
848 |
+
inputs=[selected_issue_id, model_select],
|
849 |
+
outputs=[chat_output_display],
|
850 |
+
api_name="generate_patch"
|
851 |
)
|
852 |
|
853 |
+
# 5. Code Editor Change (User typing) -> Send update via WebSocket
|
854 |
+
# This requires JavaScript to capture the 'change' event from the Ace editor
|
855 |
+
# instance within the code_editor component and send it over WebSocket.
|
856 |
+
# The Python backend then receives it via handle_ws_connection.
|
857 |
|
858 |
+
# 6. WebSocket Message (Server -> Client) -> Trigger UI Update
|
859 |
+
# This uses JavaScript to listen for WebSocket messages and update Gradio components.
|
860 |
+
# Example: Update collaborator list, trigger code editor update.
|
|
|
|
|
861 |
|
862 |
+
# --- JavaScript for WebSocket Communication ---
|
863 |
+
def web_socket_js(ws_port):
|
864 |
+
# Generate unique client ID for this session
|
865 |
+
client_id = f"client_{hashlib.sha1(os.urandom(16)).hexdigest()[:8]}"
|
866 |
+
logger.info(f"Generated Client ID for WebSocket: {client_id}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
867 |
|
868 |
+
return f"""
|
869 |
+
<script>
|
870 |
+
// Ensure this runs only once
|
871 |
+
if (!window.collabWs) {{
|
872 |
+
console.log('Initializing WebSocket connection...');
|
873 |
+
const wsUrl = `ws://localhost:{ws_port}`; // Use localhost for local Gradio run
|
874 |
+
// For Hugging Face Spaces, you need to use the public WS endpoint:
|
875 |
+
// const wsUrl = `wss://YOUR_SPACE_NAME.hf.space/ws`; // Adjust if using custom domain/port mapping
|
876 |
|
877 |
+
window.collabWs = new WebSocket(wsUrl);
|
878 |
+
window.clientId = '{client_id}'; // Store client ID globally for this session
|
|
|
|
|
879 |
|
880 |
+
window.collabWs.onopen = function(event) {{
|
881 |
+
console.log('WebSocket connection established.');
|
882 |
+
// Optionally send a join message
|
883 |
+
window.collabWs.send(JSON.stringify({{ type: 'join', clientId: window.clientId }}));
|
884 |
+
// Initial update for collaborator list (optional)
|
885 |
+
const collabListDiv = document.getElementById('collab-list');
|
886 |
+
if (collabListDiv) collabListDiv.innerHTML = 'Connected.';
|
887 |
+
}};
|
888 |
+
|
889 |
+
window.collabWs.onmessage = function(event) {{
|
890 |
+
// console.log('WebSocket message received:', event.data);
|
891 |
+
try {{
|
892 |
+
const data = JSON.parse(event.data);
|
893 |
+
|
894 |
+
if (data.type === 'collaboration_status') {{
|
895 |
+
const collabListDiv = document.getElementById('collab-list');
|
896 |
+
if (collabListDiv) {{
|
897 |
+
if (Object.keys(data.collaborators).length > 0) {{
|
898 |
+
collabListDiv.innerHTML = Object.entries(data.collaborators)
|
899 |
+
.map(([id, info]) => `<div class="collab-item">${info.name || id}: ${info.status || 'Idle'}</div>`)
|
                            .join('');
                    }} else {{
                        collabListDiv.innerHTML = 'No other collaborators active.';
                    }}
                }}
            }} else if (data.type === 'code_update') {{
                console.log('Received code update delta for issue:', data.issue_num);
                // Find the Gradio Textbox used as a trigger
                const triggerTextbox = document.getElementById('code-editor-update-trigger').querySelector('textarea');
                if (triggerTextbox) {{
                    // Set its value to the received delta (JSON string)
                    // This change event will be picked up by Gradio if a .change() listener is attached
                    // However, directly manipulating the Ace editor instance is more reliable if possible.
                    // For now, we assume the code_editor component handles incoming deltas internally
                    // or provides a JS API. If not, this trigger approach is a fallback.
                    triggerTextbox.value = JSON.stringify(data); // Pass full data
                    // Manually dispatch an input event to ensure Gradio detects the change
                    triggerTextbox.dispatchEvent(new Event('input', {{ bubbles: true }}));
                    console.log('Triggered Gradio update for code editor.');

                    // --- Ideal approach: Directly update Ace Editor ---
                    // This requires the code_editor component to expose its Ace instance
                    // or provide a JS function like `window.updateCodeEditor(issueNum, delta)`
                    /*
                    if (window.ace && window.aceEditors && window.aceEditors[data.issue_num]) {{
                        const editor = window.aceEditors[data.issue_num];
                        editor.getSession().getDocument().applyDeltas([JSON.parse(data.delta)]);
                        console.log('Applied delta directly to Ace editor for issue:', data.issue_num);
                    }} else {{
                        console.warn('Ace editor instance not found for issue:', data.issue_num);
                    }}
                    */
                }} else {{
                    console.error('Code editor update trigger textbox not found.');
                }}
            }} else if (data.type === 'issues_updated') {{
                console.log('Received issues updated notification.');
                // Optionally trigger a refresh or show a notification
                // Example: Update status bar
                const statusBar = document.getElementById('status_output').querySelector('textarea');
                if (statusBar) {{
                    statusBar.value = 'Issue list updated. Refresh may be needed.';
                    statusBar.dispatchEvent(new Event('input', {{ bubbles: true }}));
                }}
                // More robust: Trigger the crawl button's click event via JS? (Can be complex)
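                // A hedged sketch of that idea (the elem_id 'crawl_btn_id' and the inner <button>
                // lookup are guesses, not confirmed by this app), so it stays commented out:
                // const crawlBtn = document.querySelector('#crawl_btn_id button');
                // if (crawlBtn) crawlBtn.click();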
            }}

        }} catch (e) {{
            console.error('Failed to parse WebSocket message or update UI:', e);
        }}
    }};

    window.collabWs.onclose = function(event) {{
        console.warn('WebSocket connection closed:', event.code, event.reason);
        const collabListDiv = document.getElementById('collab-list');
        if (collabListDiv) collabListDiv.innerHTML = '<span style="color: red;">Disconnected</span>';
        // Implement reconnection logic if needed
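        // A minimal sketch (assumption, not wired up): clear the cached socket and retry later.
        // A real retry would need this setup factored into a reusable init function first.
        // window.collabWs = null;
        // setTimeout(() => window.location.reload(), 5000); // crude fallback: full page reload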
    }};

    window.collabWs.onerror = function(error) {{
        console.error('WebSocket error:', error);
        const collabListDiv = document.getElementById('collab-list');
        if (collabListDiv) collabListDiv.innerHTML = '<span style="color: red;">Connection Error</span>';
    }};

    // Function to send messages (e.g., code changes)
    window.sendWsMessage = function(message) {{
        if (window.collabWs && window.collabWs.readyState === WebSocket.OPEN) {{
            window.collabWs.send(JSON.stringify(message));
        }} else {{
            console.error('WebSocket not connected. Cannot send message.');
        }}
    }};
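
    // Example usage (illustrative; mirrors the message shape the Python WebSocket handler accepts):
    // window.sendWsMessage({{ type: 'status_update', clientId: window.clientId, status: 'Editing' }});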

    // --- JS Integration with Code Editor Component ---
    // This part is CRUCIAL and depends heavily on how the `code_editor`
    // component is implemented (e.g., using Ace Editor).
    // We need to:
    // 1. Get the editor instance(s).
    // 2. Attach a listener to its 'change' event (which provides deltas).
    // 3. When a change occurs, send the delta via `sendWsMessage`.

    // Example assuming Ace Editor and the component stores instances:
    function setupCodeEditorListener() {{
        // This needs to run *after* the Gradio component is rendered
        // and the editor is initialized. Using setTimeout is a common hack.
        setTimeout(() => {{
            const editorElement = document.querySelector('#code_editor_component'); // Find editor container
            // Find the actual Ace instance (this depends on the component's structure)
            // This is a GUESS - inspect the component's HTML/JS to find the correct way
            let aceEditor;
            if (window.ace && editorElement) {{
                // Try common ways Ace is attached
                aceEditor = window.ace.edit(editorElement.querySelector('.ace_editor')); // Common pattern
                // Or maybe the component stores it globally?
                // aceEditor = window.activeAceEditor;
            }}

            if (aceEditor) {{
                console.log('Ace Editor instance found. Attaching change listener.');
                aceEditor.getSession().on('change', function(delta) {{
                    // Only send changes made by the user (ignore programmatic changes)
                    if (aceEditor.curOp && aceEditor.curOp.command.name) {{
                        console.log('Code changed by user:', delta);
                        const issueIdElem = document.getElementById('selected_issue_id').querySelector('input');
                        const currentIssueId = issueIdElem ? parseInt(issueIdElem.value, 10) : null;

                        if (currentIssueId !== null && !isNaN(currentIssueId)) {{
                            window.sendWsMessage({{
                                type: 'code_update',
                                issue_num: currentIssueId,
                                delta: JSON.stringify(delta), // Send delta as JSON string
                                clientId: window.clientId
                            }});
                        }} else {{
                            console.warn('No valid issue selected, cannot send code update.');
                        }}
                    }}
                }});
            }} else {{
                console.warn('Could not find Ace Editor instance to attach listener. Collaboration may not work.');
                // Retry after a delay?
                // setTimeout(setupCodeEditorListener, 2000);
            }}
        }}, 1500); // Wait 1.5 seconds for Gradio/Ace to initialize
    }}

    // Call setup after initial load and potentially after issue selection changes
    // if the editor instance is recreated.
    setupCodeEditorListener();

    // Re-attach listener if the editor component updates (e.g., on issue select)
    // This requires observing changes to the component's container
    const observer = new MutationObserver((mutationsList, observer) => {{
        for (const mutation of mutationsList) {{
            if (mutation.type === 'childList' && mutation.addedNodes.length > 0) {{
                // Check if the editor element was re-added/modified significantly
                if (document.querySelector('#code_editor_component .ace_editor')) {{
                    console.log("Code editor component updated, re-attaching listener...");
                    setupCodeEditorListener();
                    break; // Assume we only need to re-attach once per mutation batch
                }}
            }}
        }}
    }});
    const targetNode = document.getElementById('code_editor_component');
    if (targetNode) {{
        observer.observe(targetNode, {{ childList: true, subtree: true }});
    }}

}} else {{
    console.log('WebSocket connection already initialized.');
}}
</script>
    """

    # Inject the JavaScript into the Gradio app
    app.load(_js=web_socket_js(WS_PORT), fn=None, inputs=None, outputs=None)
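
    # A hedged sketch of the Gradio-side wiring for the trigger-textbox fallback described in the
    # JS above. The component variable names (`code_editor_update_trigger`, `code_editor_component`)
    # and the no-op handler are assumptions for illustration only, so the block is left commented out.
    #
    # def _apply_remote_delta(raw: str):
    #     """Parse the JSON pushed by the JS client; how to apply it depends on the editor's API."""
    #     payload = json.loads(raw) if raw else {}
    #     return gr.update()  # placeholder: no-op update
    #
    # code_editor_update_trigger.change(
    #     fn=_apply_remote_delta,
    #     inputs=code_editor_update_trigger,
    #     outputs=code_editor_component,
    # )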

    return app


# ========== WebSocket Server Logic ==========
async def handle_ws_connection(websocket: websockets.WebSocketServerProtocol, path: str, manager: IssueManager):
    """Handles incoming WebSocket connections and messages."""
    client_id = None  # Initialize client_id for this connection
    manager.ws_clients.append(websocket)
    logger.info(f"WebSocket client connected: {websocket.remote_address}")
    try:
        async for message in websocket:
            try:
                data = json.loads(message)
                msg_type = data.get("type")
                logger.debug(f"Received WS message: {data}")  # Log received message content

                if msg_type == "join":
                    client_id = data.get("clientId", f"anon_{websocket.id}")
                    setattr(websocket, 'client_id', client_id)  # Associate ID with socket object
                    manager.collaborators[client_id] = {"name": client_id, "status": "Connected"}  # Add to collaborators
                    logger.info(f"Client {client_id} joined.")
                    # Don't await broadcast here, let the periodic task handle it

                elif msg_type == "code_update":
                    issue_num = data.get("issue_num")
                    delta = data.get("delta")
                    sender_id = data.get("clientId")  # ID of the client who sent the update
                    if issue_num is not None and delta and sender_id:
                        # Pass client_id to handler to avoid broadcasting back to sender
                        await manager.handle_code_editor_update(issue_num, delta, sender_id)
                    else:
                        logger.warning(f"Invalid code_update message received: {data}")

                elif msg_type == "status_update":  # Client updates their status
                    sender_id = data.get("clientId")
                    status = data.get("status", "Idle")
                    if sender_id and sender_id in manager.collaborators:
                        manager.collaborators[sender_id]["status"] = status
                        logger.info(f"Client {sender_id} status updated: {status}")
                        # Don't await broadcast here

                else:
                    logger.warning(f"Unknown WebSocket message type received: {msg_type}")

            except json.JSONDecodeError:
                logger.error(f"Received invalid JSON over WebSocket: {message}")
            except Exception as e:
                logger.exception(f"Error processing WebSocket message: {e}")

    except ConnectionClosed as e:
        logger.info(f"WebSocket client disconnected: {websocket.remote_address} (Code: {e.code}, Reason: {e.reason})")
    except Exception as e:
        logger.exception(f"Unexpected error in WebSocket handler: {e}")
    finally:
        logger.info(f"Cleaning up connection for client {client_id if client_id else websocket.remote_address}")
        manager.ws_clients.remove(websocket)
        if client_id and client_id in manager.collaborators:
            del manager.collaborators[client_id]  # Remove collaborator on disconnect
            logger.info(f"Removed collaborator {client_id}.")
        # Don't await broadcast here

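
# Illustrative only: a minimal client sketch for exercising the handler above from a separate
# Python process (it is not called anywhere in this app). It assumes the WebSocket server below
# is already running on ws://localhost:WS_PORT and reuses the message shapes handled above.
async def _example_ws_client(port: int, client_id: str = "tester") -> None:
    uri = f"ws://localhost:{port}"
    async with websockets.connect(uri) as ws:
        # Announce ourselves, then report a status change.
        await ws.send(json.dumps({"type": "join", "clientId": client_id}))
        await ws.send(json.dumps({"type": "status_update", "clientId": client_id, "status": "Editing"}))
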
async def start_websocket_server(manager: IssueManager, port: int):
    """Starts the WebSocket server."""
    # Pass manager instance to the connection handler factory
    handler = lambda ws, path: handle_ws_connection(ws, path, manager)
    async with websockets.serve(handler, "localhost", port):
        logger.info(f"WebSocket server started on ws://localhost:{port}")
        await asyncio.Future()  # Run forever


def run_webhook_server(manager: IssueManager, port: int):
    """Starts the HTTP webhook server in a separate thread."""
    WebhookHandler.manager_instance = manager  # Pass manager instance to the class
    server_address = ("", port)
    httpd = HTTPServer(server_address, WebhookHandler)
    logger.info(f"Webhook HTTP server started on port {port}")
    httpd.serve_forever()
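
# Illustrative only: one way to poke the webhook endpoint from a local shell/REPL and confirm it
# accepts a GitHub-style 'issues' event. The payload is a minimal stand-in, not a real GitHub
# delivery, so this stays commented out.
# import urllib.request
# def _example_webhook_ping(port: int) -> int:
#     body = json.dumps({"action": "opened", "issue": {"number": 1}}).encode()
#     req = urllib.request.Request(
#         f"http://localhost:{port}/",
#         data=body,
#         headers={"Content-Type": "application/json", "X-GitHub-Event": "issues"},
#         method="POST",
#     )
#     with urllib.request.urlopen(req) as resp:
#         return resp.status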


# ========== Main Execution ==========
if __name__ == "__main__":
    # --- Setup ---
    manager = IssueManager()

    # --- Start Background Servers ---
    # 1. Webhook Server (HTTP)
    webhook_thread = threading.Thread(target=run_webhook_server, args=(manager, WEBHOOK_PORT), daemon=True)
    webhook_thread.start()

    # 2. WebSocket Server (Runs in main asyncio loop)
    # We need to run the WebSocket server and the collaborator status broadcast
    # within an asyncio event loop.
    async def main_async_tasks():
        # Start the periodic broadcast task
        broadcast_task = asyncio.create_task(manager.broadcast_collaboration_status())
        # Start the WebSocket server
        websocket_server_task = asyncio.create_task(start_websocket_server(manager, WS_PORT))
        await asyncio.gather(broadcast_task, websocket_server_task)

    # Run the asyncio tasks in a separate thread
    asyncio_thread = threading.Thread(target=lambda: asyncio.run(main_async_tasks()), daemon=True)
    asyncio_thread.start()

    # --- Create and Launch Gradio App ---
    app = create_ui(manager)
    app.launch(
        # share=True,  # Enable for public access (use with caution)
        server_name="0.0.0.0",  # Bind to all interfaces for accessibility in containers/networks
        server_port=7860,  # Default Gradio port
        favicon_path="https://huggingface.co/front/assets/huggingface_logo-noborder.svg"
    )

    logger.info("Gradio app launched. Webhook and WebSocket servers running in background.")