Update app.py
app.py
CHANGED
@@ -1,1035 +1,385 @@
-###############################################################################
-[… old lines 1-385 (banner, imports, and earlier helper definitions) are not recoverable from this diff view …]
-    r'(?:[A-Z][a-z]{2}\.)\s+\d{1,2},\s*\d{4},\s*(?:a\.?m\.?|p\.?m\.?)'   # "Feb. 20, 2023, a.m."
-    r'(?:\s*\([a-z]\)\s*[A-Z][a-z]{2}\.\s+\d{1,2},\s*\d{4},\s*(?:a\.?m\.?|p\.?m\.?))*$',  # repeats
-    re.IGNORECASE
-)
-MONTH = r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\.?'
-TIME = r'(?:a\.?m\.?|p\.?m\.?)'
-YEAR = r'\d{4}'
-
-orphan_date_pat = re.compile(
-    rf'^(?:{MONTH}|{TIME}|{YEAR}|\d{{1,2}},?)$'
-)
-
-def clean_line(line: str, source: str) -> str:
-    # --- Apply OneReg prefix removal FIRST if applicable ---
-    if source == "onereg":
-        line = zap_auto_outline_ids(line)  # Remove 11.3.5. etc. prefixes
-        if header_pat.match(line):
-            return ""
-
-    if source == "caa":
-        line = line.replace('β', '')
-        line = line.replace('β', '')
-
-    # --- Continue with other cleaning ---
-    line = page_pat.sub('', line)
-    if orphan_date_pat.match(line):
-        return
-
-    if source == "caa" and ("Civil Aviation Rules" in line or "CAA of NZ" in line):
-        # ... (CAA header cleaning remains the same) ...
-        line = re.sub(
-            r'Civil Aviation Rules\s+Part\s+\d+\s+CAA Consolidation', '', line
-        )
-        line = re.sub(
-            r'^\d{1,2}\s+[A-Za-z]+\s+\d{4}\s*\d*\s*CAA of NZ', '', line
-        )  # Made middle number optional
-        line = re.sub(r'\s{2,}', ' ', line).strip()
-        if not line: return ""
-
-    # ... (Rest of general cleaning: dots, page, email, time, Exported, date) ...
-    if re.search(r'\.{4,}\s*\d+\s*$', line): return ""  # Ellipsis followed by number (TOC)
-    if page_pat.fullmatch(line.strip()): return ""
-    line = re.sub(r'\S+@\S+', '', line)
-    line = re.sub(r'\b\d{1,2}:\d{2}(?:\s*(?:a\.?m\.?|p\.?m\.?))?', '', line, flags=re.IGNORECASE)
-    line = re.sub(r'Exported:.*$', '', line)
-    line = date_pat.sub('', line)
-
-    # --- Inline rule number zapping (careful not to zap appendix numbers like A.1) ---
-    line = re.sub(r'\b(rule)\s+(\d+\.\d+)', r'\1 \2', line, flags=re.IGNORECASE)
-    # Only zap digit.digit patterns, not Letter.digit
-    line = re.sub(
-        r'(?<!\brule\s)(?<!^[A-Z]\.)\b\d+\.\d+(?:[A-Z]?)\s*(?=\()',  # Added negative lookbehind for Letter.
-        '',
-        line,
-        flags=re.IGNORECASE
-    )
-    # Zap table references like Table B-1 if needed, can be specific
-    line = re.sub(r'\bTable\s+[A-Z]-\d+\b', '', line, flags=re.IGNORECASE)
-
-    line = page_pat.sub('', line)  # Redundant page check just in case
-    return re.sub(r'\s{2,}', ' ', line).strip()
-
-# ─────────────────────────────────────────────────────────────────────────────
-# 4. CAPTURE ONLY TOP-LEVEL RULES
-# ─────────────────────────────────────────────────────────────────────────────
-#rule_pat_1 = re.compile(r'^(?:\d+\.)*\s*(?P<rule>\d+\.\d+)\s*(?P<title>[A-Z].*)$') #works without A,B,C subparts
-# In the Regex section
-
-# REMOVE: appendix_main_pat = re.compile(...)
-
-# MODIFIED: Regex for Appendix Items (like "A.1 Title" or "C.2.1 (a) Title")
-# Now the primary way to find appendix sections.
-appendix_item_pat = re.compile(
-    r'^\s*([A-Z])\.(\d+(?:\.\d+)*)'   # Start of line, Letter.Number(s) e.g., "A.1", "C.2.1"
-    r'(?:\s*\(([^)]+)\))?'            # Optional captured parenthetical part e.g., "(a)" or "(1)"
-    r'\s+'                            # Space separator REQUIRED before title
-    r'(?P<title>[A-Za-z0-9].*)$',     # Capture title (must start alphanumeric)
-    re.IGNORECASE
-)
-
-# Keep rule_pat and subpart_pat as they were (assuming rule_pat ignores prefixes correctly now)
-rule_pat = re.compile(
-    r'^(?:(?:\d+\.){2,}\s*)?'          # Optional & Non-capturing: OneReg outline prefix
-    r'(?P<base_rule>\d+\.\d+(?:[A-Z]?))'
-    r'(?P<parens>(?:\s*\([^)]+\))*?)'
-    r'\s*'
-    r'(?P<title>.*)$',
-    re.IGNORECASE
-)
-
-subpart_pat = re.compile(
-    r'^\s*'
-    r'\d+\.\s*'        # Still expect number for subpart heading based on earlier examples
-    r'Subpart\s+'
-    r'([A-Z]{1,2})\s*'
-    r'[—-]\s*'
-    r'(.+)$',
-    re.IGNORECASE
-)
-print(rule_pat)
-def natural_key(r):
-    minor = r.split('.', 1)[1]
-    m = re.match(r"^(\d+)([A-Z]?)$", minor)
-    if m:
-        return (int(m.group(1)), m.group(2))  # (5, "") or (5, "A")
-    return (0, minor)
-
-
-# Inside parse_rules function loop
-import re
-
-def collapse_repeated_first_word(line: str) -> str:
-    """
-    Finds the first word, then finds its next occurrence within 5 words,
-    and if so removes everything between (and including) that second match,
-    preserving all other whitespace and punctuation.
-    """
-    # 1) Match and capture any leading indent + the first word
-    m = re.match(r'^(\s*)(\w+)\b', line)
-    if not m:
-        return line
-    prefix, first_word = m.groups()
-
-    # 2) Build a word-boundary pattern for that first word
-    pat = rf'\b{re.escape(first_word)}\b'
-
-    # 3) Find all occurrences
-    matches = list(re.finditer(pat, line, flags=re.IGNORECASE))
-    if len(matches) < 2:
-        return line
-
-    # 4) Offsets of the first two matches
-    first_end = matches[0].end()
-    second_start, second_end = matches[1].span()
-
-    # 5) Count how many actual words lie between
-    between = line[first_end:second_start]
-    if len(re.findall(r'\b\w+\b', between)) > 5:
-        return line
-
-    # 6) Rebuild: prefix + first word + everything AFTER the second occurrence
-    rest = line[second_end:]
-    return f"{prefix}{first_word}{rest}"
-
-
-def parse_rules(text: str, source: str) -> dict[str, str]:
-    rules, current, title = {}, None, ""
-    tail = ""
-    current_appendix_letter = None  # Track the current main appendix
-
-    for raw in text.splitlines():
-        original_line_for_debug = raw.strip()  # Keep original for debugging
-        line = clean_line(raw, source)
-        print(f"DEBUG: Checking cleaned line: '{line}'")
-        if not line: continue
-        if source == "onereg":
-            line = collapse_inner_parens(line)
-            #line = strip_stray_numbers(line)
-            #line = collapse_repeated_first_word(line)
-            #line = remove_repeated_prefix(line)
-
-        # --- Check Order: Appendix Item -> Subpart -> Rule ---
-
-        # 1. Appendix Item (e.g., "A.1 Title" or "C.2.1 (a) Title")
-        if m_ap_item := appendix_item_pat.match(line):
-            letter = m_ap_item.group(1).upper()
-            numbering = m_ap_item.group(2)
-            paren = m_ap_item.group(3)  # Might be None
-            item_title = m_ap_item.group('title').strip()
-
-            # Construct the key: A.1, A.1(a), C.2.1(1) etc.
-            key_parts = [letter, numbering]
-            if paren:
-                # Clean paren content if needed (e.g., remove internal spaces?)
-                paren_clean = paren.strip()
-                key_parts.append(f"({paren_clean})")
-            key = ".".join(key_parts)
-
-            current = key
-            title = item_title
-            tail = key
-            rules.setdefault(current, []).append(title)
-            current_appendix_letter = letter  # Remember we are inside this appendix
-            print(f"Matched Appendix Item: {key} => '{title}'")
-            continue
-
-        # 2. Subpart Heading
-        elif m_sp := subpart_pat.match(line):
-            # ... (Subpart logic remains the same) ...
-            code = m_sp.group(1).upper()
-            subpart_title = m_sp.group(2).strip()
-            heading = f"Subpart {code} — {subpart_title}"
-            key = f"subpart-{code}"
-            current = key
-            title = heading
-            tail = ""
-            rules.setdefault(current, []).append(heading)
-            current_appendix_letter = None  # Exited appendix context
-            print(f"Matched Subpart: {key} => {heading}")
-            continue
-
-        # 3. Main Rule Heading
-        elif m_rule := rule_pat.match(line):
-            # ... (Main rule logic remains mostly the same, ensure key construction is correct) ...
-            base = m_rule.group('base_rule')
-            parens_str = m_rule.group('parens') or ""
-            title_text = m_rule.group('title').strip()
-            paren_parts = re.findall(r'\(([^)]+)\)', parens_str)
-            key = base + "".join(f"({p.strip()})" for p in paren_parts)  # Construct key like 139.555(e)(1)
-
-            is_likely_heading_only = not title_text or \
-                re.match(r'^[\[\(]?[a-zA-Z0-9][\)\]\.]', title_text) or \
-                len(title_text) < 5
-
-            current = key
-            title = title_text
-            tail = key
-
-            if not is_likely_heading_only and title_text:
-                rules.setdefault(current, []).append(title_text)
-                print(f"Matched Rule + Title: {key} => '{title_text}'")
-            else:
-                rules.setdefault(current, [])
-                print(f"Matched Rule Heading: {key}")
-            current_appendix_letter = None  # Exited appendix context
-            continue
-
-        # 4. Continuation lines
-        if current:
-            # If we are inside an appendix section (identified by A. B. etc.)
-            # be careful about dropping lines that might look like headings but aren't
-            is_potentially_new_appendix_item = appendix_item_pat.match(line)
-
-            if is_potentially_new_appendix_item and current_appendix_letter and line.startswith(current_appendix_letter):
-                # This looks like a new sub-item within the *same* appendix
-                # but wasn't matched above (maybe title was too short?).
-                # Treat as continuation for now, might need refinement.
-                print(f"Potential missed heading treated as continuation: {line}")
-                pass  # Let it be added below
-
-            # Apply cleaning to continuation lines
-            # line = strip_inline_self_ref(line, tail)  # Review if this works well now
-            line = drop_leading_repeated_title(line, title)
-            if line:
-                rules[current].append(line)
-        else:
-            print(f"DEBUG: Unmatched line (no current rule): '{line}'")
-    return {k: " ".join(v).strip() for k, v in rules.items()}
-
-
-
-
-# ─────────────────────────────────────────────────────────────────────────────
-# 5. STRING DIFF (OneReg deletions / modified insertions)
-# ─────────────────────────────────────────────────────────────────────────────
-def diff_cols(one: str, caa: str) -> tuple[str, str]:
-    sm = difflib.SequenceMatcher(None, one, caa)
-    d_one = d_mod = ""
-    for tag, i1, i2, j1, j2 in sm.get_opcodes():
-        if tag == "equal":
-            seg = one[i1:i2]
-            d_one += seg
-            d_mod += seg
-        elif tag == "delete":
-            d_one += f"<span style='background:#f8d4d4'>{one[i1:i2]}</span>"
-        elif tag == "insert":
-            d_mod += f"<span style='background:#d4f8d4'>{caa[j1:j2]}</span>"
-        else:  # replace
-            d_one += f"<span style='background:#f8d4d4'>{one[i1:i2]}</span>"
-            d_mod += f"<span style='background:#d4f8d4'>{caa[j1:j2]}</span>"
-    return d_one, d_mod
-# ─────────────────────────────────────────────────────────────────────────────
-# 5. STRING DIFF (Unified Inline View)
-# ─────────────────────────────────────────────────────────────────────────────
-def diff_unified(one: str, caa: str) -> str:
-    """
-    Generates a single HTML string showing differences inline.
-    Deletions (text in OneReg but not CAA) are shown with red background/strikethrough.
-    Insertions (text in CAA but not OneReg) are shown with green background.
-    Uses html.escape to handle special characters in the text.
-    """
-    sm = difflib.SequenceMatcher(None, one, caa)
-    output = []
-    for tag, i1, i2, j1, j2 in sm.get_opcodes():
-        one_segment = html.escape(one[i1:i2])  # Escape text segments
-        caa_segment = html.escape(caa[j1:j2])  # Escape text segments
-
-        if tag == "equal":
-            output.append(one_segment)
-        elif tag == "delete":
-            # Wrap deleted text in <del> tags with specific styling
-            output.append(f"<del style='background:#fdd; text-decoration: line-through; color: #000;'>{one_segment}</del>")
-        elif tag == "insert":
-            # Wrap inserted text in <ins> tags with specific styling
-            output.append(f"<ins style='background:#dfd; text-decoration: none; color: #000;'>{caa_segment}</ins>")
-        elif tag == "replace":
-            # Show deletion followed by insertion for replacements
-            output.append(f"<del style='background:#fdd; text-decoration: line-through; color: #000;'>{one_segment}</del>")
-            output.append(f"<ins style='background:#dfd; text-decoration: none; color: #000;'>{caa_segment}</ins>")
-
-    # Join segments and wrap in a span that preserves whitespace and line breaks
-    # Add color: var(--text) to ensure it adapts to light/dark mode from the body style
-    return f"<span style='white-space: pre-wrap; color: var(--text);'>{''.join(output)}</span>"
-
-# Remove or comment out the old natural_sort_key if not used elsewhere,
-# or keep if needed for other parts. Let's assume it's not needed now.
-# def natural_sort_key(rule_id: str): ...  # Keep if used, remove/comment if not
-def natural_sort_key(rule_id: str):
-    # rule_id is e.g. "139.5", "139.5A", "139.10"
-    minor = rule_id.split('.', 1)[1]  # "5", "5A", "10"
-    m = re.match(r'^(\d+)([A-Z]?)$', minor)  # capture digits + optional letter
-    if m:
-        return (int(m.group(1)), m.group(2))  # e.g. (5, ""), (5, "A"), (10, "")
-    # fallback: put anything weird at the end
-    return (float('inf'), minor)
-# ─────────────────────────────────────────────────────────────────────────────
-# 6. SORTING KEY (Updated for Rule -> Appendix Order)
-# ─────────────────────────────────────────────────────────────────────────────
-def combined_sort_key(key: str):
-    # --- Sort Order Priorities ---
-    # 1: Subparts (subpart-A < subpart-AA < subpart-B)
-    # 2: Main Rules (139.1 < 139.5 < 139.5A < 139.10, including subdivisions)
-    # 3: Appendix Items (A.1 < A.1(a) < A.1(1) < A.2 < B.1)
-
-    # 1. Subparts
-    if key.startswith("subpart-"):
-        code = key.split('-', 1)[1]
-        return (1, len(code), code)  # Priority 1
-
-    # 2. Main Rules (e.g., 139.5, 139.5(e)(1)) - Assign Priority 2
-    elif re.match(r'^\d+\.\d+', key):  # Check if it starts like a rule number
-        try:
-            match = re.match(r'^(\d+\.\d+(?:[A-Z]?))((?:\([^)]+\))*)$', key)
-            if match:
-                base_rule_str = match.group(1)     # e.g., "139.555" or "139.5A"
-                parens_str = match.group(2) or ""  # e.g., "(e)(1)" or ""
-                part_str, minor_base = base_rule_str.split('.', 1)
-                part_num = int(part_str)
-                m_minor_base = re.match(r'^(\d+)([A-Z]?)$', minor_base)
-                if m_minor_base:
-                    minor_num = int(m_minor_base.group(1))
-                    minor_letter = m_minor_base.group(2)
-                    paren_parts_raw = re.findall(r'\(([^)]+)\)', parens_str)
-                    paren_sort_tuple_elements = ()
-                    for p in paren_parts_raw:
-                        p_strip = p.strip()
-                        if p_strip.isdigit():
-                            paren_sort_tuple_elements += (1, int(p_strip))  # Num first within parens
-                        elif len(p_strip) == 1 and p_strip.isalpha():
-                            paren_sort_tuple_elements += (2, ord(p_strip.lower()))  # Letter second
-                        else:
-                            paren_sort_tuple_elements += (3, p_strip.lower())  # Others last
-
-                    return (2, part_num, minor_num, minor_letter) + paren_sort_tuple_elements  # Priority 2
-        except Exception as e:
-            print(f"Warning: Sort key error for rule '{key}': {e}")
-            pass
-
-    # 3. Appendix Items (e.g., A.1, B.2.1(a)) - Assign Priority 3
-    elif re.match(r'^[A-Z]\.', key):
-        parts = re.split(r'[.()]', key)
-        parts = [p for p in parts if p]
-        sortable_parts = [parts[0]]  # Start with the letter (A, B, C...)
-        for part in parts[1:]:
-            if part.isdigit():
-                sortable_parts.append(int(part))
-            else:
-                if len(part) == 1 and part.isalpha():
-                    sortable_parts.append(ord(part.lower()))  # Use ASCII for single letters
-                else:
-                    sortable_parts.append(part.lower())  # Lowercase others
-        # Priority 3, then sort by parts
-        return (3,) + tuple(sortable_parts)
-
-    # Fallback
-    return (float('inf'), key)  # Put errors/unknowns last
-DATE_HDR_RE = re.compile(
-    r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\.\s+\d{1,2},\s+\d{4}'
-    r'(?:\s+[ap]\.?m\.?)?',  # optional a.m./p.m.
-    re.I,
-)
-# --------------------------------------------------------------------
-# 1) helpers – put these near the top of the file
-# --------------------------------------------------------------------
-MONTH_RE = r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\.'
-DATE_RE = re.compile(
-    rf'\b{MONTH_RE}\s+\d{{1,2}},\s+\d{{4}}'
-    r'(?:,\s*)?(?:[ap]\.?m\.?)?',  # optional ", a.m." / ", p.m."
-    re.I
-)
-# Stray leading "Fe" that's left behind when "Feb." or "Fe\nb." is split
-FE_CRUMB_RE = re.compile(r'\bFe(?=[a-z])')  # Fevery, Feaccompanied …
-
-def merge_and_clean(raw: str) -> str:
-    """Collapse newlines, strip date headers, remove Fe* crumbs."""
-    # (i) merge → one long line
-    text = ' '.join(raw.splitlines())
-
-    # (ii) nuke any full-form dates
-    text = DATE_RE.sub('', text)
-
-    # (iii) wipe the "Fe" crumbs that remain after bad line-wrap
-    text = FE_CRUMB_RE.sub('', text)
-
-    # (iv) collapse doubled spaces made by the removals
-    return re.sub(r'\s{2,}', ' ', text).strip()
-def strip_trailing_duplicate_heading(s: str) -> str:
-    """
-    If a line starts with a heading (up to the first '(' or end-of-line)
-    and that identical heading is repeated at the very end, remove the
-    trailing copy.
-
-    >>> strip_trailing_duplicate_heading(
-    ...     "Changes to certificate holder's organisation (a) … (e) Changes to certificate holder's organisation"
-    ... )
-    "Changes to certificate holder's organisation (a) … (e)"
-    """
-    # 1) grab the prefix heading (everything before the first '(' or EOL)
-    head = s.split('(', 1)[0].strip()
-    if not head:
-        return s
-
-    # 2) does the string *end* with exactly that heading?
-    if s.rstrip().endswith(head):
-        # slice it off and tidy spaces
-        s = s[: -len(head)].rstrip()
-
-    return s
-
-# ─────────────────────────────────────────────────────────────────────────────
-# 6. MAIN COMPARISON FUNCTION
-# ─────────────────────────────────────────────────────────────────────────────
-def compare_regulations_app(part, onereg_pdf, caa_pdf):
-    try:
-        raw_one = extract_pdf_word(onereg_pdf)
-        raw_caa = extract_pdf_text(caa_pdf)
-        for ln in raw_caa.splitlines():
-            if any(tok in ln for tok in ("139.61", "139.63")):
-                print("[RAW]", ln[:120])
-        lines_one = merge_pdf_wrapped_lines(raw_one)
-        lines_caa = merge_pdf_wrapped_lines(raw_caa)
-        text_one = "\n".join(lines_one)
-        text_caa = "\n".join(lines_caa)
-        one = parse_rules(text_one, "onereg")
-        # print(one)
-        caa = parse_rules(text_caa, "caa")
-
-        # print(one)
-
-        # filter & sort as before…
-        all_ids = set(one) | set(caa)
-
-        # Inside compare_regulations_app function
-        user_inputs = []  # to collect the inputs from the user
-
-        rules = [
-            r for r in all_ids
-            if r.startswith(f"{part}.")      # Main rules like 139.5
-            or r.startswith("subpart-")      # Subpart headings like subpart-A
-            # or r.startswith("appendix-")   # REMOVED - No longer generating these keys
-            or re.match(r'^[A-Z]\.', r)      # Appendix items like A.1, B.2(a)
-        ]
-        print(rules)
-
-        rules.sort(key=combined_sort_key)
-        print(rules)
-        #rules.sort(key=natural_key)
-
-        sections = []
-        df_rows = []
-        for rule in rules:
-            o = one.get(rule, "")
-            if header_pat.match(o) or DATE_HDR_RE.fullmatch(o.strip()):
-                # print(f"Skipping header line for rule {rule}: {o}")  # comment-out if noisy
-                continue
-            o = merge_and_clean(o)
-            o = collapse_repeated_first_word(o)       # Apply the new function
-            o = strip_trailing_duplicate_heading(o)   # Remove trailing duplicate headings
-            print(o)
-            c = caa.get(rule, "")
-            #o_html = inject_tables(o)
-            c_text_tables = inject_tables(c)
-            unified_diff_html = diff_unified(o, c)
-
-            sections.append(f"""
-                <div class="rule-section">
-                  <input type="checkbox" id="chk_{rule}" name="rule" value="{rule}">
-                  <label for="chk_{rule}" class="rule-label">{rule}</label>
-                  <div class="rule-content">
-                    <strong>Unified Diff (OneReg <del style='background:#fdd;text-decoration:line-through;'>deletions</del> / CAA <ins style='background:#dfd;text-decoration:none;'>additions</ins>)</strong><br>
-                    {unified_diff_html}
-                    <br><br>
-                    {
-                        f'''<strong>CAA (Cleaned + Tables)</strong><br>
-                        {c_text_tables}'''
-                    }
-                  </div>
-                </div>
-                <hr>
-            """)
-            df_rows.append([rule, ""])
-
-        style = """
-        <style>
-        /* ─────────── colour tokens ─────────── */
-        :root{
-          --bg: #ffffff;
-          --text: #000000;
-          --border: #cccccc;
-          --rule-label-on: #ff8a80;   /* light green */
-          --rule-content-on: #e8f5e9;
-        }
-        @media (prefers-color-scheme: dark){
-          :root{
-            --bg: #121212;
-            --text: #e0e0e0;
-            --border: #444444;
-            --rule-label-on: #ff8a80;   /* dark-mode green */
-            --rule-content-on:#1b5e20;
-          }
-        }
-
-        /* ─────────── global ─────────── */
-        body{
-          background: var(--bg);
-          color: var(--text);
-          font-family: Arial, Helvetica, sans-serif;
-          font-size: .9em;
-        }
-        span{white-space:pre-wrap}
-        hr{
-          border:none;
-          border-top:1px solid var(--border);
-          margin:1.2em 0;
-        }
-
-        /* ─────────── diff-viewer widgets ─────────── */
-        .rule-section{
-          padding:.5em;
-          transition:background .2s;
-        }
-        .rule-label{
-          font-weight:bold;
-          margin-left:.5em;
-          padding:.2em .4em;
-          border-radius:4px;
-          cursor:pointer;
-        }
-        .rule-content{
-          margin-left:2em;
-          padding:.5em;
-          border-radius:4px;
-        }
-
-        /* checked highlights */
-        .rule-section input[type=checkbox]:checked + .rule-label{
-          background:var(--rule-label-on);
-        }
-        .rule-section input[type=checkbox]:checked ~ .rule-content{
-          background:var(--rule-content-on);
-        }
-
-        /* make links + table borders visible in both modes */
-        a{color:inherit;text-decoration:underline;}
-        table{color:inherit;border-color:var(--border);}
-        th,td{border-color:var(--border);}
-        </style>
-        """
-        html_out = style + "".join(sections)
-        # Create a DataFrame for the rules
-        comments_df = pd.DataFrame(df_rows, columns=["Rule", "comment"])
-
-        return html_out, comments_df
-
-    except Exception as e:
-        return ("<div style='color:red'>Error:<br>"
-                f"{e}<br><pre>{traceback.format_exc()}</pre></div>")
-
-def save_comments_to_csv(df: pd.DataFrame):
-    """
-    Writes the editable dataframe (rule, comment) to a CSV and
-    returns a file object that Gradio can offer for download.
-    """
-    # keep only rows where the user actually wrote something
-    df = df[df["comment"].str.strip().astype(bool)]
-
-    if df.empty:
-        raise gr.Error("You didn't write any comments yet!")
-
-    filename = f"rule_comments_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
-    csv_path = os.path.join(os.getcwd(), filename)
-    df.to_csv(csv_path, index=False)
-    return csv_path
-# ─────────────────────────────────────────────────────────────────────────────
-# 7. GRADIO UI
-# ─────────────────────────────────────────────────────────────────────────────
-with gr.Blocks(theme=gr.themes.Soft()) as demo:
-    gr.Markdown("## CAA ↔ OneReg — rule-level diff (section format)")
-    part = gr.Textbox(label="Part Number", value="139")
-    onereg_pdf = gr.File(label="Upload OneReg PDF")
-    caa_pdf = gr.File(label="Upload CAA PDF")
-    btn_compare = gr.Button("Compare")
-    out_html = gr.HTML()
-    comment_df = gr.Dataframe(
-        headers=["rule", "comment"],
-        datatype=["str", "str"],
-        interactive=True,
-        label="✏️ Type your comments in the **comment** column, then click “Save to CSV”"
-    )
-    btn_save = gr.Button("💾 Save to CSV")
-    download = gr.File(label="Download your CSV")
-    btn_compare.click(
-        compare_regulations_app,
-        inputs=[part, onereg_pdf, caa_pdf],
-        outputs=[out_html, comment_df],
-    )
-    btn_save.click(
-        save_comments_to_csv,
-        inputs=[comment_df],
-        outputs=[download],
-    )
-
-
-if __name__ == "__main__":
-    #api_key = os.getenv("GOOGLE_API_KEY")
-    #print(api_key)
-    current_os = platform.system()
-    print(f"Current OS: {current_os}")
-    if current_os == "Windows":
-        print("Running on Windows")
-        server_name = "localhost"
-    elif current_os == "Linux":
-        server_name = "0.0.0.0"
-    else:
-        server_name = "0.0.0.0"
-
-    demo.launch(
-        server_name=server_name,
-        server_port=int(os.environ.get("GRADIO_SERVER_PORT", 7860)),
-        share=False
-    )
+###############################################################################
+# CAA ↔ OneReg | Dual Document Cleaning & Comparison Tool                     #
+###############################################################################
+import io
+import os
+import re
+import html
+import json
+import traceback
+import difflib
+import platform
+import pandas as pd
+from datetime import datetime
+
+import fitz                     # PyMuPDF
+from PyPDF2 import PdfReader    # plain text extraction
+import gradio as gr             # UI
+from dotenv import load_dotenv  # optional .env support
+
+
+# ─────────────────────────────────────────────────────────────────────────────
+# 1. PDF & TEXT PROCESSING
+# ─────────────────────────────────────────────────────────────────────────────
+
+def extract_pdf_text(pdf_file) -> str:
+    """Extracts text from a PDF file using PyPDF2."""
+    reader = PdfReader(pdf_file)
+    return "\n".join(p.extract_text() or "" for p in reader.pages)
+
+
+def extract_pdf_word(pdf_file) -> str:
+    """Extracts text from PDF using PyMuPDF (fitz) for better layout preservation."""
+    doc = fitz.open(pdf_file)
+    text_blocks = [page.get_text("text") for page in doc]
+    return "\n".join(filter(None, text_blocks))
+
+
+def merge_pdf_wrapped_lines(raw_text: str) -> list[str]:
+    """Re-join hard-wrapped lines from PDF extraction."""
+    merged = []
+    for ln in raw_text.splitlines():
+        ln_stripped = ln.strip()
+        if not ln_stripped: continue
+        if merged:
+            prev = merged[-1]
+            if (re.search(r'[a-z]$', prev) and re.match(r'^[\(a-z]', ln_stripped)) or \
+               (re.search(r'\b(?:rule|may|and|or)$', prev, re.I) and re.match(r'^\d+\.\d+', ln_stripped)) or \
+               (re.search(r'\brule\s+\d+\.$', prev, re.I) and re.match(r'^\d', ln_stripped)):
+                merged[-1] = prev + (' ' if re.search(r'[a-z]$', prev) else '') + ln_stripped
+                continue
+        merged.append(ln_stripped)
+    return merged
+
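A quick way to sanity-check the re-join heuristics is a small synthetic sample; none of these strings come from either PDF:

```python
# Minimal sketch: exercising merge_pdf_wrapped_lines on synthetic wrapped text.
sample = "\n".join([
    "An applicant for the grant of a certificate must",  # ends lowercase...
    "submit an exposition under rule",                   # ...next starts lowercase -> joined
    "139.77 of this Part",                               # "rule" + number -> joined
    "Subpart B",                                         # uppercase start stays separate
])
print(merge_pdf_wrapped_lines(sample))
# ['An applicant for the grant of a certificate must submit an exposition under rule 139.77 of this Part',
#  'Subpart B']
```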
+
+# ─────────────────────────────────────────────────────────────────────────────
+# 2. RULE PARSING & CLEANING (Initial Automated Pass)
+# ─────────────────────────────────────────────────────────────────────────────
+
+# --- Regex for rule structure ---
+rule_pat = re.compile(
+    r'^(?:(?:\d+\.){2,}\s*)?(?P<base_rule>\d+\.\d+(?:[A-Z]?))(?P<parens>(?:\s*\([^)]+\))*?)\s*(?P<title>.*)$',
+    re.IGNORECASE
+)
+appendix_item_pat = re.compile(
+    r'^\s*([A-Z])\.(\d+(?:\.\d+)*)(?:\s*\(([^)]+)\))?\s+(?P<title>[A-Za-z0-9].*)$',
+    re.IGNORECASE
+)
+subpart_pat = re.compile(
+    r'^\s*\d+\.\s*Subpart\s+([A-Z]{1,2})\s*[—-]\s*(.+)$',
+    re.IGNORECASE
+)
+
+# --- Regex for cleaning ---
+page_pat = re.compile(r'Page\s+\d+\s*/\s*\d+', re.IGNORECASE)
+date_pat = re.compile(
+    r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[a-z.]*\s+\d{1,2},?\s+\d{4}',
+    re.IGNORECASE
+)
+header_pat = re.compile(
+    r'^(?:Purpose\s+)?(?:[A-Z][a-z]{2}\.)\s+\d{1,2},\s*\d{4},.*$', re.IGNORECASE
+)
+
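It helps to see what each structural pattern captures before the cleaning pass; the strings below are made-up examples, not lines from either document:

```python
# Minimal sketch: what the three structure patterns match (synthetic inputs).
m = rule_pat.match("139.5 Certification required")
print(m.group('base_rule'), '|', m.group('title'))   # 139.5 | Certification required

m = appendix_item_pat.match("C.2.1 (a) Fire fighting vehicles")
print(m.group(1), m.group(2), m.group(3), '|', m.group('title'))
# C 2.1 a | Fire fighting vehicles

m = subpart_pat.match("12. Subpart B — Certification Requirements")
print(m.group(1), '|', m.group(2))                   # B | Certification Requirements
```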
+
+
+def clean_line(line: str, source: str) -> str:
+    """Performs a basic, automated cleaning pass on a line of text."""
+    if source == "onereg":
+        line = re.sub(r'\b(?:\d+\.){3,}\s*', '', line)  # Zap outline IDs 1.2.3.
+        if header_pat.match(line):
+            return ""
+
+    # Generic cleaning for both
+    line = page_pat.sub('', line)
+    line = date_pat.sub('', line)
+    line = re.sub(r'Civil Aviation Rules\s+Part\s+\d+\s+CAA Consolidation', '', line, flags=re.I)
+    line = re.sub(r'^\d{1,2}\s+[A-Za-z]+\s+\d{4}\s*\d*\s*CAA of NZ', '', line, flags=re.I)
+    line = re.sub(r'\S+@\S+', '', line)  # email
+    line = re.sub(r'\s{2,}', ' ', line)
+    return line.strip()
+
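The effect of the cleaning pass is easiest to see on a fabricated noisy line:

```python
# Minimal sketch: clean_line stripping outline/page/date noise (synthetic input).
noisy = "1.2.3.4. 139.5 Certification required Page 3 / 42 Feb. 20, 2023"
print(clean_line(noisy, "onereg"))
# -> '139.5 Certification required'
```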
+
+def parse_rules(text: str, source: str) -> dict[str, str]:
+    """Parses raw text into a dictionary of {rule_id: rule_text}."""
+    rules, current, title = {}, None, ""
+
+    lines = merge_pdf_wrapped_lines(text)
+
+    for raw_line in lines:
+        line = clean_line(raw_line, source)
+        if not line: continue
+
+        m_ap_item = appendix_item_pat.match(line)
+        m_sp = subpart_pat.match(line)
+        m_rule = rule_pat.match(line)
+
+        new_key = None
+        new_title = ""
+
+        if m_ap_item:
+            key_parts = [m_ap_item.group(1).upper(), m_ap_item.group(2)]
+            if m_ap_item.group(3): key_parts.append(f"({m_ap_item.group(3).strip()})")
+            new_key = ".".join(key_parts)
+            new_title = m_ap_item.group('title').strip()
+        elif m_sp:
+            new_key = f"subpart-{m_sp.group(1).upper()}"
+            new_title = f"Subpart {m_sp.group(1).upper()} — {m_sp.group(2).strip()}"
+        elif m_rule:
+            base = m_rule.group('base_rule')
+            parens_str = m_rule.group('parens') or ""
+            new_key = base + "".join(re.findall(r'\([^)]+\)', parens_str))
+            new_title = m_rule.group('title').strip()
+
+        if new_key:
+            current = new_key
+            title = new_title
+            rules.setdefault(current, [])
+            if title:
+                rules[current].append(title)
+        elif current:
+            if not title or line.lower() != title.lower():
+                rules[current].append(line)
+
+    return {k: " ".join(v).strip() for k, v in rules.items()}
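End to end, the parser folds heading and continuation lines into one text blob per key; on a synthetic fragment:

```python
# Minimal sketch: parse_rules on a synthetic fragment (not real rule text).
sample = """12. Subpart B — Certification Requirements
139.5 Certification required
Each applicant must hold a certificate.
A.1 Aerodrome design requirements
Runway strips must be graded."""
print(parse_rules(sample, "caa"))
# {'subpart-B': 'Subpart B — Certification Requirements',
#  '139.5': 'Certification required Each applicant must hold a certificate.',
#  'A.1': 'Aerodrome design requirements Runway strips must be graded.'}
```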
+
+
+# ─────────────────────────────────────────────────────────────────────────────
+# 3. COMPARISON & UI LOGIC
+# ─────────────────────────────────────────────────────────────────────────────
+
+def diff_unified(one: str, caa: str) -> str:
+    """Generates a single HTML string showing differences inline."""
+    sm = difflib.SequenceMatcher(None, one, caa, autojunk=False)
+    output = []
+    for tag, i1, i2, j1, j2 in sm.get_opcodes():
+        one_segment = html.escape(one[i1:i2])
+        caa_segment = html.escape(caa[j1:j2])
+        if tag == "equal":
+            output.append(one_segment)
+        elif tag == "delete":
+            output.append(
+                f"<del style='background:#fdd; text-decoration: line-through; color: #000;'>{one_segment}</del>")
+        elif tag == "insert":
+            output.append(f"<ins style='background:#dfd; text-decoration: none; color: #000;'>{caa_segment}</ins>")
+        elif tag == "replace":
+            output.append(
+                f"<del style='background:#fdd; text-decoration: line-through; color: #000;'>{one_segment}</del>")
+            output.append(f"<ins style='background:#dfd; text-decoration: none; color: #000;'>{caa_segment}</ins>")
+    return f"<span style='white-space: pre-wrap; color: var(--text);'>{''.join(output)}</span>"
+
+
+def combined_sort_key(key: str):
+    """Robustly sorts rules, subparts, and appendices."""
+    if key.startswith("subpart-"):
+        return (1, key)
+
+    sortable_tuple = ()
+    if re.match(r'^\d+\.\d+', key):
+        sortable_tuple += (2,)
+    elif re.match(r'^[A-Z]\.', key):
+        sortable_tuple += (3,)
+    else:
+        return (4, key)
+
+    parts = re.split(r'[.()]', key)
+    parts = [p for p in parts if p]
+
+    for part in parts:
+        if part.isdigit():
+            sortable_tuple += ((1, int(part)),)
+        else:
+            sortable_tuple += ((2, part.lower()),)
+    return sortable_tuple
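The leading priority element (1 subparts, 2 rules, 3 appendix items, 4 fallback) keeps the groups apart before the per-part tuples are compared; for example:

```python
# Minimal sketch: combined_sort_key ordering a mixed bag of keys.
keys = ["A.2", "139.10", "subpart-B", "139.5", "A.1(a)", "subpart-A", "139.5A"]
print(sorted(keys, key=combined_sort_key))
# ['subpart-A', 'subpart-B', '139.5', '139.10', '139.5A', 'A.1(a)', 'A.2']
# Note: "5A" is not a digit, so 139.5A falls into the string bucket and
# sorts after 139.10 under this simplified key.
```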
+
+
+def save_clean_and_dirty_versions(dirty_one, dirty_caa, clean_one, clean_caa, filename: str) -> str:
+    """Saves both original and cleaned versions to a .jsonl file."""
+    all_ids = sorted(
+        list(set(dirty_one.keys()) | set(dirty_caa.keys())),
+        key=combined_sort_key
+    )
+    with open(filename, 'w', encoding='utf-8') as f:
+        for rule_id in all_ids:
+            # OneReg record
+            record_one = {
+                "rule_id": rule_id,
+                "source": "onereg",
+                "dirty_text": dirty_one.get(rule_id, ""),
+                "clean_text": clean_one.get(rule_id, "")
+            }
+            f.write(json.dumps(record_one) + '\n')
+            # CAA record
+            record_caa = {
+                "rule_id": rule_id,
+                "source": "caa",
+                "dirty_text": dirty_caa.get(rule_id, ""),
+                "clean_text": clean_caa.get(rule_id, "")
+            }
+            f.write(json.dumps(record_caa) + '\n')
+    return filename
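Each rule id produces two JSON lines (one per source), so the export loads straight back into pandas; a possible read-back, with an illustrative filename:

```python
# Minimal sketch: reading the .jsonl export back (the filename is illustrative).
df = pd.read_json("cleaned_rules_20240101_000000.jsonl", lines=True)
print(df.columns.tolist())  # ['rule_id', 'source', 'dirty_text', 'clean_text']
onereg_pairs = df[df["source"] == "onereg"][["dirty_text", "clean_text"]]
```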
+
+
+# --- STAGE 1: Process PDFs and prepare for user review ---
+def stage1_process_and_review(part, onereg_pdf, caa_pdf):
+    if not (onereg_pdf and caa_pdf):
+        raise gr.Error("Please upload both PDF files.")
+    try:
+        # Process OneReg PDF
+        raw_one = extract_pdf_word(onereg_pdf.name)
+        one_data = parse_rules(raw_one, "onereg")
+
+        # Process CAA PDF
+        raw_caa = extract_pdf_text(caa_pdf.name)
+        caa_data = parse_rules(raw_caa, "caa")
+
+        # Get all rule IDs and sort them
+        all_ids = sorted(
+            list(set(one_data.keys()) | set(caa_data.keys())),
+            key=combined_sort_key
+        )
+
+        rules_to_review = [
+            r for r in all_ids
+            if r.startswith(f"{part}.") or r.startswith("subpart-") or re.match(r'^[A-Z]\.', r)
+        ]
+
+        # Prepare DataFrame for user editing with both documents
+        review_rows = []
+        for rule_id in rules_to_review:
+            one_text = one_data.get(rule_id, "[Rule not found in OneReg]")
+            caa_text = caa_data.get(rule_id, "[Rule not found in CAA]")
+            review_rows.append([rule_id, one_text, caa_text])
+
+        df = pd.DataFrame(review_rows, columns=["Rule ID", "OneReg Text (Editable)", "CAA Text (Editable)"])
+
+        return {
+            original_one_state: one_data,
+            original_caa_state: caa_data,
+            review_df: gr.update(value=df, visible=True),
+            btn_finalize: gr.update(visible=True),
+        }
+    except Exception as e:
+        traceback.print_exc()
+        raise gr.Error(f"Failed during initial processing: {e}")
+
+
+# --- STAGE 2: Take user-cleaned text and perform the final comparison ---
+def stage2_finalize_and_compare(review_df, original_one, original_caa):
+    if review_df is None or review_df.empty:
+        raise gr.Error("No data to compare. Please process the files first.")
+
+    # Convert the user-edited DataFrame back into dictionaries
+    clean_one_data = pd.Series(review_df['OneReg Text (Editable)'].values, index=review_df['Rule ID']).to_dict()
+    clean_caa_data = pd.Series(review_df['CAA Text (Editable)'].values, index=review_df['Rule ID']).to_dict()
+
+    # Save the training data file
+    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+    jsonl_filename = f"cleaned_rules_{timestamp}.jsonl"
+    saved_filepath = save_clean_and_dirty_versions(original_one, original_caa, clean_one_data, clean_caa_data,
+                                                   jsonl_filename)
+
+    # Perform the final comparison
+    all_ids = sorted(
+        list(set(clean_one_data.keys()) | set(clean_caa_data.keys())),
+        key=combined_sort_key
+    )
+
+    sections = []
+    for rule_id in all_ids:
+        one_clean = clean_one_data.get(rule_id, "")
+        caa_clean = clean_caa_data.get(rule_id, "")
+
+        diff_html = diff_unified(one_clean, caa_clean)
+
+        sections.append(f"""
+            <div class="rule-section">
+              <strong class="rule-label">{rule_id}</strong>
+              <div class="rule-content">
+                {diff_html}
+              </div>
+            </div>
+            <hr>
+        """)
+
+    style = """
+    <style>
+    body { font-family: sans-serif; color: var(--body-text-color); }
+    .rule-label { font-size: 1.1em; background: #f0f0f0; padding: 5px; display: block; border-top-left-radius: 5px; border-top-right-radius: 5px; }
+    .rule-content { padding: 10px; border: 1px solid #f0f0f0; border-top: none; margin-bottom: 1em; white-space: pre-wrap; }
+    hr { border: none; border-top: 1px solid #ccc; margin: 1.5em 0; }
+    </style>
+    """
+    final_html = style + "".join(sections)
+
+    return {
+        out_html: gr.update(value=final_html, visible=True),
+        download_jsonl: gr.update(value=saved_filepath, visible=True)
+    }
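The `pd.Series(values, index=...).to_dict()` idiom is what carries the user's table edits back into plain dictionaries; in isolation:

```python
# Minimal sketch: DataFrame column -> {rule_id: text} round trip (synthetic row).
df = pd.DataFrame(
    [["139.5", "edited OneReg text", "edited CAA text"]],
    columns=["Rule ID", "OneReg Text (Editable)", "CAA Text (Editable)"],
)
clean_one = pd.Series(df["OneReg Text (Editable)"].values, index=df["Rule ID"]).to_dict()
print(clean_one)  # {'139.5': 'edited OneReg text'}
```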
+
+
+# ─────────────────────────────────────────────────────────────────────────────
+# 4. GRADIO UI LAYOUT
+# ─────────────────────────────────────────────────────────────────────────────
+
+with gr.Blocks(theme=gr.themes.Soft(), title="Dual Rule Cleaning Tool") as demo:
+    gr.Markdown("## CAA ↔ OneReg — Dual Document Cleaning & Comparison Tool")
+
+    # State to hold the original "dirty" data between steps
+    original_one_state = gr.State({})
+    original_caa_state = gr.State({})
+
+    # --- Stage 1: Inputs and Initial Processing ---
+    with gr.Row():
+        part_num = gr.Textbox(label="Part Number", value="139")
+        onereg_pdf = gr.File(label="Upload OneReg PDF")
+        caa_pdf = gr.File(label="Upload CAA PDF")
+
+    btn_process = gr.Button("1. Process PDFs & Prepare for Cleaning", variant="secondary")
+
+    gr.Markdown("---")
+
+    # --- Stage 2: User Review and Cleaning ---
+    gr.Markdown("### 2. Review and Manually Clean Both Documents")
+    gr.Markdown(
+        "Edit the text in the table below to remove any headers, footers, or other noise from **both** documents. Once you are finished, click the 'Finalize, Compare & Save' button.")
+
+    review_df = gr.DataFrame(
+        headers=["Rule ID", "OneReg Text (Editable)", "CAA Text (Editable)"],
+        datatype=["str", "str", "str"],
+        interactive=True,
+        visible=False,
+        wrap=True,
+        row_count=(10, "dynamic")
+    )
+
+    btn_finalize = gr.Button("3. Finalize, Compare & Save", variant="primary", visible=False)
+
+    gr.Markdown("---")
+
+    # --- Stage 3: Final Comparison Output & Export ---
+    gr.Markdown("### 4. Final Comparison & Export")
+    gr.Markdown(
+        "Deletions from OneReg are in <del style='background:#fdd;'>red</del> and additions from CAA are in <ins style='background:#dfd;'>green</ins>.")
+
+    out_html = gr.HTML(visible=False)
+    download_jsonl = gr.File(label="Download Cleaned & Dirty Data (.jsonl)", visible=False)
+
+    # --- Wire up UI events ---
+    btn_process.click(
+        fn=stage1_process_and_review,
+        inputs=[part_num, onereg_pdf, caa_pdf],
+        outputs=[original_one_state, original_caa_state, review_df, btn_finalize]
+    )
+
+    btn_finalize.click(
+        fn=stage2_finalize_and_compare,
+        inputs=[review_df, original_one_state, original_caa_state],
+        outputs=[out_html, download_jsonl]
+    )
+
+if __name__ == "__main__":
+    current_os = platform.system()
+    server_name = "0.0.0.0" if current_os == "Linux" else "127.0.0.1"
+    demo.launch(
+        server_name=server_name,
+        server_port=int(os.environ.get("GRADIO_SERVER_PORT", 7860)),
+    )