HaiderAUT committed on
Commit 2b4f1a9 · verified · 1 Parent(s): d620dcf

Update app.py

Files changed (1)
  1. app.py +385 -1035
app.py CHANGED
@@ -1,1035 +1,385 @@
1
-
2
-
3
- ###############################################################################
4
- # CAA ⇄ OneReg rule-level diff viewer (section format) #
5
- ###############################################################################
6
- import io
7
- import os
8
- import re
9
- import html
10
- import traceback
11
- import difflib
12
- import platform
13
- import pandas as pd
14
- from datetime import datetime
15
-
16
- import fitz # PyMuPDF (for future OneReg table OCR)
17
- from PyPDF2 import PdfReader # plain text extraction
18
- import gradio as gr # UI
19
- from dotenv import load_dotenv # optional .env support
20
- # from google import genai # uncomment when OCRing OneReg tables
21
- # from google.genai import types
22
- # ─────────────────────────────────────────────────────────────────────────────
23
- # 0. ENV / API KEY (Gemini – *not* used yet, but wired for future)
24
- # ─────────────────────────────────────────────────────────────────────────────
25
- #load_dotenv()
26
- #API_KEY = os.getenv("GOOGLE_API_KEY", "") # intentionally blank-safe
27
-
28
-
29
- #client = genai.Client(api_key=api_key)
30
-
31
- _table_caption = re.compile(r'^\s*Table\s+\d+\.', re.I)
32
-
33
-
34
-
35
- # ═════════════════════════════════════════════════════════════════════════════
36
- # 1. PDF β†’ TEXT
37
- # ═════════════════════════════════════════════════════════════════════════════
38
- def extract_pdf_text(pdf_file) -> str:
39
- reader = PdfReader(pdf_file)
40
- out_pf= "\n".join(p.extract_text() or "" for p in reader.pages)
41
- print(out_pf)
42
- return out_pf
43
- def extract_pdf_word(pdf_file) -> str:
44
- """
45
- Extract text from PDF using PyMuPDF (fitz).
46
- This is a fallback if PyPDF2 fails to extract text properly.
47
- """
48
- doc = fitz.open(pdf_file)
49
- out_pf = []
50
- for page in doc:
51
- text = page.get_text("text")
52
- if text:
53
- out_pf.append(text.strip())
54
- return "\n".join(out_pf)
55
- def merge_pdf_wrapped_lines(lines):
56
- """
57
- Join isolated 'Feb. 20,' '2023,' 'a.m.' (or 'None') pieces that PyPDF2
58
- spits out on separate lines. Returns a fresh list with composites merged.
59
- """
60
- merged = []
61
- buffer = []
62
-
63
- def flush_buffer():
64
- if buffer:
65
- merged.append(' '.join(buffer))
66
- buffer.clear()
67
-
68
- for line in lines:
69
- if date_piece_pat.match(line.strip()):
70
- buffer.append(line.strip().rstrip(',')) # keep pieces
71
- # we leave it and wait for the next piece
72
- else:
73
- # Finished collecting date fragments β†’ flush if we have a date
74
- if buffer:
75
- flush_buffer()
76
- merged.append(line)
77
- flush_buffer() # leftover at EOF
78
- return merged
79
-
80
- import re, html
81
- # --- NEW helper ------------------------------------------------------------
82
- _ROW_START = re.compile(r"^\s*(?:[1-9]|10)\s+\d+\s*m\b")
83
-
84
- def _split_flat_table(line: str) -> list[str] | None:
85
- """If *line* contains a flattened table, return caption+rows list else None."""
86
- m = re.match(r"^(Table\s+\d+\.[^.]+?\.)\s+(.*)$", line, re.I)
87
- if not m:
88
- return None
89
-
90
- caption, body = m.groups()
91
-     # Insert newline before every category number 1-10.
92
- body = re.sub(r"\s(?=[1-9]\b|10\b)", "\n", body)
93
-
94
- rows = [r.strip() for r in body.splitlines() if _ROW_START.match(r)]
95
- if 3 <= len(rows) <= 12:
96
- return [caption] + rows
97
- return None
98
- def render_table_html(lines: list[str]) -> str:
99
- """lines[0] caption, lines[1:] rows with 2+ spaces delim –> <table>."""
100
- caption = html.escape(lines[0])
101
- header_cells = [""] + [html.escape(c) for c in re.split(r"\s{2,}", lines[1].strip())]
102
-
103
- out: list[str] = [
104
- '<table border="1" cellpadding="4" style="border-collapse:collapse;">',
105
- f'<caption>{caption}</caption>',
106
- '<thead><tr>' + "".join(f"<th>{c}</th>" for c in header_cells) + '</tr></thead>',
107
- '<tbody>'
108
- ]
109
- for row in lines[1:]:
110
- cells = re.split(r"\s{2,}", row.strip())
111
- out.append('<tr>' + "".join(f"<td>{html.escape(c)}</td>" for c in cells) + '</tr>')
112
- out.append('</tbody></table>')
113
- return "\n".join(out)
114
- def inject_tables(text: str) -> str:
115
-     """Return *text* where flattened or multi-line tables are turned into HTML."""
116
- out, buf, in_table = [], [], False
117
- lines = text.splitlines()
118
- i = 0
119
- while i < len(lines):
120
- ln = lines[i]
121
-
122
-         # B) flattened single-line table
123
- split_result = _split_flat_table(ln)
124
- if split_result:
125
- out.append(render_table_html(split_result))
126
- i += 1
127
- continue
128
-
129
- # A) normal multi‑line table (caption + numeric rows)
130
- if re.match(r"^\s*Table\s+\d+\.", ln, re.I):
131
- in_table, buf = True, [ln]
132
- i += 1
133
- continue
134
- if in_table and re.match(r"^\s*\d+(?:\s+\d+)+", ln):
135
- buf.append(ln)
136
- i += 1
137
- continue
138
- else:
139
- if in_table:
140
- out.append(render_table_html(buf))
141
- in_table, buf = False, []
142
-
143
- out.append(html.escape(ln))
144
- i += 1
145
-
146
- if in_table:
147
- out.append(render_table_html(buf))
148
- return "<br>".join(out)
149
-
150
- def inject_tables(text: str) -> str:
151
- """
152
- Convert *only* CAA tables into HTML tables.
153
- OneReg passes through unchanged.
154
- """
155
- out, buf, in_table = [], [], False
156
- lines = text.splitlines()
157
- i = 0
158
- while i < len(lines):
159
- ln = lines[i]
160
-
161
- # ── B) Flattened CAA table in a single physical line ───────────────
162
- split_result = _split_flat_table(ln)
163
- if split_result:
164
- out.append(render_table_html(split_result))
165
- i += 1
166
- continue
167
-
168
- # ── A) Normal caption + separate numeric rows table ────────────────
169
- if re.match(r'^\s*Table\s+\d+\.', ln, re.I): # caption line
170
- in_table, buf = True, [ln]
171
- i += 1
172
- continue
173
-
174
- if in_table and re.match(r'^\s*\d+(?:\s+\d+)+', ln): # numeric row
175
- buf.append(ln)
176
- i += 1
177
- continue
178
- else:
179
- if in_table: # end of numeric block
180
- out.append(render_table_html(buf))
181
- in_table, buf = False, []
182
-
183
-         # not part of a table ► just escape it
184
- out.append(html.escape(ln))
185
- i += 1
186
-
187
- if in_table: # file ended inside a table
188
- out.append(render_table_html(buf))
189
-
190
- return "<br>".join(out)
191
-
192
-
193
- def collapse_leading_repeats(line: str) -> str:
194
- """
195
- If a OneReg line begins with the same word repeated
196
- (possibly separated by commas/spaces), collapse them
197
- into a single occurrence plus one space.
198
- """
199
- # grab the first word
200
-     m = re.match(r'^(\s*)(\w+)\b', line)  # two groups: leading indent + first word, for the unpack below
201
- if not m:
202
- return line
203
- prefix,first_word= m.groups()
204
- pat=rf'\b{re.escape(first_word)}\b'
205
- matches=list(re.finditer(pat, line, flags=re.IGNORECASE))
206
- if len(matches) < 2:
207
- return line
208
-
209
- # match any number of "word" occurrences, commas or spaces
210
- first_end=matches[0].end()
211
- second_start,second_end = matches[1].span()
212
- between= line[first_end:second_start]
213
- between_words = re.split(r'[ ,]+', between) # split by spaces or commas
214
- if len(between_words) <= 5: # max 5 chars between
215
- return line
216
- rest=line[second_end:] # everything after the second occurrence
217
- return f"{prefix}{first_word} {rest.strip() if rest else ''}" # collapse to single occurrence
218
-
219
-
220
-
221
-
222
- def strip_stray_numbers(line: str) -> str:
223
- """
224
-     Remove inline “55(a)”, “12(ii)”, etc., UNLESS they are part of a genuine
225
-     rule / section / paragraph citation such as “rule 139.21(b)”.
226
- """
227
-
228
- def _repl(m: re.Match) -> str:
229
- # text that precedes the match
230
- prefix = line[:m.start()]
231
-
232
- # keep the match if prefix ends with:
233
- # rule 139.21 ← full rule citation
234
-         # rule ← plain “rule”
235
- # paragraph ← paragraph / paragraphs
236
- # section ← section / sections
237
- if re.search(
238
- r'\b(?:rule|rules?|paragraphs?|sections?)'
239
- r'(?:\s+\d+\.\d+)?\s*$',
240
- prefix,
241
- flags=re.IGNORECASE,
242
- ):
243
- return m.group(0) # preserve it
244
-
245
- return '' # otherwise drop it
246
-
247
- # match digits followed by (…) BUT NOT digits.digits(…)
248
- pattern = r'\b(?!\d+\.\d+)\d+\s*\([^)]*\)'
249
- return re.sub(pattern, _repl, line)
250
- def merge_pdf_wrapped_lines(raw_text: str) -> list[str]:
251
- """
252
- Re-join hard-wrapped lines produced by the PDF extractor.
253
-
254
-     • join when previous line ends with “rule ….” or any lower-case word
255
- and the next line starts with a digit, β€˜(’, or lower-case letter
256
-     • keep existing “rule/may + 123.45” glue logic
257
- """
258
- merged = []
259
- for ln in raw_text.splitlines():
260
- ln_stripped = ln.strip()
261
- if merged:
262
- prev = merged[-1]
263
-
264
-             # A) break after “rule 139.” → glue “21(b)…”
265
- if re.search(r'\brule\s+\d+\.$', prev, re.I) and re.match(r'^\d', ln_stripped):
266
- merged[-1] = prev + ln_stripped # no space; dot already present
267
- continue
268
-
269
-             # B) generic sentence wrap “…and” / “…or”
270
- if re.search(r'[a-z]$', prev) and re.match(r'^[\(a-z]', ln_stripped):
271
- merged[-1] = prev + ' ' + ln_stripped
272
- continue
273
-
274
- if re.search(r'\b(?:and|or)$', prev) and re.match(r'^\d+\.\d+', ln_stripped):
275
- merged[-1] = prev + ' ' + ln_stripped
276
- continue
277
-
278
-             # C) original “rule|may + 123.45 …” glue
279
- if re.search(r'\b(?:rule|may)$', prev, re.I) and re.match(r'^\d+\.\d+', ln_stripped):
280
- merged[-1] = prev + ' ' + ln_stripped
281
- continue
282
-
283
- merged.append(ln_stripped)
284
- return merged
285
-
286
- """
287
- def merge_pdf_wrapped_lines(raw_text: str) -> list[str]:
288
-
289
- Glue back any PDF-wrapped continuation lines so that mid-sentence
290
- breaks (e.g. after 'and', 'or', or before a '(2)') get reattached.
291
-
292
- merged = []
293
- for ln in raw_text.splitlines():
294
- ln_stripped = ln.strip()
295
- if merged:
296
- prev = merged[-1]
297
- # 1) if prev ends in a lowercase letter (no punctuation),
298
- # and this line starts with '(' or lowercase, glue it:
299
- if re.search(r'[a-z]$', prev) and re.match(r'^[\(a-z]', ln_stripped):
300
- merged[-1] = prev + ' ' + ln_stripped
301
- continue
302
-
303
-             # 2) keep your old rule-merge logic too:
304
- if ( re.search(r'\b(?:rule|may)$', prev, re.IGNORECASE )
305
- and re.match(r'^\d+\.\d+', ln_stripped) ):
306
- merged[-1] = prev + ' ' + ln_stripped
307
- continue
308
-
309
- merged.append(ln_stripped)
310
- return merged
311
- """
312
- # ═════════════════════════════════════════════════════════════════════════════
313
- # 2. Helpers to drop OneReg auto IDs & inline rule numbers
314
- # ═════════════════════════════════════════════════════════════════════════════
315
- def zap_auto_outline_ids(s: str) -> str:
316
- return re.sub(r'\b(?:\d+\.){3,}\s*', '', s)
317
- def collapse_inner_parens(line: str) -> str:
318
- """
319
- For OneReg headings *only*:
320
- - If the line refers to a rule, section(s), or paragraph(s),
321
- return it unchanged (preserve all brackets).
322
- - Otherwise, if there are 2+ (...) groups, remove all but the last.
323
- """
324
- # 1) if it's a rule/section/paragraph referenceβ€”skip collapsing
325
- if re.search(r'\b(?:rule|section|sections|paragraph|paragraphs|under|and)\b',
326
- line,
327
- flags=re.IGNORECASE):
328
- return line
329
-
330
- # 2) find all paren-groups
331
- parens = re.findall(r'\([^()]*\)', line)
332
- if len(parens) <= 1:
333
- return line
334
-
335
- # 3) remove every but the final (...)
336
- last = parens[-1]
337
- new_line = line
338
- for p in parens[:-1]:
339
- new_line = new_line.replace(p, '')
340
-
341
- return new_line
342
-
343
- def zap_inline_rule_numbers(s: str) -> str:
344
- # only match digit-dot-digit[letter] if NOT preceded by β€œrule ”
345
- return re.sub(
346
- r'(?<!\brules\s)\b\d+\.\d+(?:[A-Z]?)\s*(?=\()',
347
- '',
348
- s,
349
- flags=re.IGNORECASE
350
- )
351
-
352
- def strip_inline_self_ref(text_line: str, tail: str) -> str:
353
- if not tail:
354
- return text_line
355
- pattern = rf'\b{re.escape(tail)}(?:\s*\([^)]+\))+'
356
- prev = None
357
- while prev != text_line:
358
- prev = text_line
359
- text_line = re.sub(pattern, '', text_line).strip()
360
- return text_line
361
-
362
-
363
- def drop_leading_repeated_title(line: str, title: str) -> str:
364
- if not title:
365
- return line
366
- pat = rf'^(?:{re.escape(title)}\s*){{2,}}'
367
- return re.sub(pat, f'{title} ', line, flags=re.IGNORECASE).strip()
368
- # catch β€œAppendix A β€” Title”
369
- appendix_pat = re.compile(
370
-     r'^(?:Appendix)\s+([A-Z])\s*[–—-]\s*(?P<title>.+)$',
371
- re.IGNORECASE
372
- )
373
-
374
- page_pat = re.compile(r'Page\s+\d+\s*/\s*\d+', re.IGNORECASE)
375
- date_pat = re.compile(
376
- r'(?:Jan\.?|Feb\.?|Mar\.?|Apr\.?|May\.?|Jun\.?|Jul\.?|Aug\.?|'
377
- r'Sep\.?|Sept\.?|Oct\.?|Nov\.?|Dec\.?|January|February|March|April|May|'
378
- r'June|July|August|September|October|November|December)'
379
- r'\s+\d{1,2},\s*\d{4}(?:,\s*(?:a\.?m\.?|p\.?m\.?))?',
380
- re.IGNORECASE
381
- )
382
-
383
- # Inside clean_line function
384
- header_pat = re.compile(
385
- r'^(?:Purpose\s+)?' # optional "Purpose"
386
- r'(?:[A-Z][a-z]{2}\.)\s+\d{1,2},\s*\d{4},\s*(?:a\.?m\.?|p\.?m\.?)' # "Feb. 20, 2023, a.m."
387
- r'(?:\s*\([a-z]\)\s*[A-Z][a-z]{2}\.\s+\d{1,2},\s*\d{4},\s*(?:a\.?m\.?|p\.?m\.?))*$', # repeats
388
- re.IGNORECASE
389
- )
390
- MONTH = r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\.?'
391
- TIME = r'(?:a\.?m\.?|p\.?m\.?)'
392
- YEAR = r'\d{4}'
393
-
394
- orphan_date_pat = re.compile(
395
- rf'^(?:{MONTH}|{TIME}|{YEAR}|\d{{1,2}},?)$'
396
- )
397
-
398
- def clean_line(line: str, source: str) -> str:
399
- # --- Apply OneReg prefix removal FIRST if applicable ---
400
- if source == "onereg":
401
- line = zap_auto_outline_ids(line) # Remove 11.3.5. etc. prefixes
402
- if header_pat.match(line):
403
- return ""
404
-
405
-
406
- if source == "caa":
407
-         line = line.replace('—', '')
408
- line = line.replace('–', '')
409
-
410
-
411
- # --- Continue with other cleaning ---
412
- line = page_pat.sub('', line)
413
- if orphan_date_pat.match(line):
414
-         return ""
415
-
416
- if source == "caa" and ("Civil Aviation Rules" in line or "CAA of NZ" in line):
417
- # ... (CAA header cleaning remains the same) ...
418
- line = re.sub(
419
- r'Civil Aviation Rules\s+Part\s+\d+\s+CAA Consolidation', '', line
420
- )
421
- line = re.sub(
422
- r'^\d{1,2}\s+[A-Za-z]+\s+\d{4}\s*\d*\s*CAA of NZ', '', line
423
- ) # Made middle number optional
424
- line = re.sub(r'\s{2,}', ' ', line).strip()
425
- if not line: return ""
426
-
427
- # ... (Rest of general cleaning: dots, page, email, time, Exported, date) ...
428
- if re.search(r'\.{4,}\s*\d+\s*$', line): return "" # Ellipsis followed by number (TOC)
429
- if page_pat.fullmatch(line.strip()): return ""
430
- line = re.sub(r'\S+@\S+', '', line)
431
- line = re.sub(r'\b\d{1,2}:\d{2}(?:\s*(?:a\.?m\.?|p\.?m\.?))?', '', line, flags=re.IGNORECASE)
432
- line = re.sub(r'Exported:.*$', '', line)
433
- line = date_pat.sub('', line)
434
-
435
- # --- Inline rule number zapping (careful not to zap appendix numbers like A.1) ---
436
- line = re.sub(r'\b(rule)\s+(\d+\.\d+)', r'\1 \2', line, flags=re.IGNORECASE)
437
- # Only zap digit.digit patterns, not Letter.digit
438
- line = re.sub(
439
- r'(?<!\brule\s)(?<!^[A-Z]\.)\b\d+\.\d+(?:[A-Z]?)\s*(?=\()', # Added negative lookbehind for Letter.
440
- '',
441
- line,
442
- flags=re.IGNORECASE
443
- )
444
- # Zap table references like Table B-1 if needed, can be specific
445
- line = re.sub(r'\bTable\s+[A-Z]-\d+\b', '', line, flags=re.IGNORECASE)
446
-
447
- line = page_pat.sub('', line) # Redundant page check just in case
448
- return re.sub(r'\s{2,}', ' ', line).strip()
449
-
450
- # ═════════════════════════════════════════════════════════════════════════════
451
- # 4. CAPTURE ONLY TOP-LEVEL RULES
452
- # ═════════════════════════════════════════════════════════════════════════════
453
- #rule_pat_1 = re.compile(r'^(?:\d+\.)*\s*(?P<rule>\d+\.\d+)\s*(?P<title>[A-Z].*)$') #works without A,B,C subparts
454
- # In the Regex section
455
-
456
- # REMOVE: appendix_main_pat = re.compile(...)
457
-
458
- # MODIFIED: Regex for Appendix Items (like "A.1 Title" or "C.2.1 (a) Title")
459
- # Now the primary way to find appendix sections.
460
- appendix_item_pat = re.compile(
461
- r'^\s*([A-Z])\.(\d+(?:\.\d+)*)' # Start of line, Letter.Number(s) e.g., "A.1", "C.2.1"
462
- r'(?:\s*\(([^)]+)\))?' # Optional captured parenthetical part e.g., "(a)" or "(1)"
463
- r'\s+' # Space separator REQUIRED before title
464
- r'(?P<title>[A-Za-z0-9].*)$', # Capture title (must start alphanumeric)
465
- re.IGNORECASE
466
- )
467
-
468
- # Keep rule_pat and subpart_pat as they were (assuming rule_pat ignores prefixes correctly now)
469
- rule_pat = re.compile(
470
- r'^(?:(?:\d+\.){2,}\s*)?' # Optional & Non-capturing: OneReg outline prefix
471
- r'(?P<base_rule>\d+\.\d+(?:[A-Z]?))'
472
- r'(?P<parens>(?:\s*\([^)]+\))*?)'
473
- r'\s*'
474
- r'(?P<title>.*)$',
475
- re.IGNORECASE
476
- )
477
-
478
- subpart_pat = re.compile(
479
- r'^\s*'
480
- r'\d+\.\s*' # Still expect number for subpart heading based on earlier examples
481
- r'Subpart\s+'
482
- r'([A-Z]{1,2})\s*'
483
-     r'[—-]\s*'
484
- r'(.+)$',
485
- re.IGNORECASE
486
- )
487
- print(rule_pat)
488
- def natural_key(r):
489
- minor = r.split('.',1)[1]
490
- m = re.match(r"^(\d+)([A-Z]?)$", minor)
491
- if m:
492
- return (int(m.group(1)), m.group(2)) # (5, "") or (5, "A")
493
- return (0, minor)
494
-
495
-
496
- # Inside parse_rules function loop
497
- import re
498
-
499
- def collapse_repeated_first_word(line: str) -> str:
500
- """
501
- Finds the first word, then finds its next occurrence within 5 words,
502
- and if so removes everything between (and including) that second match,
503
- preserving all other whitespace and punctuation.
504
- """
505
- # 1) Match and capture any leading indent + the first word
506
- m = re.match(r'^(\s*)(\w+)\b', line)
507
- if not m:
508
- return line
509
- prefix, first_word = m.groups()
510
-
511
-     # 2) Build a word-boundary pattern for that first word
512
- pat = rf'\b{re.escape(first_word)}\b'
513
-
514
- # 3) Find all occurrences
515
- matches = list(re.finditer(pat, line, flags=re.IGNORECASE))
516
- if len(matches) < 2:
517
- return line
518
-
519
- # 4) Offsets of the first two matches
520
- first_end = matches[0].end()
521
- second_start, second_end = matches[1].span()
522
-
523
- # 5) Count how many actual words lie between
524
- between = line[first_end:second_start]
525
- if len(re.findall(r'\b\w+\b', between)) > 5:
526
- return line
527
-
528
- # 6) Rebuild: prefix + first word + everything AFTER the second occurrence
529
- rest = line[second_end:]
530
- return f"{prefix}{first_word}{rest}"
531
-
532
-
533
- def parse_rules(text: str, source: str) -> dict[str, str]:
534
- rules, current, title = {}, None, ""
535
- tail = ""
536
- current_appendix_letter = None # Track the current main appendix
537
-
538
- for raw in text.splitlines():
539
- original_line_for_debug = raw.strip() # Keep original for debugging
540
- line = clean_line(raw, source)
541
- print(f"DEBUG: Checking cleaned line: '{line}'")
542
- if not line: continue
543
- if source == "onereg":
544
- line = collapse_inner_parens(line)
545
- #line = strip_stray_numbers(line)
546
- #line = collapse_repeated_first_word(line)
547
- #line = remove_repeated_prefix(line)
548
-
549
- # --- Check Order: Appendix Item -> Subpart -> Rule ---
550
-
551
- # 1. Appendix Item (e.g., "A.1 Title" or "C.2.1 (a) Title")
552
- if m_ap_item := appendix_item_pat.match(line):
553
- letter = m_ap_item.group(1).upper()
554
- numbering = m_ap_item.group(2)
555
- paren = m_ap_item.group(3) # Might be None
556
- item_title = m_ap_item.group('title').strip()
557
-
558
- # Construct the key: A.1, A.1(a), C.2.1(1) etc.
559
- key_parts = [letter, numbering]
560
- if paren:
561
- # Clean paren content if needed (e.g., remove internal spaces?)
562
- paren_clean = paren.strip()
563
- key_parts.append(f"({paren_clean})")
564
- key = ".".join(key_parts)
565
-
566
- current = key
567
- title = item_title
568
- tail = key
569
- rules.setdefault(current, []).append(title)
570
- current_appendix_letter = letter # Remember we are inside this appendix
571
- print(f"Matched Appendix Item: {key} => '{title}'")
572
- continue
573
-
574
- # 2. Subpart Heading
575
- elif m_sp := subpart_pat.match(line):
576
- # ... (Subpart logic remains the same) ...
577
- code = m_sp.group(1).upper()
578
- subpart_title = m_sp.group(2).strip()
579
-             heading = f"Subpart {code} — {subpart_title}"
580
- key = f"subpart-{code}"
581
- current = key
582
- title = heading
583
- tail = ""
584
- rules.setdefault(current, []).append(heading)
585
- current_appendix_letter = None # Exited appendix context
586
- print(f"Matched Subpart: {key} => {heading}")
587
- continue
588
-
589
- # 3. Main Rule Heading
590
- elif m_rule := rule_pat.match(line):
591
- # ... (Main rule logic remains mostly the same, ensure key construction is correct) ...
592
- base = m_rule.group('base_rule')
593
- parens_str = m_rule.group('parens') or ""
594
- title_text = m_rule.group('title').strip()
595
- paren_parts = re.findall(r'\(([^)]+)\)', parens_str)
596
- key = base + "".join(f"({p.strip()})" for p in paren_parts) # Construct key like 139.555(e)(1)
597
-
598
- is_likely_heading_only = not title_text or \
599
- re.match(r'^[\[\(]?[a-zA-Z0-9][\)\]\.]', title_text) or \
600
- len(title_text) < 5
601
-
602
- current = key
603
- title = title_text
604
- tail = key
605
-
606
- if not is_likely_heading_only and title_text:
607
- rules.setdefault(current, []).append(title_text)
608
- print(f"Matched Rule + Title: {key} => '{title_text}'")
609
- else:
610
- rules.setdefault(current, [])
611
- print(f"Matched Rule Heading: {key}")
612
- current_appendix_letter = None # Exited appendix context
613
- continue
614
-
615
- # 4. Continuation lines
616
- if current:
617
- # If we are inside an appendix section (identified by A. B. etc.)
618
- # be careful about dropping lines that might look like headings but aren't
619
- is_potentially_new_appendix_item = appendix_item_pat.match(line)
620
-
621
- if is_potentially_new_appendix_item and current_appendix_letter and line.startswith(current_appendix_letter):
622
- # This looks like a new sub-item within the *same* appendix
623
- # but wasn't matched above (maybe title was too short?).
624
- # Treat as continuation for now, might need refinement.
625
- print(f"Potential missed heading treated as continuation: {line}")
626
- pass # Let it be added below
627
-
628
- # Apply cleaning to continuation lines
629
- # line = strip_inline_self_ref(line, tail) # Review if this works well now
630
- line = drop_leading_repeated_title(line, title)
631
- if line:
632
- rules[current].append(line)
633
- else:
634
-
635
- print(f"DEBUG: Unmatched line (no current rule): '{line}'")
636
- return {k: " ".join(v).strip() for k, v in rules.items()}
637
-
638
-
639
-
640
-
641
- # ═════════════════════════════════════════════════════════════════════════════
642
- # 5. STRING DIFF (OneReg deletions / modified insertions)
643
- # ═════════════════════════════════════════════════════════════════════════════
644
- def diff_cols(one: str, caa: str) -> tuple[str, str]:
645
- sm = difflib.SequenceMatcher(None, one, caa)
646
- d_one = d_mod = ""
647
- for tag, i1, i2, j1, j2 in sm.get_opcodes():
648
- if tag == "equal":
649
- seg = one[i1:i2]
650
- d_one += seg
651
- d_mod += seg
652
- elif tag == "delete":
653
- d_one += f"<span style='background:#f8d4d4'>{one[i1:i2]}</span>"
654
- elif tag == "insert":
655
- d_mod += f"<span style='background:#d4f8d4'>{caa[j1:j2]}</span>"
656
- else: # replace
657
- d_one += f"<span style='background:#f8d4d4'>{one[i1:i2]}</span>"
658
- d_mod += f"<span style='background:#d4f8d4'>{caa[j1:j2]}</span>"
659
- return d_one, d_mod
660
- # ═════════════════════════════════════════════════════════════════════════════
661
- # 5. STRING DIFF (Unified Inline View)
662
- # ═════════════════════════════════════════════════════════════════════════════
663
- def diff_unified(one: str, caa: str) -> str:
664
- """
665
- Generates a single HTML string showing differences inline.
666
- Deletions (text in OneReg but not CAA) are shown with red background/strikethrough.
667
- Insertions (text in CAA but not OneReg) are shown with green background.
668
- Uses html.escape to handle special characters in the text.
669
- """
670
- sm = difflib.SequenceMatcher(None, one, caa)
671
- output = []
672
- for tag, i1, i2, j1, j2 in sm.get_opcodes():
673
- one_segment = html.escape(one[i1:i2]) # Escape text segments
674
- caa_segment = html.escape(caa[j1:j2]) # Escape text segments
675
-
676
- if tag == "equal":
677
- output.append(one_segment)
678
- elif tag == "delete":
679
- # Wrap deleted text in <del> tags with specific styling
680
- output.append(f"<del style='background:#fdd; text-decoration: line-through; color: #000;'>{one_segment}</del>")
681
- elif tag == "insert":
682
- # Wrap inserted text in <ins> tags with specific styling
683
- output.append(f"<ins style='background:#dfd; text-decoration: none; color: #000;'>{caa_segment}</ins>")
684
- elif tag == "replace":
685
- # Show deletion followed by insertion for replacements
686
- output.append(f"<del style='background:#fdd; text-decoration: line-through; color: #000;'>{one_segment}</del>")
687
- output.append(f"<ins style='background:#dfd; text-decoration: none; color: #000;'>{caa_segment}</ins>")
688
-
689
- # Join segments and wrap in a span that preserves whitespace and line breaks
690
- # Add color: var(--text) to ensure it adapts to light/dark mode from the body style
691
- return f"<span style='white-space: pre-wrap; color: var(--text);'>{''.join(output)}</span>"
692
-
693
- # Remove or comment out the old natural_sort_key if not used elsewhere,
694
- # or keep if needed for other parts. Let's assume it's not needed now.
695
- # def natural_sort_key(rule_id: str): ... # Keep if used, remove/comment if not
696
- def natural_sort_key(rule_id: str):
697
- # rule_id is e.g. "139.5", "139.5A", "139.10"
698
- minor = rule_id.split('.', 1)[1] # "5", "5A", "10"
699
- m = re.match(r'^(\d+)([A-Z]?)$', minor) # capture digits + optional letter
700
- if m:
701
- return (int(m.group(1)), m.group(2)) # e.g. (5, ""), (5, "A"), (10, "")
702
- # fallback: put anything weird at the end
703
- return (float('inf'), minor)
704
- # ═════════════════════════════════════════════════════════════════════════════
705
- # 6. SORTING KEY (Updated for Rule -> Appendix Order)
706
- # ═════════════════════════════════════════════════════════════════════════════
707
- def combined_sort_key(key: str):
708
- # --- Sort Order Priorities ---
709
- # 1: Subparts (subpart-A < subpart-AA < subpart-B)
710
- # 2: Main Rules (139.1 < 139.5 < 139.5A < 139.10, including subdivisions)
711
- # 3: Appendix Items (A.1 < A.1(a) < A.1(1) < A.2 < B.1)
712
-
713
- # 1. Subparts
714
- if key.startswith("subpart-"):
715
- code = key.split('-', 1)[1]
716
- return (1, len(code), code) # Priority 1
717
-
718
- # 2. Main Rules (e.g., 139.5, 139.5(e)(1)) - Assign Priority 2
719
- elif re.match(r'^\d+\.\d+', key): # Check if it starts like a rule number
720
- try:
721
- match = re.match(r'^(\d+\.\d+(?:[A-Z]?))((?:\([^)]+\))*)$', key)
722
- if match:
723
- base_rule_str = match.group(1) # e.g., "139.555" or "139.5A"
724
- parens_str = match.group(2) or "" # e.g., "(e)(1)" or ""
725
- part_str, minor_base = base_rule_str.split('.', 1)
726
- part_num = int(part_str)
727
- m_minor_base = re.match(r'^(\d+)([A-Z]?)$', minor_base)
728
- if m_minor_base:
729
- minor_num = int(m_minor_base.group(1))
730
- minor_letter = m_minor_base.group(2)
731
- paren_parts_raw = re.findall(r'\(([^)]+)\)', parens_str)
732
- paren_sort_tuple_elements = ()
733
- for p in paren_parts_raw:
734
- p_strip = p.strip()
735
- if p_strip.isdigit():
736
- paren_sort_tuple_elements += (1, int(p_strip)) # Num first within parens
737
- elif len(p_strip) == 1 and p_strip.isalpha():
738
- paren_sort_tuple_elements += (2, ord(p_strip.lower())) # Letter second
739
- else:
740
- paren_sort_tuple_elements += (3, p_strip.lower()) # Others last
741
-
742
- return (2, part_num, minor_num, minor_letter) + paren_sort_tuple_elements # Priority 2
743
- except Exception as e:
744
- print(f"Warning: Sort key error for rule '{key}': {e}")
745
- pass
746
-
747
- # 3. Appendix Items (e.g., A.1, B.2.1(a)) - Assign Priority 3
748
- elif re.match(r'^[A-Z]\.', key):
749
- parts = re.split(r'[.()]', key)
750
- parts = [p for p in parts if p]
751
- sortable_parts = [parts[0]] # Start with the letter (A, B, C...)
752
- for part in parts[1:]:
753
- if part.isdigit():
754
- sortable_parts.append(int(part))
755
- else:
756
- if len(part) == 1 and part.isalpha():
757
- sortable_parts.append(ord(part.lower())) # Use ASCII for single letters
758
- else:
759
- sortable_parts.append(part.lower()) # Lowercase others
760
- # Priority 3, then sort by parts
761
- return (3,) + tuple(sortable_parts)
762
-
763
- # Fallback
764
- return (float('inf'), key) # Put errors/unknowns last
765
- DATE_HDR_RE = re.compile(
766
- r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\.\s+\d{1,2},\s+\d{4}'
767
- r'(?:\s+[ap]\.?m\.?)?', # optional a.m./p.m.
768
- re.I,
769
- )
770
- # --------------------------------------------------------------------
771
- # 1) helpers – put these near the top of the file
772
- # --------------------------------------------------------------------
773
- MONTH_RE = r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\.'
774
- DATE_RE = re.compile(
775
- rf'\b{MONTH_RE}\s+\d{{1,2}},\s+\d{{4}}'
776
-     r'(?:,\s*)?(?:[ap]\.?m\.?)?',          # optional “, a.m.” / “, p.m.”
777
- re.I
778
- )
779
- # Stray leading “Fe” that’s left behind when “Feb.” or “Fe\nb.” is split
780
- FE_CRUMB_RE = re.compile(r'\bFe(?=[a-z])') # Fevery, Feaccompanied …
781
-
782
- def merge_and_clean(raw: str) -> str:
783
- """Collapse newlines, strip date headers, remove Fe* crumbs."""
784
-     # (i) merge → one long line
785
- text = ' '.join(raw.splitlines())
786
-
787
- # (ii) nuke any full-form dates
788
- text = DATE_RE.sub('', text)
789
-
790
-     # (iii) wipe the ‘Fe’ crumbs that remain after bad line-wrap
791
- text = FE_CRUMB_RE.sub('', text)
792
-
793
- # (iv) collapse doubled spaces made by the removals
794
- return re.sub(r'\s{2,}', ' ', text).strip()
795
- def strip_trailing_duplicate_heading(s: str) -> str:
796
- """
797
- If a line starts with a heading (up to the first '(' or end-of-line)
798
- and that identical heading is repeated at the very end, remove the
799
- trailing copy.
800
-
801
- >>> strip_trailing_duplicate_heading(
802
- ... "Changes to certificate holder's organisation (a) … (e) Changes to certificate holder's organisation"
803
- ... )
804
- "Changes to certificate holder's organisation (a) … (e)"
805
- """
806
- # 1) grab the prefix heading (everything before the first '(' or EOL)
807
- head = s.split('(', 1)[0].strip()
808
- if not head:
809
- return s
810
-
811
- # 2) does the string *end* with exactly that heading?
812
- if s.rstrip().endswith(head):
813
- # slice it off and tidy spaces
814
- s = s[: -len(head)].rstrip()
815
-
816
- return s
817
-
818
- # ═════════════════════════════════════════════════════════════════════════════
819
- # 6. MAIN COMPARISON FUNCTION
820
- # ═════════════════════════════════════════════════════════════════════════════
821
- def compare_regulations_app(part, onereg_pdf, caa_pdf):
822
- try:
823
- raw_one = extract_pdf_word(onereg_pdf)
824
- raw_caa = extract_pdf_text(caa_pdf)
825
- for ln in raw_caa.splitlines():
826
- if any(tok in ln for tok in ("139.61", "139.63")):
827
- print("[RAW]", ln[:120])
828
- lines_one = merge_pdf_wrapped_lines(raw_one)
829
- lines_caa = merge_pdf_wrapped_lines(raw_caa)
830
- text_one = "\n".join(lines_one)
831
- text_caa = "\n".join(lines_caa)
832
- one = parse_rules(text_one, "onereg")
833
- # print(one)
834
- caa = parse_rules(text_caa, "caa")
835
-
836
- # print(one)
837
-
838
- # filter & sort as before…
839
- all_ids = set(one) | set(caa)
840
-
841
- # Inside compare_regulations_app function
842
- user_inputs = [] #to collect the inputs from the user
843
-
844
- rules = [
845
- r for r in all_ids
846
- if r.startswith(f"{part}.") # Main rules like 139.5
847
- or r.startswith("subpart-") # Subpart headings like subpart-A
848
- # or r.startswith("appendix-") # REMOVED - No longer generating these keys
849
- or re.match(r'^[A-Z]\.', r) # Appendix items like A.1, B.2(a)
850
- ]
851
- print(rules)
852
-
853
-
854
-
855
- rules.sort(key=combined_sort_key)
856
- print(rules)
857
- #rules.sort(key=natural_key)
858
-
859
- sections = []
860
- df_rows = []
861
- for rule in rules:
862
- o = one.get(rule, "")
863
- if header_pat.match(o) or DATE_HDR_RE.fullmatch(o.strip()):
864
- # print(f"Skipping header line for rule {rule}: {o}") # comment-out if noisy
865
- continue
866
- o = merge_and_clean(o)
867
- o = collapse_repeated_first_word(o) # Apply the new function
868
- o= strip_trailing_duplicate_heading(o) # Remove trailing duplicate headings
869
- print(o)
870
- c = caa.get(rule, "")
871
- #o_html = inject_tables(o)
872
- c_text_tables = inject_tables(c)
873
- unified_diff_html = diff_unified(o, c)
874
-
875
- sections.append(f"""
876
- <div class="rule-section">
877
- <input type="checkbox" id="chk_{rule}" name="rule" value="{rule}">
878
- <label for="chk_{rule}" class="rule-label">{rule}</label>
879
- <div class="rule-content">
880
- <strong>Unified Diff (OneReg <del style='background:#fdd;text-decoration:line-through;'>deletions</del> / CAA <ins style='background:#dfd;text-decoration:none;'>additions</ins>)</strong><br>
881
- {unified_diff_html}
882
- <br><br>
883
- {
884
- f'''<strong>CAA (Cleaned + Tables)</strong><br>
885
- {c_text_tables}'''
886
- }
887
- </div>
888
- </div>
889
- <hr>
890
- """)
891
- df_rows.append([rule, ""])
892
-
893
-
894
- style = """
895
- <style>
896
- /* ─────────── colour tokens ─────────── */
897
- :root{
898
- --bg: #ffffff;
899
- --text: #000000;
900
- --border: #cccccc;
901
-       --rule-label-on:   #ff8a80;   /* light red */
902
- --rule-content-on: #e8f5e9;
903
- }
904
- @media (prefers-color-scheme: dark){
905
- :root{
906
- --bg: #121212;
907
- --text: #e0e0e0;
908
- --border: #444444;
909
-         --rule-label-on:   #ff8a80;  /* dark-mode red */
910
- --rule-content-on:#1b5e20;
911
- }
912
- }
913
-
914
- /* ─────────── global ─────────── */
915
- body{
916
- background: var(--bg);
917
- color: var(--text);
918
- font-family: Arial, Helvetica, sans-serif;
919
- font-size: .9em;
920
- }
921
- span{white-space:pre-wrap}
922
- hr{
923
- border:none;
924
- border-top:1px solid var(--border);
925
- margin:1.2em 0;
926
- }
927
-
928
- /* ─────────── diff-viewer widgets ─────────── */
929
- .rule-section{
930
- padding:.5em;
931
- transition:background .2s;
932
- }
933
- .rule-label{
934
- font-weight:bold;
935
- margin-left:.5em;
936
- padding:.2em .4em;
937
- border-radius:4px;
938
- cursor:pointer;
939
- }
940
- .rule-content{
941
- margin-left:2em;
942
- padding:.5em;
943
- border-radius:4px;
944
- }
945
-
946
- /* checked highlights */
947
- .rule-section input[type=checkbox]:checked + .rule-label{
948
- background:var(--rule-label-on);
949
- }
950
- .rule-section input[type=checkbox]:checked ~ .rule-content{
951
- background:var(--rule-content-on);
952
- }
953
-
954
- /* make links + table borders visible in both modes */
955
- a{color:inherit;text-decoration:underline;}
956
- table{color:inherit;border-color:var(--border);}
957
- th,td{border-color:var(--border);}
958
- </style>
959
- """
960
- html_out=style+"".join(sections)
961
- # Create a DataFrame for the rules
962
- comments_df = pd.DataFrame(df_rows, columns=["Rule", "comment"])
963
-
964
- return html_out,comments_df
965
-
966
- except Exception as e:
967
- return ("<div style='color:red'>Error:<br>"
968
- f"{e}<br><pre>{traceback.format_exc()}</pre></div>")
969
-
970
- def save_comments_to_csv(df: pd.DataFrame):
971
- """
972
- Writes the editable dataframe (rule, comment) to a CSV and
973
- returns a file object that Gradio can offer for download.
974
- """
975
- # keep only rows where the user actually wrote something
976
- df = df[df["comment"].str.strip().astype(bool)]
977
-
978
- if df.empty:
979
-         raise gr.Error("You didn’t write any comments yet!")
980
-
981
- filename = f"rule_comments_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
982
- csv_path = os.path.join(os.getcwd(), filename)
983
- df.to_csv(csv_path, index=False)
984
- return csv_path
985
- # ═════════════════════════════════════════════════════════════════════════════
986
- # 7. GRADIO UI
987
- # ═════════════════════════════════════════════════════════════════════════════
988
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
989
-     gr.Markdown("## CAA ⇄ OneReg — rule-level diff (section format)")
990
- part = gr.Textbox(label="Part Number", value="139")
991
- onereg_pdf = gr.File(label="Upload OneReg PDF")
992
- caa_pdf = gr.File(label="Upload CAA PDF")
993
- btn_compare = gr.Button("Compare")
994
- out_html = gr.HTML()
995
- comment_df = gr.Dataframe(
996
- headers=["rule", "comment"],
997
- datatype=["str", "str"],
998
- interactive=True,
999
-         label="✍️ Type your comments in the **comment** column, then click “Save to CSV”"
1000
- )
1001
-     btn_save = gr.Button("💾 Save to CSV")
1002
- download = gr.File(label="Download your CSV")
1003
- btn_compare.click(
1004
- compare_regulations_app,
1005
- inputs=[part, onereg_pdf, caa_pdf],
1006
- outputs=[out_html, comment_df],
1007
- )
1008
- btn_save.click(
1009
- save_comments_to_csv,
1010
- inputs=[comment_df],
1011
- outputs=[download],
1012
- )
1013
-
1014
-
1015
- if __name__ == "__main__":
1016
- #api_key = os.getenv("GOOGLE_API_KEY")
1017
- #print(api_key)
1018
- current_os = platform.system()
1019
- print(f"Current OS: {current_os}")
1020
- if current_os == "Windows":
1021
- print("Running on Windows")
1022
- server_name = "localhost"
1023
- elif current_os == "Linux":
1024
- server_name="0.0.0.0"
1025
- else:
1026
- server_name = "0.0.0.0"
1027
-
1028
-
1029
-
1030
- demo.launch(
1031
- server_name=server_name,
1032
- server_port=int(os.environ.get("GRADIO_SERVER_PORT", 7860)),
1033
- share=False
1034
- )
1035
-
 
1
+ ###############################################################################
2
+ # CAA ⇄ OneReg | Dual Document Cleaning & Comparison Tool #
3
+ ###############################################################################
4
+ import io
5
+ import os
6
+ import re
7
+ import html
8
+ import json
9
+ import traceback
10
+ import difflib
11
+ import platform
12
+ import pandas as pd
13
+ from datetime import datetime
14
+
15
+ import fitz # PyMuPDF
16
+ from PyPDF2 import PdfReader # plain text extraction
17
+ import gradio as gr # UI
18
+ from dotenv import load_dotenv # optional .env support
19
+
20
+
21
+ # ─────────────────────────────────────────────────────────────────────────────
22
+ # 1. PDF & TEXT PROCESSING
23
+ # ─────────────────────────────────────────────────────────────────────────────
24
+
25
+ def extract_pdf_text(pdf_file) -> str:
26
+ """Extracts text from a PDF file using PyPDF2."""
27
+ reader = PdfReader(pdf_file)
28
+ return "\n".join(p.extract_text() or "" for p in reader.pages)
29
+
30
+
31
+ def extract_pdf_word(pdf_file) -> str:
32
+ """Extracts text from PDF using PyMuPDF (fitz) for better layout preservation."""
33
+ doc = fitz.open(pdf_file)
34
+ text_blocks = [page.get_text("text") for page in doc]
35
+ return "\n".join(filter(None, text_blocks))
36
+
37
+
38
+ def merge_pdf_wrapped_lines(raw_text: str) -> list[str]:
39
+ """Re-join hard-wrapped lines from PDF extraction."""
40
+ merged = []
41
+ for ln in raw_text.splitlines():
42
+ ln_stripped = ln.strip()
43
+ if not ln_stripped: continue
44
+ if merged:
45
+ prev = merged[-1]
46
+ if (re.search(r'[a-z]$', prev) and re.match(r'^[\(a-z]', ln_stripped)) or \
47
+ (re.search(r'\b(?:rule|may|and|or)$', prev, re.I) and re.match(r'^\d+\.\d+', ln_stripped)) or \
48
+ (re.search(r'\brule\s+\d+\.$', prev, re.I) and re.match(r'^\d', ln_stripped)):
49
+ merged[-1] = prev + (' ' if re.search(r'[a-z]$', prev) else '') + ln_stripped
50
+ continue
51
+ merged.append(ln_stripped)
52
+ return merged
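+
+ # Illustrative merges (assumed inputs, not drawn from either PDF):
+ #   ["an applicant under rule", "139.51 must apply"] -> ["an applicant under rule 139.51 must apply"]
+ #   ["see rule 139.", "51(b) for details"]           -> ["see rule 139.51(b) for details"]
+ # The "rule 139." case is glued without a space because the dot is already
+ # present; lines ending in a lower-case word are glued with a single space.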
53
+
54
+
55
+ # ─────────────────────────────────────────────────────────────────────────────
56
+ # 2. RULE PARSING & CLEANING (Initial Automated Pass)
57
+ # ─────────────────────────────────────────────────────────────────────────────
58
+
59
+ # --- Regex for rule structure ---
60
+ rule_pat = re.compile(
61
+ r'^(?:(?:\d+\.){2,}\s*)?(?P<base_rule>\d+\.\d+(?:[A-Z]?))(?P<parens>(?:\s*\([^)]+\))*?)\s*(?P<title>.*)$',
62
+ re.IGNORECASE
63
+ )
64
+ appendix_item_pat = re.compile(
65
+ r'^\s*([A-Z])\.(\d+(?:\.\d+)*)(?:\s*\(([^)]+)\))?\s+(?P<title>[A-Za-z0-9].*)$',
66
+ re.IGNORECASE
67
+ )
68
+ subpart_pat = re.compile(
69
+     r'^\s*\d+\.\s*Subpart\s+([A-Z]{1,2})\s*[—-]\s*(.+)$',
70
+ re.IGNORECASE
71
+ )
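+
+ # Line shapes these patterns target (assumed examples, for reference):
+ #   rule_pat:          "139.5 Certificate required"     -> base rule "139.5"
+ #                      "11.3. 139.111 Application"      -> OneReg outline prefix "11.3." is skipped
+ #   appendix_item_pat: "A.1 Noise abatement procedures" -> key "A.1"
+ #   subpart_pat:       "4. Subpart B - Certification"   -> key "subpart-B"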
72
+
73
+ # --- Regex for cleaning ---
74
+ page_pat = re.compile(r'Page\s+\d+\s*/\s*\d+', re.IGNORECASE)
75
+ date_pat = re.compile(
76
+ r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[a-z.]*\s+\d{1,2},?\s+\d{4}',
77
+ re.IGNORECASE
78
+ )
79
+ header_pat = re.compile(
80
+ r'^(?:Purpose\s+)?(?:[A-Z][a-z]{2}\.)\s+\d{1,2},\s*\d{4},.*$', re.IGNORECASE
81
+ )
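+
+ # These target noise such as "Page 3 / 12", "Feb. 20, 2023", and OneReg export
+ # headers like "Purpose Feb. 20, 2023, a.m." (illustrative samples).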
82
+
83
+
84
+ def clean_line(line: str, source: str) -> str:
85
+ """Performs a basic, automated cleaning pass on a line of text."""
86
+ if source == "onereg":
87
+ line = re.sub(r'\b(?:\d+\.){3,}\s*', '', line) # Zap outline IDs 1.2.3.
88
+ if header_pat.match(line):
89
+ return ""
90
+
91
+ # Generic cleaning for both
92
+ line = page_pat.sub('', line)
93
+ line = date_pat.sub('', line)
94
+ line = re.sub(r'Civil Aviation Rules\s+Part\s+\d+\s+CAA Consolidation', '', line, flags=re.I)
95
+ line = re.sub(r'^\d{1,2}\s+[A-Za-z]+\s+\d{4}\s*\d*\s*CAA of NZ', '', line, flags=re.I)
96
+ line = re.sub(r'\S+@\S+', '', line) # email
97
+ line = re.sub(r'\s{2,}', ' ', line)
98
+ return line.strip()
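+
+ # Example (illustrative): "Civil Aviation Rules Part 139 CAA Consolidation Page 3 / 12"
+ # cleans down to "" and is skipped by the caller; ordinary rule text passes
+ # through with dates, emails and doubled spaces removed.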
99
+
100
+
101
+ def parse_rules(text: str, source: str) -> dict[str, str]:
102
+ """Parses raw text into a dictionary of {rule_id: rule_text}."""
103
+ rules, current, title = {}, None, ""
104
+
105
+ lines = merge_pdf_wrapped_lines(text)
106
+
107
+ for raw_line in lines:
108
+ line = clean_line(raw_line, source)
109
+ if not line: continue
110
+
111
+ m_ap_item = appendix_item_pat.match(line)
112
+ m_sp = subpart_pat.match(line)
113
+ m_rule = rule_pat.match(line)
114
+
115
+ new_key = None
116
+ new_title = ""
117
+
118
+ if m_ap_item:
119
+ key_parts = [m_ap_item.group(1).upper(), m_ap_item.group(2)]
120
+ if m_ap_item.group(3): key_parts.append(f"({m_ap_item.group(3).strip()})")
121
+ new_key = ".".join(key_parts)
122
+ new_title = m_ap_item.group('title').strip()
123
+ elif m_sp:
124
+ new_key = f"subpart-{m_sp.group(1).upper()}"
125
+             new_title = f"Subpart {m_sp.group(1).upper()} — {m_sp.group(2).strip()}"
126
+ elif m_rule:
127
+ base = m_rule.group('base_rule')
128
+ parens_str = m_rule.group('parens') or ""
129
+ new_key = base + "".join(re.findall(r'\([^)]+\)', parens_str))
130
+ new_title = m_rule.group('title').strip()
131
+
132
+ if new_key:
133
+ current = new_key
134
+ title = new_title
135
+ rules.setdefault(current, [])
136
+ if title:
137
+ rules[current].append(title)
138
+ elif current:
139
+ if not title or line.lower() != title.lower():
140
+ rules[current].append(line)
141
+
142
+ return {k: " ".join(v).strip() for k, v in rules.items()}
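+
+ # Resulting shape (illustrative):
+ #   {"subpart-B": "Subpart B ...", "139.5": "Certificate required ...", "A.1": "Noise abatement ..."}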
143
+
144
+
145
+ # ─────────────────────────────────────────────────────────────────────────────
146
+ # 3. COMPARISON & UI LOGIC
147
+ # ─────────────────────────────────────────────────────────────────────────────
148
+
149
+ def diff_unified(one: str, caa: str) -> str:
150
+ """Generates a single HTML string showing differences inline."""
151
+ sm = difflib.SequenceMatcher(None, one, caa, autojunk=False)
152
+ output = []
153
+ for tag, i1, i2, j1, j2 in sm.get_opcodes():
154
+ one_segment = html.escape(one[i1:i2])
155
+ caa_segment = html.escape(caa[j1:j2])
156
+ if tag == "equal":
157
+ output.append(one_segment)
158
+ elif tag == "delete":
159
+ output.append(
160
+ f"<del style='background:#fdd; text-decoration: line-through; color: #000;'>{one_segment}</del>")
161
+ elif tag == "insert":
162
+ output.append(f"<ins style='background:#dfd; text-decoration: none; color: #000;'>{caa_segment}</ins>")
163
+ elif tag == "replace":
164
+ output.append(
165
+ f"<del style='background:#fdd; text-decoration: line-through; color: #000;'>{one_segment}</del>")
166
+ output.append(f"<ins style='background:#dfd; text-decoration: none; color: #000;'>{caa_segment}</ins>")
167
+ return f"<span style='white-space: pre-wrap; color: var(--text);'>{''.join(output)}</span>"
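+
+ # Example (illustrative): diff_unified("rule 139.5 applies", "rule 139.7 applies")
+ # keeps the shared text as-is and emits "5" inside <del> and "7" inside <ins>.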
168
+
169
+
170
+ def combined_sort_key(key: str):
171
+ """Robustly sorts rules, subparts, and appendices."""
172
+ if key.startswith("subpart-"):
173
+ return (1, key)
174
+
175
+ sortable_tuple = ()
176
+ if re.match(r'^\d+\.\d+', key):
177
+ sortable_tuple += (2,)
178
+ elif re.match(r'^[A-Z]\.', key):
179
+ sortable_tuple += (3,)
180
+ else:
181
+ return (4, key)
182
+
183
+ parts = re.split(r'[.()]', key)
184
+ parts = [p for p in parts if p]
185
+
186
+ for part in parts:
187
+ if part.isdigit():
188
+ sortable_tuple += ((1, int(part)),)
189
+ else:
190
+ sortable_tuple += ((2, part.lower()),)
191
+ return sortable_tuple
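+
+ # Example (illustrative):
+ #   sorted(["A.1", "139.10", "subpart-B", "139.5"], key=combined_sort_key)
+ #   -> ["subpart-B", "139.5", "139.10", "A.1"]   (numeric, so 139.5 precedes 139.10)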
192
+
193
+
194
+ def save_clean_and_dirty_versions(dirty_one, dirty_caa, clean_one, clean_caa, filename: str) -> str:
195
+ """Saves both original and cleaned versions to a .jsonl file."""
196
+ all_ids = sorted(
197
+ list(set(dirty_one.keys()) | set(dirty_caa.keys())),
198
+ key=combined_sort_key
199
+ )
200
+ with open(filename, 'w', encoding='utf-8') as f:
201
+ for rule_id in all_ids:
202
+ # OneReg record
203
+ record_one = {
204
+ "rule_id": rule_id,
205
+ "source": "onereg",
206
+ "dirty_text": dirty_one.get(rule_id, ""),
207
+ "clean_text": clean_one.get(rule_id, "")
208
+ }
209
+ f.write(json.dumps(record_one) + '\n')
210
+ # CAA record
211
+ record_caa = {
212
+ "rule_id": rule_id,
213
+ "source": "caa",
214
+ "dirty_text": dirty_caa.get(rule_id, ""),
215
+ "clean_text": clean_caa.get(rule_id, "")
216
+ }
217
+ f.write(json.dumps(record_caa) + '\n')
218
+ return filename
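+
+ # Each .jsonl line pairs the automated extraction with the user's manual pass, e.g.
+ # (illustrative): {"rule_id": "139.5", "source": "caa", "dirty_text": "...", "clean_text": "..."}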
219
+
220
+
221
+ # --- STAGE 1: Process PDFs and prepare for user review ---
222
+ def stage1_process_and_review(part, onereg_pdf, caa_pdf):
223
+ if not (onereg_pdf and caa_pdf):
224
+ raise gr.Error("Please upload both PDF files.")
225
+ try:
226
+ # Process OneReg PDF
227
+ raw_one = extract_pdf_word(onereg_pdf.name)
228
+ one_data = parse_rules(raw_one, "onereg")
229
+
230
+ # Process CAA PDF
231
+ raw_caa = extract_pdf_text(caa_pdf.name)
232
+ caa_data = parse_rules(raw_caa, "caa")
233
+
234
+ # Get all rule IDs and sort them
235
+ all_ids = sorted(
236
+ list(set(one_data.keys()) | set(caa_data.keys())),
237
+ key=combined_sort_key
238
+ )
239
+
240
+ rules_to_review = [
241
+ r for r in all_ids
242
+ if r.startswith(f"{part}.") or r.startswith("subpart-") or re.match(r'^[A-Z]\.', r)
243
+ ]
244
+
245
+ # Prepare DataFrame for user editing with both documents
246
+ review_rows = []
247
+ for rule_id in rules_to_review:
248
+ one_text = one_data.get(rule_id, "[Rule not found in OneReg]")
249
+ caa_text = caa_data.get(rule_id, "[Rule not found in CAA]")
250
+ review_rows.append([rule_id, one_text, caa_text])
251
+
252
+ df = pd.DataFrame(review_rows, columns=["Rule ID", "OneReg Text (Editable)", "CAA Text (Editable)"])
253
+
254
+ return {
255
+ original_one_state: one_data,
256
+ original_caa_state: caa_data,
257
+ review_df: gr.update(value=df, visible=True),
258
+ btn_finalize: gr.update(visible=True),
259
+ }
260
+ except Exception as e:
261
+ traceback.print_exc()
262
+ raise gr.Error(f"Failed during initial processing: {e}")
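+
+ # Note: returning a dict keyed by components is Gradio's per-component update
+ # form; every key must also be listed in the event's `outputs` (wired up below).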
263
+
264
+
265
+ # --- STAGE 2: Take user-cleaned text and perform the final comparison ---
266
+ def stage2_finalize_and_compare(review_df, original_one, original_caa):
267
+ if review_df is None or review_df.empty:
268
+ raise gr.Error("No data to compare. Please process the files first.")
269
+
270
+ # Convert the user-edited DataFrame back into dictionaries
271
+ clean_one_data = pd.Series(review_df['OneReg Text (Editable)'].values, index=review_df['Rule ID']).to_dict()
272
+ clean_caa_data = pd.Series(review_df['CAA Text (Editable)'].values, index=review_df['Rule ID']).to_dict()
273
+
274
+ # Save the training data file
275
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
276
+ jsonl_filename = f"cleaned_rules_{timestamp}.jsonl"
277
+ saved_filepath = save_clean_and_dirty_versions(original_one, original_caa, clean_one_data, clean_caa_data,
278
+ jsonl_filename)
279
+
280
+ # Perform the final comparison
281
+ all_ids = sorted(
282
+ list(set(clean_one_data.keys()) | set(clean_caa_data.keys())),
283
+ key=combined_sort_key
284
+ )
285
+
286
+ sections = []
287
+ for rule_id in all_ids:
288
+ one_clean = clean_one_data.get(rule_id, "")
289
+ caa_clean = clean_caa_data.get(rule_id, "")
290
+
291
+ diff_html = diff_unified(one_clean, caa_clean)
292
+
293
+ sections.append(f"""
294
+ <div class="rule-section">
295
+ <strong class="rule-label">{rule_id}</strong>
296
+ <div class="rule-content">
297
+ {diff_html}
298
+ </div>
299
+ </div>
300
+ <hr>
301
+ """)
302
+
303
+ style = """
304
+ <style>
305
+ body { font-family: sans-serif; color: var(--body-text-color); }
306
+ .rule-label { font-size: 1.1em; background: #f0f0f0; padding: 5px; display: block; border-top-left-radius: 5px; border-top-right-radius: 5px; }
307
+ .rule-content { padding: 10px; border: 1px solid #f0f0f0; border-top: none; margin-bottom: 1em; white-space: pre-wrap; }
308
+ hr { border: none; border-top: 1px solid #ccc; margin: 1.5em 0; }
309
+ </style>
310
+ """
311
+ final_html = style + "".join(sections)
312
+
313
+ return {
314
+ out_html: gr.update(value=final_html, visible=True),
315
+ download_jsonl: gr.update(value=saved_filepath, visible=True)
316
+ }
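+
+ # Flow recap: stage 1 parses both PDFs into {rule_id: text} dicts, the editable
+ # DataFrame is the human cleaning pass, and stage 2 re-diffs the edited text and
+ # saves the dirty/clean pairs as .jsonl training data.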
317
+
318
+
319
+ # ─────────────────────────────────────────────────────────────────────────────
320
+ # 4. GRADIO UI LAYOUT
321
+ # ─────────────────────────────────────────────────────────────────────────────
322
+
323
+ with gr.Blocks(theme=gr.themes.Soft(), title="Dual Rule Cleaning Tool") as demo:
324
+     gr.Markdown("## CAA ⇄ OneReg — Dual Document Cleaning & Comparison Tool")
325
+
326
+ # State to hold the original "dirty" data between steps
327
+ original_one_state = gr.State({})
328
+ original_caa_state = gr.State({})
329
+
330
+ # --- Stage 1: Inputs and Initial Processing ---
331
+ with gr.Row():
332
+ part_num = gr.Textbox(label="Part Number", value="139")
333
+ onereg_pdf = gr.File(label="Upload OneReg PDF")
334
+ caa_pdf = gr.File(label="Upload CAA PDF")
335
+
336
+ btn_process = gr.Button("1. Process PDFs & Prepare for Cleaning", variant="secondary")
337
+
338
+ gr.Markdown("---")
339
+
340
+ # --- Stage 2: User Review and Cleaning ---
341
+ gr.Markdown("### 2. Review and Manually Clean Both Documents")
342
+ gr.Markdown(
343
+ "Edit the text in the table below to remove any headers, footers, or other noise from **both** documents. Once you are finished, click the 'Finalize, Compare & Save' button.")
344
+
345
+ review_df = gr.DataFrame(
346
+ headers=["Rule ID", "OneReg Text (Editable)", "CAA Text (Editable)"],
347
+ datatype=["str", "str", "str"],
348
+ interactive=True,
349
+ visible=False,
350
+ wrap=True,
351
+ row_count=(10, "dynamic")
352
+ )
353
+
354
+ btn_finalize = gr.Button("3. Finalize, Compare & Save", variant="primary", visible=False)
355
+
356
+ gr.Markdown("---")
357
+
358
+ # --- Stage 3: Final Comparison Output & Export ---
359
+ gr.Markdown("### 4. Final Comparison & Export")
360
+ gr.Markdown(
361
+ "Deletions from OneReg are in <del style='background:#fdd;'>red</del> and additions from CAA are in <ins style='background:#dfd;'>green</ins>.")
362
+
363
+ out_html = gr.HTML(visible=False)
364
+ download_jsonl = gr.File(label="Download Cleaned & Dirty Data (.jsonl)", visible=False)
365
+
366
+ # --- Wire up UI events ---
367
+ btn_process.click(
368
+ fn=stage1_process_and_review,
369
+ inputs=[part_num, onereg_pdf, caa_pdf],
370
+ outputs=[original_one_state, original_caa_state, review_df, btn_finalize]
371
+ )
372
+
373
+ btn_finalize.click(
374
+ fn=stage2_finalize_and_compare,
375
+ inputs=[review_df, original_one_state, original_caa_state],
376
+ outputs=[out_html, download_jsonl]
377
+ )
378
+
379
+ if __name__ == "__main__":
380
+ current_os = platform.system()
381
+ server_name = "0.0.0.0" if current_os == "Linux" else "127.0.0.1"
382
+ demo.launch(
383
+ server_name=server_name,
384
+ server_port=int(os.environ.get("GRADIO_SERVER_PORT", 7860)),
385
+ )