Upload app.py
app.py
ADDED
@@ -0,0 +1,1035 @@
###############################################################################
#  CAA ↔ OneReg rule-level diff viewer (section format)                       #
###############################################################################
import io
import os
import re
import html
import traceback
import difflib
import platform
import pandas as pd
from datetime import datetime

import fitz                      # PyMuPDF (for future OneReg table OCR)
from PyPDF2 import PdfReader     # plain text extraction
import gradio as gr              # UI
from dotenv import load_dotenv   # optional .env support
# from google import genai       # uncomment when OCRing OneReg tables
# from google.genai import types
# ─────────────────────────────────────────────────────────────────────────────
# 0. ENV / API KEY  (Gemini – *not* used yet, but wired for future)
# ─────────────────────────────────────────────────────────────────────────────
#load_dotenv()
#API_KEY = os.getenv("GOOGLE_API_KEY", "")   # intentionally blank – safe


#client = genai.Client(api_key=api_key)

_table_caption = re.compile(r'^\s*Table\s+\d+\.', re.I)



# ─────────────────────────────────────────────────────────────────────────────
# 1. PDF → TEXT
# ─────────────────────────────────────────────────────────────────────────────
def extract_pdf_text(pdf_file) -> str:
    reader = PdfReader(pdf_file)
    out_pf = "\n".join(p.extract_text() or "" for p in reader.pages)
    print(out_pf)
    return out_pf

def extract_pdf_word(pdf_file) -> str:
    """
    Extract text from PDF using PyMuPDF (fitz).
    This is a fallback if PyPDF2 fails to extract text properly.
    """
    doc = fitz.open(pdf_file)
    out_pf = []
    for page in doc:
        text = page.get_text("text")
        if text:
            out_pf.append(text.strip())
    return "\n".join(out_pf)
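# Illustrative use of the two extractors (file names are hypothetical; both
# backends accept a path or file-like object). PyPDF2 and PyMuPDF disagree on
# whitespace and hard line wraps, which is why the wrapped-line mergers below
# exist:
#
#   raw_caa = extract_pdf_text("caa_part139.pdf")      # PyPDF2 route
#   raw_one = extract_pdf_word("onereg_part139.pdf")   # PyMuPDF route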
# `date_piece_pat` is never defined in this file; the pattern below is an
# assumed reconstruction matching the date fragments described in the
# docstring ("Feb.", "20,", "2023,", "a.m.").  Note that this list-based
# version of merge_pdf_wrapped_lines is shadowed by the raw-text version
# defined further down.
date_piece_pat = re.compile(r'^(?:[A-Z][a-z]{2}\.?|\d{1,2},?|\d{4},?|[ap]\.?m\.?)$')

def merge_pdf_wrapped_lines(lines):
    """
    Join isolated 'Feb. 20,' '2023,' 'a.m.' (or 'None') pieces that PyPDF2
    spits out on separate lines. Returns a fresh list with composites merged.
    """
    merged = []
    buffer = []

    def flush_buffer():
        if buffer:
            merged.append(' '.join(buffer))
            buffer.clear()

    for line in lines:
        if date_piece_pat.match(line.strip()):
            buffer.append(line.strip().rstrip(','))   # keep pieces
            # we leave it and wait for the next piece
        else:
            # Finished collecting date fragments → flush if we have a date
            if buffer:
                flush_buffer()
            merged.append(line)
    flush_buffer()   # leftover at EOF
    return merged

# --- NEW helper ------------------------------------------------------------
_ROW_START = re.compile(r"^\s*(?:[1-9]|10)\s+\d+\s*m\b")

def _split_flat_table(line: str) -> list[str] | None:
    """If *line* contains a flattened table, return caption+rows list else None."""
    m = re.match(r"^(Table\s+\d+\.[^.]+?\.)\s+(.*)$", line, re.I)
    if not m:
        return None

    caption, body = m.groups()
    # Insert newline before every category number 1–10.
    body = re.sub(r"\s(?=[1-9]\b|10\b)", "\n", body)

    rows = [r.strip() for r in body.splitlines() if _ROW_START.match(r)]
    if 3 <= len(rows) <= 12:
        return [caption] + rows
    return None
def render_table_html(lines: list[str]) -> str:
    """lines[0] caption, lines[1:] rows with 2+ spaces delim -> <table>."""
    caption = html.escape(lines[0])
    # NB: the first data row doubles as the header row and is emitted again
    # in the <tbody> loop below.
    header_cells = [""] + [html.escape(c) for c in re.split(r"\s{2,}", lines[1].strip())]

    out: list[str] = [
        '<table border="1" cellpadding="4" style="border-collapse:collapse;">',
        f'<caption>{caption}</caption>',
        '<thead><tr>' + "".join(f"<th>{c}</th>" for c in header_cells) + '</tr></thead>',
        '<tbody>'
    ]
    for row in lines[1:]:
        cells = re.split(r"\s{2,}", row.strip())
        out.append('<tr>' + "".join(f"<td>{html.escape(c)}</td>" for c in cells) + '</tr>')
    out.append('</tbody></table>')
    return "\n".join(out)
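# Worked example (schematic, made-up measurements): a CAA table that PyPDF2
# has flattened onto one physical line is split back into caption + rows and
# rendered as HTML:
#
#   flat = ("Table 1. Runway strip widths. "
#           "1  30 m  45  2  60 m  90  3  90 m  150")
#   parts = _split_flat_table(flat)
#   # -> ["Table 1. Runway strip widths.",
#   #     "1  30 m  45", "2  60 m  90", "3  90 m  150"]
#   html_table = render_table_html(parts) if parts else flat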
def inject_tables(text: str) -> str:
    """Return *text* where flattened or multi-line tables are turned into HTML."""
    # (Superseded by the re-definition of inject_tables immediately below,
    # which keeps the same logic but only targets CAA tables.)
    out, buf, in_table = [], [], False
    lines = text.splitlines()
    i = 0
    while i < len(lines):
        ln = lines[i]

        # B) flattened single-line table
        split_result = _split_flat_table(ln)
        if split_result:
            out.append(render_table_html(split_result))
            i += 1
            continue

        # A) normal multi-line table (caption + numeric rows)
        if re.match(r"^\s*Table\s+\d+\.", ln, re.I):
            in_table, buf = True, [ln]
            i += 1
            continue
        if in_table and re.match(r"^\s*\d+(?:\s+\d+)+", ln):
            buf.append(ln)
            i += 1
            continue
        else:
            if in_table:
                out.append(render_table_html(buf))
                in_table, buf = False, []

            out.append(html.escape(ln))
            i += 1

    if in_table:
        out.append(render_table_html(buf))
    return "<br>".join(out)

def inject_tables(text: str) -> str:
    """
    Convert *only* CAA tables into HTML tables.
    OneReg passes through unchanged.
    """
    out, buf, in_table = [], [], False
    lines = text.splitlines()
    i = 0
    while i < len(lines):
        ln = lines[i]

        # ── B) Flattened CAA table in a single physical line ───────────────
        split_result = _split_flat_table(ln)
        if split_result:
            out.append(render_table_html(split_result))
            i += 1
            continue

        # ── A) Normal caption + separate numeric rows table ────────────────
        if re.match(r'^\s*Table\s+\d+\.', ln, re.I):          # caption line
            in_table, buf = True, [ln]
            i += 1
            continue

        if in_table and re.match(r'^\s*\d+(?:\s+\d+)+', ln):  # numeric row
            buf.append(ln)
            i += 1
            continue
        else:
            if in_table:                                      # end of numeric block
                out.append(render_table_html(buf))
                in_table, buf = False, []

            # not part of a table → just escape it
            out.append(html.escape(ln))
            i += 1

    if in_table:                                              # file ended inside a table
        out.append(render_table_html(buf))

    return "<br>".join(out)


def collapse_leading_repeats(line: str) -> str:
    """
    If a OneReg line begins with the same word repeated
    (possibly separated by commas/spaces), collapse them
    into a single occurrence plus one space.
    """
    # grab any leading whitespace plus the first word
    m = re.match(r'^(\s*)(\w+)\b', line)
    if not m:
        return line
    prefix, first_word = m.groups()
    pat = rf'\b{re.escape(first_word)}\b'
    matches = list(re.finditer(pat, line, flags=re.IGNORECASE))
    if len(matches) < 2:
        return line

    # span of the first two occurrences of the word
    first_end = matches[0].end()
    second_start, second_end = matches[1].span()
    between = line[first_end:second_start]
    between_words = re.split(r'[ ,]+', between)   # split by spaces or commas
    if len(between_words) <= 5:   # only collapse when the repeats are far apart
        return line
    rest = line[second_end:]      # everything after the second occurrence
    return f"{prefix}{first_word} {rest.strip() if rest else ''}"   # collapse to single occurrence




def strip_stray_numbers(line: str) -> str:
    """
    Remove inline "55(a)", "12(ii)", etc., UNLESS they are part of a genuine
    rule / section / paragraph citation such as "rule 139.21(b)".
    """

    def _repl(m: re.Match) -> str:
        # text that precedes the match
        prefix = line[:m.start()]

        # keep the match if prefix ends with:
        #   rule 139.21   - full rule citation
        #   rule          - plain "rule"
        #   paragraph     - paragraph / paragraphs
        #   section       - section / sections
        if re.search(
            r'\b(?:rule|rules?|paragraphs?|sections?)'
            r'(?:\s+\d+\.\d+)?\s*$',
            prefix,
            flags=re.IGNORECASE,
        ):
            return m.group(0)   # preserve it

        return ''               # otherwise drop it

    # match digits followed by (…) BUT NOT digits.digits(…)
    pattern = r'\b(?!\d+\.\d+)\d+\s*\([^)]*\)'
    return re.sub(pattern, _repl, line)
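# Illustrative behaviour (this helper is currently only referenced in a
# commented-out call inside parse_rules; sample strings are made up):
#
#   strip_stray_numbers("as set out in 55(a) and 12(ii)")
#   -> "as set out in  and "      # stray refs dropped; spaces collapse later
#   strip_stray_numbers("see paragraph 12(ii)")
#   -> "see paragraph 12(ii)"     # genuine citation preserved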
# NB: this raw-text version shadows the list-based merge_pdf_wrapped_lines
# above; it is the one actually called from compare_regulations_app.
def merge_pdf_wrapped_lines(raw_text: str) -> list[str]:
    """
    Re-join hard-wrapped lines produced by the PDF extractor.

    • join when previous line ends with "rule ….", or any lower-case word,
      and the next line starts with a digit, "(", or lower-case letter
    • keep existing "rule/may + 123.45" glue logic
    """
    merged = []
    for ln in raw_text.splitlines():
        ln_stripped = ln.strip()
        if merged:
            prev = merged[-1]

            # A) break after "rule 139." → glue "21(b)…"
            if re.search(r'\brule\s+\d+\.$', prev, re.I) and re.match(r'^\d', ln_stripped):
                merged[-1] = prev + ln_stripped   # no space; dot already present
                continue

            # B) generic sentence wrap "…and" / "…or"
            if re.search(r'[a-z]$', prev) and re.match(r'^[\(a-z]', ln_stripped):
                merged[-1] = prev + ' ' + ln_stripped
                continue

            if re.search(r'\b(?:and|or)$', prev) and re.match(r'^\d+\.\d+', ln_stripped):
                merged[-1] = prev + ' ' + ln_stripped
                continue

            # C) original "rule|may + 123.45 …" glue
            if re.search(r'\b(?:rule|may)$', prev, re.I) and re.match(r'^\d+\.\d+', ln_stripped):
                merged[-1] = prev + ' ' + ln_stripped
                continue

        merged.append(ln_stripped)
    return merged

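# Illustrative merge of a hard wrap after a rule number (hypothetical
# extractor output):
#
#   merge_pdf_wrapped_lines("a certificate issued under rule 139.\n21(b) remains in force")
#   -> ["a certificate issued under rule 139.21(b) remains in force"]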
"""
|
287 |
+
def merge_pdf_wrapped_lines(raw_text: str) -> list[str]:
|
288 |
+
|
289 |
+
Glue back any PDF-wrapped continuation lines so that mid-sentence
|
290 |
+
breaks (e.g. after 'and', 'or', or before a '(2)') get reattached.
|
291 |
+
|
292 |
+
merged = []
|
293 |
+
for ln in raw_text.splitlines():
|
294 |
+
ln_stripped = ln.strip()
|
295 |
+
if merged:
|
296 |
+
prev = merged[-1]
|
297 |
+
# 1) if prev ends in a lowercase letter (no punctuation),
|
298 |
+
# and this line starts with '(' or lowercase, glue it:
|
299 |
+
if re.search(r'[a-z]$', prev) and re.match(r'^[\(a-z]', ln_stripped):
|
300 |
+
merged[-1] = prev + ' ' + ln_stripped
|
301 |
+
continue
|
302 |
+
|
303 |
+
# 2) keep your old ruleβmerge logic too:
|
304 |
+
if ( re.search(r'\b(?:rule|may)$', prev, re.IGNORECASE )
|
305 |
+
and re.match(r'^\d+\.\d+', ln_stripped) ):
|
306 |
+
merged[-1] = prev + ' ' + ln_stripped
|
307 |
+
continue
|
308 |
+
|
309 |
+
merged.append(ln_stripped)
|
310 |
+
return merged
|
311 |
+
"""
|
312 |
+
# ─────────────────────────────────────────────────────────────────────────────
# 2. Helpers to drop OneReg auto IDs & inline rule numbers
# ─────────────────────────────────────────────────────────────────────────────
def zap_auto_outline_ids(s: str) -> str:
    return re.sub(r'\b(?:\d+\.){3,}\s*', '', s)
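# e.g. (illustrative): zap_auto_outline_ids("11.3.5. Aerodrome emergency plan")
# -> "Aerodrome emergency plan".  Genuine rule numbers such as "139.5" survive,
# because the pattern requires at least three dotted groups.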
def collapse_inner_parens(line: str) -> str:
    """
    For OneReg headings *only*:
      - If the line refers to a rule, section(s), or paragraph(s),
        return it unchanged (preserve all brackets).
      - Otherwise, if there are 2+ (...) groups, remove all but the last.
    """
    # 1) if it's a rule/section/paragraph reference, skip collapsing
    if re.search(r'\b(?:rule|section|sections|paragraph|paragraphs|under|and)\b',
                 line,
                 flags=re.IGNORECASE):
        return line

    # 2) find all paren-groups
    parens = re.findall(r'\([^()]*\)', line)
    if len(parens) <= 1:
        return line

    # 3) remove all but the final (...)
    new_line = line
    for p in parens[:-1]:
        new_line = new_line.replace(p, '')

    return new_line

def zap_inline_rule_numbers(s: str) -> str:
    # only match digit-dot-digit[letter] if NOT preceded by "rule "
    return re.sub(
        r'(?<!\brules\s)\b\d+\.\d+(?:[A-Z]?)\s*(?=\()',
        '',
        s,
        flags=re.IGNORECASE
    )

def strip_inline_self_ref(text_line: str, tail: str) -> str:
    if not tail:
        return text_line
    pattern = rf'\b{re.escape(tail)}(?:\s*\([^)]+\))+'
    prev = None
    while prev != text_line:
        prev = text_line
        text_line = re.sub(pattern, '', text_line).strip()
    return text_line


def drop_leading_repeated_title(line: str, title: str) -> str:
    if not title:
        return line
    pat = rf'^(?:{re.escape(title)}\s*){{2,}}'
    return re.sub(pat, f'{title} ', line, flags=re.IGNORECASE).strip()

# catch "Appendix A – Title"
appendix_pat = re.compile(
    r'^(?:Appendix)\s+([A-Z])\s*[—–-]\s*(?P<title>.+)$',
    re.IGNORECASE
)

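# e.g. (illustrative OneReg artefact):
#   drop_leading_repeated_title("Definitions Definitions In this Part", "Definitions")
#   -> "Definitions In this Part"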
page_pat = re.compile(r'Page\s+\d+\s*/\s*\d+', re.IGNORECASE)
date_pat = re.compile(
    r'(?:Jan\.?|Feb\.?|Mar\.?|Apr\.?|May\.?|Jun\.?|Jul\.?|Aug\.?|'
    r'Sep\.?|Sept\.?|Oct\.?|Nov\.?|Dec\.?|January|February|March|April|May|'
    r'June|July|August|September|October|November|December)'
    r'\s+\d{1,2},\s*\d{4}(?:,\s*(?:a\.?m\.?|p\.?m\.?))?',
    re.IGNORECASE
)

# Used inside the clean_line function
header_pat = re.compile(
    r'^(?:Purpose\s+)?'                                                  # optional "Purpose"
    r'(?:[A-Z][a-z]{2}\.)\s+\d{1,2},\s*\d{4},\s*(?:a\.?m\.?|p\.?m\.?)'   # "Feb. 20, 2023, a.m."
    r'(?:\s*\([a-z]\)\s*[A-Z][a-z]{2}\.\s+\d{1,2},\s*\d{4},\s*(?:a\.?m\.?|p\.?m\.?))*$',  # repeats
    re.IGNORECASE
)
MONTH = r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\.?'
TIME  = r'(?:a\.?m\.?|p\.?m\.?)'
YEAR  = r'\d{4}'

orphan_date_pat = re.compile(
    rf'^(?:{MONTH}|{TIME}|{YEAR}|\d{{1,2}},?)$'
)

def clean_line(line: str, source: str) -> str:
    # --- Apply OneReg prefix removal FIRST if applicable ---
    if source == "onereg":
        line = zap_auto_outline_ids(line)   # Remove 11.3.5. etc. prefixes
        if header_pat.match(line):
            return ""

    if source == "caa":
        line = line.replace('“', '')   # smart quotes (assumed characters)
        line = line.replace('”', '')

    # --- Continue with other cleaning ---
    line = page_pat.sub('', line)
    if orphan_date_pat.match(line):
        return ""

    if source == "caa" and ("Civil Aviation Rules" in line or "CAA of NZ" in line):
        # ... (CAA header cleaning remains the same) ...
        line = re.sub(
            r'Civil Aviation Rules\s+Part\s+\d+\s+CAA Consolidation', '', line
        )
        line = re.sub(
            r'^\d{1,2}\s+[A-Za-z]+\s+\d{4}\s*\d*\s*CAA of NZ', '', line
        )   # Made middle number optional
        line = re.sub(r'\s{2,}', ' ', line).strip()
        if not line: return ""

    # ... (Rest of general cleaning: dots, page, email, time, Exported, date) ...
    if re.search(r'\.{4,}\s*\d+\s*$', line): return ""   # Ellipsis followed by number (TOC)
    if page_pat.fullmatch(line.strip()): return ""
    line = re.sub(r'\S+@\S+', '', line)
    line = re.sub(r'\b\d{1,2}:\d{2}(?:\s*(?:a\.?m\.?|p\.?m\.?))?', '', line, flags=re.IGNORECASE)
    line = re.sub(r'Exported:.*$', '', line)
    line = date_pat.sub('', line)

    # --- Inline rule number zapping (careful not to zap appendix numbers like A.1) ---
    line = re.sub(r'\b(rule)\s+(\d+\.\d+)', r'\1 \2', line, flags=re.IGNORECASE)
    # Only zap digit.digit patterns, not Letter.digit
    line = re.sub(
        r'(?<!\brule\s)(?<!^[A-Z]\.)\b\d+\.\d+(?:[A-Z]?)\s*(?=\()',   # Added negative lookbehind for Letter.
        '',
        line,
        flags=re.IGNORECASE
    )
    # Zap table references like Table B-1 if needed, can be specific
    line = re.sub(r'\bTable\s+[A-Z]-\d+\b', '', line, flags=re.IGNORECASE)

    line = page_pat.sub('', line)   # Redundant page check just in case
    return re.sub(r'\s{2,}', ' ', line).strip()

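# Illustrative passes (made-up page furniture):
#   clean_line("Civil Aviation Rules Part 139 CAA Consolidation 139.5 Certificate required", "caa")
#   -> "139.5 Certificate required"
#   clean_line("Page 12 / 58", "caa")
#   -> ""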
# ─────────────────────────────────────────────────────────────────────────────
# 4. CAPTURE ONLY TOP-LEVEL RULES
# ─────────────────────────────────────────────────────────────────────────────
#rule_pat_1 = re.compile(r'^(?:\d+\.)*\s*(?P<rule>\d+\.\d+)\s*(?P<title>[A-Z].*)$')   # works without A,B,C subparts
# In the Regex section

# REMOVE: appendix_main_pat = re.compile(...)

# MODIFIED: Regex for Appendix Items (like "A.1 Title" or "C.2.1 (a) Title")
# Now the primary way to find appendix sections.
appendix_item_pat = re.compile(
    r'^\s*([A-Z])\.(\d+(?:\.\d+)*)'   # Start of line, Letter.Number(s) e.g., "A.1", "C.2.1"
    r'(?:\s*\(([^)]+)\))?'            # Optional captured parenthetical part e.g., "(a)" or "(1)"
    r'\s+'                            # Space separator REQUIRED before title
    r'(?P<title>[A-Za-z0-9].*)$',     # Capture title (must start alphanumeric)
    re.IGNORECASE
)

# Keep rule_pat and subpart_pat as they were (assuming rule_pat ignores prefixes correctly now)
rule_pat = re.compile(
    r'^(?:(?:\d+\.){2,}\s*)?'              # Optional & Non-capturing: OneReg outline prefix
    r'(?P<base_rule>\d+\.\d+(?:[A-Z]?))'
    r'(?P<parens>(?:\s*\([^)]+\))*?)'
    r'\s*'
    r'(?P<title>.*)$',
    re.IGNORECASE
)

subpart_pat = re.compile(
    r'^\s*'
    r'\d+\.\s*'          # Still expect number for subpart heading based on earlier examples
    r'Subpart\s+'
    r'([A-Z]{1,2})\s*'
    r'[–-]\s*'
    r'(.+)$',
    re.IGNORECASE
)
print(rule_pat)
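# Quick sanity checks (illustrative headings):
#   rule_pat.match("139.5 Certificate required").group('base_rule')   -> "139.5"
#   appendix_item_pat.match("A.1 Aeroplane performance").group(1)     -> "A"
#   subpart_pat.match("3. Subpart B – Certification").groups()        -> ("B", "Certification")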
def natural_key(r):
    minor = r.split('.', 1)[1]
    m = re.match(r"^(\d+)([A-Z]?)$", minor)
    if m:
        return (int(m.group(1)), m.group(2))   # (5, "") or (5, "A")
    return (0, minor)


# Inside parse_rules function loop

def collapse_repeated_first_word(line: str) -> str:
    """
    Finds the first word, then finds its next occurrence within 5 words,
    and if so removes everything between (and including) that second match,
    preserving all other whitespace and punctuation.
    """
    # 1) Match and capture any leading indent + the first word
    m = re.match(r'^(\s*)(\w+)\b', line)
    if not m:
        return line
    prefix, first_word = m.groups()

    # 2) Build a word-boundary pattern for that first word
    pat = rf'\b{re.escape(first_word)}\b'

    # 3) Find all occurrences
    matches = list(re.finditer(pat, line, flags=re.IGNORECASE))
    if len(matches) < 2:
        return line

    # 4) Offsets of the first two matches
    first_end = matches[0].end()
    second_start, second_end = matches[1].span()

    # 5) Count how many actual words lie between
    between = line[first_end:second_start]
    if len(re.findall(r'\b\w+\b', between)) > 5:
        return line

    # 6) Rebuild: prefix + first word + everything AFTER the second occurrence
    rest = line[second_end:]
    return f"{prefix}{first_word}{rest}"

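# e.g. (illustrative duplicated heading word from the OneReg export):
#   collapse_repeated_first_word("Security Security designated aerodromes")
#   -> "Security designated aerodromes"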
def parse_rules(text: str, source: str) -> dict[str, str]:
    rules, current, title = {}, None, ""
    tail = ""
    current_appendix_letter = None   # Track the current main appendix

    for raw in text.splitlines():
        original_line_for_debug = raw.strip()   # Keep original for debugging
        line = clean_line(raw, source)
        print(f"DEBUG: Checking cleaned line: '{line}'")
        if not line: continue
        if source == "onereg":
            line = collapse_inner_parens(line)
            #line = strip_stray_numbers(line)
            #line = collapse_repeated_first_word(line)
            #line = remove_repeated_prefix(line)

        # --- Check Order: Appendix Item -> Subpart -> Rule ---

        # 1. Appendix Item (e.g., "A.1 Title" or "C.2.1 (a) Title")
        if m_ap_item := appendix_item_pat.match(line):
            letter = m_ap_item.group(1).upper()
            numbering = m_ap_item.group(2)
            paren = m_ap_item.group(3)   # Might be None
            item_title = m_ap_item.group('title').strip()

            # Construct the key: A.1, A.1(a), C.2.1(1) etc.
            key_parts = [letter, numbering]
            if paren:
                # Clean paren content if needed (e.g., remove internal spaces?)
                paren_clean = paren.strip()
                key_parts.append(f"({paren_clean})")
            key = ".".join(key_parts)

            current = key
            title = item_title
            tail = key
            rules.setdefault(current, []).append(title)
            current_appendix_letter = letter   # Remember we are inside this appendix
            print(f"Matched Appendix Item: {key} => '{title}'")
            continue

        # 2. Subpart Heading
        elif m_sp := subpart_pat.match(line):
            code = m_sp.group(1).upper()
            subpart_title = m_sp.group(2).strip()
            heading = f"Subpart {code} – {subpart_title}"
            key = f"subpart-{code}"
            current = key
            title = heading
            tail = ""
            rules.setdefault(current, []).append(heading)
            current_appendix_letter = None   # Exited appendix context
            print(f"Matched Subpart: {key} => {heading}")
            continue

        # 3. Main Rule Heading
        elif m_rule := rule_pat.match(line):
            base = m_rule.group('base_rule')
            parens_str = m_rule.group('parens') or ""
            title_text = m_rule.group('title').strip()
            paren_parts = re.findall(r'\(([^)]+)\)', parens_str)
            key = base + "".join(f"({p.strip()})" for p in paren_parts)   # Construct key like 139.555(e)(1)

            is_likely_heading_only = not title_text or \
                                     re.match(r'^[\[\(]?[a-zA-Z0-9][\)\]\.]', title_text) or \
                                     len(title_text) < 5

            current = key
            title = title_text
            tail = key

            if not is_likely_heading_only and title_text:
                rules.setdefault(current, []).append(title_text)
                print(f"Matched Rule + Title: {key} => '{title_text}'")
            else:
                rules.setdefault(current, [])
                print(f"Matched Rule Heading: {key}")
            current_appendix_letter = None   # Exited appendix context
            continue

        # 4. Continuation lines
        if current:
            # If we are inside an appendix section (identified by A. B. etc.)
            # be careful about dropping lines that might look like headings but aren't
            is_potentially_new_appendix_item = appendix_item_pat.match(line)

            if is_potentially_new_appendix_item and current_appendix_letter and line.startswith(current_appendix_letter):
                # This looks like a new sub-item within the *same* appendix
                # but wasn't matched above (maybe title was too short?).
                # Treat as continuation for now, might need refinement.
                print(f"Potential missed heading treated as continuation: {line}")
                pass   # Let it be added below

            # Apply cleaning to continuation lines
            # line = strip_inline_self_ref(line, tail)   # Review if this works well now
            line = drop_leading_repeated_title(line, title)
            if line:
                rules[current].append(line)
        else:
            print(f"DEBUG: Unmatched line (no current rule): '{line}'")
    return {k: " ".join(v).strip() for k, v in rules.items()}

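# Illustrative round trip on a two-line synthetic document:
#   parse_rules("139.5 Certificate required\nEach applicant must apply.", "caa")
#   -> {"139.5": "Certificate required Each applicant must apply."}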
# ─────────────────────────────────────────────────────────────────────────────
# 5. STRING DIFF (OneReg deletions / modified insertions)
# ─────────────────────────────────────────────────────────────────────────────
def diff_cols(one: str, caa: str) -> tuple[str, str]:
    sm = difflib.SequenceMatcher(None, one, caa)
    d_one = d_mod = ""
    for tag, i1, i2, j1, j2 in sm.get_opcodes():
        if tag == "equal":
            seg = one[i1:i2]
            d_one += seg
            d_mod += seg
        elif tag == "delete":
            d_one += f"<span style='background:#f8d4d4'>{one[i1:i2]}</span>"
        elif tag == "insert":
            d_mod += f"<span style='background:#d4f8d4'>{caa[j1:j2]}</span>"
        else:   # replace
            d_one += f"<span style='background:#f8d4d4'>{one[i1:i2]}</span>"
            d_mod += f"<span style='background:#d4f8d4'>{caa[j1:j2]}</span>"
    return d_one, d_mod
# ─────────────────────────────────────────────────────────────────────────────
# 5. STRING DIFF (Unified Inline View)
# ─────────────────────────────────────────────────────────────────────────────
def diff_unified(one: str, caa: str) -> str:
    """
    Generates a single HTML string showing differences inline.
    Deletions (text in OneReg but not CAA) are shown with red background/strikethrough.
    Insertions (text in CAA but not OneReg) are shown with green background.
    Uses html.escape to handle special characters in the text.
    """
    sm = difflib.SequenceMatcher(None, one, caa)
    output = []
    for tag, i1, i2, j1, j2 in sm.get_opcodes():
        one_segment = html.escape(one[i1:i2])   # Escape text segments
        caa_segment = html.escape(caa[j1:j2])   # Escape text segments

        if tag == "equal":
            output.append(one_segment)
        elif tag == "delete":
            # Wrap deleted text in <del> tags with specific styling
            output.append(f"<del style='background:#fdd; text-decoration: line-through; color: #000;'>{one_segment}</del>")
        elif tag == "insert":
            # Wrap inserted text in <ins> tags with specific styling
            output.append(f"<ins style='background:#dfd; text-decoration: none; color: #000;'>{caa_segment}</ins>")
        elif tag == "replace":
            # Show deletion followed by insertion for replacements
            output.append(f"<del style='background:#fdd; text-decoration: line-through; color: #000;'>{one_segment}</del>")
            output.append(f"<ins style='background:#dfd; text-decoration: none; color: #000;'>{caa_segment}</ins>")

    # Join segments and wrap in a span that preserves whitespace and line breaks
    # Add color: var(--text) to ensure it adapts to light/dark mode from the body style
    return f"<span style='white-space: pre-wrap; color: var(--text);'>{''.join(output)}</span>"

# Remove or comment out the old natural_sort_key if not used elsewhere,
# or keep if needed for other parts. Let's assume it's not needed now.
# def natural_sort_key(rule_id: str): ...   # Keep if used, remove/comment if not
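# Illustrative call (sample strings): diff_unified("the holder must notify",
# "the holder shall notify") returns one HTML <span> in which the characters
# unique to the OneReg text are wrapped in <del> and the characters unique to
# the CAA text in <ins>, at whatever granularity SequenceMatcher picks.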
def natural_sort_key(rule_id: str):
    # rule_id is e.g. "139.5", "139.5A", "139.10"
    minor = rule_id.split('.', 1)[1]            # "5", "5A", "10"
    m = re.match(r'^(\d+)([A-Z]?)$', minor)     # capture digits + optional letter
    if m:
        return (int(m.group(1)), m.group(2))    # e.g. (5, ""), (5, "A"), (10, "")
    # fallback: put anything weird at the end
    return (float('inf'), minor)
# ─────────────────────────────────────────────────────────────────────────────
# 6. SORTING KEY (Updated for Rule -> Appendix Order)
# ─────────────────────────────────────────────────────────────────────────────
def combined_sort_key(key: str):
    # --- Sort Order Priorities ---
    # 1: Subparts (subpart-A < subpart-AA < subpart-B)
    # 2: Main Rules (139.1 < 139.5 < 139.5A < 139.10, including subdivisions)
    # 3: Appendix Items (A.1 < A.1(a) < A.1(1) < A.2 < B.1)

    # 1. Subparts
    if key.startswith("subpart-"):
        code = key.split('-', 1)[1]
        return (1, len(code), code)   # Priority 1

    # 2. Main Rules (e.g., 139.5, 139.5(e)(1)) - Assign Priority 2
    elif re.match(r'^\d+\.\d+', key):   # Check if it starts like a rule number
        try:
            match = re.match(r'^(\d+\.\d+(?:[A-Z]?))((?:\([^)]+\))*)$', key)
            if match:
                base_rule_str = match.group(1)      # e.g., "139.555" or "139.5A"
                parens_str = match.group(2) or ""   # e.g., "(e)(1)" or ""
                part_str, minor_base = base_rule_str.split('.', 1)
                part_num = int(part_str)
                m_minor_base = re.match(r'^(\d+)([A-Z]?)$', minor_base)
                if m_minor_base:
                    minor_num = int(m_minor_base.group(1))
                    minor_letter = m_minor_base.group(2)
                    paren_parts_raw = re.findall(r'\(([^)]+)\)', parens_str)
                    paren_sort_tuple_elements = ()
                    for p in paren_parts_raw:
                        p_strip = p.strip()
                        if p_strip.isdigit():
                            paren_sort_tuple_elements += (1, int(p_strip))          # Num first within parens
                        elif len(p_strip) == 1 and p_strip.isalpha():
                            paren_sort_tuple_elements += (2, ord(p_strip.lower()))  # Letter second
                        else:
                            paren_sort_tuple_elements += (3, p_strip.lower())       # Others last

                    return (2, part_num, minor_num, minor_letter) + paren_sort_tuple_elements   # Priority 2
        except Exception as e:
            print(f"Warning: Sort key error for rule '{key}': {e}")
            pass

    # 3. Appendix Items (e.g., A.1, B.2.1(a)) - Assign Priority 3
    elif re.match(r'^[A-Z]\.', key):
        parts = re.split(r'[.()]', key)
        parts = [p for p in parts if p]
        sortable_parts = [parts[0]]   # Start with the letter (A, B, C...)
        for part in parts[1:]:
            if part.isdigit():
                sortable_parts.append(int(part))
            else:
                if len(part) == 1 and part.isalpha():
                    sortable_parts.append(ord(part.lower()))   # Use ASCII for single letters
                else:
                    sortable_parts.append(part.lower())        # Lowercase others
        # Priority 3, then sort by parts
        return (3,) + tuple(sortable_parts)

    # Fallback
    return (float('inf'), key)   # Put errors/unknowns last
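# Expected ordering (illustrative keys):
#   sorted(["A.1", "139.10", "subpart-B", "139.5", "139.5A"], key=combined_sort_key)
#   -> ["subpart-B", "139.5", "139.5A", "139.10", "A.1"]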
DATE_HDR_RE = re.compile(
    r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\.\s+\d{1,2},\s+\d{4}'
    r'(?:\s+[ap]\.?m\.?)?',   # optional a.m./p.m.
    re.I,
)
# --------------------------------------------------------------------
# 1) helpers – put these near the top of the file
# --------------------------------------------------------------------
MONTH_RE = r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\.'
DATE_RE  = re.compile(
    rf'\b{MONTH_RE}\s+\d{{1,2}},\s+\d{{4}}'
    r'(?:,\s*)?(?:[ap]\.?m\.?)?',   # optional ", a.m." / ", p.m."
    re.I
)
# Stray leading "Fe" that's left behind when "Feb." or "Fe\nb." is split
FE_CRUMB_RE = re.compile(r'\bFe(?=[a-z])')   # Fevery, Feaccompanied …
def merge_and_clean(raw: str) -> str:
    """Collapse newlines, strip date headers, remove Fe* crumbs."""
    # (i) merge → one long line
    text = ' '.join(raw.splitlines())

    # (ii) nuke any full-form dates
    text = DATE_RE.sub('', text)

    # (iii) wipe the "Fe" crumbs that remain after bad line-wrap
    text = FE_CRUMB_RE.sub('', text)

    # (iv) collapse doubled spaces made by the removals
    return re.sub(r'\s{2,}', ' ', text).strip()
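# e.g. (illustrative OneReg export noise):
#   merge_and_clean("Feb. 20, 2023, a.m.\nFeaccompanied by the manual")
#   -> "accompanied by the manual"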
def strip_trailing_duplicate_heading(s: str) -> str:
    """
    If a line starts with a heading (up to the first '(' or end-of-line)
    and that identical heading is repeated at the very end, remove the
    trailing copy.

    >>> strip_trailing_duplicate_heading(
    ...     "Changes to certificate holder's organisation (a) … (e) Changes to certificate holder's organisation"
    ... )
    "Changes to certificate holder's organisation (a) … (e)"
    """
    # 1) grab the prefix heading (everything before the first '(' or EOL)
    head = s.split('(', 1)[0].strip()
    if not head:
        return s

    # 2) does the string *end* with exactly that heading?
    if s.rstrip().endswith(head):
        # slice it off and tidy spaces
        s = s[: -len(head)].rstrip()

    return s

# ─────────────────────────────────────────────────────────────────────────────
# 6. MAIN COMPARISON FUNCTION
# ─────────────────────────────────────────────────────────────────────────────
def compare_regulations_app(part, onereg_pdf, caa_pdf):
    try:
        raw_one = extract_pdf_word(onereg_pdf)
        raw_caa = extract_pdf_text(caa_pdf)
        for ln in raw_caa.splitlines():
            if any(tok in ln for tok in ("139.61", "139.63")):
                print("[RAW]", ln[:120])
        lines_one = merge_pdf_wrapped_lines(raw_one)
        lines_caa = merge_pdf_wrapped_lines(raw_caa)
        text_one = "\n".join(lines_one)
        text_caa = "\n".join(lines_caa)
        one = parse_rules(text_one, "onereg")
        # print(one)
        caa = parse_rules(text_caa, "caa")

        # print(one)

        # filter & sort as before…
        all_ids = set(one) | set(caa)

        # Inside compare_regulations_app function
        user_inputs = []   # to collect the inputs from the user

        rules = [
            r for r in all_ids
            if r.startswith(f"{part}.")       # Main rules like 139.5
            or r.startswith("subpart-")       # Subpart headings like subpart-A
            # or r.startswith("appendix-")    # REMOVED - No longer generating these keys
            or re.match(r'^[A-Z]\.', r)       # Appendix items like A.1, B.2(a)
        ]
        print(rules)

        rules.sort(key=combined_sort_key)
        print(rules)
        #rules.sort(key=natural_key)

        sections = []
        df_rows = []
        for rule in rules:
            o = one.get(rule, "")
            if header_pat.match(o) or DATE_HDR_RE.fullmatch(o.strip()):
                # print(f"Skipping header line for rule {rule}: {o}")   # comment-out if noisy
                continue
            o = merge_and_clean(o)
            o = collapse_repeated_first_word(o)        # Apply the new function
            o = strip_trailing_duplicate_heading(o)    # Remove trailing duplicate headings
            print(o)
            c = caa.get(rule, "")
            #o_html = inject_tables(o)
            c_text_tables = inject_tables(c)
            unified_diff_html = diff_unified(o, c)

            sections.append(f"""
            <div class="rule-section">
              <input type="checkbox" id="chk_{rule}" name="rule" value="{rule}">
              <label for="chk_{rule}" class="rule-label">{rule}</label>
              <div class="rule-content">
                <strong>Unified Diff (OneReg <del style='background:#fdd;text-decoration:line-through;'>deletions</del> / CAA <ins style='background:#dfd;text-decoration:none;'>additions</ins>)</strong><br>
                {unified_diff_html}
                <br><br>
                {
                    f'''<strong>CAA (Cleaned + Tables)</strong><br>
                    {c_text_tables}'''
                }
              </div>
            </div>
            <hr>
            """)
            df_rows.append([rule, ""])

        style = """
        <style>
        /* ─────────── colour tokens ─────────── */
        :root{
          --bg:              #ffffff;
          --text:            #000000;
          --border:          #cccccc;
          --rule-label-on:   #ff8a80;   /* light red */
          --rule-content-on: #e8f5e9;
        }
        @media (prefers-color-scheme: dark){
          :root{
            --bg:              #121212;
            --text:            #e0e0e0;
            --border:          #444444;
            --rule-label-on:   #ff8a80;   /* same red in dark mode */
            --rule-content-on: #1b5e20;
          }
        }

        /* ─────────── global ─────────── */
        body{
          background: var(--bg);
          color:      var(--text);
          font-family: Arial, Helvetica, sans-serif;
          font-size:  .9em;
        }
        span{white-space:pre-wrap}
        hr{
          border:none;
          border-top:1px solid var(--border);
          margin:1.2em 0;
        }

        /* ─────────── diff-viewer widgets ─────────── */
        .rule-section{
          padding:.5em;
          transition:background .2s;
        }
        .rule-label{
          font-weight:bold;
          margin-left:.5em;
          padding:.2em .4em;
          border-radius:4px;
          cursor:pointer;
        }
        .rule-content{
          margin-left:2em;
          padding:.5em;
          border-radius:4px;
        }

        /* checked highlights */
        .rule-section input[type=checkbox]:checked + .rule-label{
          background:var(--rule-label-on);
        }
        .rule-section input[type=checkbox]:checked ~ .rule-content{
          background:var(--rule-content-on);
        }

        /* make links + table borders visible in both modes */
        a{color:inherit;text-decoration:underline;}
        table{color:inherit;border-color:var(--border);}
        th,td{border-color:var(--border);}
        </style>
        """
        html_out = style + "".join(sections)
        # Create a DataFrame for the rules
        comments_df = pd.DataFrame(df_rows, columns=["Rule", "comment"])

        return html_out, comments_df

    except Exception as e:
        # Return a value for *both* Gradio outputs (HTML + dataframe);
        # a single return value here would make the click handler fail.
        error_html = ("<div style='color:red'>Error:<br>"
                      f"{e}<br><pre>{traceback.format_exc()}</pre></div>")
        return error_html, pd.DataFrame(columns=["Rule", "comment"])

def save_comments_to_csv(df: pd.DataFrame):
    """
    Writes the editable dataframe (rule, comment) to a CSV and
    returns a file path that Gradio can offer for download.
    """
    # keep only rows where the user actually wrote something
    df = df[df["comment"].str.strip().astype(bool)]

    if df.empty:
        raise gr.Error("You didn't write any comments yet!")

    filename = f"rule_comments_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
    csv_path = os.path.join(os.getcwd(), filename)
    df.to_csv(csv_path, index=False)
    return csv_path
# ─────────────────────────────────────────────────────────────────────────────
# 7. GRADIO UI
# ─────────────────────────────────────────────────────────────────────────────
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## CAA ↔ OneReg – rule-level diff (section format)")
    part        = gr.Textbox(label="Part Number", value="139")
    onereg_pdf  = gr.File(label="Upload OneReg PDF")
    caa_pdf     = gr.File(label="Upload CAA PDF")
    btn_compare = gr.Button("Compare")
    out_html    = gr.HTML()
    comment_df  = gr.Dataframe(
        headers=["Rule", "comment"],   # match the columns produced by compare_regulations_app
        datatype=["str", "str"],
        interactive=True,
        label="✍️ Type your comments in the **comment** column, then click 'Save to CSV'"
    )
    btn_save = gr.Button("💾 Save to CSV")
    download = gr.File(label="Download your CSV")
    btn_compare.click(
        compare_regulations_app,
        inputs=[part, onereg_pdf, caa_pdf],
        outputs=[out_html, comment_df],
    )
    btn_save.click(
        save_comments_to_csv,
        inputs=[comment_df],
        outputs=[download],
    )

if __name__ == "__main__":
|
1016 |
+
#api_key = os.getenv("GOOGLE_API_KEY")
|
1017 |
+
#print(api_key)
|
1018 |
+
current_os = platform.system()
|
1019 |
+
print(f"Current OS: {current_os}")
|
1020 |
+
if current_os == "Windows":
|
1021 |
+
print("Running on Windows")
|
1022 |
+
server_name = "localhost"
|
1023 |
+
elif current_os == "Linux":
|
1024 |
+
server_name="0.0.0.0"
|
1025 |
+
else:
|
1026 |
+
server_name = "0.0.0.0"
|
1027 |
+
|
1028 |
+
|
1029 |
+
|
1030 |
+
demo.launch(
|
1031 |
+
server_name=server_name,
|
1032 |
+
server_port=int(os.environ.get("GRADIO_SERVER_PORT", 7860)),
|
1033 |
+
share=False
|
1034 |
+
)
|
1035 |
+
|