Update app.py
Changed the rule expression logic
app.py
CHANGED
@@ -35,18 +35,22 @@ def extract_pdf_word(pdf_file) -> str:
     return "\n".join(filter(None, text_blocks))
 
 
+# MODIFIED FUNCTION: Improved logic for re-joining wrapped lines
 def merge_pdf_wrapped_lines(raw_text: str) -> list[str]:
-    """Re-join hard-wrapped lines from PDF extraction."""
+    """Re-join hard-wrapped lines from PDF extraction based on grammatical context."""
     merged = []
     for ln in raw_text.splitlines():
         ln_stripped = ln.strip()
         if not ln_stripped:
+            continue
+
         if merged:
             prev = merged[-1]
-            if
+            # YOUR INSIGHT: Merge if previous line ends with '–' or lacks closing punctuation,
+            # and the next line appears to be a continuation (starts with lowercase or parenthesis).
+            if prev.endswith('–') or \
+               (not re.search(r'[.:;)]\s*$', prev) and re.match(r'^[a-z\(]', ln_stripped)):
+                merged[-1] = prev + ' ' + ln_stripped
                 continue
         merged.append(ln_stripped)
     return merged
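
A quick sanity check of the new heuristic, assuming the merge_pdf_wrapped_lines defined above is in scope (the sample text is invented, not taken from either PDF):

sample = "108.51 Security programme –\nrequirements for\n(a) each operator"
print(merge_pdf_wrapped_lines(sample))
# ['108.51 Security programme – requirements for (a) each operator']
# The first join fires on the trailing '–'; the second fires because
# "requirements for" lacks closing punctuation and the next line starts with '('.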
@@ -69,6 +73,9 @@ subpart_pat = re.compile(
     r'^\s*\d+\.\s*Subpart\s+([A-Z]{1,2})\s*[–-]\s*(.+)$',
     re.IGNORECASE
 )
+# NEW: Regex to specifically identify sub-rule paragraphs like (a), (1), (i)
+sub_rule_pat = re.compile(r'^\s*(\((?:[a-z]{1,2}|[ivx]+|\d+)\))\s*(.*)', re.IGNORECASE)
+
 
 # --- Regex for cleaning ---
 page_pat = re.compile(r'Page\s+\d+\s*/\s*\d+', re.IGNORECASE)
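
A few spot checks on the new sub_rule_pat, with the pattern above in scope (probe strings invented):

for probe in ["(a) Each operator must –", "(1) the holder of", "(iv) any other person", "no leading paren"]:
    m = sub_rule_pat.match(probe)
    print(m.groups() if m else None)
# ('(a)', 'Each operator must –')
# ('(1)', 'the holder of')
# ('(iv)', 'any other person')
# None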
@@ -97,49 +104,88 @@ def clean_line(line: str, source: str) -> str:
     line = re.sub(r'\s{2,}', ' ', line)
     return line.strip()
 
+# NEW HELPER: Determines nesting level of a sub-rule, e.g., (1) is level 1, (a) is 2, (i) is 3
+def get_rule_level(paren_str):
+    content = paren_str.strip('()').lower()
+    if content.isdigit(): return 1
+    if all(c in 'ivxl' for c in content): return 3  # roman numerals
+    if content.isalpha(): return 2  # alphabetical
+    return 4  # Unknown level, treat as deeply nested
 
+# REWRITTEN FUNCTION: Stateful, hierarchical parser to correctly handle sub-rules.
 def parse_rules(text: str, source: str) -> dict[str, str]:
-    """
+    """
+    Parses raw text into a dictionary of {rule_id: rule_text}.
+    This version is stateful and context-aware to handle hierarchies correctly.
+    """
+    rules = {}
+    parent_parts = []  # Tracks the current rule hierarchy, e.g., ['108.51', '(3)']
+    lines_buffer = []
+
+    def commit_buffer():
+        """Saves the buffered lines to the current rule ID."""
+        if parent_parts and lines_buffer:
+            rule_id = "".join(parent_parts)
+            # Append to existing text if rule already has a title, otherwise create it
+            existing_text = rules.get(rule_id, "")
+            new_text = " ".join(lines_buffer)
+            rules[rule_id] = (existing_text + " " + new_text).strip()
+            lines_buffer.clear()
 
     lines = merge_pdf_wrapped_lines(text)
 
-    for
-    if not
-        parens_str = m_rule.group('parens') or ""
-        new_key = base + "".join(re.findall(r'\([^)]+\)', parens_str))
-        new_title = m_rule.group('title').strip()
-        if new_key:
-            current = new_key
-            title = new_title
-            rules.setdefault(current, [])
-            if title:
-                rules[current].append(title)
-        elif current:
-            if not title or line.lower() != title.lower():
-                rules[current].append(line)
+    for line in lines:
+        cleaned = clean_line(line, source)
+        if not cleaned: continue
+
+        m_main = rule_pat.match(cleaned)
+        m_sub = sub_rule_pat.match(cleaned)
+        m_sp = subpart_pat.match(cleaned)
+
+        if m_sp:
+            commit_buffer()
+            parent_parts = [f"subpart-{m_sp.group(1).upper()}"]
+            rules["".join(parent_parts)] = f"Subpart {m_sp.group(1).upper()} – {m_sp.group(2).strip()}"
+
+        elif m_main:
+            new_base_id = m_main.group('base_rule')
+            current_base_id = parent_parts[0] if parent_parts and not parent_parts[0].startswith("subpart") else None
+
+            # YOUR INSIGHT: A rule never refers to itself. If it's the same base ID, it's content.
+            if new_base_id == current_base_id:
+                lines_buffer.append(cleaned)
+                continue
 
+            commit_buffer()
+            parent_parts = [new_base_id]
+            title = m_main.group('title').strip()
+            if title:
+                rules["".join(parent_parts)] = title
+
+        elif m_sub and parent_parts:
+            commit_buffer()
+            paren_part = m_sub.group(1)
+            text_part = m_sub.group(2).strip()
+            new_level = get_rule_level(paren_part)
+
+            # Adjust hierarchy: pop parent parts until we are at the correct level
+            while len(parent_parts) > 1:
+                last_part = parent_parts[-1]
+                last_level = get_rule_level(last_part)
+                if last_level >= new_level:
+                    parent_parts.pop()
+                else:
+                    break
+
+            parent_parts.append(paren_part)
+            if text_part:
+                lines_buffer.append(text_part)
+
+        else:  # It's continuation text
+            lines_buffer.append(cleaned)
+
+    commit_buffer()
+    return {k: v for k, v in rules.items() if v}
 
 
 # ─────────────────────────────────────────────────────────────────────────────
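
Spot checks for the new get_rule_level helper (inputs invented):

print(get_rule_level('(3)'))   # 1: digits are the outermost sub-rule level in this scheme
print(get_rule_level('(b)'))   # 2: alphabetical paragraphs
print(get_rule_level('(iv)'))  # 3: roman numerals
print(get_rule_level('(q7)'))  # 4: unrecognised, treated as deepest

One ambiguity worth knowing about: a bare '(i)', '(v)' or '(x)' always classifies as a roman numeral (level 3), never as the alphabetical item i, v or x. The popping step then behaves like this: with parent_parts = ['108.51', '(a)', '(iv)'] and an incoming '(b)' (level 2), '(iv)' (level 3) and '(a)' (level 2) are both popped, so the new key becomes '108.51(b)'.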
@@ -180,7 +226,7 @@ def combined_sort_key(key: str):
     else:
         return (4, key)
 
-    parts = re.split(r'[()]', key)
+    parts = re.split(r'([()])', key)  # Split but keep delimiters
     parts = [p for p in parts if p]
 
     for part in parts:
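
The capturing group is what keeps the parentheses as their own tokens (sample key invented):

parts = [p for p in re.split(r'([()])', '108.53(a)(1)') if p]
print(parts)  # ['108.53', '(', 'a', ')', '(', '1', ')']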
@@ -219,16 +265,23 @@ def save_clean_and_dirty_versions(dirty_one, dirty_caa, clean_one, clean_caa, fi
 
 
 # --- STAGE 1: Process PDFs and prepare for user review ---
+# MODIFIED FUNCTION: Skips ToC pages before parsing.
 def stage1_process_and_review(part, onereg_pdf, caa_pdf):
     if not (onereg_pdf and caa_pdf):
         raise gr.Error("Please upload both PDF files.")
     try:
-        # Process OneReg PDF
+        # --- Process OneReg PDF (skipping ToC) ---
+        onereg_doc = fitz.open(onereg_pdf.name)
+        # OneReg ToC for Part 108 is pages 2-4 (index 1-3). Content starts on page 5 (index 4).
+        onereg_text_blocks = [page.get_text("text") for i, page in enumerate(onereg_doc) if i >= 4]
+        raw_one = "\n".join(filter(None, onereg_text_blocks))
         one_data = parse_rules(raw_one, "onereg")
 
-        # Process CAA PDF
+        # --- Process CAA PDF (skipping ToC) ---
+        caa_doc = PdfReader(caa_pdf.name)
+        # CAA 'List of Rules' for Part 108 is page 4 (index 3). Content starts on page 5 (index 4).
+        caa_text_blocks = [p.extract_text() or "" for i, p in enumerate(caa_doc.pages) if i >= 4]
+        raw_caa = "\n".join(caa_text_blocks)
         caa_data = parse_rules(raw_caa, "caa")
 
         # Get all rule IDs and sort them
@@ -237,6 +290,7 @@ def stage1_process_and_review(part, onereg_pdf, caa_pdf):
             key=combined_sort_key
         )
 
+        # Filter for the relevant part, but always include subparts and appendices
        rules_to_review = [
            r for r in all_ids
            if r.startswith(f"{part}.") or r.startswith("subpart-") or re.match(r'^[A-Z]\.', r)
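
One caveat: both skip counts hard-code i >= 4, which ties stage1_process_and_review to the Part 108 documents' front-matter length. If other parts have longer or shorter ToCs, a start-index parameter is one way to generalise; a minimal sketch (this helper and its name are not part of the commit):

def join_from_page(page_texts, start_index=4):
    """Join extracted page texts from start_index onward, skipping front matter."""
    return "\n".join(t for t in page_texts[start_index:] if t)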
@@ -286,7 +340,7 @@ def stage2_finalize_and_compare(review_df, original_one, original_caa):
     sections = []
     for rule_id in all_ids:
         one_clean = clean_one_data.get(rule_id, "")
-        caa_clean =
+        caa_clean = caa_data.get(rule_id, "")
 
         diff_html = diff_unified(one_clean, caa_clean)
 
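
diff_unified itself is not part of this hunk; as a point of reference, a minimal stand-in built on difflib (an assumption about its behaviour, not the app's actual implementation) could look like:

import difflib, html

def diff_unified_sketch(a: str, b: str) -> str:
    """Render a unified diff of two rule texts as escaped HTML."""
    lines = difflib.unified_diff(a.splitlines(), b.splitlines(),
                                 fromfile="onereg", tofile="caa", lineterm="")
    return "<pre>" + "\n".join(html.escape(l) for l in lines) + "</pre>"

Also note the two lookups above draw from different dicts (clean_one_data vs caa_data); if stage2 is meant to compare cleaned text on both sides, that asymmetry may be worth a second look.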
@@ -329,7 +383,7 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Dual Rule Cleaning Tool") as demo:
 
     # --- Stage 1: Inputs and Initial Processing ---
     with gr.Row():
-        part_num = gr.Textbox(label="Part Number", value="
+        part_num = gr.Textbox(label="Part Number", value="108")
        onereg_pdf = gr.File(label="Upload OneReg PDF")
        caa_pdf = gr.File(label="Upload CAA PDF")
 