Update app.py
app.py
CHANGED
@@ -138,57 +138,92 @@ def computeAllFollows():
 def createParseTable():
     global diction, term_userdef, firsts, follows

-    # Initialize parse table with empty strings
     parse_table = {}
     for non_term in diction:
         parse_table[non_term] = {}
         for term in term_userdef + ['$']:
             parse_table[non_term][term] = ""

-    # Fill parse table
     grammar_is_LL = True

     for non_term in diction:
         for production in diction[non_term]:
             first_set = first(production)

-            # If epsilon is in FIRST set, add FOLLOW set
             if '#' in first_set:
                 first_set.remove('#')
+                follow_set = follows[non_term]
+                first_set.extend(follow_set)

             for terminal in first_set:
+                if terminal in term_userdef + ['$']:
+                    if parse_table[non_term][terminal] == "":
+                        parse_table[non_term][terminal] = production
+                    else:
+                        grammar_is_LL = False

     return parse_table, grammar_is_LL

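The change above makes createParseTable predict epsilon productions on FOLLOW sets and flag cell collisions. As a minimal, self-contained sketch of that table-filling rule (the toy grammar and its hard-coded FIRST/FOLLOW sets below are illustrative, not taken from app.py): a production A -> alpha goes into cell (A, t) for every terminal t in FIRST(alpha), and, when alpha can derive epsilon ('#'), for every t in FOLLOW(A); if two productions claim the same cell, the grammar is not LL(1).

# Illustrative sketch only: toy grammar with hard-coded FIRST/FOLLOW sets,
# not code from app.py. '#' stands for epsilon, as in the app.
grammar = {
    'S': [['a', 'A'], ['b']],
    'A': [['c', 'A'], ['#']],
}
first_of_prod = {('a', 'A'): {'a'}, ('b',): {'b'}, ('c', 'A'): {'c'}, ('#',): {'#'}}
follow = {'S': {'$'}, 'A': {'$'}}
terminals = ['a', 'b', 'c', '$']

table = {nt: {t: "" for t in terminals} for nt in grammar}
is_ll1 = True
for nt, productions in grammar.items():
    for prod in productions:
        select = set(first_of_prod[tuple(prod)])
        if '#' in select:            # epsilon production: predict on FOLLOW(nt)
            select.discard('#')
            select |= follow[nt]
        for t in select:
            if table[nt][t] == "":
                table[nt][t] = prod
            else:
                is_ll1 = False       # two productions claim one cell -> not LL(1)

print(table['S']['a'], table['A']['$'], is_ll1)   # ['a', 'A'] ['#'] True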
-def validateStringUsingStackBuffer(parse_table, input_string):
+def validateStringUsingStackBuffer(parse_table, input_string, start_sym):
+    print("Validating:", input_string) # Debug print
+
+    # Initialize stack and input buffer
+    stack = [start_sym, '$']
+    input_tokens = input_string.split()
+    input_tokens.append('$')
+    buffer = input_tokens
+
+    print("Initial stack:", stack) # Debug print
+    print("Initial buffer:", buffer) # Debug print

+    parsing_steps = []
+    parsing_steps.append(f"Stack: {stack}, Input: {buffer}")
+
+    # Processing loop
     while stack and buffer:
+        print(f"\nCurrent stack: {stack}") # Debug print
+        print(f"Current buffer: {buffer}") # Debug print
+
+        # Get top of stack and current input
         top_stack = stack[0]
         current_input = buffer[0]

+        parsing_steps.append(f"Top of stack: {top_stack}, Current input: {current_input}")
+
+        # Case 1: Match found
         if top_stack == current_input:
             stack.pop(0)
             buffer.pop(0)
+            parsing_steps.append("Matched and consumed")
+            continue
+
+        # Case 2: Non-terminal on top
+        if top_stack in parse_table:
+            if current_input in parse_table[top_stack]:
+                production = parse_table[top_stack][current_input]
+
+                if production:
+                    # Pop the non-terminal
+                    stack.pop(0)
+                    # Push the production in reverse (if it's not epsilon)
+                    if production != ['#']:
+                        stack = production + stack
+                    parsing_steps.append(f"Applied rule: {top_stack} -> {' '.join(production) if production else '#'}")
+                else:
+                    return False, f"No production for {top_stack} with input {current_input}", parsing_steps
+            else:
+                return False, f"Input symbol {current_input} not in parse table for {top_stack}", parsing_steps
         else:
+            # If top of stack is not in parse table and doesn't match input
+            return False, f"Unexpected symbol {top_stack} on stack", parsing_steps
+
+        print(f"Updated stack: {stack}") # Debug print

+    # Check if both stack and buffer are empty (except for $)
+    if len(stack) <= 1 and len(buffer) <= 1:
+        return True, "String accepted", parsing_steps
+    else:
+        return False, "String rejected - incomplete parse", parsing_steps

 # Streamlit UI
 st.title("LL(1) Grammar Analyzer")
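To see what the new stack/buffer routine does end to end, here is a minimal, self-contained sketch of the same predictive-parsing loop; the hand-built table and toy grammar (S -> a A | b, A -> c A | #) are hypothetical and not part of app.py:

# Hypothetical hand-built LL(1) table for S -> a A | b, A -> c A | # ('#' = epsilon);
# illustrates the same stack/buffer simulation used in validateStringUsingStackBuffer.
table = {
    'S': {'a': ['a', 'A'], 'b': ['b']},
    'A': {'c': ['c', 'A'], '$': ['#']},
}

def parse(tokens, start='S'):
    stack = [start, '$']
    buffer = tokens.split() + ['$']
    while stack and buffer:
        top, current = stack[0], buffer[0]
        if top == current:              # terminal on top matches input: consume both
            stack.pop(0)
            buffer.pop(0)
            continue
        prod = table.get(top, {}).get(current)
        if prod is None:                # no table entry: reject
            return False
        stack.pop(0)                    # expand the non-terminal
        if prod != ['#']:               # epsilon pushes nothing
            stack = prod + stack
    return len(stack) <= 1 and len(buffer) <= 1

print(parse("a c c"))   # True  (S => a A => a c A => a c c A => a c c)
print(parse("a b"))     # False (no table entry for A on input 'b')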
@@ -198,15 +233,15 @@ st.header("Grammar Input")
 start_symbol = st.text_input("Enter Start Symbol:", "S")

 with st.expander("Enter Grammar Rules"):
+    num_rules = st.number_input("Number of Rules:", min_value=1, value=4)
     rules = []
     for i in range(num_rules):
         rule = st.text_input(f"Rule {i+1} (format: A -> B c | d)", key=f"rule_{i}")
         if rule:
             rules.append(rule)

-nonterm_input = st.text_input("Enter Non-terminals (comma-separated):", "S,A,B")
-term_input = st.text_input("Enter Terminals (comma-separated):", "a,b,c")
+nonterm_input = st.text_input("Enter Non-terminals (comma-separated):", "S,A,B,C")
+term_input = st.text_input("Enter Terminals (comma-separated):", "a,b,c,d,k,r,O")

 if nonterm_input.strip():
     nonterm_userdef = [x.strip() for x in nonterm_input.split(',') if x.strip()]
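The code that turns these rule strings into the global diction consumed by createParseTable is outside this diff; a hypothetical sketch of that step, producing a mapping from each non-terminal to a list of productions (each production a list of symbols), might look like this:

# Hypothetical helper; app.py's real rule parser is not shown in this diff.
def rules_to_diction(rules):
    diction = {}
    for rule in rules:                                   # e.g. "A -> B c | d"
        lhs, rhs = [part.strip() for part in rule.split('->', 1)]
        diction[lhs] = [alt.split() for alt in rhs.split('|')]
    return diction

print(rules_to_diction(["S -> a A | b", "A -> c A | #"]))
# {'S': [['a', 'A'], ['b']], 'A': [['c', 'A'], ['#']]}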
@@ -255,7 +290,11 @@ if st.button("Analyze Grammar"):
     for non_term in parse_table:
         row = [non_term] # First column is non-terminal
         for term in terminals:
+            production = parse_table[non_term].get(term, "")
+            if production:
+                row.append(' '.join(production))
+            else:
+                row.append("")
         df_data.append(row)

     df = pd.DataFrame(df_data, columns=['Non-Terminal'] + terminals)
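For reference, the sketch below shows the same cell-formatting logic on a small hand-written parse table (hypothetical data, not produced by app.py): productions are stored as lists of symbols, so each non-empty cell is joined with spaces before being placed in the DataFrame.

import pandas as pd

# Hypothetical parse table; an empty string means "no entry", as in app.py.
parse_table = {
    'S': {'a': ['a', 'A'], 'b': ['b'], 'c': "", '$': ""},
    'A': {'a': "", 'b': "", 'c': ['c', 'A'], '$': ['#']},
}
terminals = ['a', 'b', 'c', '$']

df_data = []
for non_term, cells in parse_table.items():
    row = [non_term]
    for term in terminals:
        production = cells.get(term, "")
        row.append(' '.join(production) if production else "")
    df_data.append(row)

df = pd.DataFrame(df_data, columns=['Non-Terminal'] + terminals)
print(df.to_string(index=False))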
@@ -268,10 +307,17 @@ if st.button("Analyze Grammar"):

     # String validation section
     st.subheader("String Validation")
-    input_string = st.text_input("Enter string to validate (space-separated):", "")
+    input_string = st.text_input("Enter string to validate (space-separated):", "a r k O")
     if input_string:
+        is_valid, message, steps = validateStringUsingStackBuffer(parse_table, input_string, start_symbol)
+
+        # Display the result
+        if is_valid:
+            st.success(message)
         else:
+            st.error(message)
+
+        # Display parsing steps
+        st.subheader("Parsing Steps")
+        for i, step in enumerate(steps):
+            st.text(f"Step {i+1}: {step}")