Neha13 commited on
Commit
6c72ad5
·
verified ·
1 Parent(s): be74f72

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +152 -113
app.py CHANGED
@@ -1,4 +1,5 @@
1
  import streamlit as st
 
2
 
3
  rules = []
4
  nonterm_userdef = []
@@ -65,38 +66,36 @@ def LeftFactoring(rulesDiction):
65
  return newDict
66
 
67
  def first(rule):
68
- if len(rule) != 0 and rule[0] in term_userdef:
69
- return rule[0]
70
- elif len(rule) != 0 and rule[0] == '#':
71
- return '#'
72
- if len(rule) != 0 and rule[0] in diction:
 
 
 
 
73
  fres = []
74
  rhs_rules = diction[rule[0]]
75
  for itr in rhs_rules:
76
  indivRes = first(itr)
77
- if type(indivRes) is list:
78
  fres.extend(indivRes)
79
- else:
80
- fres.append(indivRes)
81
- if '#' not in fres:
82
- return fres
83
- else:
84
- newList = fres
85
  fres.remove('#')
86
- if len(rule) > 1:
87
- ansNew = first(rule[1:])
88
- if ansNew is not None:
89
- if type(ansNew) is list:
90
- newList = fres + ansNew
91
- else:
92
- newList = fres + [ansNew]
93
- fres.append('#')
94
- return fres
95
 
96
  def follow(nt):
 
97
  solset = set()
98
  if nt == start_symbol:
99
  solset.add('$')
 
100
  for curNT in diction:
101
  rhs = diction[curNT]
102
  for subrule in rhs:
@@ -104,135 +103,175 @@ def follow(nt):
104
  while nt in subrule:
105
  index_nt = subrule.index(nt)
106
  subrule = subrule[index_nt + 1:]
107
- if len(subrule) != 0:
108
  res = first(subrule)
109
- if res is not None:
110
- if '#' in res:
111
- res.remove('#')
112
  follow_res = follow(curNT)
113
  if follow_res:
114
- res += follow_res
115
- else:
116
- res = []
117
  else:
118
  if nt != curNT:
119
- res = follow(curNT)
120
- if res is None:
121
- res = []
122
- solset.update(res if type(res) is list else [res])
123
  return list(solset)
124
 
125
  def computeAllFirsts():
126
- global firsts
127
- for y in diction.keys():
 
128
  firsts[y] = set()
129
  for sub in diction[y]:
130
  result = first(sub)
131
- if result is not None:
132
- firsts[y].update(result if type(result) is list else [result])
133
 
134
  def computeAllFollows():
135
- global follows
136
- for NT in diction.keys():
 
137
  follows[NT] = set(follow(NT))
138
 
139
  def createParseTable():
140
- global term_userdef, firsts, follows
141
- table = {}
142
- grammar_is_LL = True
143
- for lhs in diction.keys():
144
- table[lhs] = {}
 
145
  for term in term_userdef + ['$']:
146
- table[lhs][term] = ""
147
- for lhs in diction:
148
- for rule in diction[lhs]:
149
- first_res = first(rule)
150
- if '#' in first_res:
151
- first_res.remove('#')
152
- follow_res = follows[lhs]
153
- first_res.update(follow_res)
154
- for term in first_res:
155
- if table[lhs][term] == "":
156
- table[lhs][term] = f"{lhs} -> {' '.join(rule)}"
 
 
 
 
 
 
157
  else:
158
  grammar_is_LL = False
159
- return table, grammar_is_LL
 
160
 
161
  def validateStringUsingStackBuffer(parse_table, input_string):
162
  stack = [start_symbol, '$']
163
- buffer = ['$'] + input_string.split()[::-1]
164
- while stack:
165
- top_stack = stack.pop(0)
166
- top_buffer = buffer.pop()
167
- if top_stack in term_userdef:
168
- if top_stack != top_buffer:
169
- return "Invalid String"
170
- elif top_stack == top_buffer:
171
- continue
 
 
 
 
 
 
 
172
  else:
173
- rule = parse_table.get(top_stack, {}).get(top_buffer, None)
174
- if rule:
175
- rule_rhs = rule.split('->')[1].strip().split()
176
- stack = rule_rhs + stack
177
- else:
178
- return "Invalid String"
179
- return "Valid String"
180
-
181
 
182
- # Main Streamlit App
183
  st.title("LL(1) Grammar Analyzer")
184
 
185
- st.subheader("Grammar Rules Input")
186
- start_symbol = st.text_input("Enter Start Symbol (Non-terminal)")
187
- num_rules = st.number_input("Number of Grammar Rules", min_value=1, step=1)
188
- rules = []
189
- for i in range(num_rules):
190
- rule = st.text_input(f"Rule {i+1}")
191
- if rule:
192
- rules.append(rule)
 
 
 
193
 
194
- nonterm_userdef = st.text_input("Enter Non-Terminals (comma-separated)").split(',')
195
- term_userdef = st.text_input("Enter Terminals (comma-separated)").split(',')
196
 
 
 
 
 
197
 
198
  if st.button("Analyze Grammar"):
 
199
  diction.clear()
200
- firsts.clear()
201
- follows.clear()
202
-
203
  for rule in rules:
204
- lhs, rhs = rule.split("->")
205
- lhs = lhs.strip()
206
- rhs_list = [x.strip().split() for x in rhs.split("|")]
207
- diction[lhs] = rhs_list
208
-
209
- st.subheader("Grammar After Removing Left Recursion")
210
- diction = removeLeftRecursion(diction)
211
- st.write(diction)
212
-
213
- st.subheader("Grammar After Left Factoring")
214
- diction = LeftFactoring(diction)
215
- st.write(diction)
216
-
 
 
 
 
 
217
  computeAllFirsts()
218
- st.subheader("FIRST Sets")
219
- st.write(firsts)
220
-
221
  computeAllFollows()
222
- st.subheader("FOLLOW Sets")
223
- st.write(follows)
224
-
 
 
 
225
  parse_table, grammar_is_LL = createParseTable()
 
226
  st.subheader("Parse Table")
227
- st.write(parse_table)
228
-
 
 
 
 
 
 
 
 
 
 
 
229
  if grammar_is_LL:
230
- st.success("The grammar is LL(1)")
231
  else:
232
- st.error("The grammar is not LL(1)")
233
-
234
- input_string = st.text_input("Enter String to Validate (space-separated)")
 
 
235
  if input_string:
236
  result = validateStringUsingStackBuffer(parse_table, input_string)
237
- st.subheader("Validation Result")
238
- st.write(result)
 
 
 
1
  import streamlit as st
2
+ import pandas as pd
3
 
4
  rules = []
5
  nonterm_userdef = []
 
66
  return newDict
67
 
68
  def first(rule):
69
+ global term_userdef, diction
70
+ if not rule:
71
+ return ['#']
72
+ if rule[0] in term_userdef:
73
+ return [rule[0]]
74
+ elif rule[0] == '#':
75
+ return ['#']
76
+
77
+ if rule[0] in diction:
78
  fres = []
79
  rhs_rules = diction[rule[0]]
80
  for itr in rhs_rules:
81
  indivRes = first(itr)
82
+ if indivRes:
83
  fres.extend(indivRes)
84
+
85
+ if '#' in fres and len(rule) > 1:
 
 
 
 
86
  fres.remove('#')
87
+ ansNew = first(rule[1:])
88
+ if ansNew:
89
+ fres.extend(ansNew)
90
+ return list(set(fres))
91
+ return []
 
 
 
 
92
 
93
  def follow(nt):
94
+ global start_symbol, diction
95
  solset = set()
96
  if nt == start_symbol:
97
  solset.add('$')
98
+
99
  for curNT in diction:
100
  rhs = diction[curNT]
101
  for subrule in rhs:
 
103
  while nt in subrule:
104
  index_nt = subrule.index(nt)
105
  subrule = subrule[index_nt + 1:]
106
+ if subrule:
107
  res = first(subrule)
108
+ if '#' in res:
109
+ res.remove('#')
110
+ if nt != curNT:
111
  follow_res = follow(curNT)
112
  if follow_res:
113
+ res.extend(follow_res)
114
+ solset.update(res)
 
115
  else:
116
  if nt != curNT:
117
+ follow_res = follow(curNT)
118
+ if follow_res:
119
+ solset.update(follow_res)
 
120
  return list(solset)
121
 
122
  def computeAllFirsts():
123
+ global firsts, diction
124
+ firsts.clear()
125
+ for y in diction:
126
  firsts[y] = set()
127
  for sub in diction[y]:
128
  result = first(sub)
129
+ if result:
130
+ firsts[y].update(result)
131
 
132
  def computeAllFollows():
133
+ global follows, diction
134
+ follows.clear()
135
+ for NT in diction:
136
  follows[NT] = set(follow(NT))
137
 
138
  def createParseTable():
139
+ global diction, term_userdef, firsts, follows
140
+
141
+ # Initialize parse table with empty strings
142
+ parse_table = {}
143
+ for non_term in diction:
144
+ parse_table[non_term] = {}
145
  for term in term_userdef + ['$']:
146
+ parse_table[non_term][term] = ""
147
+
148
+ # Fill parse table
149
+ grammar_is_LL = True
150
+
151
+ for non_term in diction:
152
+ for production in diction[non_term]:
153
+ first_set = first(production)
154
+
155
+ # If epsilon is in FIRST set, add FOLLOW set
156
+ if '#' in first_set:
157
+ first_set.remove('#')
158
+ first_set.extend(follows[non_term])
159
+
160
+ for terminal in first_set:
161
+ if parse_table[non_term].get(terminal, "") == "":
162
+ parse_table[non_term][terminal] = f"{non_term} -> {' '.join(production)}"
163
  else:
164
  grammar_is_LL = False
165
+
166
+ return parse_table, grammar_is_LL
167
 
168
  def validateStringUsingStackBuffer(parse_table, input_string):
169
  stack = [start_symbol, '$']
170
+ buffer = list(input_string.split()) + ['$']
171
+
172
+ while stack and buffer:
173
+ top_stack = stack[0]
174
+ current_input = buffer[0]
175
+
176
+ if top_stack == current_input:
177
+ stack.pop(0)
178
+ buffer.pop(0)
179
+ elif top_stack in term_userdef:
180
+ return "Invalid String: Terminal mismatch"
181
+ elif parse_table[top_stack][current_input]:
182
+ production = parse_table[top_stack][current_input].split('->')[1].strip().split()
183
+ stack.pop(0)
184
+ if production != ['#']:
185
+ stack = production + stack
186
  else:
187
+ return "Invalid String: No production rule found"
188
+
189
+ if not stack and not buffer:
190
+ return "Valid String"
191
+ return "Invalid String: Stack or buffer not empty"
 
 
 
192
 
193
+ # Streamlit UI
194
  st.title("LL(1) Grammar Analyzer")
195
 
196
+ # Input section
197
+ st.header("Grammar Input")
198
+ start_symbol = st.text_input("Enter Start Symbol:", "S")
199
+
200
+ with st.expander("Enter Grammar Rules"):
201
+ num_rules = st.number_input("Number of Rules:", min_value=1, value=3)
202
+ rules = []
203
+ for i in range(num_rules):
204
+ rule = st.text_input(f"Rule {i+1} (format: A -> B c | d)", key=f"rule_{i}")
205
+ if rule:
206
+ rules.append(rule)
207
 
208
+ nonterm_input = st.text_input("Enter Non-terminals (comma-separated):", "S,A,B")
209
+ term_input = st.text_input("Enter Terminals (comma-separated):", "a,b,c")
210
 
211
+ if nonterm_input.strip():
212
+ nonterm_userdef = [x.strip() for x in nonterm_input.split(',') if x.strip()]
213
+ if term_input.strip():
214
+ term_userdef = [x.strip() for x in term_input.split(',') if x.strip()]
215
 
216
  if st.button("Analyze Grammar"):
217
+ # Clear previous data
218
  diction.clear()
219
+
220
+ # Process rules
 
221
  for rule in rules:
222
+ if '->' in rule:
223
+ lhs, rhs = rule.split("->")
224
+ lhs = lhs.strip()
225
+ rhs_parts = [x.strip().split() for x in rhs.split("|")]
226
+ diction[lhs] = rhs_parts
227
+
228
+ # Remove left recursion and perform left factoring
229
+ st.subheader("Grammar Processing")
230
+ with st.expander("Show Grammar Transformations"):
231
+ st.write("After removing left recursion:")
232
+ diction = removeLeftRecursion(diction)
233
+ st.write(diction)
234
+
235
+ st.write("After left factoring:")
236
+ diction = LeftFactoring(diction)
237
+ st.write(diction)
238
+
239
+ # Compute FIRST and FOLLOW sets
240
  computeAllFirsts()
 
 
 
241
  computeAllFollows()
242
+
243
+ with st.expander("Show FIRST and FOLLOW Sets"):
244
+ st.write("FIRST Sets:", {k: list(v) for k, v in firsts.items()})
245
+ st.write("FOLLOW Sets:", {k: list(v) for k, v in follows.items()})
246
+
247
+ # Create and display parse table
248
  parse_table, grammar_is_LL = createParseTable()
249
+
250
  st.subheader("Parse Table")
251
+ # Convert parse table to pandas DataFrame for better display
252
+ df_data = []
253
+ terminals = term_userdef + ['$']
254
+
255
+ for non_term in parse_table:
256
+ row = [non_term] # First column is non-terminal
257
+ for term in terminals:
258
+ row.append(parse_table[non_term].get(term, ""))
259
+ df_data.append(row)
260
+
261
+ df = pd.DataFrame(df_data, columns=['Non-Terminal'] + terminals)
262
+ st.dataframe(df)
263
+
264
  if grammar_is_LL:
265
+ st.success("This grammar is LL(1)!")
266
  else:
267
+ st.error("This grammar is not LL(1)!")
268
+
269
+ # String validation section
270
+ st.subheader("String Validation")
271
+ input_string = st.text_input("Enter string to validate (space-separated):", "")
272
  if input_string:
273
  result = validateStringUsingStackBuffer(parse_table, input_string)
274
+ if "Valid" in result:
275
+ st.success(result)
276
+ else:
277
+ st.error(result)