Update app.py
app.py CHANGED
@@ -68,7 +68,7 @@ def LeftFactoring(rulesDiction):
     return newDict
 
 # Function to calculate FIRST set
-def first(rule):
+def first(rule, diction, term_userdef):
     if len(rule) != 0 and rule[0] in term_userdef:
         return rule[0]
     elif len(rule) != 0 and rule[0] == '#':
@@ -77,8 +77,8 @@ def first(rule):
         fres = []
         rhs_rules = diction[rule[0]]
         for itr in rhs_rules:
-            indivRes = first(itr)
-            if …
+            indivRes = first(itr, diction, term_userdef)
+            if isinstance(indivRes, list):
                 fres.extend(indivRes)
             else:
                 fres.append(indivRes)
@@ -88,9 +88,9 @@ def first(rule):
         newList = fres
         fres.remove('#')
         if len(rule) > 1:
-            ansNew = first(rule[1:])
+            ansNew = first(rule[1:], diction, term_userdef)
             if ansNew is not None:
-                if …
+                if isinstance(ansNew, list):
                     newList = fres + ansNew
                 else:
                     newList = fres + [ansNew]
@@ -98,7 +98,7 @@ def first(rule):
     return fres
 
 # Function to calculate FOLLOW set
-def follow(nt):
+def follow(nt, diction, start_symbol):
     solset = set()
     if nt == start_symbol:
         solset.add('$')
@@ -110,82 +110,67 @@ def follow(nt):
             index_nt = subrule.index(nt)
             subrule = subrule[index_nt + 1:]
             if len(subrule) != 0:
-                res = first(subrule)
+                res = first(subrule, diction, term_userdef)
                 if res is not None:
                     if '#' in res:
                         res.remove('#')
-                        follow_res = follow(curNT)
+                        follow_res = follow(curNT, diction, start_symbol)
                         if follow_res:
                             res += follow_res
                 else:
                     res = []
            else:
                 if nt != curNT:
-                    res = follow(curNT)
+                    res = follow(curNT, diction, start_symbol)
                     if res is None:
                         res = []
-            solset.update(res if …
+            solset.update(res if isinstance(res, list) else [res])
     return list(solset)
 
 # Compute FIRST for all non-terminals
-def computeAllFirsts():
-
+def computeAllFirsts(diction, term_userdef):
+    firsts = {}
     for y in diction.keys():
         firsts[y] = set()
         for sub in diction[y]:
-            result = first(sub)
+            result = first(sub, diction, term_userdef)
             if result is not None:
-                firsts[y].update(result if …
+                firsts[y].update(result if isinstance(result, list) else [result])
+    return firsts
 
 # Compute FOLLOW for all non-terminals
-def computeAllFollows():
-
+def computeAllFollows(diction, start_symbol):
+    follows = {}
     for NT in diction.keys():
-        follows[NT] = set(follow(NT))
+        follows[NT] = set(follow(NT, diction, start_symbol))
+    return follows
 
 # Parse table creation function
-def createParseTable():
-    global term_userdef, firsts, follows
+def createParseTable(diction, term_userdef, firsts, follows):
     table = {}
     grammar_is_LL = True
     for lhs in diction.keys():
         table[lhs] = {}
-        for term in term_userdef + ['$']:
-            table[lhs][term] = ""
+        for term in term_userdef + ['$']:  # Include end of input symbol
+            table[lhs][term] = ""  # Set default empty entries
+
+    # Populate the parse table
     for lhs in diction:
         for rule in diction[lhs]:
-            first_res = first(rule)
-            if '#' in first_res:
+            first_res = first(rule, diction, term_userdef)
+            if '#' in first_res:  # Epsilon handling
                 first_res.remove('#')
-                follow_res = follows[…
+                follow_res = follows.get(lhs, [])
                 first_res.update(follow_res)
+
             for term in first_res:
                 if table[lhs][term] == "":
                     table[lhs][term] = f"{lhs} -> {' '.join(rule)}"
                 else:
                     grammar_is_LL = False
-
+                    st.error(f"Conflict detected in parse table at [{lhs}, {term}]")
 
-
-def validateStringUsingStackBuffer(parse_table, input_string):
-    stack = [start_symbol, '$']
-    buffer = ['$'] + input_string.split()[::-1]
-    while stack:
-        top_stack = stack.pop(0)
-        top_buffer = buffer.pop()
-        if top_stack in term_userdef:
-            if top_stack != top_buffer:
-                return "Invalid String"
-            elif top_stack == top_buffer:
-                continue
-        else:
-            rule = parse_table.get(top_stack, {}).get(top_buffer, None)
-            if rule:
-                rule_rhs = rule.split('->')[1].strip().split()
-                stack = rule_rhs + stack
-            else:
-                return "Invalid String"
-    return "Valid String"
+    return table, grammar_is_LL
 
 # Streamlit Interface
 st.title("LL(1) Grammar Analyzer")
@@ -221,15 +206,15 @@ if st.button("Analyze Grammar"):
     diction = LeftFactoring(diction)
     st.write(diction)
 
-    computeAllFirsts()
+    firsts = computeAllFirsts(diction, term_userdef)
     st.subheader("FIRST Sets")
     st.write(firsts)
 
-    computeAllFollows()
+    follows = computeAllFollows(diction, start_symbol)
    st.subheader("FOLLOW Sets")
     st.write(follows)
 
-    parse_table, grammar_is_LL = createParseTable()
+    parse_table, grammar_is_LL = createParseTable(diction, term_userdef, firsts, follows)
     st.subheader("Parse Table")
     st.write(parse_table)
 
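The main change in this commit is that first, follow, computeAllFirsts, computeAllFollows and createParseTable now take the grammar (diction), the terminal list (term_userdef) and the start symbol as explicit arguments and return their results, instead of reading and mutating module-level globals. Below is a minimal standalone sketch of the same FIRST/FOLLOW/parse-table construction in that parameter-passing style, on a toy expression grammar. The grammar, the helper names (first_of, all_follows, build_table) and the iterative FOLLOW computation are illustrative assumptions, not code from app.py; as in the diff, '#' stands for epsilon and '$' marks end of input.

# Toy grammar (illustrative only; not the app's input format).
diction = {
    "E": [["T", "X"]],
    "X": [["+", "T", "X"], ["#"]],
    "T": [["F", "Y"]],
    "Y": [["*", "F", "Y"], ["#"]],
    "F": [["(", "E", ")"], ["id"]],
}
term_userdef = ["+", "*", "(", ")", "id"]
start_symbol = "E"

def first_of(symbols, diction, term_userdef):
    # FIRST of a symbol sequence; '#' in the result means the whole
    # sequence can derive epsilon.
    result = set()
    for sym in symbols:
        if sym in term_userdef or sym == "#":
            result.add(sym)
            return result
        sym_first = set()
        for rhs in diction[sym]:
            sym_first |= first_of(rhs, diction, term_userdef)
        result |= sym_first - {"#"}
        if "#" not in sym_first:
            return result          # this symbol cannot vanish, so stop here
    result.add("#")                # every symbol in the sequence can vanish
    return result

def all_follows(diction, start_symbol, term_userdef):
    # FOLLOW sets by fixed-point iteration (the diff uses a recursive
    # follow() instead; the resulting sets are the same).
    follows = {nt: set() for nt in diction}
    follows[start_symbol].add("$")
    changed = True
    while changed:
        changed = False
        for lhs, rules in diction.items():
            for rhs in rules:
                for i, sym in enumerate(rhs):
                    if sym not in diction:
                        continue
                    tail = rhs[i + 1:]
                    tail_first = first_of(tail, diction, term_userdef) if tail else {"#"}
                    before = len(follows[sym])
                    follows[sym] |= tail_first - {"#"}
                    if "#" in tail_first:
                        follows[sym] |= follows[lhs]
                    changed = changed or len(follows[sym]) != before
    return follows

def build_table(diction, term_userdef, follows):
    # For A -> alpha, the entry goes under every terminal in FIRST(alpha);
    # if alpha can derive epsilon, also under every terminal in FOLLOW(A).
    table = {nt: {t: "" for t in term_userdef + ["$"]} for nt in diction}
    grammar_is_LL = True
    for lhs, rules in diction.items():
        for rhs in rules:
            lookahead = first_of(rhs, diction, term_userdef)
            if "#" in lookahead:
                lookahead = (lookahead - {"#"}) | follows[lhs]
            for t in lookahead:
                if table[lhs][t]:
                    grammar_is_LL = False   # two productions claim the same cell
                table[lhs][t] = f"{lhs} -> {' '.join(rhs)}"
    return table, grammar_is_LL

follows = all_follows(diction, start_symbol, term_userdef)
table, grammar_is_LL = build_table(diction, term_userdef, follows)
print(table["E"]["id"])   # E -> T X
print(table["X"][")"])    # X -> #
print(grammar_is_LL)      # True: no cell holds two productions

Passing diction, term_userdef and start_symbol explicitly, as the commit does, keeps the set computations testable outside the Streamlit button callback; only the conflict report (st.error) still touches Streamlit directly.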