Neha13 committed
Commit 4290b1a · verified · 1 Parent(s): 1f73aae

Create app.py

Files changed (1)
  1. app.py +232 -0
app.py ADDED
import streamlit as st

# Grammar state shared by the helper functions below; populated from the UI.
rules = []
nonterm_userdef = []
term_userdef = []
diction = {}
firsts = {}
follows = {}
start_symbol = None

def removeLeftRecursion(rulesDiction):
    # Eliminate immediate left recursion:
    # A -> A a | b  becomes  A -> b A'  and  A' -> a A' | #
    store = {}
    for lhs in rulesDiction:
        alphaRules = []
        betaRules = []
        allrhs = rulesDiction[lhs]
        for subrhs in allrhs:
            if subrhs[0] == lhs:
                alphaRules.append(subrhs[1:])
            else:
                betaRules.append(subrhs)
        if len(alphaRules) != 0:
            lhs_ = lhs + "'"
            while lhs_ in rulesDiction.keys() or lhs_ in store.keys():
                lhs_ += "'"
            for b in range(len(betaRules)):
                betaRules[b].append(lhs_)
            rulesDiction[lhs] = betaRules
            for a in range(len(alphaRules)):
                alphaRules[a].append(lhs_)
            alphaRules.append(['#'])
            store[lhs_] = alphaRules
    for left in store:
        rulesDiction[left] = store[left]
    return rulesDiction

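# Worked example (hypothetical grammar, not part of the committed file): the
# left-recursive rule A -> A a | b is rewritten as A -> b A', A' -> a A' | #.
#   removeLeftRecursion({'A': [['A', 'a'], ['b']]})
#   returns {'A': [['b', "A'"]], "A'": [['a', "A'"], ['#']]}
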
def LeftFactoring(rulesDiction):
    # Left-factor alternatives that share a common leading symbol:
    # A -> a X | a Y  becomes  A -> a A'  and  A' -> X | Y
    newDict = {}
    for lhs in rulesDiction:
        allrhs = rulesDiction[lhs]
        temp = dict()
        for subrhs in allrhs:
            if subrhs[0] not in list(temp.keys()):
                temp[subrhs[0]] = [subrhs]
            else:
                temp[subrhs[0]].append(subrhs)
        new_rule = []
        tempo_dict = {}
        for term_key in temp:
            allStartingWithTermKey = temp[term_key]
            if len(allStartingWithTermKey) > 1:
                lhs_ = lhs + "'"
                while lhs_ in rulesDiction.keys() or lhs_ in tempo_dict.keys():
                    lhs_ += "'"
                new_rule.append([term_key, lhs_])
                ex_rules = []
                for g in temp[term_key]:
                    # An empty remainder means the factored symbol derives epsilon.
                    ex_rules.append(g[1:] if len(g) > 1 else ['#'])
                tempo_dict[lhs_] = ex_rules
            else:
                new_rule.append(allStartingWithTermKey[0])
        newDict[lhs] = new_rule
        for key in tempo_dict:
            newDict[key] = tempo_dict[key]
    return newDict

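# Worked example (hypothetical grammar, not part of the committed file): the
# common prefix 'a' in S -> a X | a Y is factored out.
#   LeftFactoring({'S': [['a', 'X'], ['a', 'Y']]})
#   returns {'S': [['a', "S'"]], "S'": [['X'], ['Y']]}
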
def first(rule):
    # FIRST of a sentential form (list of symbols); returns a terminal string,
    # '#', a list of terminals, or None for an empty rule.
    if len(rule) != 0 and rule[0] in term_userdef:
        return rule[0]
    elif len(rule) != 0 and rule[0] == '#':
        return '#'
    if len(rule) != 0 and rule[0] in diction:
        fres = []
        rhs_rules = diction[rule[0]]
        for itr in rhs_rules:
            indivRes = first(itr)
            if type(indivRes) is list:
                fres.extend(indivRes)
            else:
                fres.append(indivRes)
        if '#' not in fres:
            return fres
        # The leading non-terminal can derive epsilon, so FIRST of the
        # remaining symbols must be included as well.
        fres.remove('#')
        if len(rule) > 1:
            ansNew = first(rule[1:])
            if ansNew is not None:
                if type(ansNew) is list:
                    return fres + ansNew
                return fres + [ansNew]
            return fres
        # Nothing follows the vanishing symbol, so epsilon remains possible.
        fres.append('#')
        return fres

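# Worked example (hypothetical grammar, assuming the module globals are set to
# term_userdef = ['y', 'x'] and diction = {'E': [['T', 'x']], 'T': [['y'], ['#']]}):
#   first(['T', 'x']) returns ['y', 'x']   (T may vanish, so 'x' is reachable)
#   first(['E'])      returns ['y', 'x']
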
def follow(nt):
    # FOLLOW of a non-terminal, computed by scanning every rule for occurrences of nt.
    solset = set()
    if nt == start_symbol:
        solset.add('$')
    for curNT in diction:
        rhs = diction[curNT]
        for subrule in rhs:
            if nt in subrule:
                while nt in subrule:
                    index_nt = subrule.index(nt)
                    subrule = subrule[index_nt + 1:]
                    res = None
                    if len(subrule) != 0:
                        res = first(subrule)
                        if type(res) is not list:
                            res = [res]
                        if '#' in res:
                            # The suffix can derive epsilon, so FOLLOW(curNT) applies too.
                            res.remove('#')
                            res += follow(curNT)
                    elif nt != curNT:
                        # nt ends the rule, so it inherits FOLLOW(curNT).
                        res = follow(curNT)
                    if res is not None:
                        solset.update(res)
    return list(solset)

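# Worked example (hypothetical grammar, assuming start_symbol = 'S',
# term_userdef = ['a', 'b'] and diction = {'S': [['A', 'b']], 'A': [['a']]}):
#   follow('S') returns ['$']
#   follow('A') returns ['b']
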
def computeAllFirsts():
    global firsts
    for y in diction.keys():
        firsts[y] = set()
        for sub in diction[y]:
            result = first(sub)
            if result is not None:
                firsts[y].update(result if type(result) is list else [result])

def computeAllFollows():
    global follows
    for NT in diction.keys():
        follows[NT] = set(follow(NT))

def createParseTable():
    global term_userdef, firsts, follows
    table = {}
    grammar_is_LL = True
    for lhs in diction.keys():
        table[lhs] = {}
        for term in term_userdef + ['$']:
            table[lhs][term] = ""
    for lhs in diction:
        for rule in diction[lhs]:
            first_res = first(rule)
            if type(first_res) is not list:
                first_res = [first_res]
            if '#' in first_res:
                # Epsilon rule: it is entered under every terminal in FOLLOW(lhs).
                first_res.remove('#')
                first_res.extend(follows[lhs])
            for term in first_res:
                if table[lhs][term] == "":
                    table[lhs][term] = f"{lhs} -> {' '.join(rule)}"
                else:
                    # Two rules compete for the same cell, so the grammar is not LL(1).
                    grammar_is_LL = False
    return table, grammar_is_LL

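# Worked example (hypothetical grammar S -> a A, A -> b | #, assuming
# term_userdef = ['a', 'b'] and FIRST/FOLLOW already computed): the table is roughly
#   {'S': {'a': 'S -> a A', 'b': '', '$': ''},
#    'A': {'a': '', 'b': 'A -> b', '$': 'A -> #'}}
# and grammar_is_LL is True because no cell receives two rules.
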
def validateStringUsingStackBuffer(parse_table, input_string):
    # Table-driven LL(1) parse: expand non-terminals via the parse table and
    # consume an input token only when it matches a terminal on the stack.
    stack = [start_symbol, '$']
    buffer = ['$'] + input_string.split()[::-1]
    while True:
        if stack == ['$'] and buffer == ['$']:
            return "Valid String"
        top_stack = stack[0]
        top_buffer = buffer[-1]
        if top_stack in term_userdef or top_stack == '$':
            if top_stack != top_buffer:
                return "Invalid String"
            stack.pop(0)
            buffer.pop()
        else:
            rule = parse_table.get(top_stack, {}).get(top_buffer, "")
            if not rule:
                return "Invalid String"
            rule_rhs = rule.split('->')[1].strip().split()
            # '#' denotes epsilon, which expands to nothing.
            if rule_rhs == ['#']:
                rule_rhs = []
            stack = rule_rhs + stack[1:]

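# Worked example (hypothetical, assuming start_symbol = 'S', term_userdef = ['a'],
# and the hand-written table shown here):
#   table = {'S': {'a': 'S -> a S', '$': 'S -> #'}}
#   validateStringUsingStackBuffer(table, "a a") returns "Valid String"
#   validateStringUsingStackBuffer(table, "b")   returns "Invalid String"
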
st.title("LL(1) Grammar Analyzer")

st.subheader("Grammar Rules Input")
start_symbol = st.text_input("Enter Start Symbol (Non-terminal)")
num_rules = st.number_input("Number of Grammar Rules", min_value=1, step=1)
rules = []
for i in range(int(num_rules)):
    rule = st.text_input(f"Rule {i+1}")
    if rule:
        rules.append(rule)

# Strip whitespace around the comma-separated symbols so they match rule tokens.
nonterm_userdef = [s.strip() for s in st.text_input("Enter Non-Terminals (comma-separated)").split(',') if s.strip()]
term_userdef = [s.strip() for s in st.text_input("Enter Terminals (comma-separated)").split(',') if s.strip()]

if st.button("Analyze Grammar"):
    diction.clear()
    firsts.clear()
    follows.clear()

    for rule in rules:
        if "->" not in rule:
            st.error(f"Rule '{rule}' must have the form LHS -> RHS")
            st.stop()
        lhs, rhs = rule.split("->", 1)
        lhs = lhs.strip()
        rhs_list = [x.strip().split() for x in rhs.split("|")]
        diction[lhs] = rhs_list

    st.subheader("Grammar After Removing Left Recursion")
    diction = removeLeftRecursion(diction)
    st.write(diction)

    st.subheader("Grammar After Left Factoring")
    diction = LeftFactoring(diction)
    st.write(diction)

    computeAllFirsts()
    st.subheader("FIRST Sets")
    st.write(firsts)

    computeAllFollows()
    st.subheader("FOLLOW Sets")
    st.write(follows)

    parse_table, grammar_is_LL = createParseTable()
    st.subheader("Parse Table")
    st.write(parse_table)

    if grammar_is_LL:
        st.success("The grammar is LL(1)")
    else:
        st.error("The grammar is not LL(1)")

    # Keep the table across reruns so the string-validation input below still
    # works after Streamlit re-executes the script.
    st.session_state["parse_table"] = parse_table

if "parse_table" in st.session_state:
    input_string = st.text_input("Enter String to Validate (space-separated)")
    if input_string:
        result = validateStringUsingStackBuffer(st.session_state["parse_table"], input_string)
        st.subheader("Validation Result")
        st.write(result)
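
Run locally with: streamlit run app.py

Each rule box expects "LHS -> RHS" with space-separated symbols, "|" between
alternatives, and "#" for epsilon. A hypothetical session (not taken from the
commit) could look like:

    Start Symbol:         E
    Rule 1:               E -> T E'
    Rule 2:               E' -> + T E' | #
    Rule 3:               T -> id
    Non-Terminals:        E,E',T
    Terminals:            id,+
    String to Validate:   id + id   (reported as Valid String)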