Neha13 committed on
Commit 2fb8011 · verified · 1 Parent(s): e6715c6

Update app.py

Files changed (1)
  1. app.py +95 -142
app.py CHANGED
@@ -1,15 +1,8 @@
  import streamlit as st

- # Initial setup for grammar input
- rules = []
- nonterm_userdef = []
- term_userdef = []
- diction = {}
- firsts = {}
- follows = {}
- start_symbol = None

- # Function to remove left recursion
  def removeLeftRecursion(rulesDiction):
      store = {}
      for lhs in rulesDiction:
@@ -23,7 +16,7 @@ def removeLeftRecursion(rulesDiction):
                  betaRules.append(subrhs)
          if len(alphaRules) != 0:
              lhs_ = lhs + "'"
-             while lhs_ in rulesDiction.keys() or lhs_ in store.keys():
                  lhs_ += "'"
              for b in range(len(betaRules)):
                  betaRules[b].append(lhs_)
@@ -36,24 +29,22 @@ def removeLeftRecursion(rulesDiction):
          rulesDiction[left] = store[left]
      return rulesDiction

- # Function to perform left factoring
  def LeftFactoring(rulesDiction):
      newDict = {}
      for lhs in rulesDiction:
          allrhs = rulesDiction[lhs]
-         temp = dict()
          for subrhs in allrhs:
-             if subrhs[0] not in list(temp.keys()):
                  temp[subrhs[0]] = [subrhs]
              else:
                  temp[subrhs[0]].append(subrhs)
          new_rule = []
          tempo_dict = {}
          for term_key in temp:
-             allStartingWithTermKey = temp[term_key]
-             if len(allStartingWithTermKey) > 1:
                  lhs_ = lhs + "'"
-                 while lhs_ in rulesDiction.keys() or lhs_ in tempo_dict.keys():
                      lhs_ += "'"
                  new_rule.append([term_key, lhs_])
                  ex_rules = []
@@ -61,44 +52,44 @@ def LeftFactoring(rulesDiction):
                      ex_rules.append(g[1:])
                  tempo_dict[lhs_] = ex_rules
              else:
-                 new_rule.append(allStartingWithTermKey[0])
          newDict[lhs] = new_rule
      for key in tempo_dict:
          newDict[key] = tempo_dict[key]
      return newDict

- # Function to calculate FIRST set
  def first(rule, diction, term_userdef):
-     if len(rule) != 0 and rule[0] in term_userdef:
-         return rule[0]
-     elif len(rule) != 0 and rule[0] == '#':
-         return '#'
      if len(rule) != 0 and rule[0] in diction:
          fres = []
          rhs_rules = diction[rule[0]]
          for itr in rhs_rules:
              indivRes = first(itr, diction, term_userdef)
-             if isinstance(indivRes, list):
                  fres.extend(indivRes)
              else:
                  fres.append(indivRes)
          if '#' not in fres:
              return fres
          else:
-             newList = fres
              fres.remove('#')
              if len(rule) > 1:
                  ansNew = first(rule[1:], diction, term_userdef)
                  if ansNew is not None:
-                     if isinstance(ansNew, list):
-                         newList = fres + ansNew
                      else:
-                         newList = fres + [ansNew]
              fres.append('#')
              return fres

- # Function to calculate FOLLOW set
- def follow(nt, diction, start_symbol):
      solset = set()
      if nt == start_symbol:
          solset.add('$')
@@ -111,120 +102,82 @@ def follow(nt, diction, start_symbol):
                      subrule = subrule[index_nt + 1:]
                      if len(subrule) != 0:
                          res = first(subrule, diction, term_userdef)
-                         if res is not None:
-                             if '#' in res:
-                                 res.remove('#')
-                                 follow_res = follow(curNT, diction, start_symbol)
-                                 if follow_res:
-                                     res += follow_res
-                         else:
-                             res = []
                      else:
                          if nt != curNT:
-                             res = follow(curNT, diction, start_symbol)
-                             if res is None:
-                                 res = []
-                     solset.update(res if isinstance(res, list) else [res])
      return list(solset)

- # Compute FIRST for all non-terminals
- def computeAllFirsts(diction, term_userdef):
-     firsts = {}
-     for y in diction.keys():
-         firsts[y] = set()
-         for sub in diction[y]:
-             result = first(sub, diction, term_userdef)
-             if result is not None:
-                 firsts[y].update(result if isinstance(result, list) else [result])
-     return firsts
-
- # Compute FOLLOW for all non-terminals
- def computeAllFollows(diction, start_symbol):
-     follows = {}
-     for NT in diction.keys():
-         follows[NT] = set(follow(NT, diction, start_symbol))
-     return follows
-
- # Parse table creation function
- def createParseTable(diction, term_userdef, firsts, follows):
-     table = {}
-     grammar_is_LL = True
-     for lhs in diction.keys():
-         table[lhs] = {}
-         for term in term_userdef + ['$']: # Include end of input symbol
-             table[lhs][term] = "" # Set default empty entries
-
-     # Populate the parse table
-     for lhs in diction:
-         for rule in diction[lhs]:
-             first_res = first(rule, diction, term_userdef)
-             if '#' in first_res: # Epsilon handling
-                 first_res.remove('#')
-                 follow_res = follows.get(lhs, [])
-                 first_res.update(follow_res)
-
-             for term in first_res:
-                 if table[lhs][term] == "":
-                     table[lhs][term] = f"{lhs} -> {' '.join(rule)}"
-                 else:
-                     grammar_is_LL = False
-                     st.error(f"Conflict detected in parse table at [{lhs}, {term}]")
-
-     return table, grammar_is_LL
-
- # Streamlit Interface
- st.title("LL(1) Grammar Analyzer")
-
- st.subheader("Grammar Rules Input")
- start_symbol = st.text_input("Enter Start Symbol (Non-terminal)")
- num_rules = st.number_input("Number of Grammar Rules", min_value=1, step=1)
- rules = []
- for i in range(num_rules):
-     rule = st.text_input(f"Rule {i+1}")
-     if rule:
-         rules.append(rule)
-
- nonterm_userdef = st.text_input("Enter Non-Terminals (comma-separated)").split(',')
- term_userdef = st.text_input("Enter Terminals (comma-separated)").split(',')
-
- if st.button("Analyze Grammar"):
-     diction.clear()
-     firsts.clear()
-     follows.clear()
-
-     for rule in rules:
-         lhs, rhs = rule.split("->")
-         lhs = lhs.strip()
-         rhs_list = [x.strip().split() for x in rhs.split("|")]
-         diction[lhs] = rhs_list
-
-     st.subheader("Grammar After Removing Left Recursion")
-     diction = removeLeftRecursion(diction)
-     st.write(diction)
-
-     st.subheader("Grammar After Left Factoring")
-     diction = LeftFactoring(diction)
-     st.write(diction)
-
-     firsts = computeAllFirsts(diction, term_userdef)
-     st.subheader("FIRST Sets")
-     st.write(firsts)
-
-     follows = computeAllFollows(diction, start_symbol)
-     st.subheader("FOLLOW Sets")
-     st.write(follows)
-
-     parse_table, grammar_is_LL = createParseTable(diction, term_userdef, firsts, follows)
-     st.subheader("Parse Table")
-     st.write(parse_table)
-
-     if grammar_is_LL:
-         st.success("The grammar is LL(1)")
-     else:
-         st.error("The grammar is not LL(1)")
-
-     input_string = st.text_input("Enter String to Validate (space-separated)")
-     if input_string:
-         result = validateStringUsingStackBuffer(parse_table, input_string)
-         st.subheader("Validation Result")
-         st.write(result)
 
  import streamlit as st
+ import copy

+ # Helper functions

  def removeLeftRecursion(rulesDiction):
      store = {}
      for lhs in rulesDiction:

                  betaRules.append(subrhs)
          if len(alphaRules) != 0:
              lhs_ = lhs + "'"
+             while (lhs_ in rulesDiction.keys()) or (lhs_ in store.keys()):
                  lhs_ += "'"
              for b in range(len(betaRules)):
                  betaRules[b].append(lhs_)

          rulesDiction[left] = store[left]
      return rulesDiction

  def LeftFactoring(rulesDiction):
      newDict = {}
      for lhs in rulesDiction:
          allrhs = rulesDiction[lhs]
+         temp = {}
          for subrhs in allrhs:
+             if subrhs[0] not in temp:
                  temp[subrhs[0]] = [subrhs]
              else:
                  temp[subrhs[0]].append(subrhs)
          new_rule = []
          tempo_dict = {}
          for term_key in temp:
+             if len(temp[term_key]) > 1:
                  lhs_ = lhs + "'"
+                 while (lhs_ in rulesDiction.keys()) or (lhs_ in tempo_dict.keys()):
                      lhs_ += "'"
                  new_rule.append([term_key, lhs_])
                  ex_rules = []

                      ex_rules.append(g[1:])
                  tempo_dict[lhs_] = ex_rules
              else:
+                 new_rule.append(temp[term_key][0])
          newDict[lhs] = new_rule
      for key in tempo_dict:
          newDict[key] = tempo_dict[key]
      return newDict

  def first(rule, diction, term_userdef):
+     if len(rule) != 0:
+         if rule[0] in term_userdef:
+             return rule[0]
+         elif rule[0] == '#':
+             return '#'
      if len(rule) != 0 and rule[0] in diction:
          fres = []
          rhs_rules = diction[rule[0]]
          for itr in rhs_rules:
              indivRes = first(itr, diction, term_userdef)
+             if type(indivRes) is list:
                  fres.extend(indivRes)
              else:
                  fres.append(indivRes)
          if '#' not in fres:
              return fres
          else:
              fres.remove('#')
              if len(rule) > 1:
                  ansNew = first(rule[1:], diction, term_userdef)
                  if ansNew is not None:
+                     if type(ansNew) is list:
+                         fres.extend(ansNew)
                      else:
+                         fres.append(ansNew)
+                     return fres
              fres.append('#')
              return fres
+     return []

+ def follow(nt, start_symbol, diction, firsts, follows):
      solset = set()
      if nt == start_symbol:
          solset.add('$')

                      subrule = subrule[index_nt + 1:]
                      if len(subrule) != 0:
                          res = first(subrule, diction, term_userdef)
+                         if '#' in res:
+                             newList = []
+                             res.remove('#')
+                             ansNew = follow(curNT, start_symbol, diction, firsts, follows)
+                             if ansNew:
+                                 newList.extend(ansNew)
+                             res.extend(newList)
                      else:
                          if nt != curNT:
+                             res = follow(curNT, start_symbol, diction, firsts, follows)
+                     if res is not None:
+                         solset.update(res)
      return list(solset)

+ # Main Streamlit App Code
+
+ def main():
+     # Streamlit UI Elements
+     st.title("LL(1) Grammar Analyzer")
+
+     # Input for Grammar
+     st.subheader("Enter Grammar Rules")
+     grammar_input = st.text_area("Grammar (one rule per line, format: LHS -> RHS)")
+
+     # Parse Grammar
+     if grammar_input:
+         rules = grammar_input.split("\n")
+         diction = {}
+         for rule in rules:
+             k = rule.split("->")
+             k[0] = k[0].strip()
+             rhs = k[1].strip()
+             multirhs = rhs.split('|')
+             for i in range(len(multirhs)):
+                 multirhs[i] = multirhs[i].split()
+             diction[k[0]] = multirhs
+
+         # Process Grammar (Eliminate Left Recursion and Apply Left Factoring)
+         diction = removeLeftRecursion(diction)
+         diction = LeftFactoring(diction)
+
+         st.write("Processed Grammar After Left Recursion and Left Factoring:")
+         for y in diction:
+             st.write(f"{y} -> {diction[y]}")
+
+         # Compute First and Follow sets
+         firsts = {}
+         follows = {}
+         start_symbol = list(diction.keys())[0]
+
+         for y in list(diction.keys()):
+             firsts[y] = set(first(rule, diction, ["a", "b"])) # Adjust terminal symbols as needed
+             follows[y] = follow(y, start_symbol, diction, firsts, follows)
+
+         st.subheader("First Sets")
+         for nonterminal, first_set in firsts.items():
+             st.write(f"First({nonterminal}) -> {first_set}")
+
+         st.subheader("Follow Sets")
+         for nonterminal, follow_set in follows.items():
+             st.write(f"Follow({nonterminal}) -> {follow_set}")
+
+         # Example for parsing table
+         st.subheader("Parse Table:")
+         parse_table, grammar_is_LL, terminals = createParseTable(diction, firsts, follows)
+
+         if grammar_is_LL:
+             st.write("Grammar is LL(1)")
+         else:
+             st.write("Grammar is not LL(1)")
+
+         # Display parsing table
+         st.write("Parse Table:")
+         for row in parse_table:
+             st.write(row)
+
+ # Run the app
+ if __name__ == '__main__':
+     main()
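
Note: main() still calls createParseTable(diction, firsts, follows), but this commit deletes the old createParseTable without adding a replacement, so the updated app.py raises a NameError when it reaches that call. For reference only (not part of the commit), a minimal sketch of a helper compatible with that call site follows; the terminal-collection logic and the (table, grammar_is_LL, terminals) return shape are assumptions inferred from main(), with '#' as epsilon and '$' as end-of-input as in the rest of the file.

# Hypothetical helper, not part of this commit: a minimal createParseTable
# matching the call in main(). Assumes '#' means epsilon and '$' marks end of input.
def createParseTable(diction, firsts, follows):
    # Assume terminals are the RHS symbols that are not non-terminals.
    terminals = sorted({sym for rules in diction.values()
                        for rule in rules
                        for sym in rule
                        if sym not in diction and sym != '#'})
    terminals = terminals + ['$']

    table = {nt: {t: "" for t in terminals} for nt in diction}
    grammar_is_LL = True
    for lhs, rules in diction.items():
        for rule in rules:
            res = first(rule, diction, terminals)
            res = set(res) if isinstance(res, list) else {res}
            if '#' in res:  # epsilon production: predict on FOLLOW(lhs)
                res.discard('#')
                res.update(follows.get(lhs, []))
            for term in res:
                if table[lhs][term] == "":
                    table[lhs][term] = f"{lhs} -> {' '.join(rule)}"
                else:
                    grammar_is_LL = False  # two productions in one cell: not LL(1)
    return table, grammar_is_LL, terminals

The sketch reuses the file's own first() helper and the follows dictionary built in main(), so it could be placed above main() without touching the rest of the code.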