Neha13 committed on
Commit
be74f72
·
verified ·
1 Parent(s): 2dbf53a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +178 -147
app.py CHANGED
@@ -1,8 +1,13 @@
1
  import streamlit as st
2
- import numpy as np
3
- import pandas as pd
4
 
5
- # Function to remove left recursion
 
 
 
 
 
 
 
6
  def removeLeftRecursion(rulesDiction):
7
  store = {}
8
  for lhs in rulesDiction:
@@ -16,7 +21,7 @@ def removeLeftRecursion(rulesDiction):
16
  betaRules.append(subrhs)
17
  if len(alphaRules) != 0:
18
  lhs_ = lhs + "'"
19
- while (lhs_ in rulesDiction.keys()) or (lhs_ in store.keys()):
20
  lhs_ += "'"
21
  for b in range(len(betaRules)):
22
  betaRules[b].append(lhs_)
@@ -29,14 +34,13 @@ def removeLeftRecursion(rulesDiction):
29
  rulesDiction[left] = store[left]
30
  return rulesDiction
31
 
32
- # Function for Left Factoring
33
  def LeftFactoring(rulesDiction):
34
  newDict = {}
35
  for lhs in rulesDiction:
36
  allrhs = rulesDiction[lhs]
37
  temp = dict()
38
  for subrhs in allrhs:
39
- if subrhs[0] not in temp.keys():
40
  temp[subrhs[0]] = [subrhs]
41
  else:
42
  temp[subrhs[0]].append(subrhs)
@@ -46,7 +50,7 @@ def LeftFactoring(rulesDiction):
46
  allStartingWithTermKey = temp[term_key]
47
  if len(allStartingWithTermKey) > 1:
48
  lhs_ = lhs + "'"
49
- while (lhs_ in rulesDiction.keys()) or (lhs_ in tempo_dict.keys()):
50
  lhs_ += "'"
51
  new_rule.append([term_key, lhs_])
52
  ex_rules = []
@@ -60,148 +64,175 @@ def LeftFactoring(rulesDiction):
60
  newDict[key] = tempo_dict[key]
61
  return newDict
62
 
63
- # Function to compute the FIRST set
64
- def first(symbol, grammar, first_sets):
65
- if symbol in first_sets:
66
- return first_sets[symbol]
67
- first_set = set()
68
- if symbol not in grammar:
69
- first_set.add(symbol)
70
- else:
71
- for rule in grammar[symbol]:
72
- if rule == ['#']:
73
- first_set.add('#')
 
74
  else:
75
- for s in rule:
76
- first_set |= first(s, grammar, first_sets)
77
- if '#' not in first(s, grammar, first_sets):
78
- break
79
- first_sets[symbol] = first_set
80
- return first_set
81
-
82
- # Function to compute the FOLLOW set
83
- def follow(symbol, grammar, start_symbol, follow_sets, first_sets):
84
- if symbol in follow_sets:
85
- return follow_sets[symbol]
86
- follow_set = set()
87
- if symbol == start_symbol:
88
- follow_set.add('$')
89
- for lhs in grammar:
90
- for rule in grammar[lhs]:
91
- for i, s in enumerate(rule):
92
- if s == symbol:
93
- if i + 1 < len(rule):
94
- follow_set |= first(rule[i + 1], grammar, first_sets) - {'#'}
95
- if i + 1 == len(rule) or '#' in first(rule[i + 1], grammar, first_sets):
96
- follow_set |= follow(lhs, grammar, start_symbol, follow_sets, first_sets)
97
- follow_sets[symbol] = follow_set
98
- return follow_set
99
-
100
- # Function to compute all FIRST sets
101
- def computeAllFirsts(grammar):
102
- first_sets = {}
103
- for symbol in grammar:
104
- first(symbol, grammar, first_sets)
105
- return first_sets
106
-
107
- # Function to compute all FOLLOW sets
108
- def computeAllFollows(grammar, start_symbol, first_sets):
109
- follow_sets = {}
110
- for symbol in grammar:
111
- follow(symbol, grammar, start_symbol, follow_sets, first_sets)
112
- return follow_sets
113
-
114
- # Function to create the LL(1) parsing table
115
- def createParseTable(grammar, first_sets, follow_sets, terminals):
116
- parse_table = {}
117
- for lhs in grammar:
118
- for rule in grammar[lhs]:
119
- first_set = first(rule[0], grammar, first_sets)
120
- for terminal in first_set - {'#'}:
121
- if lhs not in parse_table:
122
- parse_table[lhs] = {}
123
- parse_table[lhs][terminal] = rule
124
- if '#' in first_set:
125
- for terminal in follow_sets[lhs]:
126
- if lhs not in parse_table:
127
- parse_table[lhs] = {}
128
- parse_table[lhs][terminal] = rule
129
- return parse_table
130
-
131
- # Function to validate a string using the LL(1) parsing table
132
- def validateStringUsingStackBuffer(parse_table, grammar_is_LL, terminals, input_string, term_userdef, start_symbol):
133
- stack = [start_symbol]
134
- input_string = input_string + ['$']
135
- idx = 0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
136
  while stack:
137
- top = stack[-1]
138
- if top == input_string[idx]:
139
- stack.pop()
140
- idx += 1
141
- elif top in parse_table and input_string[idx] in parse_table[top]:
142
- rule = parse_table[top][input_string[idx]]
143
- stack.pop()
144
- if rule != ['#']:
145
- stack.extend(reversed(rule))
146
  else:
147
- return "String is not valid."
148
- if idx == len(input_string):
149
- return "String is valid."
150
- return "String is not valid."
 
 
 
 
151
 
152
  # Main Streamlit App
153
- def main():
154
- st.title("Grammar Analyzer")
155
- st.markdown("This app performs grammar analysis, including left recursion removal, left factoring, FIRST/FOLLOW set calculations, and LL(1) parsing.")
156
-
157
- # Input Section
158
- st.sidebar.header("Input Parameters")
159
- start_symbol = st.text_input("Start Symbol (Non-terminal):", "S")
160
- num_rules = st.number_input("Number of Grammar Rules:", min_value=1, value=4)
161
-
162
- # Input rules dynamically based on the number of rules
163
- grammar_rules = []
164
- for i in range(num_rules):
165
- rule = st.text_input(f"Rule {i + 1}:", f"S -> A k O")
166
- grammar_rules.append(rule)
167
-
168
- non_terminals = st.text_input("Non-Terminals (comma-separated):", "S, A, B, C")
169
- terminals = st.text_input("Terminals (comma-separated):", "a, b, c, d, k, r, O")
170
-
171
- # Process the inputs
172
- if st.button("Analyze"):
173
- # Parse the non-terminals and terminals
174
- non_terminals_list = [nt.strip() for nt in non_terminals.split(",")]
175
- terminals_list = [t.strip() for t in terminals.split(",")]
176
-
177
- # Parse the grammar rules
178
- grammar_dict = {}
179
- for rule in grammar_rules:
180
- lhs, rhs = rule.split("->")
181
- rhs_options = rhs.split("|")
182
- grammar_dict[lhs.strip()] = [option.strip().split() for option in rhs_options]
183
-
184
- # Remove left recursion
185
- grammar_dict = removeLeftRecursion(grammar_dict)
186
-
187
- # Apply left factoring
188
- grammar_dict = LeftFactoring(grammar_dict)
189
-
190
- # Compute FIRST and FOLLOW sets
191
- first_sets = computeAllFirsts(grammar_dict)
192
- follow_sets = computeAllFollows(grammar_dict, start_symbol, first_sets)
193
-
194
- # Generate parsing table
195
- parse_table = createParseTable(grammar_dict, first_sets, follow_sets, terminals_list)
196
-
197
- # Display the parsing table
198
- st.subheader("Parsing Table")
199
- st.write(parse_table)
200
-
201
- # Validate the input string using the parsing table
202
- input_string = st.text_input("Enter string to validate:", "a c")
203
- validation_result = validateStringUsingStackBuffer(parse_table, True, terminals_list, input_string.split(), terminals_list, start_symbol)
204
- st.write(validation_result)
205
-
206
- if __name__ == "__main__":
207
- main()
 
 
1
  import streamlit as st
 
 
2
 
3
+ rules = []
4
+ nonterm_userdef = []
5
+ term_userdef = []
6
+ diction = {}
7
+ firsts = {}
8
+ follows = {}
9
+ start_symbol = None
10
+
11
  def removeLeftRecursion(rulesDiction):
12
  store = {}
13
  for lhs in rulesDiction:
 
21
  betaRules.append(subrhs)
22
  if len(alphaRules) != 0:
23
  lhs_ = lhs + "'"
24
+ while lhs_ in rulesDiction.keys() or lhs_ in store.keys():
25
  lhs_ += "'"
26
  for b in range(len(betaRules)):
27
  betaRules[b].append(lhs_)
 
34
  rulesDiction[left] = store[left]
35
  return rulesDiction
36
 
 
37
  def LeftFactoring(rulesDiction):
38
  newDict = {}
39
  for lhs in rulesDiction:
40
  allrhs = rulesDiction[lhs]
41
  temp = dict()
42
  for subrhs in allrhs:
43
+ if subrhs[0] not in list(temp.keys()):
44
  temp[subrhs[0]] = [subrhs]
45
  else:
46
  temp[subrhs[0]].append(subrhs)
 
50
  allStartingWithTermKey = temp[term_key]
51
  if len(allStartingWithTermKey) > 1:
52
  lhs_ = lhs + "'"
53
+ while lhs_ in rulesDiction.keys() or lhs_ in tempo_dict.keys():
54
  lhs_ += "'"
55
  new_rule.append([term_key, lhs_])
56
  ex_rules = []
 
64
  newDict[key] = tempo_dict[key]
65
  return newDict
66
 
67
def first(rule):
    """Compute FIRST(rule) for a sentential form `rule` (list of symbols).

    Returns a single terminal string, '#' (epsilon), a list of
    terminals/'#', or None when the leading symbol is unknown.  Reads the
    module-level globals `term_userdef` (terminals) and `diction` (grammar).
    """
    # A leading terminal or epsilon is its own FIRST set.
    if len(rule) != 0 and rule[0] in term_userdef:
        return rule[0]
    elif len(rule) != 0 and rule[0] == '#':
        return '#'
    if len(rule) != 0 and rule[0] in diction:
        fres = []
        # Union of FIRST over every production of the leading non-terminal.
        for prod in diction[rule[0]]:
            indivRes = first(prod)
            if type(indivRes) is list:
                fres.extend(indivRes)
            else:
                fres.append(indivRes)
        if '#' not in fres:
            return fres
        # The leading non-terminal can vanish: epsilon propagates to the
        # remainder of the rule instead of being reported unconditionally.
        fres.remove('#')
        if len(rule) > 1:
            ansNew = first(rule[1:])
            # Bug fix: this combined result used to be computed into a dead
            # `newList` variable and then discarded, so the FIRST of the
            # tail was lost and '#' was always re-added.
            if ansNew is not None:
                return fres + (ansNew if type(ansNew) is list else [ansNew])
            return fres
        # Nothing follows the vanishing symbol: the whole rule derives epsilon.
        fres.append('#')
        return fres
95
+
96
def follow(nt):
    """Compute FOLLOW(nt): the set of terminals (and possibly '$') that can
    appear immediately after non-terminal `nt` in some derivation.

    Scans every production in the module-level grammar `diction`; recurses
    into follow() of the defining non-terminal where needed.  Returns a list.
    """
    solset = set()
    # The start symbol is always followed by the end-of-input marker.
    if nt == start_symbol:
        solset.add('$')
    for curNT in diction:
        rhs = diction[curNT]
        for subrule in rhs:
            if nt in subrule:
                # Process every occurrence of nt inside this production.
                while nt in subrule:
                    index_nt = subrule.index(nt)
                    subrule = subrule[index_nt + 1:]
                    if len(subrule) != 0:
                        # Symbols follow nt: FOLLOW(nt) gains FIRST(tail).
                        res = first(subrule)
                        if res is not None:
                            if '#' in res:
                                # Tail can vanish, so FOLLOW(curNT) leaks in.
                                # NOTE(review): no curNT != nt guard on this
                                # recursive call — a production whose tail
                                # derives epsilon after nt == curNT could
                                # recurse forever; confirm inputs avoid this.
                                res.remove('#')
                                follow_res = follow(curNT)
                                if follow_res:
                                    res += follow_res
                        else:
                            res = []
                    else:
                        # nt ends the production: inherit FOLLOW(curNT);
                        # the guard prevents direct self-recursion.
                        if nt != curNT:
                            res = follow(curNT)
                            if res is None:
                                res = []
                        # NOTE(review): when nt == curNT and the tail is
                        # empty, `res` keeps its value from the previous
                        # iteration (or is unbound on the first pass) —
                        # verify this path is intended.
                    solset.update(res if type(res) is list else [res])
    return list(solset)
124
+
125
def computeAllFirsts():
    """Fill the module-level `firsts` mapping with the FIRST set of every
    non-terminal defined in `diction`."""
    global firsts
    for nonterminal in diction:
        collected = set()
        for production in diction[nonterminal]:
            outcome = first(production)
            if outcome is None:
                continue
            if isinstance(outcome, list):
                collected.update(outcome)
            else:
                collected.add(outcome)
        firsts[nonterminal] = collected
133
+
134
def computeAllFollows():
    """Fill the module-level `follows` mapping with the FOLLOW set of every
    non-terminal defined in `diction`."""
    global follows
    for nonterminal in diction:
        follows[nonterminal] = set(follow(nonterminal))
138
+
139
def createParseTable():
    """Build the LL(1) parse table from `diction`, `firsts` and `follows`.

    Returns (table, grammar_is_LL): table[lhs][terminal] holds the rule
    string "lhs -> rhs" ("" marks an error entry); grammar_is_LL becomes
    False when two rules compete for one cell (a FIRST/FOLLOW conflict).
    """
    global term_userdef, firsts, follows
    table = {}
    grammar_is_LL = True
    # Start every cell out empty ("" = error entry).
    for lhs in diction.keys():
        table[lhs] = {}
        for term in term_userdef + ['$']:
            table[lhs][term] = ""
    for lhs in diction:
        for rule in diction[lhs]:
            first_raw = first(rule)
            if first_raw is None:
                # Unknown leading symbol: nothing to place in the table.
                continue
            # Bug fix: first() may return a bare string (single terminal or
            # '#'); iterating that string directly split multi-character
            # terminals into characters, and list has no .update() method,
            # so the old '#' branch raised AttributeError.
            first_res = set(first_raw) if type(first_raw) is list else {first_raw}
            if '#' in first_res:
                # Epsilon-deriving rule: it is selected on FOLLOW(lhs) tokens.
                first_res.discard('#')
                first_res.update(follows[lhs])
            for term in first_res:
                if table[lhs].get(term, "") == "":
                    table[lhs][term] = f"{lhs} -> {' '.join(rule)}"
                else:
                    # Two rules claim the same cell: grammar is not LL(1).
                    grammar_is_LL = False
    return table, grammar_is_LL
160
+
161
def validateStringUsingStackBuffer(parse_table, input_string):
    """LL(1) table-driven validation of `input_string` (space-separated tokens).

    Returns "Valid String" or "Invalid String".  Uses the module-level
    `start_symbol` and `term_userdef`.
    """
    stack = [start_symbol, '$']
    # Buffer holds the remaining input reversed so buffer.pop() yields the
    # next lookahead token; '$' marks end of input.
    buffer = ['$'] + input_string.split()[::-1]
    while stack:
        top_stack = stack.pop(0)
        top_buffer = buffer.pop()
        if top_stack in term_userdef:
            # A terminal on the stack must match the input token exactly.
            if top_stack != top_buffer:
                return "Invalid String"
        elif top_stack == top_buffer:
            # End-of-input marker matched.
            continue
        else:
            # Non-terminal expansion only inspects the lookahead.
            # Bug fix: the token used to stay consumed here, silently
            # dropping one input symbol on every expansion.
            buffer.append(top_buffer)
            rule = parse_table.get(top_stack, {}).get(top_buffer, None)
            if rule:
                rule_rhs = rule.split('->')[1].strip().split()
                # Bug fix: an epsilon production expands to nothing instead
                # of pushing a literal '#' onto the stack (which could never
                # match and made every ε-grammar string "invalid").
                if rule_rhs != ['#']:
                    stack = rule_rhs + stack
            else:
                return "Invalid String"
    return "Valid String"
180
+
181
 
182
  # Main Streamlit App
183
# Streamlit UI: collect the grammar, then run the full LL(1) pipeline
# (left-recursion removal -> left factoring -> FIRST/FOLLOW -> parse table
# -> string validation) when the user clicks "Analyze Grammar".
st.title("LL(1) Grammar Analyzer")

st.subheader("Grammar Rules Input")
# These assignments rebind the module-level globals read by first()/follow().
start_symbol = st.text_input("Enter Start Symbol (Non-terminal)")
num_rules = st.number_input("Number of Grammar Rules", min_value=1, step=1)
rules = []
for i in range(num_rules):
    rule = st.text_input(f"Rule {i+1}")
    if rule:
        rules.append(rule)

# NOTE(review): tokens are split on ',' without stripping, so "a, b" yields
# ' b' with a leading space — may not match rule tokens; confirm inputs.
nonterm_userdef = st.text_input("Enter Non-Terminals (comma-separated)").split(',')
term_userdef = st.text_input("Enter Terminals (comma-separated)").split(',')


if st.button("Analyze Grammar"):
    # Reset shared state so repeated analyses don't accumulate stale rules.
    diction.clear()
    firsts.clear()
    follows.clear()

    # Parse "LHS -> alt1 | alt2" into {lhs: [[sym, ...], ...]}.
    for rule in rules:
        lhs, rhs = rule.split("->")
        lhs = lhs.strip()
        rhs_list = [x.strip().split() for x in rhs.split("|")]
        diction[lhs] = rhs_list

    st.subheader("Grammar After Removing Left Recursion")
    diction = removeLeftRecursion(diction)
    st.write(diction)

    st.subheader("Grammar After Left Factoring")
    diction = LeftFactoring(diction)
    st.write(diction)

    computeAllFirsts()
    st.subheader("FIRST Sets")
    st.write(firsts)

    computeAllFollows()
    st.subheader("FOLLOW Sets")
    st.write(follows)

    parse_table, grammar_is_LL = createParseTable()
    st.subheader("Parse Table")
    st.write(parse_table)

    if grammar_is_LL:
        st.success("The grammar is LL(1)")
    else:
        st.error("The grammar is not LL(1)")

    # NOTE(review): a text_input nested under st.button only survives one
    # script rerun — typing here re-runs the script with the button back to
    # False, so validation may never display; consider st.session_state.
    input_string = st.text_input("Enter String to Validate (space-separated)")
    if input_string:
        result = validateStringUsingStackBuffer(parse_table, input_string)
        st.subheader("Validation Result")
        st.write(result)