import streamlit as st
# Initial setup for grammar input
rules = []
nonterm_userdef = []
term_userdef = []
diction = {}
firsts = {}
follows = {}
start_symbol = None
# Function to remove left recursion
def removeLeftRecursion(rulesDiction):
    store = {}
    for lhs in rulesDiction:
        alphaRules = []
        betaRules = []
        allrhs = rulesDiction[lhs]
        for subrhs in allrhs:
            if subrhs[0] == lhs:
                alphaRules.append(subrhs[1:])
            else:
                betaRules.append(subrhs)
        if len(alphaRules) != 0:
            lhs_ = lhs + "'"
            while lhs_ in rulesDiction.keys() or lhs_ in store.keys():
                lhs_ += "'"
            for b in range(len(betaRules)):
                betaRules[b].append(lhs_)
            rulesDiction[lhs] = betaRules
            for a in range(len(alphaRules)):
                alphaRules[a].append(lhs_)
            alphaRules.append(['#'])
            store[lhs_] = alphaRules
    for left in store:
        rulesDiction[left] = store[left]
    return rulesDiction
# Function to perform left factoring
def LeftFactoring(rulesDiction):
    newDict = {}
    for lhs in rulesDiction:
        allrhs = rulesDiction[lhs]
        temp = dict()
        for subrhs in allrhs:
            if subrhs[0] not in list(temp.keys()):
                temp[subrhs[0]] = [subrhs]
            else:
                temp[subrhs[0]].append(subrhs)
        new_rule = []
        tempo_dict = {}
        for term_key in temp:
            allStartingWithTermKey = temp[term_key]
            if len(allStartingWithTermKey) > 1:
                lhs_ = lhs + "'"
                while lhs_ in rulesDiction.keys() or lhs_ in tempo_dict.keys():
                    lhs_ += "'"
                new_rule.append([term_key, lhs_])
                ex_rules = []
                for g in temp[term_key]:
                    ex_rules.append(g[1:])
                tempo_dict[lhs_] = ex_rules
            else:
                new_rule.append(allStartingWithTermKey[0])
        newDict[lhs] = new_rule
        for key in tempo_dict:
            newDict[key] = tempo_dict[key]
    return newDict
# Function to calculate FIRST set
def first(rule, diction, term_userdef):
    if len(rule) != 0 and rule[0] in term_userdef:
        return rule[0]
    elif len(rule) != 0 and rule[0] == '#':
        return '#'
    if len(rule) != 0 and rule[0] in diction:
        fres = []
        rhs_rules = diction[rule[0]]
        for itr in rhs_rules:
            indivRes = first(itr, diction, term_userdef)
            if isinstance(indivRes, list):
                fres.extend(indivRes)
            else:
                fres.append(indivRes)
        if '#' not in fres:
            return fres
        else:
            fres.remove('#')
            if len(rule) > 1:
                # Leading symbol can derive epsilon: also take FIRST of the rest
                newList = fres
                ansNew = first(rule[1:], diction, term_userdef)
                if ansNew is not None:
                    if isinstance(ansNew, list):
                        newList = fres + ansNew
                    else:
                        newList = fres + [ansNew]
                return newList
            # Whole rule can vanish, so epsilon stays in FIRST
            fres.append('#')
            return fres
# Function to calculate FOLLOW set
def follow(nt, diction, start_symbol):
    solset = set()
    if nt == start_symbol:
        solset.add('$')
    for curNT in diction:
        rhs = diction[curNT]
        for subrule in rhs:
            if nt in subrule:
                while nt in subrule:
                    index_nt = subrule.index(nt)
                    subrule = subrule[index_nt + 1:]
                    res = []
                    if len(subrule) != 0:
                        res = first(subrule, diction, term_userdef)
                        if res is None:
                            res = []
                        elif isinstance(res, str):
                            res = [res]
                        if '#' in res:
                            # Remainder can vanish: add FOLLOW of the defining LHS
                            res.remove('#')
                            follow_res = follow(curNT, diction, start_symbol)
                            if follow_res:
                                res += follow_res
                    else:
                        if nt != curNT:
                            res = follow(curNT, diction, start_symbol)
                            if res is None:
                                res = []
                    solset.update(res if isinstance(res, list) else [res])
    return list(solset)
# Compute FIRST for all non-terminals
def computeAllFirsts(diction, term_userdef):
    firsts = {}
    for y in diction.keys():
        firsts[y] = set()
        for sub in diction[y]:
            result = first(sub, diction, term_userdef)
            if result is not None:
                firsts[y].update(result if isinstance(result, list) else [result])
    return firsts
# Compute FOLLOW for all non-terminals
def computeAllFollows(diction, start_symbol):
    follows = {}
    for NT in diction.keys():
        follows[NT] = set(follow(NT, diction, start_symbol))
    return follows
# Parse table creation function
def createParseTable(diction, term_userdef, firsts, follows):
    table = {}
    grammar_is_LL = True
    for lhs in diction.keys():
        table[lhs] = {}
        for term in term_userdef + ['$']:  # Include end-of-input symbol
            table[lhs][term] = ""  # Default: empty (error) entries
    # Populate the parse table
    for lhs in diction:
        for rule in diction[lhs]:
            first_res = first(rule, diction, term_userdef)
            # first() may return a single symbol or a list; normalise to a set
            first_res = set(first_res) if isinstance(first_res, list) else {first_res}
            if '#' in first_res:  # Epsilon handling: use FOLLOW(lhs) instead
                first_res.discard('#')
                first_res.update(follows.get(lhs, []))
            for term in first_res:
                if table[lhs][term] == "":
                    table[lhs][term] = f"{lhs} -> {' '.join(rule)}"
                else:
                    grammar_is_LL = False
                    st.error(f"Conflict detected in parse table at [{lhs}, {term}]")
    return table, grammar_is_LL
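# Stack-and-buffer validation of an input string against the parse table.
# NOTE: the original implementation of this routine is missing, so the version
# below is a minimal sketch. It assumes the module-level start_symbol and
# term_userdef values and the "lhs -> rhs" cell format produced by
# createParseTable above.
def validateStringUsingStackBuffer(parse_table, input_string):
    buffer = input_string.split() + ['$']  # input buffer with end marker
    stack = ['$', start_symbol]            # parsing stack, start symbol on top
    while stack and buffer:
        top = stack[-1]
        lookahead = buffer[0]
        if top == '$' and lookahead == '$':
            return "Valid String! Accepted by the grammar."
        if top in term_userdef or top == '$':
            # Terminal on top of the stack must match the lookahead
            if top == lookahead:
                stack.pop()
                buffer.pop(0)
            else:
                return f"Invalid String! Expected '{top}' but found '{lookahead}'."
        else:
            # Non-terminal on top: expand it using the parse table entry
            entry = parse_table.get(top, {}).get(lookahead, "")
            if entry == "":
                return f"Invalid String! No parse table entry for [{top}, {lookahead}]."
            rhs = entry.split("->")[1].split()
            stack.pop()
            if rhs != ['#']:  # epsilon production pushes nothing
                stack.extend(reversed(rhs))
    return "Invalid String! Parsing ended unexpectedly."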
# Streamlit Interface
st.title("LL(1) Grammar Analyzer")
st.subheader("Grammar Rules Input")
start_symbol = st.text_input("Enter Start Symbol (Non-terminal)").strip()
num_rules = st.number_input("Number of Grammar Rules", min_value=1, step=1)
rules = []
for i in range(int(num_rules)):
    rule = st.text_input(f"Rule {i+1}")
    if rule:
        rules.append(rule)
# Strip stray whitespace so symbol names match exactly during parsing
nonterm_userdef = [x.strip() for x in st.text_input("Enter Non-Terminals (comma-separated)").split(',') if x.strip()]
term_userdef = [x.strip() for x in st.text_input("Enter Terminals (comma-separated)").split(',') if x.strip()]
if st.button("Analyze Grammar"):
    diction.clear()
    firsts.clear()
    follows.clear()
    for rule in rules:
        lhs, rhs = rule.split("->")
        lhs = lhs.strip()
        rhs_list = [x.strip().split() for x in rhs.split("|")]
        diction[lhs] = rhs_list
    st.subheader("Grammar After Removing Left Recursion")
    diction = removeLeftRecursion(diction)
    st.write(diction)
    st.subheader("Grammar After Left Factoring")
    diction = LeftFactoring(diction)
    st.write(diction)
    firsts = computeAllFirsts(diction, term_userdef)
    st.subheader("FIRST Sets")
    st.write(firsts)
    follows = computeAllFollows(diction, start_symbol)
    st.subheader("FOLLOW Sets")
    st.write(follows)
    parse_table, grammar_is_LL = createParseTable(diction, term_userdef, firsts, follows)
    st.subheader("Parse Table")
    st.write(parse_table)
    if grammar_is_LL:
        st.success("The grammar is LL(1)")
    else:
        st.error("The grammar is not LL(1)")
    # Keep the parse table across reruns so string validation below still works
    st.session_state["parse_table"] = parse_table
# String validation lives outside the button block so the text input survives
# the rerun Streamlit triggers when the user types into it
if "parse_table" in st.session_state:
    input_string = st.text_input("Enter String to Validate (space-separated)")
    if input_string:
        result = validateStringUsingStackBuffer(st.session_state["parse_table"], input_string)
        st.subheader("Validation Result")
        st.write(result)