# cc1/app.py
import streamlit as st
# Initial setup for grammar input
rules = []
nonterm_userdef = []
term_userdef = []
diction = {}
firsts = {}
follows = {}
start_symbol = None
# Function to remove left recursion
def removeLeftRecursion(rulesDiction):
    store = {}
    for lhs in rulesDiction:
        alphaRules = []
        betaRules = []
        allrhs = rulesDiction[lhs]
        for subrhs in allrhs:
            if subrhs[0] == lhs:
                alphaRules.append(subrhs[1:])
            else:
                betaRules.append(subrhs)
        if len(alphaRules) != 0:
            lhs_ = lhs + "'"
            while lhs_ in rulesDiction.keys() or lhs_ in store.keys():
                lhs_ += "'"
            for b in range(len(betaRules)):
                betaRules[b].append(lhs_)
            rulesDiction[lhs] = betaRules
            for a in range(len(alphaRules)):
                alphaRules[a].append(lhs_)
            alphaRules.append(['#'])
            store[lhs_] = alphaRules
    for left in store:
        rulesDiction[left] = store[left]
    return rulesDiction
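# Illustrative example (hypothetical grammar, not from the original input): with
# '#' standing for epsilon, as used throughout this file,
#   removeLeftRecursion({'E': [['E', '+', 'T'], ['T']]})
# returns
#   {'E': [['T', "E'"]], "E'": [['+', 'T', "E'"], ['#']]}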
# Function to perform left factoring
def LeftFactoring(rulesDiction):
    newDict = {}
    for lhs in rulesDiction:
        allrhs = rulesDiction[lhs]
        temp = dict()
        for subrhs in allrhs:
            if subrhs[0] not in list(temp.keys()):
                temp[subrhs[0]] = [subrhs]
            else:
                temp[subrhs[0]].append(subrhs)
        new_rule = []
        tempo_dict = {}
        for term_key in temp:
            allStartingWithTermKey = temp[term_key]
            if len(allStartingWithTermKey) > 1:
                lhs_ = lhs + "'"
                while lhs_ in rulesDiction.keys() or lhs_ in tempo_dict.keys():
                    lhs_ += "'"
                new_rule.append([term_key, lhs_])
                ex_rules = []
                for g in temp[term_key]:
                    ex_rules.append(g[1:])
                tempo_dict[lhs_] = ex_rules
            else:
                new_rule.append(allStartingWithTermKey[0])
        newDict[lhs] = new_rule
        for key in tempo_dict:
            newDict[key] = tempo_dict[key]
    return newDict
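# Illustrative example (hypothetical grammar, not from the original input):
# factoring out the common prefix 'a',
#   LeftFactoring({'A': [['a', 'B'], ['a', 'C'], ['d']]})
# returns
#   {'A': [['a', "A'"], ['d']], "A'": [['B'], ['C']]}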
# Function to calculate FIRST set
def first(rule, diction, term_userdef):
    if len(rule) != 0 and rule[0] in term_userdef:
        return rule[0]
    elif len(rule) != 0 and rule[0] == '#':
        return '#'
    if len(rule) != 0 and rule[0] in diction:
        fres = []
        rhs_rules = diction[rule[0]]
        for itr in rhs_rules:
            indivRes = first(itr, diction, term_userdef)
            if isinstance(indivRes, list):
                fres.extend(indivRes)
            else:
                fres.append(indivRes)
        if '#' not in fres:
            return fres
        else:
            # rule[0] can derive epsilon, so FIRST of the remaining symbols is included
            fres.remove('#')
            if len(rule) > 1:
                ansNew = first(rule[1:], diction, term_userdef)
                if ansNew is not None:
                    if isinstance(ansNew, list):
                        return fres + ansNew
                    return fres + [ansNew]
                return fres
            # the rule is a single nullable symbol, so epsilon stays in its FIRST set
            fres.append('#')
            return fres
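# Illustrative example (hypothetical grammar, not from the original input): with
#   diction = {'E': [['T', "E'"]], "E'": [['+', 'T', "E'"], ['#']], 'T': [['id']]}
# and term_userdef = ['+', 'id'],
#   first(['T', "E'"], diction, term_userdef)  ->  ['id']
#   first(["E'"], diction, term_userdef)       ->  ['+', '#']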
# Function to calculate FOLLOW set
def follow(nt, diction, start_symbol):
    solset = set()
    if nt == start_symbol:
        solset.add('$')
    for curNT in diction:
        rhs = diction[curNT]
        for subrule in rhs:
            if nt in subrule:
                while nt in subrule:
                    index_nt = subrule.index(nt)
                    subrule = subrule[index_nt + 1:]
                    if len(subrule) != 0:
                        res = first(subrule, diction, term_userdef)
                        if res is None:
                            res = []
                        elif not isinstance(res, list):
                            res = [res]
                        if '#' in res:
                            # the suffix after nt can vanish, so FOLLOW(curNT) also applies
                            res.remove('#')
                            if nt != curNT:
                                follow_res = follow(curNT, diction, start_symbol)
                                if follow_res:
                                    res += follow_res
                    else:
                        # nt is at the end of the production, so FOLLOW(curNT) applies
                        res = []
                        if nt != curNT:
                            follow_res = follow(curNT, diction, start_symbol)
                            if follow_res:
                                res = follow_res
                    solset.update(res)
    return list(solset)
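# Illustrative example (hypothetical grammar, not from the original input): with
# the diction from the FIRST example above and start symbol 'E',
#   follow('E', diction, 'E')  ->  ['$']
#   follow('T', diction, 'E')  ->  ['+', '$']   (order may vary; a set is used)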
# Compute FIRST for all non-terminals
def computeAllFirsts(diction, term_userdef):
    firsts = {}
    for y in diction.keys():
        firsts[y] = set()
        for sub in diction[y]:
            result = first(sub, diction, term_userdef)
            if result is not None:
                firsts[y].update(result if isinstance(result, list) else [result])
    return firsts
# Compute FOLLOW for all non-terminals
def computeAllFollows(diction, start_symbol):
    follows = {}
    for NT in diction.keys():
        follows[NT] = set(follow(NT, diction, start_symbol))
    return follows
# Parse table creation function
def createParseTable(diction, term_userdef, firsts, follows):
    table = {}
    grammar_is_LL = True
    for lhs in diction.keys():
        table[lhs] = {}
        for term in term_userdef + ['$']:  # Include end of input symbol
            table[lhs][term] = ""  # Set default empty entries
    # Populate the parse table
    for lhs in diction:
        for rule in diction[lhs]:
            first_res = first(rule, diction, term_userdef)
            if first_res is None:
                first_res = []
            first_res = set(first_res if isinstance(first_res, list) else [first_res])
            if '#' in first_res:  # Epsilon handling: place the rule under FOLLOW(lhs)
                first_res.remove('#')
                first_res.update(follows.get(lhs, []))
            for term in first_res:
                if table[lhs][term] == "":
                    table[lhs][term] = f"{lhs} -> {' '.join(rule)}"
                else:
                    grammar_is_LL = False
                    st.error(f"Conflict detected in parse table at [{lhs}, {term}]")
    return table, grammar_is_LL
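# The interface code below calls validateStringUsingStackBuffer, but that function
# is not defined anywhere in this file as shown. The following is a minimal sketch
# of a standard table-driven LL(1) parser (stack plus input buffer), assuming '$'
# marks end of input, '#' denotes epsilon, table entries are strings like "A -> x y",
# and the global start_symbol holds the user-entered start symbol.
def validateStringUsingStackBuffer(parse_table, input_string):
    buffer = input_string.split() + ['$']   # input buffer terminated by '$'
    stack = ['$', start_symbol]             # parse stack with the start symbol on top
    while stack and buffer:
        top = stack.pop()
        lookahead = buffer[0]
        if top == '$' and lookahead == '$':
            return "Valid String!"
        if top == lookahead:                # terminal on top matches the lookahead
            buffer.pop(0)
            continue
        entry = parse_table.get(top, {}).get(lookahead, "")
        if not entry:                       # empty cell: no applicable production
            return "Invalid String! Parsing failed."
        rhs = entry.split("->")[1].split()  # right-hand side of the chosen production
        if rhs != ['#']:                    # epsilon pushes nothing onto the stack
            stack.extend(reversed(rhs))     # push RHS reversed so its leftmost symbol is on top
    return "Invalid String! Parsing failed."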
# Streamlit Interface
st.title("LL(1) Grammar Analyzer")
st.subheader("Grammar Rules Input")
start_symbol = st.text_input("Enter Start Symbol (Non-terminal)")
num_rules = st.number_input("Number of Grammar Rules", min_value=1, step=1)
rules = []
for i in range(int(num_rules)):
    rule = st.text_input(f"Rule {i+1}")
    if rule:
        rules.append(rule)
nonterm_input = st.text_input("Enter Non-Terminals (comma-separated)")
nonterm_userdef = [x.strip() for x in nonterm_input.split(',') if x.strip()]
term_input = st.text_input("Enter Terminals (comma-separated)")
term_userdef = [x.strip() for x in term_input.split(',') if x.strip()]
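# Illustrative inputs (hypothetical, for demonstration only): start symbol "E",
# rules such as "E -> T E'", "E' -> + T E' | #", "T -> id",
# non-terminals "E,E',T" and terminals "+,id" ('#' denotes epsilon inside a rule).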
if st.button("Analyze Grammar"):
    diction.clear()
    firsts.clear()
    follows.clear()
    for rule in rules:
        lhs, rhs = rule.split("->")
        lhs = lhs.strip()
        rhs_list = [x.strip().split() for x in rhs.split("|")]
        diction[lhs] = rhs_list
    st.subheader("Grammar After Removing Left Recursion")
    diction = removeLeftRecursion(diction)
    st.write(diction)
    st.subheader("Grammar After Left Factoring")
    diction = LeftFactoring(diction)
    st.write(diction)
    firsts = computeAllFirsts(diction, term_userdef)
    st.subheader("FIRST Sets")
    st.write(firsts)
    follows = computeAllFollows(diction, start_symbol)
    st.subheader("FOLLOW Sets")
    st.write(follows)
    parse_table, grammar_is_LL = createParseTable(diction, term_userdef, firsts, follows)
    st.subheader("Parse Table")
    st.write(parse_table)
    if grammar_is_LL:
        st.success("The grammar is LL(1)")
    else:
        st.error("The grammar is not LL(1)")
    input_string = st.text_input("Enter String to Validate (space-separated)")
    if input_string:
        result = validateStringUsingStackBuffer(parse_table, input_string)
        st.subheader("Validation Result")
        st.write(result)
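# Illustrative run (hypothetical): for the example grammar sketched above, the
# string "id + id" would be reported as valid, while "id +" would be rejected.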