import streamlit as st

# Initial setup for grammar input
rules = []
nonterm_userdef = []
term_userdef = []
diction = {}
firsts = {}
follows = {}
start_symbol = None


# Function to remove immediate left recursion:
# A -> A a | b   becomes   A -> b A'   and   A' -> a A' | #
def removeLeftRecursion(rulesDiction):
    store = {}
    for lhs in rulesDiction:
        alphaRules = []  # alternatives that start with lhs (left-recursive)
        betaRules = []   # alternatives that do not
        allrhs = rulesDiction[lhs]
        for subrhs in allrhs:
            if subrhs[0] == lhs:
                alphaRules.append(subrhs[1:])
            else:
                betaRules.append(subrhs)
        if len(alphaRules) != 0:
            # Invent a fresh non-terminal name such as A'
            lhs_ = lhs + "'"
            while lhs_ in rulesDiction.keys() or lhs_ in store.keys():
                lhs_ += "'"
            for b in range(len(betaRules)):
                betaRules[b].append(lhs_)
            rulesDiction[lhs] = betaRules
            for a in range(len(alphaRules)):
                alphaRules[a].append(lhs_)
            alphaRules.append(['#'])
            store[lhs_] = alphaRules
    for left in store:
        rulesDiction[left] = store[left]
    return rulesDiction


# Function to perform left factoring:
# A -> a b | a c   becomes   A -> a A'   and   A' -> b | c
def LeftFactoring(rulesDiction):
    newDict = {}
    for lhs in rulesDiction:
        allrhs = rulesDiction[lhs]
        # Group the alternatives by their first symbol
        temp = dict()
        for subrhs in allrhs:
            if subrhs[0] not in list(temp.keys()):
                temp[subrhs[0]] = [subrhs]
            else:
                temp[subrhs[0]].append(subrhs)
        new_rule = []
        tempo_dict = {}
        for term_key in temp:
            allStartingWithTermKey = temp[term_key]
            if len(allStartingWithTermKey) > 1:
                # Several alternatives share a prefix: factor it into a new non-terminal
                lhs_ = lhs + "'"
                while lhs_ in rulesDiction.keys() or lhs_ in tempo_dict.keys():
                    lhs_ += "'"
                new_rule.append([term_key, lhs_])
                ex_rules = []
                for g in temp[term_key]:
                    # If the prefix was the whole alternative, the new
                    # non-terminal must be able to derive epsilon
                    ex_rules.append(g[1:] if len(g) > 1 else ['#'])
                tempo_dict[lhs_] = ex_rules
            else:
                new_rule.append(allStartingWithTermKey[0])
        newDict[lhs] = new_rule
        for key in tempo_dict:
            newDict[key] = tempo_dict[key]
    return newDict


# Function to calculate the FIRST set of a sentential form (a list of symbols).
# Returns a single terminal, '#', a list of terminals, or None for an empty/unknown rule.
def first(rule):
    if len(rule) != 0 and rule[0] in term_userdef:
        return rule[0]
    elif len(rule) != 0 and rule[0] == '#':
        return '#'
    if len(rule) != 0 and rule[0] in diction:
        fres = []
        rhs_rules = diction[rule[0]]
        for itr in rhs_rules:
            indivRes = first(itr)
            if type(indivRes) is list:
                fres.extend(indivRes)
            else:
                fres.append(indivRes)
        if '#' not in fres:
            return fres
        # The leading non-terminal can derive epsilon, so look past it
        fres = [f for f in fres if f != '#']
        if len(rule) > 1:
            ansNew = first(rule[1:])
            if ansNew is not None:
                if type(ansNew) is list:
                    return fres + ansNew
                return fres + [ansNew]
            return fres
        # Nothing follows the vanishing non-terminal, so epsilon stays in FIRST
        fres.append('#')
        return fres


# Function to calculate the FOLLOW set of a non-terminal
def follow(nt):
    solset = set()
    if nt == start_symbol:
        solset.add('$')
    for curNT in diction:
        rhs = diction[curNT]
        for subrule in rhs:
            # nt may occur more than once in the same right-hand side
            while nt in subrule:
                index_nt = subrule.index(nt)
                subrule = subrule[index_nt + 1:]
                if len(subrule) != 0:
                    # FOLLOW(nt) includes FIRST of whatever comes after nt
                    res = first(subrule)
                    if res is None:
                        res = []
                    elif type(res) is not list:
                        res = [res]
                    if '#' in res:
                        # The trailing part can vanish, so FOLLOW(curNT) applies as well
                        res = [x for x in res if x != '#']
                        if nt != curNT:
                            res += follow(curNT)
                else:
                    # nt sits at the end of the production: FOLLOW(curNT) applies
                    res = []
                    if nt != curNT:
                        res = follow(curNT)
                solset.update(res)
    return list(solset)


# Compute FIRST for all non-terminals
def computeAllFirsts():
    global firsts
    for y in diction.keys():
        firsts[y] = set()
        for sub in diction[y]:
            result = first(sub)
            if result is not None:
                firsts[y].update(result if type(result) is list else [result])


# Compute FOLLOW for all non-terminals
def computeAllFollows():
    global follows
    for NT in diction.keys():
        follows[NT] = set(follow(NT))


# Parse table creation function: fills the LL(1) predictive table and reports
# whether any cell receives more than one rule (i.e. the grammar is not LL(1))
def createParseTable():
    global term_userdef, firsts, follows
    table = {}
    grammar_is_LL = True
    # Start with an empty cell for every (non-terminal, terminal or '$') pair
    for lhs in diction.keys():
        table[lhs] = {}
        for term in term_userdef + ['$']:
            table[lhs][term] = ""
    for lhs in diction:
        for rule in diction[lhs]:
            res = first(rule)
            first_res = set(res if type(res) is list else [res]) if res is not None else set()
            if '#' in first_res:
                # The rule can derive epsilon: place it under FOLLOW(lhs) instead
                first_res.discard('#')
                first_res.update(follows[lhs])
            for term in first_res:
                if table[lhs][term] == "":
                    table[lhs][term] = f"{lhs} -> {' '.join(rule)}"
                else:
                    # Two rules compete for the same cell
                    grammar_is_LL = False
    return table, grammar_is_LL
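
# A minimal offline sketch (not invoked by the Streamlit app): it shows how the
# functions above fit together, using the classic expression grammar as sample
# data. The grammar and the helper name _demo_expression_grammar are illustrative
# choices, not something the UI below depends on.
def _demo_expression_grammar():
    """Build the LL(1) table for a sample expression grammar and return it."""
    global start_symbol, term_userdef
    term_userdef = ['+', '*', '(', ')', 'id']
    start_symbol = 'E'
    diction.clear()
    diction.update({
        'E':  [['T', "E'"]],
        "E'": [['+', 'T', "E'"], ['#']],
        'T':  [['F', "T'"]],
        "T'": [['*', 'F', "T'"], ['#']],
        'F':  [['(', 'E', ')'], ['id']],
    })
    computeAllFirsts()
    computeAllFollows()
    # For this grammar the second return value should be True (it is LL(1))
    return createParseTable()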
# Function to validate an input string with an explicit stack and input buffer,
# driven by the predictive parse table
def validateStringUsingStackBuffer(parse_table, input_string):
    stack = [start_symbol, '$']
    buffer = ['$'] + input_string.split()[::-1]
    while stack:
        top_stack = stack.pop(0)
        top_buffer = buffer.pop()
        if top_stack in term_userdef:
            # A terminal on the stack must match the next input symbol exactly
            if top_stack != top_buffer:
                return "Invalid String"
        elif top_stack == top_buffer:
            # Both are '$': the end markers match
            continue
        else:
            # A non-terminal on the stack: the lookahead is not consumed yet,
            # so put it back and expand using the parse table
            buffer.append(top_buffer)
            rule = parse_table.get(top_stack, {}).get(top_buffer, None)
            if rule:
                rule_rhs = rule.split('->')[1].strip().split()
                if rule_rhs != ['#']:
                    # Epsilon productions push nothing onto the stack
                    stack = rule_rhs + stack
            else:
                return "Invalid String"
    return "Valid String"


# Streamlit Interface
st.title("LL(1) Grammar Analyzer")
st.subheader("Grammar Rules Input")
start_symbol = st.text_input("Enter Start Symbol (Non-terminal)")
num_rules = st.number_input("Number of Grammar Rules", min_value=1, step=1)
rules = []
for i in range(int(num_rules)):
    rule = st.text_input(f"Rule {i+1}")
    if rule:
        rules.append(rule)
nonterm_userdef = [x.strip() for x in st.text_input("Enter Non-Terminals (comma-separated)").split(',') if x.strip()]
term_userdef = [x.strip() for x in st.text_input("Enter Terminals (comma-separated)").split(',') if x.strip()]

if st.button("Analyze Grammar"):
    diction.clear()
    firsts.clear()
    follows.clear()
    # Parse rules of the form "A -> B c | d" into the internal dictionary
    for rule in rules:
        lhs, rhs = rule.split("->")
        lhs = lhs.strip()
        rhs_list = [x.strip().split() for x in rhs.split("|")]
        diction[lhs] = rhs_list

    st.subheader("Grammar After Removing Left Recursion")
    diction = removeLeftRecursion(diction)
    st.write(diction)

    st.subheader("Grammar After Left Factoring")
    diction = LeftFactoring(diction)
    st.write(diction)

    computeAllFirsts()
    st.subheader("FIRST Sets")
    st.write({nt: sorted(symbols) for nt, symbols in firsts.items()})

    computeAllFollows()
    st.subheader("FOLLOW Sets")
    st.write({nt: sorted(symbols) for nt, symbols in follows.items()})

    parse_table, grammar_is_LL = createParseTable()
    st.subheader("Parse Table")
    st.write(parse_table)

    if grammar_is_LL:
        st.success("The grammar is LL(1)")
    else:
        st.error("The grammar is not LL(1)")

    # Persist the table so validation still works after Streamlit reruns the script
    st.session_state["parse_table"] = parse_table

# The validation widgets must live outside the button block; otherwise they would
# disappear on the rerun triggered by typing into the text box
if "parse_table" in st.session_state:
    input_string = st.text_input("Enter String to Validate (space-separated)")
    if input_string:
        result = validateStringUsingStackBuffer(st.session_state["parse_table"], input_string)
        st.subheader("Validation Result")
        st.write(result)
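
# Example session (illustrative sample input only, nothing the app ships with):
# entering the left-recursive grammar
#
#   Start symbol : E
#   Rules        : E -> E + T | T
#                  T -> T * F | F
#                  F -> ( E ) | id
#   Non-terminals: E,T,F
#   Terminals    : +,*,(,),id
#
# lets the analyzer rewrite it into E -> T E', E' -> + T E' | # (and T likewise)
# before computing FIRST/FOLLOW and the parse table; the grammar should then be
# reported as LL(1), and a test string such as "id + id * id" should come back
# as "Valid String".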