# Smart FAQ Search for SARAS AI Institute.
#
# Pipeline: load a spaCy model with word vectors, embed every FAQ question
# once, then rank FAQs against the user's query by cosine similarity and
# show the top matches in a Streamlit UI.
#
# Local setup only (Hugging Face Spaces installs via 'requirements.txt'):
#   pip install streamlit spacy numpy
import json

import numpy as np
import spacy
import streamlit as st
from numpy.linalg import norm


@st.cache_resource  # model load is expensive; cache it across Streamlit reruns
def load_model():
    """Load and return the medium English spaCy model (includes word vectors)."""
    return spacy.load("en_core_web_md")


@st.cache_data  # parse + embed the FAQ file only once, not on every rerun
def load_faq_docs():
    """Read faqs.json and return a flat list of (question, answer, vector) triples.

    Expected schema: {category: [{"question": str, "answer": str}, ...]}.
    Question vectors are precomputed here so each search only embeds the query.
    """
    with open('faqs.json', 'r', encoding='utf-8') as f:
        faqs = json.load(f)
    model = load_model()
    docs = []
    for faq_list in faqs.values():  # category names are not used for ranking
        for faq in faq_list:
            question = faq['question']
            answer = faq['answer']
            docs.append((question, answer, model(question).vector))
    return docs


def _cosine_similarity(a, b):
    """Cosine similarity of two 1-D vectors.

    Returns 0.0 when either vector is all-zero (e.g. a query made entirely
    of out-of-vocabulary words) instead of dividing by zero.
    """
    denom = norm(a) * norm(b)
    return float(np.dot(a, b) / denom) if denom else 0.0


def find_most_relevant_faq_optimized(query, faq_docs, top_k=3):
    """Find the most relevant FAQs for `query` by semantic similarity.

    Parameters:
        query: free-text user question.
        faq_docs: list of (question, answer, vector) triples.
        top_k: number of results to return (default 3, matching the UI).

    Returns a list of (question, answer, score) sorted by score descending.
    """
    query_vector = nlp(query).vector
    scored = [
        (question, answer, _cosine_similarity(query_vector, faq_vector))
        for question, answer, faq_vector in faq_docs
    ]
    scored.sort(key=lambda item: item[2], reverse=True)
    return scored[:top_k]


# ---- Module-level setup (names kept for compatibility with the original) ----
nlp = load_model()
faq_docs = load_faq_docs()

# ---- Streamlit UI ----
st.title("Smart FAQ Search - SARAS AI Institute")
st.markdown("### Find Answers to Your Questions Instantly")

query = st.text_input("Enter your question here:")

if query:
    top_faqs = find_most_relevant_faq_optimized(query, faq_docs)
    st.markdown("### Top Relevant FAQs:")
    for i, (question, answer, score) in enumerate(top_faqs, 1):
        st.write(f"**{i}. {question}**")
        st.write(f"*Answer:* {answer}")
        st.write(f"**Similarity Score:** {score:.2f}")
else:
    st.write("Please enter a query to search for relevant FAQs.")