import os

import gradio as gr
import requests
from bs4 import BeautifulSoup
from groq import Groq
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory

# Read the Groq API key from the environment rather than hardcoding a secret.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

embedding_model = SentenceTransformerEmbeddings(
    model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)


def process_pdf_with_langchain(pdf_path):
    """Split a PDF into chunks, embed them, and return a FAISS retriever for RAG."""
    loader = PyPDFLoader(pdf_path)
    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    split_documents = text_splitter.split_documents(documents)
    vectorstore = FAISS.from_documents(split_documents, embedding_model)
    return vectorstore.as_retriever(search_kwargs={"k": 3})


def scrape_google_search(query, num_results=3):
    """Scrape the top Google results for a query.

    Fragile by nature: the 'tF2Cxc' CSS class is Google's current result
    container and may change without notice.
    """
    headers = {"User-Agent": "Mozilla/5.0"}
    search_url = f"https://www.google.com/search?q={query}"
    response = requests.get(search_url, headers=headers, timeout=10)
    soup = BeautifulSoup(response.text, "html.parser")

    results = []
    for g in soup.find_all("div", class_="tF2Cxc")[:num_results]:
        title = g.find("h3")
        link = g.find("a")
        if title and link:  # guard against markup changes
            results.append(f"{title.text}: {link['href']}")
    return "\n".join(results)


def generate_response(query, retriever=None, use_web_search=False):
    """Generate a response, optionally grounded in PDF chunks and web results."""
    knowledge = ""

    if retriever:
        relevant_docs = retriever.get_relevant_documents(query)
        knowledge += "\n".join(doc.page_content for doc in relevant_docs)

    if use_web_search:
        web_results = scrape_google_search(query)
        knowledge += f"\n\nWeb Search Results:\n{web_results}"

    # With return_messages=True this is a list of message objects, not a string,
    # so format it explicitly instead of interpolating the raw list.
    chat_history = memory.load_memory_variables({}).get("chat_history", [])

    context = (
        "This is a conversation with ParvizGPT, an AI model designed by Amir Mahdi Parviz "
        "to help with tasks such as answering questions in Persian, providing "
        "recommendations, and decision-making."
    )
    if knowledge:
        context += f"\n\nRelevant Knowledge:\n{knowledge}"
    if chat_history:
        history_text = "\n".join(f"{m.type}: {m.content}" for m in chat_history)
        context += f"\n\nChat History:\n{history_text}"
    context += f"\n\nYou: {query}\nParvizGPT:"

    chat_completion = client.chat.completions.create(
        messages=[{"role": "user", "content": context}],
        model="llama-3.3-70b-versatile",
    )
    response = chat_completion.choices[0].message.content.strip()
    memory.save_context({"input": query}, {"output": response})
    return response


def gradio_interface(user_message, pdf_file=None, enable_web_search=False):
    """Gradio callback: rebuild the retriever when a PDF is uploaded, then answer."""
    global retriever
    if pdf_file is not None:
        try:
            # gr.File(type="filepath") passes a path string, not a file object,
            # so use it directly rather than pdf_file.name.
            retriever = process_pdf_with_langchain(pdf_file)
        except Exception as e:
            return f"Error processing PDF: {e}"
    return generate_response(user_message, retriever=retriever, use_web_search=enable_web_search)


def clear_memory():
    memory.clear()
    return "Memory cleared!"
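
# A quick sanity check outside the UI (hypothetical file name; requires
# GROQ_API_KEY to be set in the environment):
#   retriever = process_pdf_with_langchain("example.pdf")
#   print(generate_response("What does the document cover?", retriever=retriever))
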
retriever = None

with gr.Blocks() as interface:
    gr.Markdown("## ParvizGPT with Memory and Web Search Toggle")

    with gr.Row():
        user_message = gr.Textbox(
            label="Your Question", placeholder="Type your question here...", lines=1
        )
        submit_btn = gr.Button("Submit")

    with gr.Row():
        pdf_file = gr.File(label="Upload PDF for Context (Optional)", type="filepath")
        enable_web_search = gr.Checkbox(label="Enable Web Search", value=False)

    with gr.Row():
        clear_memory_btn = gr.Button("Clear Memory")

    response_output = gr.Textbox(
        label="Response", placeholder="ParvizGPT's response will appear here."
    )

    submit_btn.click(
        gradio_interface,
        inputs=[user_message, pdf_file, enable_web_search],
        outputs=response_output,
    )
    clear_memory_btn.click(clear_memory, inputs=[], outputs=response_output)

interface.launch()
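
# Assumed setup, inferred from the imports above (exact package names and
# versions may differ in your environment):
#   pip install gradio langchain pypdf sentence-transformers faiss-cpu groq requests beautifulsoup4
#   export GROQ_API_KEY=<your key>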