import gradio as gr
from langchain_community.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_openai import ChatOpenAI
from langchain import hub
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
import os

# Identify our HTTP requests to scraped sites (WebBaseLoader reads USER_AGENT).
os.environ['USER_AGENT'] = 'myagent'

# The original code re-assigned OPENAI_API_KEY to itself, which raised a
# confusing TypeError at import time when the variable was unset
# (os.environ values must be strings). Warn early instead; the OpenAI
# clients read the variable directly from the environment.
if not os.environ.get('OPENAI_API_KEY'):
    print("Warning: OPENAI_API_KEY is not set; OpenAI calls will fail.")

# Module-level handle to the retrieval-augmented chain; populated by
# process_url() and consumed by chat_with_rag_chain().
rag_chain = None

def process_url(url):
    """Load a web page, index it into Chroma, and build the module-level RAG chain.

    Fetches the page at *url*, splits it into overlapping chunks, embeds the
    chunks into a Chroma vector store, and wires a retriever + prompt + LLM
    pipeline into the global ``rag_chain`` used by the chat tab.

    Parameters:
        url: Web page URL to load and index.

    Returns:
        A status string: success message, or the error description on failure.
    """
    global rag_chain
    try:
        # Fetch the page and split it into overlapping chunks for retrieval.
        loader = WebBaseLoader(web_paths=[url])
        docs = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000, chunk_overlap=200, add_start_index=True
        )
        all_splits = text_splitter.split_documents(docs)

        # Embed the chunks; retrieve the two most similar chunks per question.
        vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())
        retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 2})

        prompt = hub.pull("rlm/rag-prompt")
        llm = ChatOpenAI(model="gpt-4")

        def format_docs(docs):
            # Concatenate retrieved chunks into a single context string.
            return "\n\n".join(doc.page_content for doc in docs)

        rag_chain = (
            {"context": retriever | format_docs, "question": RunnablePassthrough()}
            | prompt
            | llm
            | StrOutputParser()
        )
        return "Successfully processed the URL. You can now ask questions."
    except Exception as e:
        # Surface the failure to the UI status box instead of crashing the app.
        return f"Error processing URL: {e}"

def chat_with_rag_chain(message, history):
    """Answer *message* via the module-level RAG chain built by process_url().

    Parameters:
        message: The user's question.
        history: Chat history (unused; kept for the Gradio callback signature).

    Returns:
        The chain's answer string, or a user-facing error/prompt message.
    """
    global rag_chain
    # Guard clause: nothing to query until a URL has been processed.
    if not rag_chain:
        return "Please enter a URL first and process it."
    try:
        return rag_chain.invoke(message)
    except Exception as e:
        return f"Error invoking RAG chain: {e}"

# Gradio UI: two tabs sharing the module-level `rag_chain`.
with gr.Blocks() as demo:
    gr.Markdown("# RAG Chain URL Processor and Chat Interface")
    
    # Tab 1: index a URL and report status.
    with gr.Tab("URL Processor"):
        url_input = gr.Textbox(label="Enter URL", placeholder="https://example.com")
        process_button = gr.Button("Process URL")
        url_output = gr.Textbox(label="Status")
        
        process_button.click(process_url, inputs=url_input, outputs=url_output)
    
    # Tab 2: chat against the indexed content.
    with gr.Tab("Chat Interface"):
        chatbot = gr.Chatbot()
        msg = gr.Textbox(label="Your Question")
        clear = gr.Button("Clear")

        def user(user_message, history):
            # Clear the textbox and append the user's turn (bot reply pending)
            # as a [user, bot] pair in the Chatbot history.
            return "", history + [[user_message, None]]

        def bot(history):
            # Fill in the pending bot slot of the most recent [user, bot] pair.
            bot_message = chat_with_rag_chain(history[-1][0], history)
            history[-1][1] = bot_message
            return history

        # Two-step handshake: `user` echoes the question immediately
        # (queue=False for snappy UI), then `bot` computes the answer.
        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, chatbot, chatbot
        )
        # Reset the chat pane; returning None empties the Chatbot component.
        clear.click(lambda: None, None, chatbot, queue=False)

# debug=True prints tracebacks to the console while the app runs.
demo.launch(debug=True)