import os

import gradio as gr

from langchain import hub
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
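
# The imports above use the split LangChain packages (LangChain >= 0.1). The exact
# dependency set is an assumption and may vary with your versions; roughly:
#   pip install gradio langchain langchain-community langchain-openai \
#       langchain-text-splitters langchainhub chromadb beautifulsoup4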

# Read the OpenAI API key from the environment, falling back to a placeholder.
# Never hardcode or commit a real key.
os.environ.setdefault("OPENAI_API_KEY", "YOUR_OPENAI_API_KEY")
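
# A minimal alternative sketch, assuming the optional python-dotenv package is
# installed and a local .env file defines OPENAI_API_KEY:
#
#   from dotenv import load_dotenv
#   load_dotenv()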


# Built by process_url() once a URL has been indexed; read by chat_with_rag_chain().
rag_chain = None


def process_url(url):
    """Fetch a page, split and index it, and build the RAG chain."""
    global rag_chain
    try:
        # Fetch and parse the page.
        loader = WebBaseLoader(web_paths=[url])
        docs = loader.load()

        # Split the document into overlapping chunks for retrieval.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000, chunk_overlap=200, add_start_index=True
        )
        all_splits = text_splitter.split_documents(docs)

        # Embed the chunks and index them in a Chroma vector store.
        vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())
        retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 2})

        # Standard RAG prompt from the LangChain Hub.
        prompt = hub.pull("rlm/rag-prompt")

        llm = ChatOpenAI(model="gpt-4")

        def format_docs(docs):
            return "\n\n".join(doc.page_content for doc in docs)

        # Retrieve -> format context -> prompt -> LLM -> plain string.
        rag_chain = (
            {"context": retriever | format_docs, "question": RunnablePassthrough()}
            | prompt
            | llm
            | StrOutputParser()
        )

        return "Successfully processed the URL. You can now ask questions."
    except Exception as e:
        return f"Error processing URL: {e}"


def chat_with_rag_chain(message):
    """Answer a question using the RAG chain built by process_url()."""
    global rag_chain
    if rag_chain:
        try:
            response = rag_chain.invoke(message)
            return response
        except Exception as e:
            return f"Error invoking RAG chain: {e}"
    else:
        return "Please process a URL in the URL Processor tab first."


url_input_interface = gr.Interface(
    fn=process_url,
    inputs=gr.Textbox(label="Enter URL", placeholder="https://example.com"),
    outputs=gr.Textbox(label="Status"),
    title="RAG Chain URL Processor",
    description="Enter a URL to process the article using a RAG chain model.",
)


chat_interface = gr.Interface(
    fn=chat_with_rag_chain,
    inputs=gr.Textbox(label="Your Question"),
    outputs=gr.Textbox(label="Response"),
    title="RAG Chain Chat Interface",
    description="Chat with the RAG chain model after processing a URL.",
)


# Expose both steps as tabs and launch the app (share=True creates a public link).
gr.TabbedInterface(
    [url_input_interface, chat_interface], ["URL Processor", "Chat Interface"]
).launch(debug=True, share=True)
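
# The chain can also be exercised without the UI (sketch; the URL and question
# below are placeholders, not part of the original script):
#
#   process_url("https://example.com/some-article")
#   print(chat_with_rag_chain("What is the article about?"))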