TRaw committed on
Commit
bfe4194
·
verified ·
1 Parent(s): 47eda04

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -21
app.py CHANGED
@@ -1,25 +1,45 @@
1
  import gradio as gr
2
- import subprocess
 
 
 
 
 
3
 
4
def xterm_webpage():
    """Build a Gradio HTML component that embeds an xterm.js terminal.

    The embedded page pulls the xterm.js assets from ``node_modules``,
    mounts a terminal into the ``#terminal`` div, and writes a colored
    greeting prompt (ANSI escapes render red/bold/italic).

    Returns:
        gr.HTML: the component wrapping the terminal demo page.
    """
    # NOTE(review): exact intra-string indentation of the original literal
    # is not recoverable from the diff view; markup content is preserved.
    html_code = """
    <!doctype html>
    <html>
      <head>
        <link rel="stylesheet" href="node_modules/xterm/css/xterm.css" />
        <script src="node_modules/xterm/lib/xterm.js"></script>
      </head>
      <body>
        <div id="terminal"></div>
        <script>
          var term = new Terminal();
          term.open(document.getElementById('terminal'));
          term.write('Hello from \x1B[1;3;31mxterm.js\x1B[0m $ ')
        </script>
      </body>
    </html>
    """
    return gr.HTML(html_code)
23
 
24
# Serve the terminal page through a live-updating Gradio interface.
iface = gr.Interface(fn=xterm_webpage, live=True)
iface.launch()
 
1
  import gradio as gr
2
+ import bs4
3
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
4
+ from langchain_community.document_loaders import WebBaseLoader
5
+ from langchain_community.vectorstores import Chroma
6
+ from langchain_community.embeddings import OllamaEmbeddings
7
+ import ollama
8
 
9
def load_and_retrieve_docs(url):
    """Fetch a web page, chunk it, embed it, and return a retriever over it.

    Args:
        url: Address of the page to download with ``WebBaseLoader``.

    Returns:
        A retriever backed by an in-memory Chroma vector store holding
        1000-character chunks (200-character overlap) embedded with the
        Ollama "mistral" model.
    """
    # Download the page; bs_kwargs stays an empty dict as in the original.
    documents = WebBaseLoader(web_paths=(url,), bs_kwargs=dict()).load()

    # Chunk the raw documents so each piece fits an embedding context.
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_documents(documents)

    # Embed every chunk and index the vectors; a fresh store is built per call.
    store = Chroma.from_documents(
        documents=chunks,
        embedding=OllamaEmbeddings(model="mistral"),
    )
    return store.as_retriever()
 
 
 
 
 
 
 
21
 
22
def format_docs(docs):
    """Concatenate each document's ``page_content``, blank-line separated.

    Args:
        docs: Iterable of objects exposing a ``page_content`` attribute.

    Returns:
        str: The contents joined with "\n\n"; empty string for no docs.
    """
    sections = [doc.page_content for doc in docs]
    return "\n\n".join(sections)
25
+
26
def rag_chain(url, question):
    """Answer *question* using content retrieved from *url* via Ollama.

    Builds a retriever over the page, pulls the chunks most relevant to the
    question, stuffs them into a plain-text prompt, and asks the "mistral"
    model for a completion.

    Args:
        url: Web page whose content provides the answering context.
        question: Natural-language query to answer.

    Returns:
        str: The model's reply text.
    """
    # Retrieve the chunks most similar to the question.
    relevant = load_and_retrieve_docs(url).invoke(question)

    # Same prompt bytes as before: question first, then the joined context.
    prompt = f"Question: {question}\n\nContext: {format_docs(relevant)}"

    reply = ollama.chat(model='mistral', messages=[{'role': 'user', 'content': prompt}])
    return reply['message']['content']
34
+
35
# Gradio UI: a URL field and a question field in, the model's answer out.
iface = gr.Interface(
    fn=rag_chain,
    inputs=["text", "text"],
    outputs="text",
    title="RAG Chain Question Answering",
    description="Enter a URL and a query to get answers from the RAG chain.",
)

# Start the web server.
iface.launch()