Dan Foley committed
Commit d094b68 · unverified · 1 parent: 533edbd

Delete app1.1.py

Files changed (1)
  1. app1.1.py +0 -85
app1.1.py DELETED
@@ -1,85 +0,0 @@
- from dotenv import load_dotenv  # Import dotenv to load environment variables
- import os
- import chainlit as cl
- from langchain.chains import RetrievalQA
- from langchain_community.vectorstores import FAISS
- from langchain_community.embeddings import OpenAIEmbeddings
- from langchain.text_splitter import CharacterTextSplitter
- from langchain.chat_models import ChatOpenAI
- from langchain.schema import Document
- from langchain.embeddings import HuggingFaceEmbeddings
- import json
-
- # Load environment variables from .env file
- load_dotenv()
-
- # Get the OpenAI API key from the environment
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-
- if not OPENAI_API_KEY:
-     raise ValueError("OPENAI_API_KEY is not set. Please add it to your .env file.")
-
- # Global variables for vector store and QA chain
- vector_store = None
- qa_chain = None
-
- # Step 1: Load and Process JSON Data
- def load_json_file(file_path):
-     with open(file_path, "r", encoding="utf-8") as file:
-         data = json.load(file)
-     return data
-
- def setup_vector_store_from_json(json_data):
-     # Create Document objects with URLs and content
-     documents = [Document(page_content=item["content"], metadata={"url": item["url"]}) for item in json_data]
-
-     # Create embeddings and store them in FAISS
-     # embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
-     embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
-     vector_store = FAISS.from_documents(documents, embeddings)
-     return vector_store
-
- def setup_qa_chain(vector_store):
-     retriever = vector_store.as_retriever(search_kwargs={"k": 3})
-     llm = ChatOpenAI(model="gpt-3.5-turbo", openai_api_key=OPENAI_API_KEY)
-     qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=retriever, return_source_documents=True)
-     return qa_chain
-
- # Initialize Chainlit: Preload data when the chat starts
- @cl.on_chat_start
- async def chat_start():
-     global vector_store, qa_chain
-
-     # Load and preprocess the JSON file
-     json_data = load_json_file("football_players.json")
-     vector_store = setup_vector_store_from_json(json_data)
-     qa_chain = setup_qa_chain(vector_store)
-
-     # Send a welcome message
-     await cl.Message(content="Welcome to the RAG app! Ask me any question based on the knowledge base.").send()
-
- # Process user queries
- @cl.on_message
- async def main(message: cl.Message):
-     global qa_chain
-
-     # Ensure the QA chain is ready
-     if qa_chain is None:
-         await cl.Message(content="The app is still initializing. Please wait a moment and try again.").send()
-         return
-
-     # Get query from the user and run the QA chain
-     query = message.content
-     response = qa_chain({"query": query})
-
-     # Extract the answer and source documents
-     answer = response["result"]
-     sources = response["source_documents"]
-
-     # Format and send the response
-     await cl.Message(content=f"**Answer:** {answer}").send()
-     if sources:
-         await cl.Message(content="**Sources:**").send()
-         for i, doc in enumerate(sources, 1):
-             url = doc.metadata.get("url", "No URL available")
-             await cl.Message(content=f"**Source {i}:** {doc.page_content}\n**URL:** {url}").send()
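
For context, the deleted script expected football_players.json to be a list of records, each carrying a "content" field (the text to embed) and a "url" field (kept as source metadata); this follows from how setup_vector_store_from_json builds its Document objects. The actual data file is not part of this commit, so the sketch below is hypothetical: a minimal generator for that expected shape, with placeholder entries.

# Hypothetical sketch of the JSON layout app1.1.py read; field names are
# inferred from setup_vector_store_from_json, and the entries are made up.
import json

records = [
    {"url": "https://example.com/players/1", "content": "Biography text for the first player."},
    {"url": "https://example.com/players/2", "content": "Biography text for the second player."},
]

with open("football_players.json", "w", encoding="utf-8") as f:
    json.dump(records, f, ensure_ascii=False, indent=2)

With such a file in place, the app would have been launched through Chainlit's CLI (chainlit run app1.1.py), which triggers the @cl.on_chat_start handler to build the FAISS index before any query is answered.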