Spaces:
Runtime error
Srinivasulu kethanaboina
committed on
Update app.py
app.py CHANGED
@@ -1,27 +1,29 @@
 from dotenv import load_dotenv
-from fastapi import FastAPI, Request
 import gradio as gr
 import os
+import uvicorn
+from fastapi import FastAPI, Request
+from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
+from llama_index.llms.huggingface import HuggingFaceInferenceAPI
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from sentence_transformers import SentenceTransformer
 import firebase_admin
 from firebase_admin import db, credentials
 import datetime
 import uuid
 import random
-from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
-from llama_index.llms.huggingface import HuggingFaceInferenceAPI
-from llama_index.embeddings.huggingface import HuggingFaceEmbedding
-import threading
-import uvicorn
 
+def select_random_name():
+    names = ['Clara', 'Lily']
+    return random.choice(names)
+
+# Example usage
 # Load environment variables
 load_dotenv()
-
-# Authenticate to Firebase
+# authenticate to firebase
 cred = credentials.Certificate("redfernstech-fd8fe-firebase-adminsdk-g9vcn-0537b4efd6.json")
 firebase_admin.initialize_app(cred, {"databaseURL": "https://redfernstech-fd8fe-default-rtdb.firebaseio.com/"})
-
-app = FastAPI()
-
+# Configure the Llama index settings
 Settings.llm = HuggingFaceInferenceAPI(
     model_name="meta-llama/Meta-Llama-3-8B-Instruct",
     tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
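Note: this hunk removes import threading and app = FastAPI() without re-adding them anywhere in the diff, while the unchanged @app.get("/chat") route and the threading.Thread(...) call in the final hunk still reference both names. Unless they are defined in a part of the file the diff does not show, the updated app.py would fail with a NameError at import time. A minimal sketch of the definitions the rest of the file appears to rely on (an assumption, not part of this commit):

```python
# Sketch only: names the later hunks still reference but this hunk deletes.
import threading

from fastapi import FastAPI

app = FastAPI()  # required by the @app.get("/chat") route near the end of the file
```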
@@ -34,17 +36,19 @@ Settings.embed_model = HuggingFaceEmbedding(
     model_name="BAAI/bge-small-en-v1.5"
 )
 
-
-# Define directories
+# Define the directory for persistent storage and data
 PERSIST_DIR = "db"
-PDF_DIRECTORY = 'data'
+PDF_DIRECTORY = 'data'  # Changed to the directory containing PDFs
+
+# Ensure directories exist
 os.makedirs(PDF_DIRECTORY, exist_ok=True)
 os.makedirs(PERSIST_DIR, exist_ok=True)
 
-# Variable to store chat
+# Variable to store current chat conversation
 current_chat_history = []
 
 def data_ingestion_from_directory():
+    # Use SimpleDirectoryReader on the directory containing the PDF files
     documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
     storage_context = StorageContext.from_defaults()
     index = VectorStoreIndex.from_documents(documents)
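The hunk cuts off before data_ingestion_from_directory finishes. Since handle_query later reloads the index via load_index_from_storage from PERSIST_DIR, the function presumably ends by persisting the freshly built index; a plausible completion, where the final persist call is llama_index's standard API but is an assumption here because the diff does not show it:

```python
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex

PDF_DIRECTORY = "data"
PERSIST_DIR = "db"

def data_ingestion_from_directory():
    # Read every file in the data directory into Document objects
    documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
    # Build a vector index over those documents
    index = VectorStoreIndex.from_documents(documents)
    # Assumed final step: persist so handle_query can reload it from PERSIST_DIR
    index.storage_context.persist(persist_dir=PERSIST_DIR)
```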
@@ -55,7 +59,7 @@ def handle_query(query):
         (
             "user",
             """
-            You are
+            You are the clara Redfernstech chatbot. Your goal is to provide accurate, professional, and helpful answers to user queries based on the company's data. Always ensure your responses are clear and concise. give response within 10-15 words only
             {context_str}
             Question:
             {query_str}
@@ -64,9 +68,11 @@ def handle_query(query):
     ]
     text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
 
+    # Load index from storage
     storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
     index = load_index_from_storage(storage_context)
 
+    # Use chat history to enhance response
     context_str = ""
     for past_query, response in reversed(current_chat_history):
         if past_query.strip():
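The hunk also stops inside the history loop that builds context_str; the loop body falls in unchanged lines the diff omits. A plausible shape for that body, wrapped in a hypothetical build_context helper for clarity (the helper name and the exact format string are guesses):

```python
# Assumed shape of the omitted loop body; the format string is a guess.
def build_context(history):
    context_str = ""
    for past_query, response in reversed(history):
        if past_query.strip():
            context_str += f"User: {past_query}\nBot: {response}\n"
    return context_str

# Example: build_context([("hi", "hello"), ("pricing?", "See our site.")])
```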
@@ -75,72 +81,99 @@ def handle_query(query):
     query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
     answer = query_engine.query(query)
 
-
+    if hasattr(answer, 'response'):
+        response = answer.response
+    elif isinstance(answer, dict) and 'response' in answer:
+        response = answer['response']
+    else:
+        response = "Sorry, I couldn't find an answer."
 
+    # Update current chat history
     current_chat_history.append((query, response))
 
     return response
 
-
-
+# Example usage: Process PDF ingestion from directory
+print("Processing PDF ingestion from directory:", PDF_DIRECTORY)
+data_ingestion_from_directory()
+# Define the function to handle predictions
+"""def predict(message,history):
+    response = handle_query(message)
+    return response"""
+def predict(message, history):
+    logo_html = '''
+    <div class="circle-logo">
+        <img src="https://rb.gy/8r06eg" alt="FernAi">
+    </div>
+    '''
+    response = handle_query(message)
+    response_with_logo = f'<div class="response-with-logo">{logo_html}<div class="response-text">{response}</div></div>'
+    return response_with_logo
+def save_chat_message(session_id, message_data):
+    ref = db.reference(f'/chat_history/{session_id}')  # Use the session ID to save chat data
     ref.push().set(message_data)
 
-
+# Define your Gradio chat interface function (replace with your actual logic)
+def chat_interface(message, history):
     try:
-
-        email = request.query_params.get('email')
+        # Generate a unique session ID for this chat session
         session_id = str(uuid.uuid4())
+
+        # Process the user message and generate a response (your chatbot logic)
         response = handle_query(message)
 
+        # Capture the message data
         message_data = {
-            "sender":
+            "sender": "user",
             "message": message,
             "response": response,
-            "timestamp": datetime.datetime.now().isoformat()
+            "timestamp": datetime.datetime.now().isoformat()  # Use a library like datetime
         }
 
-
+        # Call the save function to store in Firebase with the generated session ID
+        save_chat_message(session_id, message_data)
 
+        # Return the bot response
         return response
     except Exception as e:
         return str(e)
 
+# Custom CSS for styling
 css = '''
 .circle-logo {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    display: inline-block;
+    width: 40px;
+    height: 40px;
+    border-radius: 50%;
+    overflow: hidden;
+    margin-right: 10px;
+    vertical-align: middle;
+}
+.circle-logo img {
+    width: 100%;
+    height: 100%;
+    object-fit: cover;
+}
+.response-with-logo {
+    display: flex;
+    align-items: center;
+    margin-bottom: 10px;
+}
+footer {
     display: none !important;
     background-color: #F8D7DA;
 }
-
-
-
-
-
-
-
-
-
-
+.svelte-1ed2p3z p {
+    font-size: 24px;
+    font-weight: bold;
+    line-height: 1.2;
+    color: #111;
+    margin: 20px 0;
+}
+label.svelte-1b6s6s {display: none}
+div.svelte-rk35yg {display: none;}
+div.progress-text.svelte-z7cif2.meta-text {display: none;}
 '''
-
 @app.get("/chat")
 async def chat_ui(username: str, email: str):
     gr.ChatInterface(
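Two observations on this hunk: chat_interface generates a fresh session_id for every message, so each exchange lands under its own /chat_history/{session_id} key rather than one key per conversation; and save_chat_message only writes. A hypothetical companion reader (not in the commit) using the same firebase_admin API might look like:

```python
from firebase_admin import db

# Hypothetical helper: read a session's messages back from the same
# Realtime Database path that save_chat_message writes to.
def load_chat_history(session_id):
    ref = db.reference(f'/chat_history/{session_id}')
    data = ref.get()  # None if the session has no messages yet
    return list(data.values()) if data else []
```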
@@ -154,5 +187,4 @@ async def chat_ui(username: str, email: str):
     return {"message": "Chat interface launched."}
 
 if __name__ == "__main__":
-
-    threading.Thread(target=lambda: uvicorn.run(app, host="0.0.0.0", port=8000), daemon=True).start()
+    threading.Thread(target=lambda: uvicorn.run(app, host="0.0.0.0", port=8000), daemon=True).start()
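Because the server runs in a daemon thread, the process exits as soon as the main thread finishes, and nothing shown in this diff blocks after the thread starts. A conventional standalone entry point would run the server in the foreground instead (an alternative sketch, not what the commit does):

```python
# Alternative sketch: assumes app is the FastAPI instance discussed above.
import uvicorn

if __name__ == "__main__":
    # Run uvicorn in the foreground so the process stays alive
    uvicorn.run(app, host="0.0.0.0", port=8000)
```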