Spaces:
Runtime error
Runtime error
Srinivasulu kethanaboina
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -5,10 +5,7 @@ from llama_index.core import StorageContext, load_index_from_storage, VectorStor
|
|
5 |
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
|
6 |
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
7 |
from sentence_transformers import SentenceTransformer
|
8 |
-
import firebase_admin
|
9 |
-
from firebase_admin import db, credentials
|
10 |
import datetime
|
11 |
-
import uuid
|
12 |
import random
|
13 |
|
14 |
def select_random_name():
|
@@ -18,9 +15,7 @@ def select_random_name():
|
|
18 |
# Example usage
|
19 |
# Load environment variables
|
20 |
load_dotenv()
|
21 |
-
|
22 |
-
cred = credentials.Certificate("redfernstech-fd8fe-firebase-adminsdk-g9vcn-0537b4efd6.json")
|
23 |
-
firebase_admin.initialize_app(cred, {"databaseURL": "https://redfernstech-fd8fe-default-rtdb.firebaseio.com/"})
|
24 |
# Configure the Llama index settings
|
25 |
Settings.llm = HuggingFaceInferenceAPI(
|
26 |
model_name="meta-llama/Meta-Llama-3-8B-Instruct",
|
@@ -44,7 +39,8 @@ os.makedirs(PERSIST_DIR, exist_ok=True)
|
|
44 |
|
45 |
# Variable to store current chat conversation
|
46 |
current_chat_history = []
|
47 |
-
kkk=select_random_name()
|
|
|
48 |
def data_ingestion_from_directory():
|
49 |
# Use SimpleDirectoryReader on the directory containing the PDF files
|
50 |
documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
|
@@ -96,10 +92,6 @@ print("Processing PDF ingestion from directory:", PDF_DIRECTORY)
|
|
96 |
data_ingestion_from_directory()
|
97 |
|
98 |
# Define the function to handle predictions
|
99 |
-
"""def predict(message,history):
|
100 |
-
response = handle_query(message)
|
101 |
-
return response"""
|
102 |
-
|
103 |
def predict(message, history):
|
104 |
logo_html = '''
|
105 |
<div class="circle-logo">
|
@@ -109,30 +101,13 @@ def predict(message, history):
|
|
109 |
response = handle_query(message)
|
110 |
response_with_logo = f'<div class="response-with-logo">{logo_html}<div class="response-text">{response}</div></div>'
|
111 |
return response_with_logo
|
112 |
-
def save_chat_message(session_id, message_data):
    """Persist one chat exchange in the Firebase Realtime Database.

    Pushes ``message_data`` (a dict of sender/message/response/timestamp,
    built by the caller) as a new auto-keyed child under
    ``/chat_history/<session_id>``.

    NOTE(review): requires ``firebase_admin.initialize_app`` to have run
    first — this commit's diff removes that initialization, so this
    function (also removed in the same commit) would fail if kept. Verify
    before reinstating.
    """
    ref = db.reference(f'/chat_history/{session_id}')  # Use the session ID to save chat data
    ref.push().set(message_data)
|
115 |
|
116 |
# Define your Gradio chat interface function (replace with your actual logic)
|
117 |
def chat_interface(message, history):
|
118 |
try:
|
119 |
-
# Generate a unique session ID for this chat session
|
120 |
-
session_id = str(uuid.uuid4())
|
121 |
-
|
122 |
# Process the user message and generate a response (your chatbot logic)
|
123 |
response = handle_query(message)
|
124 |
|
125 |
-
# Capture the message data
|
126 |
-
message_data = {
|
127 |
-
"sender": "user",
|
128 |
-
"message": message,
|
129 |
-
"response": response,
|
130 |
-
"timestamp": datetime.datetime.now().isoformat() # Use a library like datetime
|
131 |
-
}
|
132 |
-
|
133 |
-
# Call the save function to store in Firebase with the generated session ID
|
134 |
-
save_chat_message(session_id, message_data)
|
135 |
-
|
136 |
# Return the bot response
|
137 |
return response
|
138 |
except Exception as e:
|
@@ -166,12 +141,10 @@ footer {
|
|
166 |
label.svelte-1b6s6s {display: none}
|
167 |
div.svelte-rk35yg {display: none;}
|
168 |
div.progress-text.svelte-z7cif2.meta-text {display: none;}
|
169 |
-
|
170 |
-
|
171 |
'''
|
172 |
|
173 |
gr.ChatInterface(chat_interface,
|
174 |
css=css,
|
175 |
description="Lily",
|
176 |
clear_btn=None, undo_btn=None, retry_btn=None,
|
177 |
-
).launch()
|
|
|
5 |
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
|
6 |
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
7 |
from sentence_transformers import SentenceTransformer
|
|
|
|
|
8 |
import datetime
|
|
|
9 |
import random
|
10 |
|
11 |
def select_random_name():
|
|
|
15 |
# Example usage
|
16 |
# Load environment variables
|
17 |
load_dotenv()
|
18 |
+
|
|
|
|
|
19 |
# Configure the Llama index settings
|
20 |
Settings.llm = HuggingFaceInferenceAPI(
|
21 |
model_name="meta-llama/Meta-Llama-3-8B-Instruct",
|
|
|
39 |
|
40 |
# Variable to store current chat conversation
|
41 |
current_chat_history = []
|
42 |
+
kkk = select_random_name()
|
43 |
+
|
44 |
def data_ingestion_from_directory():
|
45 |
# Use SimpleDirectoryReader on the directory containing the PDF files
|
46 |
documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
|
|
|
92 |
data_ingestion_from_directory()
|
93 |
|
94 |
# Define the function to handle predictions
|
|
|
|
|
|
|
|
|
95 |
def predict(message, history):
|
96 |
logo_html = '''
|
97 |
<div class="circle-logo">
|
|
|
101 |
response = handle_query(message)
|
102 |
response_with_logo = f'<div class="response-with-logo">{logo_html}<div class="response-text">{response}</div></div>'
|
103 |
return response_with_logo
|
|
|
|
|
|
|
104 |
|
105 |
# Define your Gradio chat interface function (replace with your actual logic)
|
106 |
def chat_interface(message, history):
|
107 |
try:
|
|
|
|
|
|
|
108 |
# Process the user message and generate a response (your chatbot logic)
|
109 |
response = handle_query(message)
|
110 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
111 |
# Return the bot response
|
112 |
return response
|
113 |
except Exception as e:
|
|
|
141 |
label.svelte-1b6s6s {display: none}
|
142 |
div.svelte-rk35yg {display: none;}
|
143 |
div.progress-text.svelte-z7cif2.meta-text {display: none;}
|
|
|
|
|
144 |
'''
|
145 |
|
146 |
gr.ChatInterface(chat_interface,
|
147 |
css=css,
|
148 |
description="Lily",
|
149 |
clear_btn=None, undo_btn=None, retry_btn=None,
|
150 |
+
).launch()
|