Spaces: redfernstech / Runtime error

Srinivasulu kethanaboina committed on
Commit 595025f · verified · 1 Parent(s): 4ca2037

Update app.py

Files changed (1):
  1. app.py +50 -39
app.py CHANGED
@@ -1,15 +1,19 @@
 from dotenv import load_dotenv
 import gradio as gr
 import os
- import csv
 from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
 from llama_index.llms.huggingface import HuggingFaceInferenceAPI
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
- from datasets import Dataset
-
+ from sentence_transformers import SentenceTransformer
+ import firebase_admin
+ from firebase_admin import db, credentials
+ import datetime
+ import uuid
 # Load environment variables
 load_dotenv()
-
+ # authenticate to firebase
+ cred = credentials.Certificate("redfernstech-fd8fe-firebase-adminsdk-g9vcn-0537b4efd6.json")
+ firebase_admin.initialize_app(cred, {"databaseURL": "https://redfernstech-fd8fe-default-rtdb.firebaseio.com/"})
 # Configure the Llama index settings
 Settings.llm = HuggingFaceInferenceAPI(
     model_name="meta-llama/Meta-Llama-3-8B-Instruct",
@@ -24,28 +28,24 @@ Settings.embed_model = HuggingFaceEmbedding(
 )
 
 # Define the directory for persistent storage and data
- PERSIST_DIR = "history" # Replace with your actual directory path
- CSV_FILE = os.path.join(PERSIST_DIR, "chat_history.csv")
+ PERSIST_DIR = "db"
+ PDF_DIRECTORY = 'data' # Changed to the directory containing PDFs
 
 # Ensure directories exist
+ os.makedirs(PDF_DIRECTORY, exist_ok=True)
 os.makedirs(PERSIST_DIR, exist_ok=True)
 
 # Variable to store current chat conversation
- current_chat_history = Dataset.from_dict({"query": [], "response": []})
-
+ current_chat_history = []
 
 def data_ingestion_from_directory():
     # Use SimpleDirectoryReader on the directory containing the PDF files
-     PDF_DIRECTORY = 'data' # Replace with the directory containing your PDFs
     documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
     storage_context = StorageContext.from_defaults()
     index = VectorStoreIndex.from_documents(documents)
     index.storage_context.persist(persist_dir=PERSIST_DIR)
 
-
 def handle_query(query):
-     global current_chat_history
-
     chat_text_qa_msgs = [
         (
             "user",
@@ -67,7 +67,7 @@ def handle_query(query):
 
     # Use chat history to enhance response
     context_str = ""
-     for past_query, response in reversed(current_chat_history["query"]):
+     for past_query, response in reversed(current_chat_history):
         if past_query.strip():
             context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
 
@@ -82,40 +82,55 @@ def handle_query(query):
         response = "Sorry, I couldn't find an answer."
 
     # Update current chat history
-     current_chat_history = current_chat_history.concat(Dataset.from_dict({"query": [query], "response": [response]}))
-
-     # Save chat history to CSV
-     with open(CSV_FILE, 'a', newline='', encoding='utf-8') as file:
-         csv_writer = csv.writer(file)
-         csv_writer.writerow([query, response])
+     current_chat_history.append((query, response))
 
     return response
 
+ # Example usage: Process PDF ingestion from directory
+ print("Processing PDF ingestion from directory:", PDF_DIRECTORY)
+ data_ingestion_from_directory()
 
+ # Define the function to handle predictions
+ """def predict(message,history):
+     response = handle_query(message)
+     return response"""
 def predict(message, history):
-     # Your logo HTML code
     logo_html = '''
     <div class="circle-logo">
         <img src="https://rb.gy/8r06eg" alt="FernAi">
     </div>
     '''
-
-     # Assuming handle_query function handles the message and returns a response
     response = handle_query(message)
-
-     # Prepare the response with logo HTML
     response_with_logo = f'<div class="response-with-logo">{logo_html}<div class="response-text">{response}</div></div>'
-
-     # Convert history to a string (if it's a list)
-     if isinstance(history, list):
-         history = ' '.join(map(str, history))
-
-     # Save history to kk.txt
-     with open('kk.txt', 'a') as file:
-         file.write(history + '\n')
-
     return response_with_logo
-
+ def save_chat_message(session_id, message_data):
+     ref = db.reference(f'/chat_history/{session_id}') # Use the session ID to save chat data
+     ref.push().set(message_data)
+
+ # Define your Gradio chat interface function (replace with your actual logic)
+ def chat_interface(message, history):
+     try:
+         # Generate a unique session ID for this chat session
+         session_id = str(uuid.uuid4())
+
+         # Process the user message and generate a response (your chatbot logic)
+         response = handle_query(message)
+
+         # Capture the message data
+         message_data = {
+             "sender": "user",
+             "message": message,
+             "response": response,
+             "timestamp": datetime.datetime.now().isoformat() # Use a library like datetime
+         }
+
+         # Call the save function to store in Firebase with the generated session ID
+         save_chat_message(session_id, message_data)
+
+         # Return the bot response
+         return response
+     except Exception as e:
+         return str(e)
 
 # Custom CSS for styling
 css = '''
@@ -128,13 +143,11 @@ css = '''
     margin-right: 10px;
     vertical-align: middle;
 }
-
 .circle-logo img {
     width: 100%;
     height: 100%;
     object-fit: cover;
 }
-
 .response-with-logo {
     display: flex;
     align-items: center;
@@ -146,11 +159,9 @@ footer {
 }
 label.svelte-1b6s6s {display: none}
 '''
-
- # Launch Gradio interface
 gr.ChatInterface(predict,
                  css=css,
                  description="FernAI",
                  clear_btn=None, undo_btn=None, retry_btn=None,
                  examples=['Tell me about Redfernstech?', 'Services in Redfernstech?']
- ).launch(share=False)
+ ).launch()
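
The substance of this commit is the switch from CSV/`datasets` chat logging to Firebase Realtime Database. Below is a minimal runnable sketch of that logging flow, assuming the service-account JSON file and database URL shown in the diff exist in your deployment (both are placeholders here), and using only documented firebase_admin Admin SDK calls:

import datetime
import uuid

import firebase_admin
from firebase_admin import credentials, db

# Credential file and database URL are the values from the diff;
# substitute your own when running this sketch.
cred = credentials.Certificate("redfernstech-fd8fe-firebase-adminsdk-g9vcn-0537b4efd6.json")
firebase_admin.initialize_app(cred, {"databaseURL": "https://redfernstech-fd8fe-default-rtdb.firebaseio.com/"})

def save_chat_message(session_id, message_data):
    # push() mints a unique, time-ordered child key under the session
    # node, so concurrent writes never overwrite each other.
    db.reference(f"/chat_history/{session_id}").push().set(message_data)

# Example usage: log one exchange under a fresh session ID.
session_id = str(uuid.uuid4())
save_chat_message(session_id, {
    "sender": "user",
    "message": "Tell me about Redfernstech?",
    "response": "(bot answer)",
    "timestamp": datetime.datetime.now().isoformat(),
})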
 
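
Two behaviors worth noting in the new code: gr.ChatInterface is still launched with predict, so chat_interface (the only path that calls save_chat_message) is never invoked, and chat_interface generates a fresh uuid4 per message, so each exchange lands under its own session node rather than one node per conversation. For verification, logged history can be read back with the same Admin SDK; a small sketch, assuming firebase_admin.initialize_app has already been called as in app.py:

from firebase_admin import db

def load_chat_history(session_id):
    # Returns {push_key: message_data, ...} for one session,
    # or None if nothing has been logged under that ID.
    return db.reference(f"/chat_history/{session_id}").get()

def load_all_sessions():
    # Reads every session node under /chat_history in one call.
    return db.reference("/chat_history").get()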