Srinivasulu kethanaboina committed on
Commit
d4aca7a
·
verified ·
1 Parent(s): dc2fd0b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -166
app.py CHANGED
@@ -1,169 +1,55 @@
1
- from dotenv import load_dotenv
2
  import gradio as gr
3
- import os
4
- from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
5
- from llama_index.llms.huggingface import HuggingFaceInferenceAPI
6
- from llama_index.embeddings.huggingface import HuggingFaceEmbedding
7
- import datetime
8
- import uuid
9
- import random
10
-
11
def select_random_name():
    """Pick one of the two assistant persona names uniformly at random."""
    persona_names = ('Clara', 'Lily')
    return random.choice(persona_names)
14
-
15
# Pull secrets (HF_TOKEN) from a local .env file into the process environment.
load_dotenv()

# Global llama-index configuration: a hosted Llama-3 8B Instruct endpoint for
# generation and a small BGE model for embeddings.
Settings.llm = HuggingFaceInferenceAPI(
    model_name="meta-llama/Meta-Llama-3-8B-Instruct",
    tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
    context_window=3000,
    token=os.getenv("HF_TOKEN"),
    max_new_tokens=512,
    generate_kwargs={"temperature": 0.1},
)
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)

# On-disk location of the persisted index, and the directory of source PDFs.
PERSIST_DIR = "db"
PDF_DIRECTORY = 'data'

# Make sure both directories exist before any ingestion/persistence runs.
os.makedirs(PDF_DIRECTORY, exist_ok=True)
os.makedirs(PERSIST_DIR, exist_ok=True)

# Rolling (query, response) pairs for the current chat session.
current_chat_history = []
42
-
43
def data_ingestion_from_directory():
    """Read every document under PDF_DIRECTORY, build a vector index over
    them, and persist the index to PERSIST_DIR for later reloading.

    Fix: the original created ``StorageContext.from_defaults()`` into a local
    that was never used — ``VectorStoreIndex.from_documents`` builds its own
    default storage context — so the dead assignment has been removed.
    """
    documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir=PERSIST_DIR)
49
-
50
def handle_query(query):
    """Answer *query* from the persisted vector index.

    Earlier turns of the session are folded into a context string, the index
    is reloaded from PERSIST_DIR, and the (query, response) pair is appended
    to current_chat_history before the response text is returned.
    """
    # Prompt template for question answering (response kept short by design).
    qa_messages = [
        (
            "user",
            """
            You are the Clara Redfernstech chatbot. Your goal is to provide accurate, professional, and helpful answers to user queries based on the company's data. Always ensure your responses are clear and concise. give response within 10-15 words only
            {context_str}
            Question:
            {query_str}
            """
        )
    ]
    text_qa_template = ChatPromptTemplate.from_messages(qa_messages)

    # Reload the index persisted by data_ingestion_from_directory().
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)

    # Fold prior turns (most recent first) into a plain-text context block.
    context_str = ""
    for past_query, past_response in reversed(current_chat_history):
        if past_query.strip():
            context_str += f"User asked: '{past_query}'\nBot answered: '{past_response}'\n"

    query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
    answer = query_engine.query(query)

    # The engine result may expose its text as an attribute or a dict key.
    if hasattr(answer, 'response'):
        response = answer.response
    elif isinstance(answer, dict) and 'response' in answer:
        response = answer['response']
    else:
        response = "Sorry, I couldn't find an answer."

    # Record this exchange for context in subsequent queries.
    current_chat_history.append((query, response))
    return response
88
-
89
# Build (or rebuild) the persisted index from the PDFs once at startup.
print("Processing PDF ingestion from directory:", PDF_DIRECTORY)
data_ingestion_from_directory()
92
-
93
def predict(message, history):
    """Answer *message* and wrap the reply in logo-decorated HTML markup."""
    logo_html = '''
    <div class="circle-logo">
        <img src="https://rb.gy/8r06eg" alt="FernAi">
    </div>
    '''
    reply = handle_query(message)
    # Pair the logo with the reply text inside the styled flex container.
    return f'<div class="response-with-logo">{logo_html}<div class="response-text">{reply}</div></div>'
102
-
103
def chat_interface(message, history):
    """Gradio chat callback: answer *message*, returning error text on failure.

    Fix: handle_query() already appends the (query, response) pair to
    current_chat_history, so the second append that used to live here
    recorded every exchange twice; it has been removed.
    """
    try:
        return handle_query(message)
    except Exception as e:
        # Surface the failure text to the UI instead of crashing the app.
        return str(e)
114
-
115
# Stylesheet injected into the Gradio app: circular avatar logo, flex layout
# for responses, and rules hiding the footer/progress chrome.
css = '''
.circle-logo {
    display: inline-block;
    width: 40px;
    height: 40px;
    border-radius: 50%;
    overflow: hidden;
    margin-right: 10px;
    vertical-align: middle;
}
.circle-logo img {
    width: 100%;
    height: 100%;
    object-fit: cover;
}
.response-with-logo {
    display: flex;
    align-items: center;
    margin-bottom: 10px;
}
footer {
    display: none !important;
    background-color: #F8D7DA;
}
.svelte-1ed2p3z p {
    font-size: 24px;
    font-weight: bold;
    line-height: 1.2;
    color: #111;
    margin: 20px 0;
}
label.svelte-1b6s6s {display: none}
div.svelte-rk35yg {display: none;}
div.progress-text.svelte-z7cif2.meta-text {display: none;}
'''

# HTML/JS snippet (rendered via the interface description) offering a
# button that redirects the browser to another page.
js = '''
<script>
function redirectToPage() {
    window.location.href = "https://example.com"; // Replace with your target URL
}
</script>
<button onclick="redirectToPage()">Redirect to another page</button>
'''
161
 
162
# Launch the chat UI. gr.ChatInterface supplies its own textbox and chatbot
# components; the gr.Interface-style kwargs previously passed here
# (inputs=, outputs=, live=) are not part of the ChatInterface API and
# raise TypeError, so they have been removed.
gr.ChatInterface(
    fn=chat_interface,
    css=css,
    description=js,
).launch()
 
 
1
  import gradio as gr
2
+ import firebase_admin
3
+ from firebase_admin import db, credentials
4
+ from gradio_client import Client
5
+
6
# Initialise the Firebase Admin SDK against the Realtime Database.
# NOTE(review): the service-account key file is shipped next to the app —
# it should be provided via a secret/environment mechanism instead.
cred = credentials.Certificate("redfernstech-fd8fe-firebase-adminsdk-g9vcn-0537b4efd6.json")
firebase_admin.initialize_app(cred, {"databaseURL": "https://redfernstech-fd8fe-default-rtdb.firebaseio.com/"})

# Remote hosted model used to summarise chat histories.
client = Client("vilarin/Llama-3.1-8B-Instruct")
12
+
13
def process_inputs(ip_address, chat_history):
    """Summarise *chat_history* with the remote model and upsert the result
    into Firebase under *ip_address*.

    Returns the model's summary text.
    """
    # Ask the hosted Llama model for a short bullet-point interest summary.
    result = client.predict(
        message=chat_history,
        system_prompt="summarize the text and give me the client interest within 30-40 words only in bullet points",
        temperature=0.8,
        max_new_tokens=1024,
        top_p=1,
        top_k=20,
        penalty=1.2,
        api_name="/chat"
    )

    # Print result for debugging
    print(result)

    # Upsert the record keyed by IP address.
    ref = db.reference('ip_addresses')
    ip_data = ref.get()

    # FIX: ref.get() returns None when the node has no data yet, and
    # `ip_address in None` raises TypeError — guard before the membership test.
    if ip_data and ip_address in ip_data:
        # Update existing record
        ref.child(ip_address).update({"chat_history": chat_history, "result": result})
    else:
        # Create new record
        ref.child(ip_address).set({"chat_history": chat_history, "result": result})

    return result
41
+
42
# Web UI: two free-text inputs (IP address, chat history) mapped to the
# summary text produced by process_inputs.
interface = gr.Interface(
    fn=process_inputs,
    inputs=[gr.Textbox(label="IP Address"), gr.Textbox(label="Chat History")],
    outputs="text",
    title="Chat History Processor",
    description="Enter an IP address and chat history to process and save to Firebase.",
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
# Start serving the Gradio app.
interface.launch()