Update app.py
app.py
CHANGED
@@ -7,23 +7,19 @@ import numpy as np
 from sklearn.feature_extraction.text import CountVectorizer
 from sklearn.naive_bayes import MultinomialNB
 import asyncio
-from crewai import Agent, Task, Crew
 from huggingface_hub import InferenceClient
-from langchain.tools import Tool
-from langchain.agents import Tool as LangChainTool
-import random
 import json
 import warnings
-from langchain.deprecation import LangChainDeprecationWarning
 
-# Suppress LangChain deprecation warnings
-warnings.filterwarnings("ignore", category=LangChainDeprecationWarning)
+# Suppress all deprecation warnings
+warnings.filterwarnings("ignore", category=DeprecationWarning)
 
 # Set up logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
 
 def get_huggingface_api_token():
+    """ Retrieves the Hugging Face API token from environment variables or a config file. """
     token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
     if token:
         logger.info("Hugging Face API token found in environment variables.")
@@ -44,65 +40,59 @@ def get_huggingface_api_token():
     logger.error("Hugging Face API token not found. Please set it up.")
     return None
 
-
-
-
-
-
-
-
-
-
-
-
-
-SHARED_CONTEXT = """You are part of a multi-agent system designed to provide respectful, empathetic, and accurate support for Zerodha, a leading Indian financial services company. Your role is crucial in ensuring all interactions uphold the highest standards of customer service while maintaining Zerodha's excellent reputation.
-
-Key points about Zerodha:
-1. India's largest discount broker, known for innovative technology and low-cost trading.
-2. Flat fee structure: ₹20 per executed order for intraday and F&O trades, zero brokerage for delivery equity investments.
-3. Main trading platform: Kite (web and mobile).
-4. Coin platform for commission-free direct mutual fund investments.
-5. Extensive educational resources through Varsity.
-6. Additional tools: Sentinel (price alerts) and ChartIQ (advanced charting).
-7. Console for account management and administrative tasks.
+def initialize_hf_client():
+    """ Initializes the Hugging Face Inference Client with the API token. """
+    try:
+        hf_token = get_huggingface_api_token()
+        if not hf_token:
+            raise ValueError("Hugging Face API token is not set. Please set it up before running the application.")
+        client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2", token=hf_token)
+        logger.info("Hugging Face Inference Client initialized successfully.")
+        return client
+    except Exception as e:
+        logger.error(f"Failed to initialize Hugging Face client: {e}")
+        sys.exit(1)
 
-
+client = initialize_hf_client()
 
-# Guardrail functions
 def sanitize_input(input_text):
+    """ Sanitizes input text by removing specific characters. """
     return re.sub(r'[<>&\']', '', input_text)
 
-
-
-
+def setup_classifier():
+    """ Sets up and trains a classifier for checking the relevance of a query. """
+    approved_topics = ['account opening', 'trading', 'fees', 'platforms', 'funds', 'regulations', 'support']
+    vectorizer = CountVectorizer()
+    X = vectorizer.fit_transform(approved_topics)
+    y = np.arange(len(approved_topics))
+    classifier = MultinomialNB()
+    classifier.fit(X, y)
+    return vectorizer, classifier
 
-
-y = np.arange(len(approved_topics))
-classifier.fit(X, y)
+vectorizer, classifier = setup_classifier()
 
 def is_relevant_topic(query):
+    """ Checks if the query is relevant based on pre-defined topics. """
     query_vector = vectorizer.transform([query])
     prediction = classifier.predict(query_vector)
     return prediction[0] in range(len(approved_topics))
 
 def redact_sensitive_info(text):
+    """ Redacts sensitive information from the text. """
     text = re.sub(r'\b\d{10,12}\b', '[REDACTED]', text)
     text = re.sub(r'[A-Z]{5}[0-9]{4}[A-Z]', '[REDACTED]', text)
     return text
 
 def check_response_content(response):
+    """ Checks response content for unauthorized claims or advice. """
     unauthorized_patterns = [
         r'\b(guarantee|assured|certain)\b.*\b(returns|profit)\b',
         r'\b(buy|sell)\b.*\b(specific stocks?|shares?)\b'
     ]
     return not any(re.search(pattern, response, re.IGNORECASE) for pattern in unauthorized_patterns)
 
-def check_confidence(response):
-    uncertain_phrases = ["I'm not sure", "It's possible", "I don't have enough information"]
-    return not any(phrase.lower() in response.lower() for phrase in uncertain_phrases)
-
 async def generate_response(prompt):
+    """ Generates a response using the Hugging Face inference client. """
     try:
         response = await client.text_generation(prompt, max_new_tokens=500, temperature=0.7)
         return response
@@ -111,6 +101,7 @@ async def generate_response(prompt):
         return "I apologize, but I'm having trouble generating a response at the moment. Please try again later."
 
 def post_process_response(response):
+    """ Post-processes the response to ensure it ends with helpful suggestions. """
     response = re.sub(r'\b(stupid|dumb|idiotic|foolish)\b', 'mistaken', response, flags=re.IGNORECASE)
 
     if not re.search(r'(Thank you|Is there anything else|Hope this helps|Let me know if you need more information)\s*$', response, re.IGNORECASE):
@@ -121,182 +112,29 @@ def post_process_response(response):
 
     return response
 
-# Define the tool for CrewAI
-generate_response_tool = Tool(
-    name="GenerateResponse",
-    func=generate_response,
-    description="Generate a response using the Mistral model"
-)
-
-# CrewAI setup
-communication_expert_crew = Agent(
-    role='Communication Expert',
-    goal='Interpret and rephrase user queries with empathy and respect',
-    backstory="""You are an expert in communication, specializing in understanding and rephrasing queries to ensure they are interpreted in the most positive and constructive light. Your role is crucial in setting the tone for respectful and empathetic interactions.""",
-    verbose=True,
-    allow_delegation=False,
-    tools=[generate_response_tool]
-)
-
-response_expert_crew = Agent(
-    role='Response Expert',
-    goal='Provide accurate, helpful, and emotionally intelligent responses to user queries',
-    backstory="""You are an expert in Zerodha's services and policies, with a keen ability to provide comprehensive and empathetic responses. Your role is to ensure that all user queries are addressed accurately while maintaining a respectful and supportive tone.""",
-    verbose=True,
-    allow_delegation=False,
-    tools=[generate_response_tool]
-)
-
-# Main function
-async def zerodha_support(message, history, username):
-    try:
-        sanitized_message = sanitize_input(message)
-
-        if not is_relevant_topic(sanitized_message):
-            return "I'm sorry, but I can only assist with queries related to Zerodha's services and trading. Could you please ask a question about your Zerodha account, trading, or our platforms?"
-
-        sanitized_message = redact_sensitive_info(sanitized_message)
-
-        # Use crewAI for initial query rephrasing
-        try:
-            rephrase_task = Task(
-                description=f"Rephrase the following user query with empathy and respect: '{sanitized_message}'",
-                agent=communication_expert_crew
-            )
-
-            crew = Crew(
-                agents=[communication_expert_crew],
-                tasks=[rephrase_task],
-                verbose=2
-            )
-
-            rephrased_query = await crew.kickoff()
-        except Exception as e:
-            logger.error(f"Error in CrewAI rephrasing: {e}")
-            rephrased_query = sanitized_message # Fallback to original message if rephrasing fails
-
-        # Generate response using Response Expert
-        try:
-            response_task = Task(
-                description=f"Provide an accurate and helpful response to the user query: '{rephrased_query}'",
-                agent=response_expert_crew
-            )
-
-            crew = Crew(
-                agents=[response_expert_crew],
-                tasks=[response_task],
-                verbose=2
-            )
-
-            response = await crew.kickoff()
-        except Exception as e:
-            logger.error(f"Error in CrewAI response generation: {e}")
-            response = "I apologize, but I'm having trouble generating a response at the moment. Please try again later."
-
-        if not check_response_content(response):
-            response += "\n\nPlease note that I cannot provide specific investment advice or guarantee returns. For personalized guidance, please consult with a qualified financial advisor."
-
-        if not check_confidence(response):
-            return "I apologize, but I'm not confident in providing an accurate answer to this query. For the most up-to-date and accurate information, please contact Zerodha's customer support directly."
-
-        final_response = post_process_response(response)
-
-        return final_response
-    except Exception as e:
-        logger.error(f"Error in zerodha_support: {e}")
-        return "I apologize, but an error occurred while processing your request. Please try again later."
-
-# Custom CSS for better styling
-custom_css = """
-.container {
-    max-width: 800px;
-    margin: auto;
-    padding: 20px;
-}
-.title {
-    text-align: center;
-    color: #387EF5;
-    font-size: 32px;
-    margin-bottom: 20px;
-}
-.description {
-    text-align: center;
-    color: #555;
-    margin-bottom: 30px;
-}
-.chatbot {
-    border: 1px solid #ddd;
-    border-radius: 10px;
-    overflow: hidden;
-}
-.user-info {
-    background-color: #f0f0f0;
-    padding: 10px;
-    border-radius: 5px;
-    margin-bottom: 20px;
-}
-"""
-
-# Placeholder function for user authentication (you'd implement this with your backend)
-def authenticate(username, password):
-    # This is a mock authentication. In a real scenario, you'd verify against a database.
-    return username == "demo" and password == "password"
-
-# Function to generate a random support ticket number
-def generate_ticket_number():
-    return f"ZRD-{random.randint(100000, 999999)}"
-
 # Gradio interface setup
-with gr.Blocks(
-    gr.
-
-
-    with gr.Tab("Login"):
-        username_input = gr.Textbox(label="Username")
-        password_input = gr.Textbox(label="Password", type="password")
+with gr.Blocks() as app:
+    with gr.Row():
+        username = gr.Textbox(label="Username")
+        password = gr.Textbox(label="Password", type="password")
         login_button = gr.Button("Login")
-        login_message = gr.Textbox(label="Login Status", interactive=False)
-
-    with gr.Tab("Support Chat"):
-        with gr.Row():
-            with gr.Column(scale=2):
-                chatbot = gr.Chatbot(height=400)
-                message_input = gr.Textbox(label="Your Message", placeholder="Type your question here...")
-                submit_button = gr.Button("Send")
-
-            with gr.Column(scale=1):
-                gr.HTML("<div class='user-info'>")
-                user_display = gr.Textbox(label="Logged in as", interactive=False)
-                ticket_number = gr.Textbox(label="Support Ticket", value=generate_ticket_number(), interactive=False)
-                gr.HTML("</div>")
-
-                gr.HTML("<h4>Quick Links</h4>")
-                gr.HTML("<ul><li><a href='https://zerodha.com/varsity/' target='_blank'>Zerodha Varsity</a></li><li><a href='https://console.zerodha.com/' target='_blank'>Zerodha Console</a></li></ul>")
 
-
-
-
-
-
-
-
-
-        ]
-        inputs=message_input,
+    with gr.Row():
+        query_input = gr.Textbox(label="Enter your query")
+        submit_button = gr.Button("Submit")
+        response_output = gr.Textbox(label="Response")
+
+    login_button.click(
+        fn=lambda u, p: "Login successful" if u == "admin" and p == "admin" else "Login failed",
+        inputs=[username, password],
+        outputs=[gr.Text(label="Login status")]
     )
 
-
-
-
-
-        return "Login failed. Please try again.", gr.Tabs.update(selected="Login"), ""
-
-    login_button.click(login, inputs=[username_input, password_input], outputs=[login_message, demo.tabs, user_display])
-
-    # Chat logic
-    submit_button.click(zerodha_support, inputs=[message_input, chatbot, user_display], outputs=chatbot)
-    message_input.submit(zerodha_support, inputs=[message_input, chatbot, user_display], outputs=chatbot)
+    submit_button.click(
+        fn=lambda x: asyncio.run(generate_response(x)),
+        inputs=[query_input],
+        outputs=[response_output]
+    )
 
 if __name__ == "__main__":
-
+    app.launch()
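For reference, the guardrail helpers that survive this revision can be exercised on their own. Below is a minimal standalone sketch, not part of the commit: the module-level approved_topics list and the sample strings are illustrative assumptions (in the committed code the topic list lives inside setup_classifier), and the functions only mirror is_relevant_topic and redact_sensitive_info as they appear in the diff.

import re
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

# Train the tiny topic classifier the same way setup_classifier() does:
# one short document per approved topic, labelled 0..N-1.
approved_topics = ['account opening', 'trading', 'fees', 'platforms', 'funds', 'regulations', 'support']
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(approved_topics)
classifier = MultinomialNB()
classifier.fit(X, np.arange(len(approved_topics)))

def is_relevant_topic(query):
    # Predict a topic index for the query and check it is a known label.
    # The classifier can only emit labels 0..N-1, so as written this is True for any input.
    query_vector = vectorizer.transform([query])
    return classifier.predict(query_vector)[0] in range(len(approved_topics))

def redact_sensitive_info(text):
    # Mask 10-12 digit numbers (account/phone style) and PAN-style codes (AAAAA9999A).
    text = re.sub(r'\b\d{10,12}\b', '[REDACTED]', text)
    return re.sub(r'[A-Z]{5}[0-9]{4}[A-Z]', '[REDACTED]', text)

print(is_relevant_topic("What are the brokerage fees on Kite?"))   # True
print(redact_sensitive_info("My PAN is ABCDE1234F"))                # My PAN is [REDACTED]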
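A note on the new event wiring: the Submit button drives the async generate_response coroutine through asyncio.run inside a plain synchronous callback. The sketch below shows that pattern in isolation; the Gradio layout mirrors the diff, but the echo coroutine is a stand-in for the real client.text_generation call.

import asyncio
import gradio as gr

async def generate_response(prompt):
    # Stand-in for the app's coroutine, which awaits the Hugging Face client call.
    await asyncio.sleep(0)  # yield control, as a real network call would
    return f"Echo: {prompt}"

with gr.Blocks() as app:
    query_input = gr.Textbox(label="Enter your query")
    submit_button = gr.Button("Submit")
    response_output = gr.Textbox(label="Response")

    # Same shape as the commit: run the coroutine to completion inside the sync callback.
    submit_button.click(
        fn=lambda x: asyncio.run(generate_response(x)),
        inputs=[query_input],
        outputs=[response_output],
    )

if __name__ == "__main__":
    app.launch()

Gradio also accepts an async function directly as fn, so passing generate_response itself would avoid the explicit asyncio.run; the lambda form is kept here only to match the diff.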