Spaces:
Sleeping
Sleeping
File size: 11,753 Bytes
5b2aed4 f46801a 414a697 4e0b2a3 7d3b780 48c0d8d 190c369 48c0d8d 0ea8c80 cfcb5f3 7d3b780 48c0d8d 7d3b780 9483f8f 1389b27 c51db37 1389b27 c100279 1389b27 c100279 1389b27 c51db37 1389b27 6bbc3b0 c100279 6bbc3b0 c100279 1389b27 4ec7452 c51db37 4ec7452 1389b27 c100279 1389b27 7d3b780 190c369 c51db37 779210b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 |
import os
import sys
import logging
import gradio as gr
import re
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
import asyncio
from crewai import Agent
from huggingface_hub import InferenceClient
import json
import warnings
from typing import Literal, Tuple
# Suppress all deprecation warnings (third-party libs such as gradio/crewai
# emit these noisily at import time).
warnings.filterwarnings("ignore", category=DeprecationWarning)

# Set up logging once at import time; all module code logs through `logger`.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def get_huggingface_api_token():
    """Locate the Hugging Face API token.

    Looks at the HUGGINGFACEHUB_API_TOKEN environment variable first and
    falls back to a local ``config.json`` file.

    Returns:
        The token string, or ``None`` when no usable token was found.
    """
    log = logging.getLogger(__name__)

    env_token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
    if env_token:
        log.info("Hugging Face API token found in environment variables.")
        return env_token

    try:
        with open('config.json', 'r') as config_handle:
            file_token = json.load(config_handle).get('HUGGINGFACEHUB_API_TOKEN')
        if file_token:
            log.info("Hugging Face API token found in config.json file.")
            return file_token
    except FileNotFoundError:
        log.warning("Config file not found.")
    except json.JSONDecodeError:
        log.error("Error reading the config file. Please check its format.")

    log.error("Hugging Face API token not found. Please set it up.")
    return None
# --- Module-level bootstrap: fail fast when no API token is available. ---
token = get_huggingface_api_token()
if not token:
    logger.error("Hugging Face API token is not set. Exiting.")
    sys.exit(1)

# Remote text-generation client used by ResponseExpertAgent.
hf_client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2", token=token)

# Bag-of-words model over the approved support topics.
# NOTE(review): the classifier is fit with one class per topic (labels
# 0..len(approved_topics)-1), so `classifier.predict` can only ever return
# a label in that range — a membership test against
# range(len(approved_topics)) always passes.
vectorizer = CountVectorizer()
approved_topics = ['account opening', 'trading', 'fees', 'platforms', 'funds', 'regulations', 'support']
X = vectorizer.fit_transform(approved_topics)
classifier = MultinomialNB()
classifier.fit(X, np.arange(len(approved_topics)))
class CommunicationExpertAgent(Agent):
    """Sanitizes and empathetically rephrases user queries, rejecting
    questions unrelated to the approved Zerodha support topics."""

    role: Literal["Communication Expert"] = "Communication Expert"
    goal: Literal["To interpret and rephrase user queries with empathy and respect"] = "To interpret and rephrase user queries with empathy and respect"
    backstory: Literal["""You are an expert in communication, specializing in understanding and rephrasing queries to ensure they are interpreted in the most positive and constructive light. Your role is crucial in setting the tone for respectful and empathetic interactions."""] = \
        """You are an expert in communication, specializing in understanding and rephrasing queries to ensure they are interpreted in the most positive and constructive light. Your role is crucial in setting the tone for respectful and empathetic interactions."""

    async def run(self, query: str) -> str:
        """Sanitize *query*, check topical relevance, and return either an
        empathetically rephrased query or an off-topic apology."""
        # Strip characters usable for simple HTML/quote injection.
        sanitized_query = re.sub(r'[<>&\']', '', query)

        # BUG FIX: the previous check was
        #   classifier.predict(...)[0] in range(len(approved_topics))
        # which is always True — MultinomialNB can only predict one of the
        # labels it was fit with — so the off-topic branch was unreachable.
        # A query is now considered relevant when it shares at least one
        # token with the approved-topic vocabulary (nonzero count vector).
        topic_relevance = vectorizer.transform([sanitized_query]).getnnz() > 0
        if not topic_relevance:
            return "I apologize, but your query doesn't seem to be related to Zerodha's services. Could you please ask about account opening, trading, fees, our platforms, funds, regulations, or customer support?"

        emotional_context = self.analyze_emotional_context(sanitized_query)
        rephrased_query = f"Rephrased query with empathy: {sanitized_query}\nEmotional context: {emotional_context}"
        return rephrased_query

    def analyze_emotional_context(self, query: str) -> str:
        """Coarse keyword-based hint about the user's emotional state."""
        lowered = query.lower()
        if any(word in lowered for word in ['frustrated', 'angry', 'upset']):
            return "The user seems frustrated or upset."
        elif any(word in lowered for word in ['confused', 'unclear', 'don\'t understand']):
            return "The user seems confused or seeking clarification."
        else:
            return "The user's emotional state is neutral or unclear."
class ResponseExpertAgent(Agent):
    """Produces the actual answer text via the hosted Mistral endpoint."""

    role: Literal["Response Expert"] = "Response Expert"
    goal: Literal["To provide accurate, helpful, and emotionally intelligent responses to user queries"] = "To provide accurate, helpful, and emotionally intelligent responses to user queries"
    backstory: Literal["""You are an expert in Zerodha's services and policies, with a keen ability to provide comprehensive and empathetic responses. Your role is to ensure that all user queries are addressed accurately while maintaining a respectful and supportive tone."""] = \
        """You are an expert in Zerodha's services and policies, with a keen ability to provide comprehensive and empathetic responses. Your role is to ensure that all user queries are addressed accurately while maintaining a respectful and supportive tone."""

    async def run(self, rephrased_query):
        """Request a completion for *rephrased_query*; on any failure, log
        the error and return a canned apology instead of raising."""
        try:
            logger.info(f"Sending query for generation: {rephrased_query}")
            return hf_client.text_generation(
                rephrased_query, max_new_tokens=500, temperature=0.7
            )
        except Exception as exc:
            logger.error(f"Failed to generate text due to: {str(exc)}")
            return "I apologize, but I'm having trouble generating a response at the moment. Please try again or contact Zerodha support directly if the issue persists."
class PostprocessingAgent(Agent):
    """Final-pass polishing: appends disclaimers and a polite sign-off."""

    role: Literal["Postprocessing Expert"] = "Postprocessing Expert"
    goal: Literal["To enhance and finalize responses ensuring quality and completeness"] = "To enhance and finalize responses ensuring quality and completeness"
    backstory: Literal["""You are responsible for finalizing communications, adding polite terminations, and ensuring that the responses meet the quality standards expected in customer interactions."""] = \
        """You are responsible for finalizing communications, adding polite terminations, and ensuring that the responses meet the quality standards expected in customer interactions."""

    def run(self, response):
        """Apply each post-processing step in order and return the result."""
        for step in (self.add_disclaimers, self.ensure_politeness):
            response = step(response)
        return response

    def add_disclaimers(self, response):
        """Append the standard financial-advice disclaimer whenever the
        response touches on trading or investing vocabulary."""
        trigger_words = ('invest', 'trade', 'buy', 'sell', 'market')
        lowered = response.lower()
        if any(word in lowered for word in trigger_words):
            response += "\n\nPlease note that this information is for educational purposes only and should not be considered as financial advice. Always do your own research and consider consulting with a qualified financial advisor before making investment decisions."
        return response

    def ensure_politeness(self, response):
        """Guarantee the reply ends on a courteous closing line."""
        closing_pattern = r'(Thank you|Is there anything else|Hope this helps|Let me know if you need more information)\s*$'
        if re.search(closing_pattern, response, re.IGNORECASE) is None:
            response += "\n\nThank you for choosing Zerodha. Is there anything else I can assist you with today?"
        return response
class RelationshipManagerAgent(Agent):
    """Wraps queries and responses in warm, rapport-building language."""

    role: Literal["Relationship Manager"] = "Relationship Manager"
    goal: Literal["To elevate communication to embody top 1% customer service qualities"] = "To elevate communication to embody top 1% customer service qualities"
    backstory: Literal["""You are an expert in elite-level communication, capable of transforming interactions to build instant rapport, trust, and confidence. Your role is to enhance both user queries and system responses to create a warm, empathetic, and highly professional interaction."""] = \
        """You are an expert in elite-level communication, capable of transforming interactions to build instant rapport, trust, and confidence. Your role is to enhance both user queries and system responses to create a warm, empathetic, and highly professional interaction."""

    async def rephrase_query(self, query: str) -> str:
        """Return *query* wrapped in a confirming, customer-friendly frame."""
        segments = [
            f"I understand that you're asking about {query}. ",
            "Your question is important, and I'm here to provide you with the best possible assistance. ",
            "To ensure I fully grasp your situation and can offer the most helpful and accurate information, ",
            f"could you please confirm if you want to know about {query}? ",
            "Feel free to add any additional details that might help me serve you better.",
        ]
        return "".join(segments)

    async def enhance_response(self, response: str) -> str:
        """Surround *response* with appreciative opening/closing lines."""
        segments = [
            "I appreciate you bringing this to our attention. ",
            response,
            "\n\nI hope this information effectively addresses your query. ",
            "Is there anything else about this matter that you'd like me to clarify or expand on? ",
            "I'm here to ensure you have all the information you need to make informed decisions about your investments and interactions with Zerodha.",
        ]
        return "".join(segments)

    async def run(self, query: str, response: str = "") -> Tuple[str, str]:
        """Rephrase *query*; when *response* is non-empty, also enhance it.

        Returns a ``(rephrased_query, enhanced_response)`` pair, where the
        second element is the empty string when no response was supplied.
        """
        rephrased = await self.rephrase_query(query)
        enhanced = await self.enhance_response(response) if response else ""
        return rephrased, enhanced
# Instantiate agents
# Single shared instances of each pipeline stage, created at import time
# and used by handle_query below.
communication_expert = CommunicationExpertAgent()
response_expert = ResponseExpertAgent()
postprocessing_agent = PostprocessingAgent()
relationship_manager = RelationshipManagerAgent()
async def handle_query(query):
    """Run *query* through the full agent pipeline and return the reply.

    Pipeline: relationship-manager rephrase -> communication-expert empathy
    pass -> LLM generation -> post-processing -> relationship-manager polish.
    Any unexpected failure is logged and mapped to a generic apology string.
    """
    try:
        if not query.strip():
            return "I'm sorry, but I didn't receive any query. Could you please ask a question about Zerodha's services?"

        # Step 1: Rephrase the query using RelationshipManagerAgent
        rephrased_query, _ = await relationship_manager.run(query)

        # Step 2: Process the rephrased query with CommunicationExpertAgent
        communication_expert_query = await communication_expert.run(rephrased_query)

        # BUG FIX: for off-topic questions the communication expert returns
        # an apology message; previously that apology was forwarded to the
        # LLM as a generation prompt. Short-circuit and return it directly.
        if communication_expert_query.startswith("I apologize, but your query doesn't seem to be related"):
            return communication_expert_query

        # Step 3: Generate response using ResponseExpertAgent
        initial_response = await response_expert.run(communication_expert_query)

        # Step 4: Postprocess the response
        postprocessed_response = postprocessing_agent.run(initial_response)

        # Step 5: Enhance the response using RelationshipManagerAgent
        _, final_response = await relationship_manager.run(query, postprocessed_response)
        return final_response
    except Exception as e:
        logger.error(f"Error in handle_query: {str(e)}")
        return "I apologize, but an error occurred while processing your request. Please try again or contact Zerodha support if the issue persists."
# Gradio interface setup
def setup_interface():
    """Build and return the Gradio Blocks UI for the support chatbot."""
    with gr.Blocks() as app:
        gr.Markdown("# Zerodha Support Chatbot")
        gr.Markdown("Ask questions about Zerodha's services, trading, account management, and more.")
        with gr.Row():
            query_input = gr.Textbox(label="Enter your query", placeholder="Type your question here...")
            submit_button = gr.Button("Submit")
        response_output = gr.Textbox(label="Response", lines=10)
        # handle_query is async; bridge it into Gradio's sync callback with
        # asyncio.run (a fresh event loop per click).
        submit_button.click(
            fn=lambda x: asyncio.run(handle_query(x)),
            inputs=[query_input],
            outputs=[response_output]
        )
        # Canned example prompts shown below the input box.
        gr.Examples(
            examples=[
                "How do I open a Zerodha account?",
                "What are the brokerage charges for intraday trading?",
                "Can you explain how to use the Kite platform?",
                "I'm having trouble logging into my account. What should I do?",
                "What are the margin requirements for F&O trading?"
            ],
            inputs=[query_input]
        )
    return app
# Build the UI at import time (required for Gradio Spaces deployment).
app = setup_interface()

if __name__ == "__main__":
    # BUG FIX: removed a stray trailing '|' after app.launch() (web-scrape
    # table residue) that made the file a SyntaxError.
    app.launch()