Spaces:
Sleeping
Sleeping
File size: 8,847 Bytes
5b2aed4 f46801a 414a697 4e0b2a3 7d3b780 48c0d8d 13c4d18 48c0d8d 0ea8c80 cfcb5f3 7d3b780 48c0d8d 7d3b780 9483f8f 1389b27 c100279 1389b27 c100279 1389b27 6bbc3b0 c100279 6bbc3b0 c100279 1389b27 4ec7452 1389b27 c100279 1389b27 7d3b780 9483f8f 06b60e9 9483f8f c100279 2f4ecd3 9483f8f c100279 9483f8f c100279 9483f8f c100279 9483f8f c100279 9483f8f 9a244e8 09bef47 c100279 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 |
import os
import sys
import logging
import gradio as gr
import re
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
import asyncio
from crewai import Agent
from huggingface_hub import InferenceClient
import json
import warnings
from typing import Literal
# Suppress all deprecation warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def get_huggingface_api_token():
    """Locate the Hugging Face API token.

    Looks at the HUGGINGFACEHUB_API_TOKEN environment variable first and
    falls back to a local ``config.json`` file. Returns the token string,
    or None when no token can be found.
    """
    env_token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
    if env_token:
        logger.info("Hugging Face API token found in environment variables.")
        return env_token
    try:
        with open('config.json', 'r') as config_file:
            file_token = json.load(config_file).get('HUGGINGFACEHUB_API_TOKEN')
        if file_token:
            logger.info("Hugging Face API token found in config.json file.")
            return file_token
    except FileNotFoundError:
        logger.warning("Config file not found.")
    except json.JSONDecodeError:
        logger.error("Error reading the config file. Please check its format.")
    # Neither source produced a token.
    logger.error("Hugging Face API token not found. Please set it up.")
    return None
# Resolve the HF token at import time; the app cannot run without it.
token = get_huggingface_api_token()
if not token:
    logger.error("Hugging Face API token is not set. Exiting.")
    sys.exit(1)
# Shared inference client used by ResponseExpertAgent for text generation.
hf_client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2", token=token)
# Tiny bag-of-words "topic" model: each approved topic string is treated as
# its own class (labels 0..6).
vectorizer = CountVectorizer()
approved_topics = ['account opening', 'trading', 'fees', 'platforms', 'funds', 'regulations', 'support']
X = vectorizer.fit_transform(approved_topics)
classifier = MultinomialNB()
# NOTE(review): the classifier is trained on exactly one document per class,
# so predict() always returns some index in range(len(approved_topics)) —
# see the relevance check in CommunicationExpertAgent.run.
classifier.fit(X, np.arange(len(approved_topics)))
class CommunicationExpertAgent(Agent):
    """Sanitizes incoming user queries, gates them on topic relevance, and
    annotates them with a rough emotional-context reading before they are
    passed on for response generation."""
    role: Literal["Communication Expert"] = "Communication Expert"
    goal: Literal["To interpret and rephrase user queries with empathy and respect"] = "To interpret and rephrase user queries with empathy and respect"
    backstory: Literal["You are an expert in communication, specializing in understanding and rephrasing queries to ensure they are interpreted in the most positive and constructive light. Your role is crucial in setting the tone for respectful and empathetic interactions."] = \
        "You are an expert in communication, specializing in understanding and rephrasing queries to ensure they are interpreted in the most positive and constructive light. Your role is crucial in setting the tone for respectful and empathetic interactions."

    async def run(self, query):
        """Sanitize *query*, reject off-topic input, and return a rephrased form."""
        # Strip characters that could interfere with downstream prompt text.
        cleaned = re.sub(r'[<>&\']', '', query)
        # NOTE(review): classifier.predict() can only ever return one of the
        # trained class indices, so this membership test is always True and
        # the off-topic rejection below is effectively unreachable — confirm
        # whether a real relevance threshold was intended here.
        predicted_class = classifier.predict(vectorizer.transform([cleaned]))[0]
        if predicted_class not in range(len(approved_topics)):
            return "I apologize, but your query doesn't seem to be related to Zerodha's services. Could you please ask about account opening, trading, fees, our platforms, funds, regulations, or customer support?"
        mood = self.analyze_emotional_context(cleaned)
        return f"Rephrased query with empathy: {cleaned}\nEmotional context: {mood}"

    def analyze_emotional_context(self, query):
        """Return a one-line description of the user's apparent emotional state."""
        # Placeholder keyword heuristic; a real implementation would use
        # sentiment analysis or a more sophisticated method.
        lowered = query.lower()
        if any(marker in lowered for marker in ('frustrated', 'angry', 'upset')):
            return "The user seems frustrated or upset."
        if any(marker in lowered for marker in ('confused', 'unclear', "don't understand")):
            return "The user seems confused or seeking clarification."
        return "The user's emotional state is neutral or unclear."
class ResponseExpertAgent(Agent):
    """Generates the actual answer text by forwarding the rephrased query to
    the Hugging Face inference client."""
    role: Literal["Response Expert"] = "Response Expert"
    goal: Literal["To provide accurate, helpful, and emotionally intelligent responses to user queries"] = "To provide accurate, helpful, and emotionally intelligent responses to user queries"
    backstory: Literal["You are an expert in Zerodha's services and policies, with a keen ability to provide comprehensive and empathetic responses. Your role is to ensure that all user queries are addressed accurately while maintaining a respectful and supportive tone."] = \
        "You are an expert in Zerodha's services and policies, with a keen ability to provide comprehensive and empathetic responses. Your role is to ensure that all user queries are addressed accurately while maintaining a respectful and supportive tone."

    async def run(self, rephrased_query):
        """Return the model's completion for *rephrased_query*, or an apology
        string when generation fails for any reason."""
        try:
            logger.info(f"Sending query for generation: {rephrased_query}")
            generated = hf_client.text_generation(
                rephrased_query, max_new_tokens=500, temperature=0.7
            )
            return generated
        except Exception as e:
            # Broad catch is deliberate: any client/network failure must
            # degrade into a user-facing fallback, never a crash.
            logger.error(f"Failed to generate text due to: {str(e)}")
            return "I apologize, but I'm having trouble generating a response at the moment. Please try again or contact Zerodha support directly if the issue persists."
class PostprocessingAgent(Agent):
    """Finalizes a generated response: appends a financial-advice disclaimer
    when warranted and guarantees a polite closing line."""
    role: Literal["Postprocessing Expert"] = "Postprocessing Expert"
    goal: Literal["To enhance and finalize responses ensuring quality and completeness"] = "To enhance and finalize responses ensuring quality and completeness"
    backstory: Literal["You are responsible for finalizing communications, adding polite terminations, and ensuring that the responses meet the quality standards expected in customer interactions."] = \
        "You are responsible for finalizing communications, adding polite terminations, and ensuring that the responses meet the quality standards expected in customer interactions."

    def run(self, response):
        """Apply the disclaimer pass, then the politeness pass, and return the result."""
        return self.ensure_politeness(self.add_disclaimers(response))

    def add_disclaimers(self, response):
        """Append an educational-purposes disclaimer when investment-related
        terms appear in *response*."""
        financial_terms = ('invest', 'trade', 'buy', 'sell', 'market')
        lowered = response.lower()
        if any(term in lowered for term in financial_terms):
            response += "\n\nPlease note that this information is for educational purposes only and should not be considered as financial advice. Always do your own research and consider consulting with a qualified financial advisor before making investment decisions."
        return response

    def ensure_politeness(self, response):
        """Append a polite sign-off unless *response* already ends with one."""
        closing_pattern = r'(Thank you|Is there anything else|Hope this helps|Let me know if you need more information)\s*$'
        if re.search(closing_pattern, response, re.IGNORECASE) is None:
            response += "\n\nThank you for choosing Zerodha. Is there anything else I can assist you with today?"
        return response
# Module-level agent singletons shared by every call to handle_query.
communication_expert = CommunicationExpertAgent()
response_expert = ResponseExpertAgent()
postprocessing_agent = PostprocessingAgent()
async def handle_query(query):
    """Run *query* through the three-agent pipeline and return the final text.

    Blank input and any pipeline failure both yield a friendly fallback
    message rather than raising to the caller.
    """
    try:
        # Empty-check lives inside the try so that even a non-string input
        # degrades into the generic error message below.
        if not query.strip():
            return "I'm sorry, but I didn't receive any query. Could you please ask a question about Zerodha's services?"
        rephrased = await communication_expert.run(query)
        generated = await response_expert.run(rephrased)
        return postprocessing_agent.run(generated)
    except Exception as e:
        logger.error(f"Error in handle_query: {str(e)}")
        return "I apologize, but an error occurred while processing your request. Please try again or contact Zerodha support if the issue persists."
# Gradio interface setup
def setup_interface():
    """Build and return the Gradio Blocks UI for the support chatbot.

    Returns:
        gr.Blocks: the assembled (not yet launched) Gradio app.
    """
    with gr.Blocks() as app:
        gr.Markdown("# Zerodha Support Chatbot")
        gr.Markdown("Ask questions about Zerodha's services, trading, account management, and more.")
        with gr.Row():
            query_input = gr.Textbox(label="Enter your query", placeholder="Type your question here...")
        submit_button = gr.Button("Submit")
        response_output = gr.Textbox(label="Response", lines=10)
        # BUG FIX: the callback used to be `lambda x: asyncio.run(handle_query(x))`,
        # but asyncio.run() raises "cannot be called from a running event loop"
        # once Gradio's own loop is active. Gradio awaits coroutine callbacks
        # natively, so the async function is passed directly.
        submit_button.click(
            fn=handle_query,
            inputs=[query_input],
            outputs=[response_output]
        )
        # Clickable sample queries to seed the input box.
        gr.Examples(
            examples=[
                "How do I open a Zerodha account?",
                "What are the brokerage charges for intraday trading?",
                "Can you explain how to use the Kite platform?",
                "I'm having trouble logging into my account. What should I do?",
                "What are the margin requirements for F&O trading?"
            ],
            inputs=[query_input]
        )
    return app
# Build the app at import time so hosting platforms (e.g. HF Spaces) can
# discover the module-level `app`; launch the server only when run directly.
app = setup_interface()
if __name__ == "__main__":
    app.launch()