import os
import sys
import logging
import gradio as gr
from huggingface_hub import AsyncInferenceClient
import re
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
import asyncio
from crewai import Agent as CrewAgent, Task, Crew
import autogen
from langchain_openai import ChatOpenAI
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Check for OpenAI API key
if 'OPENAI_API_KEY' not in os.environ:
    logger.error("OPENAI_API_KEY environment variable is not set.")
    logger.info("Please set the OPENAI_API_KEY environment variable before running this script.")
    sys.exit(1)
# Initialize the client with the Mistral-7B-Instruct-v0.2 model
# The async client is used so that generate_response can await text_generation
try:
    client = AsyncInferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
except Exception as e:
    logger.error(f"Failed to initialize inference client: {e}")
    sys.exit(1)
# Shared context for both agents
SHARED_CONTEXT = """You are part of a multi-agent system designed to provide respectful, empathetic, and accurate support for Zerodha, a leading Indian financial services company. Your role is crucial in ensuring all interactions uphold the highest standards of customer service while maintaining Zerodha's excellent reputation.
Key points about Zerodha:
1. India's largest discount broker, known for innovative technology and low-cost trading.
2. Flat fee structure: ₹20 per executed order for intraday and F&O trades, zero brokerage for delivery equity investments.
3. Main trading platform: Kite (web and mobile).
4. Coin platform for commission-free direct mutual fund investments.
5. Extensive educational resources through Varsity.
6. Additional tools: Sentinel (price alerts) and ChartIQ (advanced charting).
7. Console for account management and administrative tasks.
Always prioritize user safety, ethical investing practices, and transparent communication. Never provide information that could mislead users or bring disrepute to Zerodha."""
# Guardrail functions
def sanitize_input(input_text):
    # Remove characters commonly used in markup/script injection
    return re.sub(r'[<>&\']', '', input_text)
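# Lightweight topic filter: a bag-of-words Naive Bayes model fit on the approved
# topic labels themselves, used by is_relevant_topic to keep queries Zerodha-related.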
approved_topics = ['account opening', 'trading', 'fees', 'platforms', 'funds', 'regulations', 'support']
vectorizer = CountVectorizer()
classifier = MultinomialNB()
X = vectorizer.fit_transform(approved_topics)
y = np.arange(len(approved_topics))
classifier.fit(X, y)
def is_relevant_topic(query):
    query_vector = vectorizer.transform([query])
    # A query sharing no vocabulary with the approved topics vectorizes to all zeros;
    # treat it as off-topic, since the classifier otherwise always predicts some approved class.
    if query_vector.nnz == 0:
        return False
    prediction = classifier.predict(query_vector)
    return prediction[0] in range(len(approved_topics))
def redact_sensitive_info(text):
    # Mask 10-12 digit numbers (phone/account numbers) and PAN-style identifiers
    text = re.sub(r'\b\d{10,12}\b', '[REDACTED]', text)
    text = re.sub(r'[A-Z]{5}[0-9]{4}[A-Z]', '[REDACTED]', text)
    return text
def check_response_content(response):
    unauthorized_patterns = [
        r'\b(guarantee|assured|certain)\b.*\b(returns|profit)\b',
        r'\b(buy|sell)\b.*\b(specific stocks?|shares?)\b'
    ]
    return not any(re.search(pattern, response, re.IGNORECASE) for pattern in unauthorized_patterns)
def check_confidence(response):
    uncertain_phrases = ["I'm not sure", "It's possible", "I don't have enough information"]
    return not any(phrase.lower() in response.lower() for phrase in uncertain_phrases)
async def generate_response(prompt):
    try:
        return await client.text_generation(prompt, max_new_tokens=500, temperature=0.7)
    except Exception as e:
        logger.error(f"Error generating response: {e}")
        return "I apologize, but I'm having trouble generating a response at the moment. Please try again later."
def post_process_response(response):
    response = re.sub(r'\b(stupid|dumb|idiotic|foolish)\b', 'mistaken', response, flags=re.IGNORECASE)
    if not re.search(r'(Thank you|Is there anything else|Hope this helps|Let me know if you need more information)\s*$', response, re.IGNORECASE):
        response += "\n\nIs there anything else I can help you with regarding Zerodha's services?"
    if re.search(r'\b(invest|trade|buy|sell|market)\b', response, re.IGNORECASE):
        response += "\n\nPlease note that this information is for educational purposes only and should not be considered as financial advice. Always do your own research and consider consulting with a qualified financial advisor before making investment decisions."
    return response
# CrewAI and AutoGen setup
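# Two-stage pipeline: a CrewAI agent first rephrases the user query with empathy,
# then an AutoGen agent drafts the final reply (see zerodha_support below).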
chat_model = ChatOpenAI(model="gpt-3.5-turbo")
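# CrewAI agents: one focused on empathetic rephrasing, one on the substantive answer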
communication_expert_crew = CrewAgent(
    role='Communication Expert',
    goal='Interpret and rephrase user queries with empathy and respect',
    backstory="""You are an expert in communication, specializing in understanding and rephrasing queries to ensure they are interpreted in the most positive and constructive light. Your role is crucial in setting the tone for respectful and empathetic interactions.""",
    verbose=True,
    allow_delegation=False,
    llm=chat_model
)
response_expert_crew = CrewAgent(
    role='Response Expert',
    goal='Provide accurate, helpful, and emotionally intelligent responses to user queries',
    backstory="""You are an expert in Zerodha's services and policies, with a keen ability to provide comprehensive and empathetic responses. Your role is to ensure that all user queries are addressed accurately while maintaining a respectful and supportive tone.""",
    verbose=True,
    allow_delegation=False,
    llm=chat_model
)
llm_config = {
    "config_list": [{"model": "gpt-3.5-turbo"}]
}
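# AutoGen counterparts of the two roles, seeded with the shared Zerodha context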
communication_expert_autogen = autogen.AssistantAgent(
    name="Communication_Expert",
    system_message=SHARED_CONTEXT + """
As the Communication Expert, your primary role is to interpret user queries with the utmost respect and empathy. You should:
1. Rephrase the user's query to ensure it's understood in the most positive and constructive light.
2. Identify and highlight any emotional subtext or concerns in the query.
3. Frame the query in a way that invites a supportive and informative response.
4. Ensure that any potential complaints or frustrations are acknowledged respectfully.
Your output should be a rephrased version of the user's query that maintains its original intent while setting the stage for an empathetic and respectful response.""",
    llm_config=llm_config
)
response_expert_autogen = autogen.AssistantAgent(
    name="Response_Expert",
    system_message=SHARED_CONTEXT + """
As the Response Expert, your role is to provide accurate, helpful, and emotionally intelligent responses to user queries. You should:
1. Address the user's question or concern directly and comprehensively.
2. Maintain a tone of respect and empathy throughout your response.
3. Provide clear, factual information about Zerodha's services and policies.
4. When discussing financial matters, include appropriate disclaimers and encourage users to seek professional advice for complex decisions.
5. For complaints or concerns, acknowledge them respectfully and provide constructive guidance or escalation paths.
6. Always uphold Zerodha's reputation for transparency and user-centric service.
Your output should be a complete, informative response that addresses the user's query while demonstrating empathy and respect.""",
    llm_config=llm_config
)
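# Proxy that relays messages to the Response Expert with no human input and a single auto-reply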
user_proxy = autogen.UserProxyAgent(
    name="User_Proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=1
)
# Main function
async def zerodha_support(message, history):
    try:
        sanitized_message = sanitize_input(message)
        if not is_relevant_topic(sanitized_message):
            return "I'm sorry, but I can only assist with queries related to Zerodha's services and trading. Could you please ask a question about your Zerodha account, trading, or our platforms?"
        sanitized_message = redact_sensitive_info(sanitized_message)
        # Use crewAI for initial query rephrasing
        rephrase_task = Task(
            description=f"Rephrase the following user query with empathy and respect: '{sanitized_message}'",
            agent=communication_expert_crew
        )
        crew = Crew(
            agents=[communication_expert_crew],
            tasks=[rephrase_task],
            verbose=2
        )
        rephrased_query = crew.kickoff()
        # Use AutoGen for generating the response
        async def get_autogen_response():
            await user_proxy.a_initiate_chat(
                response_expert_autogen,
                message=f"Please provide a respectful and empathetic response to the following query: '{rephrased_query}'"
            )
            return response_expert_autogen.last_message()["content"]
        res