invincible-jha committed on
Commit
5b2aed4
·
verified ·
1 Parent(s): 5305f8e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -152
app.py CHANGED
@@ -1,175 +1,94 @@
 
 
 
1
  import gradio as gr
2
- from crewai import Agent as CrewAgent, Task, Crew
3
  import autogen
4
  from huggingface_hub import InferenceClient
5
- import os
6
  import re
7
  import numpy as np
8
  from sklearn.feature_extraction.text import CountVectorizer
9
  from sklearn.naive_bayes import MultinomialNB
10
  import asyncio
11
 
12
- # Initialize the client with the Mistral-7B-Instruct-v0.2 model
13
- client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
14
-
15
- # Shared context for both agents
16
- SHARED_CONTEXT = """You are part of a multi-agent system designed to provide respectful, empathetic, and accurate support for Zerodha, a leading Indian financial services company. Your role is crucial in ensuring all interactions uphold the highest standards of customer service while maintaining Zerodha's excellent reputation.
17
-
18
- Key points about Zerodha:
19
- 1. India's largest discount broker, known for innovative technology and low-cost trading.
20
- 2. Flat fee structure: ₹20 per executed order for intraday and F&O trades, zero brokerage for delivery equity investments.
21
- 3. Main trading platform: Kite (web and mobile).
22
- 4. Coin platform for commission-free direct mutual fund investments.
23
- 5. Extensive educational resources through Varsity.
24
- 6. Additional tools: Sentinel (price alerts) and ChartIQ (advanced charting).
25
- 7. Console for account management and administrative tasks.
26
-
27
- Always prioritize user safety, ethical investing practices, and transparent communication. Never provide information that could mislead users or bring disrepute to Zerodha."""
28
-
29
- # Guardrail functions
30
- def sanitize_input(input_text):
31
- sanitized = re.sub(r'[<>&\']', '', input_text)
32
- return sanitized
33
-
34
- approved_topics = ['account opening', 'trading', 'fees', 'platforms', 'funds', 'regulations', 'support']
35
- vectorizer = CountVectorizer()
36
- classifier = MultinomialNB()
37
-
38
- X = vectorizer.fit_transform(approved_topics)
39
- y = np.arange(len(approved_topics))
40
- classifier.fit(X, y)
41
-
42
- def is_relevant_topic(query):
43
- query_vector = vectorizer.transform([query])
44
- prediction = classifier.predict(query_vector)
45
- return prediction[0] in range(len(approved_topics))
46
-
47
- def redact_sensitive_info(text):
48
- text = re.sub(r'\b\d{10,12}\b', '[REDACTED]', text)
49
- text = re.sub(r'[A-Z]{5}[0-9]{4}[A-Z]', '[REDACTED]', text)
50
- return text
51
-
52
- def check_response_content(response):
53
- unauthorized_patterns = [
54
- r'\b(guarantee|assured|certain)\b.*\b(returns|profit)\b',
55
- r'\b(buy|sell)\b.*\b(specific stocks?|shares?)\b'
56
- ]
57
- return not any(re.search(pattern, response, re.IGNORECASE) for pattern in unauthorized_patterns)
58
-
59
- def check_confidence(response):
60
- uncertain_phrases = ["I'm not sure", "It's possible", "I don't have enough information"]
61
- return not any(phrase in response for phrase in uncertain_phrases)
62
-
63
- def generate_response(prompt):
64
- response = client.text_generation(prompt, max_new_tokens=500, temperature=0.7)
65
- return response
66
-
67
- def post_process_response(response):
68
- response = re.sub(r'\b(stupid|dumb|idiotic|foolish)\b', 'mistaken', response, flags=re.IGNORECASE)
69
-
70
- if not re.search(r'(Thank you|Is there anything else|Hope this helps|Let me know if you need more information)\s*$', response, re.IGNORECASE):
71
- response += "\n\nIs there anything else I can help you with regarding Zerodha's services?"
72
-
73
- if re.search(r'\b(invest|trade|buy|sell|market)\b', response, re.IGNORECASE):
74
- response += "\n\nPlease note that this information is for educational purposes only and should not be considered as financial advice. Always do your own research and consider consulting with a qualified financial advisor before making investment decisions."
75
-
76
- return response
77
-
78
- # CrewAI and AutoGen setup
79
- communication_expert_crew = CrewAgent(
80
- role='Communication Expert',
81
- goal='Interpret and rephrase user queries with empathy and respect',
82
- backstory="""You are an expert in communication, specializing in understanding and rephrasing queries to ensure they are interpreted in the most positive and constructive light. Your role is crucial in setting the tone for respectful and empathetic interactions.""",
83
- verbose=True,
84
- allow_delegation=False,
85
- tools=[generate_response]
86
- )
87
 
88
- response_expert_crew = CrewAgent(
89
- role='Response Expert',
90
- goal='Provide accurate, helpful, and emotionally intelligent responses to user queries',
91
- backstory="""You are an expert in Zerodha's services and policies, with a keen ability to provide comprehensive and empathetic responses. Your role is to ensure that all user queries are addressed accurately while maintaining a respectful and supportive tone.""",
92
- verbose=True,
93
- allow_delegation=False,
94
- tools=[generate_response]
95
- )
96
 
97
- communication_expert_autogen = autogen.AssistantAgent(
98
- name="Communication_Expert",
99
- system_message=SHARED_CONTEXT + """
100
- As the Communication Expert, your primary role is to interpret user queries with the utmost respect and empathy. You should:
101
- 1. Rephrase the user's query to ensure it's understood in the most positive and constructive light.
102
- 2. Identify and highlight any emotional subtext or concerns in the query.
103
- 3. Frame the query in a way that invites a supportive and informative response.
104
- 4. Ensure that any potential complaints or frustrations are acknowledged respectfully.
105
-
106
- Your output should be a rephrased version of the user's query that maintains its original intent while setting the stage for an empathetic and respectful response.""",
107
- llm_config={"config_list": [{"model": "gpt-3.5-turbo"}]}
108
- )
109
 
110
- response_expert_autogen = autogen.AssistantAgent(
111
- name="Response_Expert",
112
- system_message=SHARED_CONTEXT + """
113
- As the Response Expert, your role is to provide accurate, helpful, and emotionally intelligent responses to user queries. You should:
114
- 1. Address the user's question or concern directly and comprehensively.
115
- 2. Maintain a tone of respect and empathy throughout your response.
116
- 3. Provide clear, factual information about Zerodha's services and policies.
117
- 4. When discussing financial matters, include appropriate disclaimers and encourage users to seek professional advice for complex decisions.
118
- 5. For complaints or concerns, acknowledge them respectfully and provide constructive guidance or escalation paths.
119
- 6. Always uphold Zerodha's reputation for transparency and user-centric service.
120
-
121
- Your output should be a complete, informative response that addresses the user's query while demonstrating empathy and respect.""",
122
- llm_config={"config_list": [{"model": "gpt-3.5-turbo"}]}
123
- )
124
 
125
- user_proxy = autogen.UserProxyAgent(
126
- name="User_Proxy",
127
- human_input_mode="NEVER",
128
- max_consecutive_auto_reply=1
129
- )
 
 
 
 
 
130
 
131
  # Main function
132
  async def zerodha_support(message, history):
133
- sanitized_message = sanitize_input(message)
134
-
135
- if not is_relevant_topic(sanitized_message):
136
- return "I'm sorry, but I can only assist with queries related to Zerodha's services and trading. Could you please ask a question about your Zerodha account, trading, or our platforms?"
137
-
138
- sanitized_message = redact_sensitive_info(sanitized_message)
139
-
140
- # Use crewAI for initial query rephrasing
141
- rephrase_task = Task(
142
- description=f"Rephrase the following user query with empathy and respect: '{sanitized_message}'",
143
- agent=communication_expert_crew
144
- )
145
-
146
- crew = Crew(
147
- agents=[communication_expert_crew],
148
- tasks=[rephrase_task],
149
- verbose=2
150
- )
151
-
152
- rephrased_query = crew.kickoff()
153
-
154
- # Use AutoGen for generating the response
155
- async def get_autogen_response():
156
- await user_proxy.a_initiate_chat(
157
- response_expert_autogen,
158
- message=f"Please provide a respectful and empathetic response to the following query: '{rephrased_query}'"
159
  )
160
- return response_expert_autogen.last_message()["content"]
161
 
162
- response = await get_autogen_response()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
163
 
164
- if not check_response_content(response):
165
- response += "\n\nPlease note that I cannot provide specific investment advice or guarantee returns. For personalized guidance, please consult with a qualified financial advisor."
166
 
167
- if not check_confidence(response):
168
- return "I apologize, but I'm not confident in providing an accurate answer to this query. For the most up-to-date and accurate information, please contact Zerodha's customer support directly."
169
 
170
- final_response = post_process_response(response)
171
 
172
- return final_response
 
 
 
173
 
174
  # Wrap the asynchronous function for Gradio
175
  def zerodha_support_wrapper(message, history):
@@ -195,5 +114,9 @@ demo = gr.ChatInterface(
195
  )
196
 
197
  if __name__ == "__main__":
198
- public_url = demo.launch(share=True, server_name="0.0.0.0", server_port=7860)
199
- print(f"\n\nSHAREABLE LINK: {public_url}\n\n")
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import logging
4
  import gradio as gr
 
5
  import autogen
6
  from huggingface_hub import InferenceClient
 
7
  import re
8
  import numpy as np
9
  from sklearn.feature_extraction.text import CountVectorizer
10
  from sklearn.naive_bayes import MultinomialNB
11
  import asyncio
12
 
13
# Configure root logging: timestamped, level-tagged messages at INFO and above.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=logging.INFO,
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
# Refuse to run on interpreters older than 3.7 (the app relies on modern
# asyncio and f-string support); exit non-zero so wrappers notice the failure.
if sys.version_info[:2] < (3, 7):
    logging.error("This script requires Python 3.7 or higher")
    sys.exit(1)
 
 
 
 
20
 
21
# Fail fast when a credential the app depends on is absent from the
# environment, logging which variable is missing before exiting.
required_env_vars = ['HUGGINGFACE_API_KEY']
missing_vars = [name for name in required_env_vars if name not in os.environ]
for var in missing_vars:
    logging.error(f"Environment variable {var} is not set")
    sys.exit(1)
 
 
 
 
 
 
27
 
28
# Connect to the hosted Mistral-7B-Instruct model. Every generated reply
# flows through this client, so an initialization failure is fatal.
_MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.2"
try:
    client = InferenceClient(_MODEL_ID)
except Exception as exc:
    logging.error(f"Failed to initialize InferenceClient: {exc}")
    sys.exit(1)
 
 
 
 
 
 
 
 
34
 
35
+ # Rest of your code (SHARED_CONTEXT, guardrail functions, etc.) remains the same
36
+
37
# CrewAI setup — imported lazily so a missing package produces an
# actionable install hint instead of a bare ImportError traceback.
try:
    from crewai import Agent as CrewAgent
    from crewai import Task
    from crewai import Crew
except ImportError:
    logging.error("Failed to import crewai. Make sure it's installed: pip install crewai")
    sys.exit(1)
43
+
44
+ # CrewAI and AutoGen setup remains the same
45
 
46
  # Main function
47
async def zerodha_support(message, history):
    """Answer a Zerodha support query through the two-stage agent pipeline.

    Stage 1 sanitizes and topic-checks the query, then has the CrewAI
    communication expert rephrase it empathetically. Stage 2 hands the
    rephrased query to the AutoGen response expert and runs guardrail
    checks over the draft before post-processing it for delivery.

    The ``history`` argument is the Gradio chat history; it is accepted for
    interface compatibility but not used here.
    """
    try:
        # Strip characters that could be used for injection before anything else.
        cleaned = sanitize_input(message)

        # Off-topic queries get a polite redirect instead of an answer.
        if not is_relevant_topic(cleaned):
            return "I'm sorry, but I can only assist with queries related to Zerodha's services and trading. Could you please ask a question about your Zerodha account, trading, or our platforms?"

        # Mask account numbers / PAN-like tokens so they never reach the models.
        cleaned = redact_sensitive_info(cleaned)

        # Stage 1: CrewAI rephrases the query with empathy and respect.
        rephrase = Task(
            description=f"Rephrase the following user query with empathy and respect: '{cleaned}'",
            agent=communication_expert_crew,
        )
        pipeline = Crew(
            agents=[communication_expert_crew],
            tasks=[rephrase],
            verbose=2,
        )
        rephrased_query = pipeline.kickoff()

        # Stage 2: AutoGen response expert drafts the actual answer.
        await user_proxy.a_initiate_chat(
            response_expert_autogen,
            message=f"Please provide a respectful and empathetic response to the following query: '{rephrased_query}'",
        )
        draft = response_expert_autogen.last_message()["content"]

        # Guardrail: append a disclaimer if the draft hints at advice/guarantees.
        if not check_response_content(draft):
            draft += "\n\nPlease note that I cannot provide specific investment advice or guarantee returns. For personalized guidance, please consult with a qualified financial advisor."

        # Guardrail: bail out entirely rather than deliver an uncertain answer.
        if not check_confidence(draft):
            return "I apologize, but I'm not confident in providing an accurate answer to this query. For the most up-to-date and accurate information, please contact Zerodha's customer support directly."

        return post_process_response(draft)
    except Exception as e:
        # Top-level boundary: log the failure and return a user-safe message.
        logging.error(f"Error in zerodha_support: {e}")
        return "I apologize, but an error occurred while processing your request. Please try again later."
92
 
93
  # Wrap the asynchronous function for Gradio
94
  def zerodha_support_wrapper(message, history):
 
114
  )
115
 
116
if __name__ == "__main__":
    try:
        # share=True asks Gradio for a public tunnel URL; binding to 0.0.0.0
        # makes the app reachable from outside the container/host as well.
        public_url = demo.launch(share=True, server_name="0.0.0.0", server_port=7860)
        print(f"\n\nSHAREABLE LINK: {public_url}\n\n")
    except Exception as e:
        # Launch failures (port in use, tunnel unavailable, ...) are fatal:
        # log the cause and exit non-zero so supervisors can detect it.
        logging.error(f"Failed to launch Gradio interface: {e}")
        sys.exit(1)