invincible-jha committed on
Commit
7d3b780
·
verified ·
1 Parent(s): 3b3aff1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -6
app.py CHANGED
@@ -8,18 +8,36 @@ from sklearn.feature_extraction.text import CountVectorizer
8
  from sklearn.naive_bayes import MultinomialNB
9
  import asyncio
10
  from crewai import Agent, Task, Crew
11
- from langchain.llms import HuggingFaceHub
12
  import random
 
13
 
14
  # Set up logging
15
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
16
  logger = logging.getLogger(__name__)
17
 
18
- # Set up the Mistral model using HuggingFaceHub
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  try:
20
- mistral_llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.2", model_kwargs={"temperature": 0.7, "max_new_tokens": 500})
 
21
  except Exception as e:
22
- logger.error(f"Failed to initialize Mistral model: {e}")
23
  sys.exit(1)
24
 
25
  # Shared context for both agents
@@ -69,6 +87,14 @@ def check_confidence(response):
69
  uncertain_phrases = ["I'm not sure", "It's possible", "I don't have enough information"]
70
  return not any(phrase.lower() in response.lower() for phrase in uncertain_phrases)
71
 
 
 
 
 
 
 
 
 
72
  def post_process_response(response):
73
  response = re.sub(r'\b(stupid|dumb|idiotic|foolish)\b', 'mistaken', response, flags=re.IGNORECASE)
74
 
@@ -87,7 +113,7 @@ communication_expert_crew = Agent(
87
  backstory="""You are an expert in communication, specializing in understanding and rephrasing queries to ensure they are interpreted in the most positive and constructive light. Your role is crucial in setting the tone for respectful and empathetic interactions.""",
88
  verbose=True,
89
  allow_delegation=False,
90
- llm=mistral_llm
91
  )
92
 
93
  response_expert_crew = Agent(
@@ -96,7 +122,7 @@ response_expert_crew = Agent(
96
  backstory="""You are an expert in Zerodha's services and policies, with a keen ability to provide comprehensive and empathetic responses. Your role is to ensure that all user queries are addressed accurately while maintaining a respectful and supportive tone.""",
97
  verbose=True,
98
  allow_delegation=False,
99
- llm=mistral_llm
100
  )
101
 
102
  # Main function
 
8
  from sklearn.naive_bayes import MultinomialNB
9
  import asyncio
10
  from crewai import Agent, Task, Crew
11
+ from huggingface_hub import InferenceClient
12
  import random
13
+ import json
14
 
15
  # Set up logging
16
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
17
  logger = logging.getLogger(__name__)
18
 
19
# Function to get Hugging Face API token
def get_huggingface_api_token():
    """Resolve the Hugging Face API token.

    Lookup order:
      1. ``HUGGINGFACEHUB_API_TOKEN`` environment variable
      2. ``HUGGINGFACEHUB_API_TOKEN`` key in a local ``config.json``
      3. interactive prompt, as a last resort

    Returns:
        str: the API token, stripped of surrounding whitespace.
    """
    token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
    if not token:
        try:
            with open('config.json', 'r') as config_file:
                config = json.load(config_file)
            token = config.get('HUGGINGFACEHUB_API_TOKEN')
        except (FileNotFoundError, json.JSONDecodeError):
            logger.warning("Config file not found or invalid. Please provide the Hugging Face API token.")

    if not token:
        # NOTE(review): input() blocks (or raises EOFError) in non-interactive
        # deployments such as a hosted Space — confirm this fallback is wanted.
        token = input("Please enter your Hugging Face API token: ")

    # Strip stray whitespace/newlines (common when a token is pasted or read
    # from a file) so authentication doesn't fail on an otherwise valid token.
    return token.strip() if token else token
34
+
35
# Create the shared Hugging Face Inference Client at import time; abort
# start-up if it cannot be built, since every agent depends on it.
try:
    hf_token = get_huggingface_api_token()
    client = InferenceClient(
        model="mistralai/Mistral-7B-Instruct-v0.2",
        token=hf_token,
    )
except Exception as e:
    logger.error(f"Failed to initialize Hugging Face client: {e}")
    sys.exit(1)
42
 
43
  # Shared context for both agents
 
87
  uncertain_phrases = ["I'm not sure", "It's possible", "I don't have enough information"]
88
  return not any(phrase.lower() in response.lower() for phrase in uncertain_phrases)
89
 
90
async def generate_response(prompt):
    """Generate text for *prompt* via the shared InferenceClient.

    Returns:
        str: the generated text, or a canned apology if generation fails.
    """
    try:
        # InferenceClient.text_generation is synchronous — it returns a str,
        # not an awaitable.  `await client.text_generation(...)` raised
        # TypeError, which the except below swallowed, so every call returned
        # the apology message.  Run the blocking call in a worker thread so
        # the event loop stays responsive and the real result is returned.
        response = await asyncio.to_thread(
            client.text_generation, prompt, max_new_tokens=500, temperature=0.7
        )
        return response
    except Exception as e:
        logger.error(f"Error generating response: {e}")
        return "I apologize, but I'm having trouble generating a response at the moment. Please try again later."
97
+
98
  def post_process_response(response):
99
  response = re.sub(r'\b(stupid|dumb|idiotic|foolish)\b', 'mistaken', response, flags=re.IGNORECASE)
100
 
 
113
  backstory="""You are an expert in communication, specializing in understanding and rephrasing queries to ensure they are interpreted in the most positive and constructive light. Your role is crucial in setting the tone for respectful and empathetic interactions.""",
114
  verbose=True,
115
  allow_delegation=False,
116
+ tools=[generate_response]
117
  )
118
 
119
  response_expert_crew = Agent(
 
122
  backstory="""You are an expert in Zerodha's services and policies, with a keen ability to provide comprehensive and empathetic responses. Your role is to ensure that all user queries are addressed accurately while maintaining a respectful and supportive tone.""",
123
  verbose=True,
124
  allow_delegation=False,
125
+ tools=[generate_response]
126
  )
127
 
128
  # Main function