import pinecone
from openai import OpenAI

from Answering_Agent import Answering_Agent
from Obnoxious_Agent import Obnoxious_Agent
from Query_Agent import Query_Agent
from Relevant_Documents_Agent import Relevant_Documents_Agent
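
# Head_Agent orchestrates the pipeline: Obnoxious_Agent screens the user input,
# Query_Agent retrieves passages from the Pinecone index, and Answering_Agent
# generates the reply in either a chatty or a concise mode.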
class Head_Agent:
    def __init__(self, openai_key, pinecone_key, pinecone_index_name) -> None:
        # Store the API keys and the Pinecone index name; never hardcode secrets here
        self.openai_key = openai_key
        self.pinecone_key = pinecone_key
        self.pinecone_index_name = pinecone_index_name
        self.openai_client = OpenAI(api_key=openai_key)
        # Initialize Pinecone and open a handle to the index
        # (assumes the v3+ pinecone client; older versions used pinecone.init instead)
        pinecone_client = pinecone.Pinecone(api_key=pinecone_key)
        self.pinecone_index = pinecone_client.Index(pinecone_index_name)

        # Placeholders for the sub-agents, initialized in setup_sub_agents
        self.obnoxious_agent = None
        self.query_agent = None
        self.answering_agent = None
        self.relevant_documents_agent = None

        # Set up the sub-agents
        self.setup_sub_agents()
    def get_completion(self, prompt, model="gpt-4"):
        # Send a single-turn prompt to the chat completions API and return the text
        message = {"role": "user", "content": prompt}
        response = self.openai_client.chat.completions.create(
            model=model,
            messages=[message]
        )
        return response.choices[0].message.content
    def setup_sub_agents(self):
        # Obnoxious_Agent screens queries; it takes no external client here
        self.obnoxious_agent = Obnoxious_Agent()
        # Query_Agent embeds queries and searches Pinecone
        # (assumption: it accepts an index handle and an OpenAI client object;
        # pass self.pinecone_index_name instead if it opens its own connection)
        self.query_agent = Query_Agent(pinecone_index=self.pinecone_index,
                                       openai_client=self.openai_client,
                                       embeddings="text-embedding-ada-002")
        # Answering_Agent drafts the final response from the retrieved documents
        self.answering_agent = Answering_Agent(openai_api_key=self.openai_key)
        # Reuse the shared OpenAI client instead of constructing a second one
        self.relevant_documents_agent = Relevant_Documents_Agent(self.openai_client)
    def process_query(self, user_input, conversation_history):
        # Refuse the query outright if the Obnoxious_Agent flags it
        if self.obnoxious_agent.check_query(user_input) == "Yes":
            return "Sorry, I cannot respond to this query."
        # Answer plain greetings directly, without querying the vector store
        greetings = [
            "hi", "hello", "how are you", "hey", "good morning",
            "good afternoon", "good evening", "greetings", "what's up",
            "howdy", "hi there", "hello there", "hey there"
        ]
        if user_input.strip().lower() in greetings:
            return "How can I help you today?"
        # Retrieve the top-k documents relevant to the query
        relevant_docs = self.query_agent.query_vector_store(user_input, k=5)
        print(f"Retrieved {len(relevant_docs)} relevant documents.")
        if len(relevant_docs) == 0:
            return ("No relevant documents found. Please ask a question related "
                    "to the Machine Learning book.")
        # Ask the model whether the user wants a chatty or a concise answer
        mode_prompt = ("Infer whether the user wants the response to be chatty or "
                       "concise. If it should be chatty, answer 'chatty'; otherwise "
                       "answer 'concise'. Reply with only that one word. "
                       "Here is the user input: " + user_input)
        mode = self.get_completion(prompt=mode_prompt, model="gpt-4").strip().lower()
        print("Selected response mode: " + mode)

        # Fall back to chatty if the model returns anything unexpected
        if mode not in ("chatty", "concise"):
            mode = "chatty"
        return self.answering_agent.generate_response(
            user_input, relevant_docs, conv_history=conversation_history, mode=mode)
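
if __name__ == "__main__":
    # Minimal usage sketch, not part of the agent classes themselves. It assumes
    # the keys are provided via environment variables and that "ml-book-index"
    # (an illustrative name) already exists in Pinecone.
    import os

    agent = Head_Agent(
        openai_key=os.environ["OPENAI_API_KEY"],
        pinecone_key=os.environ["PINECONE_API_KEY"],
        pinecone_index_name="ml-book-index",
    )
    history = []
    print(agent.process_query("What is gradient descent?", history))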