from llama_index import GPTPineconeIndex, LLMPredictor, ServiceContext
import pinecone
from langchain import OpenAI
import os
from llama_index.langchain_helpers.agents import IndexToolConfig, LlamaIndexTool, LlamaToolkit, create_llama_chat_agent
from langchain.chains.conversation.memory import ConversationBufferMemory

# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

pinecone_key = os.environ['PINECONE_KEY']


def askQuestion(brain, question, prompt, temperature, maxTokens):
    """Query the Pinecone-backed index named `brain` and return the response
    together with a fresh conversation memory seeded with this exchange."""
    temperature = float(temperature)
    finalQuestion = prompt + question
    print(brain, finalQuestion, temperature, maxTokens)

    brain_name = brain.lower()
    print(brain_name)

    pinecone.init(api_key=pinecone_key, environment="us-west4-gcp")
    pineconeindex = pinecone.Index(brain_name)
    index = GPTPineconeIndex([], pinecone_index=pineconeindex)
    # index = GPTSimpleVectorIndex.load_from_disk('index.json')

    # For Q&A set this value to 4; for content generation set it between 7 and 10.
    data_chunks = 5

    # Prompt query goes here.
    # query = "summarize in full detail the solution that dimetyd is providing, and previous email sequences which can be used as a context knowledge"
    query = finalQuestion

    # Relevant info from the brain goes here.
    info = ["pdf"]

    llm_predictor = LLMPredictor(llm=OpenAI(
        temperature=temperature, model_name="text-davinci-003", max_tokens=maxTokens))
    service_context_gpt4 = ServiceContext.from_defaults(
        llm_predictor=llm_predictor)

    response = index.query(query, service_context=service_context_gpt4,
                           similarity_top_k=data_chunks, response_mode="compact")
    print(question)
    print(response)

    # Seed a conversation memory with this question/answer pair so that
    # runAgent can continue the chat later.
    memory = ConversationBufferMemory(memory_key="chat_history")
    memory.chat_memory.add_user_message(question)
    memory.chat_memory.add_ai_message(response.response)

    return response, memory


def getBrains(name):
    """Return True if a Pinecone index with the given name exists."""
    pinecone.init(api_key=pinecone_key, environment="us-west4-gcp")
    active_indexes = pinecone.list_indexes()
    print(active_indexes)
    return name.lower() in active_indexes


def runAgent(memory, question, temperature, maxTokens):
    """Run a conversational agent over the existing memory, falling back to
    the vector index tool when the answer is not in the chat history."""
    if memory is False:
        return "Please initiate the chat first."

    temperature = float(temperature)
    pinecone.init(api_key=pinecone_key, environment="us-west4-gcp")
    pineconeindex = pinecone.Index("dimetyd-test")
    index = GPTPineconeIndex([], pinecone_index=pineconeindex)

    # memory = ConversationBufferMemory(memory_key="chat_history")
    # print(memory.chat_memory)

    llm = OpenAI(temperature=temperature,
                 model_name="text-davinci-003", max_tokens=maxTokens)

    tool_config = IndexToolConfig(
        index=index,
        name="Vector Index",
        description="Use this tool if you can't find the required information in the previous message history",
        index_query_kwargs={"similarity_top_k": 4, "response_mode": "compact"},
        tool_kwargs={"return_direct": True},
    )
    toolkit = LlamaToolkit(index_configs=[tool_config])

    agent_chain = create_llama_chat_agent(
        toolkit, llm, memory=memory, verbose=True)

    response = agent_chain.run(question)
    # print(memory.chat_memory)

    return response, memory
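

# --- Usage sketch (illustrative only) ---
# A minimal example of how these helpers could be wired together. It assumes
# the PINECONE_KEY environment variable is set, an OpenAI key is configured,
# and a Pinecone index named "dimetyd-test" exists; the question and prompt
# strings below are placeholders, not part of this module's API.
if __name__ == "__main__":
    if getBrains("dimetyd-test"):
        # First turn: query the index directly and capture the chat memory.
        answer, chat_memory = askQuestion(
            brain="dimetyd-test",
            question="What solution does the product provide?",
            prompt="Answer using only the indexed documents: ",
            temperature=0.2,
            maxTokens=256,
        )
        # Follow-up turn: let the agent reuse the memory, falling back to the
        # vector index tool only when the history is insufficient.
        follow_up, chat_memory = runAgent(
            chat_memory, "Summarize that in one sentence.", 0.2, 256)
        print(follow_up)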