Commit 8240614
Parent(s): cb12053
follow-up
ask.py
CHANGED
@@ -2,18 +2,21 @@ from llama_index import GPTPineconeIndex, LLMPredictor, ServiceContext
 import pinecone
 from langchain import OpenAI
 import os
+from llama_index.langchain_helpers.agents import IndexToolConfig, LlamaIndexTool, LlamaToolkit, create_llama_chat_agent
+from langchain.chains.conversation.memory import ConversationBufferMemory
 
 
 # logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
 # logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
+pinecone_key=os.environ['PINECONE_KEY']
 
-
-def askQuestion(brain, question, temperature, maxTokens):
+def askQuestion(brain, question, prompt, temperature, maxTokens):
     temperature = float(temperature)
-
+    finalQuestion = prompt+question
+    print(brain, finalQuestion, temperature, maxTokens)
     Brain_Name = brain.lower()
     print(Brain_Name)
-    pinecone.init(api_key=
+    pinecone.init(api_key=pinecone_key,
                   environment="us-west4-gcp")
     pineconeindex = pinecone.Index(Brain_Name)
     index = GPTPineconeIndex([], pinecone_index=pineconeindex)
@@ -24,7 +27,7 @@ def askQuestion(brain, question, temperature, maxTokens):
 
     # prompt query goes here
     # query="summarize in full detail the solution that dimetyd is providing, and previous email sequences which can be used as a context knowledge"
-    query =
+    query = finalQuestion
    # relevant info from brain goes here
     info = ["pdf"]
 
@@ -37,11 +40,14 @@ def askQuestion(brain, question, temperature, maxTokens):
                           similarity_top_k=data_chunks, response_mode="compact")
     print(question)
     print(response)
-
+    memory = ConversationBufferMemory(memory_key="chat_history")
+    memory.chat_memory.add_user_message(question)
+    memory.chat_memory.add_ai_message(response.response)
+    return response, memory
 
 
 def getBrains(name):
-    pinecone.init(api_key=
+    pinecone.init(api_key=pinecone_key,
                   environment="us-west4-gcp")
     active_indexes = pinecone.list_indexes()
     print(active_indexes)
@@ -50,3 +56,32 @@ def getBrains(name):
         return True
     else:
         return False
+
+
+def runAgent(memory, question, temperature, maxTokens):
+    if (memory == False):
+        return "Please Initiate the Chat first.."
+    temperature = float(temperature)
+    pinecone.init(api_key=pinecone_key,
+                  environment="us-west4-gcp")
+    pineconeindex = pinecone.Index("dimetyd-test")
+    index = GPTPineconeIndex([], pinecone_index=pineconeindex)
+    # memory = ConversationBufferMemory(memory_key="chat_history")
+    # print(memory.chat_memory)
+    llm = OpenAI(
+        temperature=temperature, model_name="text-davinci-003", max_tokens=maxTokens)
+    tool_config = IndexToolConfig(
+        index=index,
+        name="Vector Index",
+        description="Use this tool if you can't find the required Information in the previous message history",
+        index_query_kwargs={"similarity_top_k": 4, "response_mode": "compact"},
+        tool_kwargs={"return_direct": True}
+    )
+
+    toolkit = LlamaToolkit(index_configs=[tool_config])
+
+    agent_chain = create_llama_chat_agent(
+        toolkit, llm, memory=memory, verbose=True)
+    response = agent_chain.run(question)
+    # print(memory.chat_memory)
+    return response, memory
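
The calling app is not part of this commit, so here is a minimal sketch of how the two entry points might be chained after this change. It assumes PINECONE_KEY is set before ask.py is imported (the module reads it at import time) and that the hardcoded "dimetyd-test" index exists; the question and prompt strings are placeholders.

import os
os.environ.setdefault("PINECONE_KEY", "...")  # must be a real key in practice

from ask import askQuestion, runAgent

# First turn: query the index directly; this also seeds the chat memory
# that runAgent expects on later turns.
response, memory = askQuestion(
    brain="dimetyd-test",
    question="What problem does the product solve?",
    prompt="Answer using only the indexed documents. ",
    temperature="0.2",  # askQuestion casts this to float itself
    maxTokens=256,
)

# Follow-up turn: the agent reuses the memory and only falls back to the
# "Vector Index" tool when the chat history doesn't hold the answer.
answer, memory = runAgent(memory, "Summarize that in one sentence.", "0.2", 256)
print(answer)

Note that runAgent returns the string "Please Initiate the Chat first.." instead of a (response, memory) tuple when it is called with memory == False, so a caller that starts with memory = False should check for that case before unpacking.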