import os

import pinecone
from langchain import OpenAI
from langchain.chains.conversation.memory import ConversationBufferMemory
from llama_index import GPTPineconeIndex, LLMPredictor, ServiceContext
from llama_index.langchain_helpers.agents import (
    IndexToolConfig,
    LlamaToolkit,
    create_llama_chat_agent,
)


# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
pinecone_key = os.environ['PINECONE_KEY']

def askQuestion(brain, question, prompt, temperature, maxTokens):
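    """Query the Pinecone-backed index named by `brain` with `prompt + question`.

    Returns the LlamaIndex response plus a ConversationBufferMemory seeded with
    the question/answer pair, so a follow-up agent chat (runAgent) can reuse it.
    """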
    temperature = float(temperature)
    # maxTokens likely arrives as a string too, so coerce it before passing to OpenAI.
    maxTokens = int(maxTokens)
    finalQuestion = prompt + question
    print(brain, finalQuestion, temperature, maxTokens)
    Brain_Name = brain.lower()
    print(Brain_Name)
    pinecone.init(api_key=pinecone_key,
                  environment="us-west4-gcp")
    pineconeindex = pinecone.Index(Brain_Name)
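    # Wrap the existing Pinecone index so LlamaIndex can query it directly;
    # no documents are inserted here, hence the empty list.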
    index = GPTPineconeIndex([], pinecone_index=pineconeindex)
    # index = GPTSimpleVectorIndex.load_from_disk('index.json')

    # For Q&A set this value to 4; for content generation set it between 7 and 10.
    data_chunks = 5

    # prompt query goes here
    # query="summarize in full detail the solution that dimetyd is providing, and previous email sequences which can be used as a context knowledge"
    query = finalQuestion
    # relevant info from brain goes here
    info = ["pdf"]

    llm_predictor = LLMPredictor(llm=OpenAI(
        temperature=temperature, model_name="text-davinci-003", max_tokens=maxTokens))
    service_context = ServiceContext.from_defaults(
        llm_predictor=llm_predictor)

    response = index.query(query, service_context=service_context,
                           similarity_top_k=data_chunks, response_mode="compact")
    print(question)
    print(response)
    memory = ConversationBufferMemory(memory_key="chat_history")
    memory.chat_memory.add_user_message(question)
    memory.chat_memory.add_ai_message(response.response)
    return response, memory


def getBrains(name):
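    """Return True if a Pinecone index with the given (lower-cased) name exists."""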
    pinecone.init(api_key=pinecone_key,
                  environment="us-west4-gcp")
    active_indexes = pinecone.list_indexes()
    print(active_indexes)
    name = name.lower()
    return name in active_indexes


def runAgent(memory, question, temperature, maxTokens):
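    """Answer a follow-up question with a LlamaIndex/LangChain chat agent.

    `memory` should be the ConversationBufferMemory returned by askQuestion;
    passing False means the chat has not been initiated yet. Uses the
    hard-coded "dimetyd-test" Pinecone index as the agent's lookup tool.
    """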
    if memory is False:
        return "Please initiate the chat first."
    temperature = float(temperature)
    # maxTokens likely arrives as a string too, so coerce it before passing to OpenAI.
    maxTokens = int(maxTokens)
    pinecone.init(api_key=pinecone_key,
                  environment="us-west4-gcp")
    pineconeindex = pinecone.Index("dimetyd-test")
    index = GPTPineconeIndex([], pinecone_index=pineconeindex)
    # memory = ConversationBufferMemory(memory_key="chat_history")
    # print(memory.chat_memory)
    llm = OpenAI(
        temperature=temperature, model_name="text-davinci-003", max_tokens=maxTokens)
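    # Expose the Pinecone-backed index to the agent as a single query tool.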
    tool_config = IndexToolConfig(
        index=index,
        name="Vector Index",
        description="Use this tool if you can't find the required information in the previous message history",
        index_query_kwargs={"similarity_top_k": 4, "response_mode": "compact"},
        tool_kwargs={"return_direct": True}
    )

    toolkit = LlamaToolkit(index_configs=[tool_config])

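    # Build a conversational agent that can call the index tool and reuse the chat memory.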
    agent_chain = create_llama_chat_agent(
        toolkit, llm, memory=memory, verbose=True)
    response = agent_chain.run(question)
    # print(memory.chat_memory)
    return response, memory