Commit · 00cd58e
1 Parent(s): 24eee70
dn
ask.py CHANGED
@@ -4,6 +4,7 @@ from langchain import OpenAI
 import os
 from llama_index.langchain_helpers.agents import IndexToolConfig, LlamaIndexTool, LlamaToolkit, create_llama_chat_agent
 from langchain.chains.conversation.memory import ConversationBufferMemory
+from llama_index import QuestionAnswerPrompt
 
 
 # logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
@@ -13,31 +14,44 @@ pinecone_key=os.environ['PINECONE_KEY']
 def askQuestion(brain, question, prompt, temperature, maxTokens):
     temperature = float(temperature)
     finalQuestion = prompt+question
-    print(
+    print(finalQuestion)
+    print(temperature, maxTokens)
+    #print(type(temperature))
+    #print(type(maxTokens))
     Brain_Name = brain.lower()
     print(Brain_Name)
     pinecone.init(api_key=pinecone_key,
                   environment="us-west4-gcp")
     pineconeindex = pinecone.Index(Brain_Name)
+    pineconeindex.describe_index_stats
     index = GPTPineconeIndex([], pinecone_index=pineconeindex)
     # index = GPTSimpleVectorIndex.load_from_disk('index.json')
 
     # For Q-A set this value to 4, For Content-Generation set this value b/w 7-10.
     data_chunks = 5
 
-
-
-
+    QA_PROMPT_TMPL = (
+        "We have provided context information below. \n"
+        "---------------------\n"
+        "{context_str}"
+        "\n---------------------\n"
+        "Given this information, please answer the question at the end of this main prompt: "+prompt+" {query_str}\n"
+    )
+
+    QA_PROMPT = QuestionAnswerPrompt(QA_PROMPT_TMPL)
+
+    query = question
     # relevant info from brain goes here
     info = ["pdf"]
 
     llm_predictor = LLMPredictor(llm=OpenAI(
         temperature=temperature, model_name="text-davinci-003", max_tokens=maxTokens))
+
     service_context_gpt4 = ServiceContext.from_defaults(
         llm_predictor=llm_predictor)
 
     response = index.query(query, service_context=service_context_gpt4,
-                           similarity_top_k=data_chunks, response_mode="compact")
+                           similarity_top_k=data_chunks, response_mode="compact", text_qa_template=QA_PROMPT)
     print(question)
     print(response)
     memory = ConversationBufferMemory(memory_key="chat_history")
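The substantive change is the custom text_qa_template: llama_index fills {context_str} with the similarity_top_k chunks retrieved from Pinecone and {query_str} with the query string, so the caller-supplied prompt now reaches the model through the template rather than by being prepended to the question (finalQuestion is still built, but only printed). A minimal sketch of that substitution, assuming the QA_PROMPT_TMPL from this commit; the prompt and question values below are illustrative placeholders, not values from the repo:

# Sketch of the template substitution index.query(...) performs internally
# before calling the LLM. Placeholder values are marked as such.
prompt = "Answer in one short paragraph."      # hypothetical caller-supplied prompt
question = "What topics does the PDF cover?"   # hypothetical user question

QA_PROMPT_TMPL = (
    "We have provided context information below. \n"
    "---------------------\n"
    "{context_str}"
    "\n---------------------\n"
    "Given this information, please answer the question at the end of this main prompt: "
    + prompt + " {query_str}\n"
)

# llama_index substitutes the retrieved chunks and the query into the template.
final_llm_input = QA_PROMPT_TMPL.format(
    context_str="(text of the 5 most similar chunks from the Pinecone index)",
    query_str=question,
)
print(final_llm_input)

Two details worth flagging in the new code: pineconeindex.describe_index_stats references the method without calling it, so the line is a no-op (it would need to be describe_index_stats() to actually fetch the stats), and since query = question, the prompt now influences the answer only via QA_PROMPT, not via finalQuestion.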