import os

import pinecone
from langchain import OpenAI
from llama_index import GPTPineconeIndex, LLMPredictor, ServiceContext

# Uncomment (together with the imports) to enable debug logging:
# import logging, sys
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
def askQuestion(brain, question, temperature, maxTokens):
    """Query the Pinecone-backed index named after `brain` and return the LLM response."""
    temperature = float(temperature)
    maxTokens = int(maxTokens)
    print(brain, question, temperature, maxTokens)

    # Pinecone index names must be lowercase.
    brain_name = brain.lower()
    print(brain_name)

    pinecone.init(api_key=os.environ['PINECONE_KEY'],
                  environment="us-west4-gcp")
    pineconeindex = pinecone.Index(brain_name)
    index = GPTPineconeIndex([], pinecone_index=pineconeindex)
    # index = GPTSimpleVectorIndex.load_from_disk('index.json')

    # Number of chunks retrieved per query: use 4 for Q&A,
    # 7-10 for content generation.
    data_chunks = 5

    # Prompt query goes here.
    # query = "summarize in full detail the solution that dimetyd is providing, and previous email sequences which can be used as a context knowledge"
    query = question

    # Relevant source types from the brain (currently unused).
    info = ["pdf"]

    llm_predictor = LLMPredictor(llm=OpenAI(
        temperature=temperature, model_name="text-davinci-003", max_tokens=maxTokens))
    service_context = ServiceContext.from_defaults(
        llm_predictor=llm_predictor)

    response = index.query(query, service_context=service_context,
                           similarity_top_k=data_chunks, response_mode="compact")
    print(question)
    print(response)
    return response
def getBrains(name):
    """Return True if a Pinecone index matching `name` (case-insensitive) exists."""
    pinecone.init(api_key=os.environ['PINECONE_KEY'],
                  environment="us-west4-gcp")
    active_indexes = pinecone.list_indexes()
    print(active_indexes)
    return name.lower() in active_indexes
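
# Minimal usage sketch, assuming PINECONE_KEY is set in the environment and
# a Pinecone index named "pdf" already exists. The index name and the
# question below are illustrative placeholders, not part of the original file.
if __name__ == "__main__":
    brain = "pdf"  # hypothetical index name
    if getBrains(brain):
        # temperature and maxTokens arrive as strings (e.g. from a web form)
        # and are cast inside askQuestion.
        answer = askQuestion(brain, "What is this document about?",
                             temperature="0.2", maxTokens="256")
        print(answer)
    else:
        print(f"No Pinecone index named '{brain}' was found.")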