from llama_index import GPTPineconeIndex, LLMPredictor, ServiceContext
import pinecone
from langchain import OpenAI
import os

# Uncomment (together with the imports) to enable verbose logging:
# import logging
# import sys
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
def askQuestion(brain, question, temperature, maxTokens):
    print(brain, question, temperature, maxTokens)
    brain_name = brain.lower()
    print(brain_name)

    # Connect to the Pinecone index backing this brain.
    pinecone.init(api_key=os.environ['PINECONE_KEY'],
                  environment="us-west4-gcp")
    pineconeindex = pinecone.Index(brain_name)
    index = GPTPineconeIndex([], pinecone_index=pineconeindex)
    # index = GPTSimpleVectorIndex.load_from_disk('index.json')

    # Number of chunks to retrieve: 4 works well for Q&A; use 7-10 for
    # content generation.
    data_chunks = 4

    # The prompt query. Example for content generation:
    # query = "summarize in full detail the solution that dimetyd is providing, and previous email sequences which can be used as a context knowledge"
    query = question

    # Relevant info from the brain goes here (currently unused).
    info = ["pdf"]

    # Build the LLM predictor that synthesizes the final answer.
    llm_predictor = LLMPredictor(llm=OpenAI(
        temperature=temperature, model_name="text-davinci-003",
        max_tokens=maxTokens))
    service_context = ServiceContext.from_defaults(
        llm_predictor=llm_predictor)

    response = index.query(query, service_context=service_context,
                           similarity_top_k=data_chunks,
                           response_mode="compact")
    print(question)
    print(response)
    return response
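# A minimal usage sketch, assuming a Pinecone index named "pdf" already
# exists and PINECONE_KEY is set in the environment (both are assumptions,
# not guaranteed by this file):
#
#   answer = askQuestion("pdf", "What does the document cover?",
#                        temperature=0.7, maxTokens=256)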
def getBrains(name):
    # Return True if a Pinecone index exists for the given brain name.
    pinecone.init(api_key=os.environ['PINECONE_KEY'],
                  environment="us-west4-gcp")
    active_indexes = pinecone.list_indexes()
    print(active_indexes)
    return name.lower() in active_indexes
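# Example entry point, a sketch under the same assumptions as above
# (an existing "pdf" index and PINECONE_KEY in the environment):
if __name__ == "__main__":
    if getBrains("pdf"):
        print(askQuestion("pdf", "Summarize the uploaded document.",
                          temperature=0.7, maxTokens=256))
    else:
        print("No Pinecone index named 'pdf' was found.")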