# Spaces:
# Sleeping
# Sleeping
import os

import openai
from openai import OpenAI
from pinecone import Pinecone
def extract_action(response, query=None):
    """Extract the matched documents from a Pinecone query response.

    Args:
        response: Mapping in the Pinecone query-result shape, i.e. containing
            a "matches" list whose entries carry "id" and "score" keys.
        query: Unused; retained so existing call sites keep working.

    Returns:
        A list of {"id": ..., "score": ...} dicts, one per match. Returns an
        empty list when the response carries no "matches" key (previously this
        raised KeyError).
    """
    # .get() guards against responses that lack a "matches" key entirely.
    return [
        {"id": match["id"], "score": match["score"]}
        for match in response.get("matches", [])
    ]
class Query_Agent:
    """Agent that embeds a natural-language query with OpenAI and retrieves
    the most similar documents from a Pinecone vector index."""

    def __init__(self, pinecone_index, openai_client, embeddings) -> None:
        """Initialize the OpenAI client and connect to the Pinecone index.

        Args:
            pinecone_index: Name of the Pinecone index to query.
            openai_client: A pre-configured OpenAI client to use for
                embeddings. If None, a client is created from the
                OPENAI_API_KEY environment variable. (Previously this
                argument was silently ignored.)
            embeddings: Name of the OpenAI embeddings model, e.g.
                "text-embedding-3-small".

        Raises:
            KeyError: If a required API key environment variable is missing.
        """
        # SECURITY: API keys were hard-coded in source here; they now come
        # from the environment (or a caller-supplied client). Rotate any key
        # that was previously committed.
        if openai_client is not None:
            self.client = openai_client
        else:
            self.client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
        # Embeddings model used for every query embedding.
        self.embeddings_model = embeddings
        # Connect to the named Pinecone index.
        pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])
        self.pinecone_index = pc.Index(pinecone_index)

    def query_vector_store(self, query, k=5):
        """Return the top-k most similar documents for ``query``.

        Args:
            query: Free-text query string.
            k: Number of matches to retrieve (default 5).

        Returns:
            A list of (id, score, text) tuples, one per match, where text is
            taken from each match's metadata["text"] field.
        """
        # Newlines are known to degrade OpenAI embedding quality.
        query = query.replace("\n", " ")
        query_embedding = self.client.embeddings.create(
            input=[query],
            model=self.embeddings_model,
        ).data[0].embedding
        # include_metadata=True so the stored document text comes back too.
        query_results = self.pinecone_index.query(
            vector=query_embedding,
            top_k=k,
            include_metadata=True,
        )
        return [
            (match["id"], match["score"], match["metadata"]["text"])
            for match in query_results["matches"]
        ]

    def set_prompt(self, prompt):
        """Store the prompt template this agent should use."""
        self.prompt = prompt