import os

from openai import OpenAI
from pinecone import Pinecone


def extract_action(response, query=None):
    # Extract the relevant matches from a Pinecone query response:
    # document IDs paired with their similarity scores.
    # (query is accepted for interface compatibility but is unused here.)
    relevant_docs = [{"id": match["id"], "score": match["score"]} for match in response["matches"]]
    return relevant_docs
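
# A quick sketch of the shape extract_action expects (the values are
# hypothetical; Pinecone query responses expose "matches" entries that
# carry an "id" and a "score"):
#   response = {"matches": [{"id": "doc-1", "score": 0.92}, {"id": "doc-7", "score": 0.85}]}
#   extract_action(response)  # -> [{"id": "doc-1", "score": 0.92}, {"id": "doc-7", "score": 0.85}]
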

class Query_Agent:
    def __init__(self, pinecone_index, openai_client, embeddings) -> None:
        # Initialize the Query_Agent agent.
        # Prefer the OpenAI client passed in by the caller; otherwise build
        # one from the environment instead of hardcoding the API key.
        self.client = openai_client or OpenAI(api_key=os.environ["OPENAI_API_KEY"])
        # Specify the embeddings model to use for generating query embeddings
        self.embeddings_model = embeddings
        # Initialize the Pinecone client and connect to the specified index,
        # again reading the API key from the environment.
        pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])
        self.pinecone_index = pc.Index(pinecone_index)

    def query_vector_store(self, query, k=5):
        # Query the Pinecone vector store for the k documents most similar
        # to the query. Newlines are collapsed first, since they can degrade
        # embedding quality.
        query = query.replace("\n", " ")
        query_embedding = self.client.embeddings.create(
            input=[query],
            model=self.embeddings_model
        ).data[0].embedding
        # Query the Pinecone index using the generated embedding
        query_results = self.pinecone_index.query(
            vector=query_embedding,
            top_k=k,
            include_metadata=True)
        # Extract and return the most relevant documents along with their
        # ids, scores, and stored text
        relevant_docs = [
            (result['id'], result['score'], result['metadata']['text'])
            for result in query_results['matches']
        ]
        return relevant_docs

    def set_prompt(self, prompt):
        # Store the prompt used by the Query_Agent agent
        self.prompt = prompt
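

# A minimal usage sketch, assuming OPENAI_API_KEY and PINECONE_API_KEY are
# exported in the environment. The index name and embedding model below are
# placeholders, not values from this Space; substitute your own.
if __name__ == "__main__":
    agent = Query_Agent(
        pinecone_index="my-index",               # hypothetical index name
        openai_client=None,                      # falls back to env-based client
        embeddings="text-embedding-3-small",     # hypothetical embedding model
    )
    for doc_id, score, text in agent.query_vector_store("What is a vector database?", k=3):
        print(f"{score:.3f}  {doc_id}: {text[:80]}")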