File size: 1,599 Bytes
0a93ad3
 
 
 
 
 
 
 
 
 
 
 
 
 
569e532
0a93ad3
 
 
 
 
 
 
 
 
 
 
a740eb7
0a93ad3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a740eb7
0a93ad3
 
 
 
 
e123c9b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
# Retriever function 
# Module-level setup: Pinecone client, Azure OpenAI env config, and the
# embeddings model used by retriever(). Runs at import time and performs
# network/secret access — this module must be imported inside a Streamlit app
# where st.secrets is available.

from pinecone import Pinecone
from langchain_openai import AzureOpenAIEmbeddings
import uuid
import pandas as pd
import streamlit as st
import os 
# NOTE(review): `uuid` and `pandas` are not used in this file's visible code —
# confirm they are needed elsewhere before removing.
# Initialize Pinecone client
pc = Pinecone(api_key=st.secrets["PC_API_KEY"])

# Handle to the serverless index searched by retriever() below.
index = pc.Index("openai-serverless")

# Azure OpenAI configuration
# Exported via os.environ so the langchain AzureOpenAIEmbeddings client (and
# any other consumer in this process) can pick them up.
os.environ["AZURE_OPENAI_API_KEY"] = st.secrets["api_key"]
os.environ["AZURE_OPENAI_ENDPOINT"] = "https://davidfearn-gpt4.openai.azure.com/"
os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] = "text-embedding-ada-002"
os.environ["AZURE_OPENAI_API_VERSION"] = "2024-08-01-preview"

# Model configuration
# Embeddings client; the API key is read implicitly from AZURE_OPENAI_API_KEY
# set above. Deployment "text-embedding-ada-002" must match the index's
# embedding dimensionality (presumably 1536 — TODO confirm against the index).
embeddings_model = AzureOpenAIEmbeddings(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],
    openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
)

def retriever(query, k, namespace="gskRegIntel"):
    """
    Embed a query string and search the Pinecone index for similar entries.

    :param query: The string to embed and search for.
    :param k: Number of top results to retrieve (Pinecone ``top_k``).
    :param namespace: Pinecone namespace to search within
        (defaults to "gskRegIntel", preserving the previous hardcoded value).
    :return: List of Pinecone match objects with metadata and scores,
        or an empty list if the search fails.
    """
    # NOTE: the original placed the docstring after a statement, so it was not
    # a real docstring, and it documented a `top_k` parameter that did not
    # match the signature. Fixed here; namespace is now a keyword parameter.
    try:
        # Generate the embedding for the query via Azure OpenAI
        query_embedding = embeddings_model.embed_query(query)

        # Perform similarity search in Pinecone
        results = index.query(
            vector=query_embedding,
            top_k=k,
            namespace=namespace,
            include_metadata=True,
        )
        return results.matches

    except Exception as e:
        # Best-effort: report and return an empty result set rather than
        # crash the Streamlit app on a transient embedding/search failure.
        print(f"Error during search: {e}")
        return []