import os

import joblib
from langchain.callbacks import get_openai_callback
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings.sentence_transformer import (
    SentenceTransformerEmbeddings,
)
from langchain.llms import OpenAI
from langchain.vectorstores import Pinecone

from pages.admin_utils import pine_cone_index


# Connect to an existing Pinecone index and expose it as a LangChain vector store
def pull_from_pinecone(embeddings, pinecone_index_name: str | None = None):
    index_name = pine_cone_index(pinecone_index_name)

    index = Pinecone.from_existing_index(index_name, embeddings)
    return index

# Create the sentence-transformer embedding model used for indexing and querying
def create_embeddings():
    embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
    return embeddings

# Fetch the top-k most relevant documents for a query from the Pinecone index
def get_similar_docs(index, query, k=2):
    similar_docs = index.similarity_search(query, k=k)
    return similar_docs

# Answer the question with a "stuff" QA chain over the retrieved documents
def get_answer(docs, user_input):
    chain = load_qa_chain(OpenAI(), chain_type="stuff")
    with get_openai_callback() as cb:  # cb tracks OpenAI token usage for this call
        response = chain.run(input_documents=docs, question=user_input)
    return response


# Classify the query embedding with a pre-trained model saved via joblib,
# falling back to a default answer when no model file is present
def predict(query_result):
    if os.path.exists('modelsvm.pk1'):
        fit_model = joblib.load('modelsvm.pk1')
        result = fit_model.predict([query_result])
        return result[0]
    return "No Idea?"