Spaces:
Running
Running
File size: 1,220 Bytes
e63103b 3f199c2 0870c96 e63103b 3f199c2 e63103b 3f199c2 7d2e972 3f199c2 0870c96 e63103b 3f199c2 1a93363 e63103b 0870c96 3f199c2 0870c96 e63103b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
import os
from langchain_backend.utils import create_prompt_llm_chain, create_retriever, getPDF
from langchain_backend import utils
from langchain.chains import create_retrieval_chain
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings
# NOTE(review): os.environ.get() returns the value but nothing captures it,
# so this line is a no-op. Presumably intended to verify/load OPENAI_API_KEY
# before OpenAIEmbeddings() is constructed below — confirm intent; if it is a
# required-key check, it should assign or raise when the variable is missing.
os.environ.get("OPENAI_API_KEY")
def get_llm_answer(system_prompt, user_prompt, pdf_url, model, embedding):
    """Answer ``user_prompt`` via a RAG chain built over a PDF's pages.

    Picks an embedding backend (OpenAI when ``embedding == "gpt"``, otherwise
    a HuggingFace model named by ``embedding``), indexes the PDF pages in an
    ephemeral Chroma collection, runs a retrieval chain, and always tears the
    collection down afterwards so no embeddings leak into the next call.

    Args:
        system_prompt: System prompt forwarded to ``create_prompt_llm_chain``.
        user_prompt: Question passed to the chain as its ``"input"``.
        pdf_url: Optional PDF location; when falsy, ``getPDF()`` is called
            with its default source.
        model: Model identifier forwarded to ``create_prompt_llm_chain``.
        embedding: ``"gpt"`` selects ``OpenAIEmbeddings``; any other value is
            treated as a HuggingFace embedding model name.

    Returns:
        The mapping produced by ``rag_chain.invoke`` (the chain's answer
        payload).
    """
    if embedding == "gpt":
        embedding_object = OpenAIEmbeddings()
    else:
        embedding_object = HuggingFaceEmbeddings(model_name=embedding)
    vectorstore = Chroma(
        collection_name="documents",
        embedding_function=embedding_object
    )
    print('model: ', model)
    print('embedding: ', embedding)
    # Fall back to the project's default document when no URL is supplied.
    pages = getPDF(pdf_url) if pdf_url else getPDF()
    try:
        retriever = create_retriever(pages, vectorstore)
        rag_chain = create_retrieval_chain(retriever, create_prompt_llm_chain(system_prompt, model))
        results = rag_chain.invoke({"input": user_prompt})
    finally:
        # Clean up even when the chain raises: previously this ran only on
        # success, leaking the collection and the accumulated ids.
        print('allIds ARQUIVO MAIN: ', utils.allIds)
        if utils.allIds:  # skip the delete call when nothing was indexed
            vectorstore.delete(utils.allIds)
        vectorstore.delete_collection()
        utils.allIds = []
        print('utils.allIds: ', utils.allIds)
    return results