# RAG chatbot over the IFAL student manual (LangChain + Chroma + Gradio).
# OpenAI chat model and OpenAI embeddings.
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_core.documents import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.chat_models import init_chat_model
import gradio as gr
# Chat model used to generate the final answers (OpenAI gpt-4o-mini).
llm = init_chat_model("gpt-4o-mini", model_provider="openai")
# Load the student manual; assumes "manual.md" is in the working directory.
loader = UnstructuredMarkdownLoader("manual.md")
documentos = loader.load()
# Split into ~1000-char chunks with 200-char overlap so answers spanning a
# chunk boundary are not cut off.
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
textos = text_splitter.split_documents(documentos)
# Embed each chunk and index them in an in-memory Chroma vector store.
embeddings = OpenAIEmbeddings()
db = Chroma.from_documents(textos, embeddings)
# Retrieve the 3 most similar chunks for each query.
retriever = db.as_retriever(search_kwargs={"k": 3})
# "stuff" chain type: the retrieved chunks are concatenated into a single
# prompt that is sent to the LLM in one call.
qa_chain = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True
)
def consultar_base_conhecimento(pergunta, history):
    """Answer a question using the retrieval-QA chain over the manual.

    Args:
        pergunta: The user's question as a string.
        history: Chat history supplied by gr.ChatInterface; unused here
            because each question is answered independently of the chat.

    Returns:
        The chain's answer as a string.
    """
    # Chain.run() is deprecated in modern LangChain; invoke() is the
    # supported API and returns a dict whose "result" key holds the answer.
    resultado = qa_chain.invoke({"query": pergunta})
    return resultado["result"]
# CSS injected into the Gradio page to hide the framework footer.
# Fix: the original had a stray '"' at the end of the last rule, which
# made that line invalid CSS.
css = """
footer { display: none !important; }
.footer { display: none !important; }
.gradio-footer { display: none !important; }
"""
# Chat UI: each user message is routed to the knowledge-base lookup;
# the custom CSS hides Gradio's footer.
demo = gr.ChatInterface(
    fn=consultar_base_conhecimento,
    css=css,
    title="Este chatbot responde perguntas com base no manual do aluno do IFAL",
    examples=[
        "O que você sabe?",
        "Quem é o reitor?",
        "Como funciona o processo de matrícula?",
        "Quais são as regras para aprovação nas disciplinas?",
    ],
)
# Launch the web app only when this file is executed as a script.
# Fix: the original last line ended with a stray " |", a SyntaxError.
if __name__ == "__main__":
    demo.launch()