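"""Orientador de carreras: a career-guidance chatbot for students.

Answers questions about university careers with a LangChain
ConversationalRetrievalChain over a persisted Chroma vector store,
served through a Gradio chat interface.
"""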
import gradio as gr
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.prompts import ChatPromptTemplate
from langchain.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langsmith import traceable
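
# Embeddings used to encode queries; they must match the model the persisted
# index was built with (a multilingual HuggingFace model was tried earlier,
# per the commented-out line below).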
#embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2')
embeddings = OpenAIEmbeddings()
#vectordb=Chroma.from_documents(document_chunks,embedding=embeddings, persist_directory='./ai_vocacional_v2')
vectordb = Chroma(persist_directory="./ai_vocacional_v2", embedding_function=embeddings)
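
# Chat model; temperature=0 minimizes sampling randomness.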
llm = ChatOpenAI(temperature=0, model_name='gpt-4o-mini')

# Buffer memory shared by every chain call; holds the running chat history.
memory = ConversationBufferMemory(
    memory_key='chat_history',
    return_messages=True)
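
# System prompt (Spanish). In English: "You are the Career Counselor. You are
# here to help students explore the careers they are interested in, how those
# relate to their passions and tastes, the courses they include, and their
# possible job prospects. Take the following context documents {context} and
# answer based only on this context."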
general_system_template = r"""
Eres el Orientador de carreras. Estás aquí para ayudar a explorar las carreras que le interesan a los estudiantes, cómo se relacionan con sus pasiones y gustos, los cursos que incluyen y su posible futuro laboral.
Toma los siguientes documentos de contexto {context} y responde únicamente basado en este contexto.
"""
general_user_template = "Pregunta:```{question}```"
messages = [
    SystemMessagePromptTemplate.from_template(general_system_template),
    HumanMessagePromptTemplate.from_template(general_user_template)
]
qa_prompt = ChatPromptTemplate.from_messages(messages)
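
# @traceable records each call (inputs, outputs, latency) as a run in
# LangSmith, when LangSmith credentials are configured in the environment.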
@traceable
def pdf_qa(query):
    # A fresh chain is built on every call; conversation state persists via
    # the shared `memory` object, and the retriever returns the top 16 chunks.
    chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectordb.as_retriever(search_kwargs={'k': 16}),
        combine_docs_chain_kwargs={'prompt': qa_prompt},
        memory=memory  # ,max_tokens_limit=4000
    )
    return chain({"question": query})
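
# Example usage (hypothetical query):
#   pdf_qa("¿Qué cursos incluye la carrera de Psicología?")["answer"]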

# Define chat interface
with gr.Blocks() as demo:
    # Seed the chat with a greeting (Spanish: "Hi! I'm your career counselor.
    # I'm here to help you explore the careers you're interested in, how they
    # relate to your passions and tastes, the courses they include, and your
    # possible job prospects.")
    chatbot = gr.Chatbot(value=[[None, '''
¡Hola! Soy tu Orientador de carreras. Estoy aquí para ayudarte a explorar las carreras que te interesan, cómo se relacionan con tus pasiones y gustos, los cursos que incluyen y tu posible futuro laboral.
''']])
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def user(query, chat_history):
        print("User query:", query)
        print("Chat history:", chat_history)
        # Ask the QA chain; the shared `memory` already tracks the
        # conversation, so only the new question is passed in.
        result = pdf_qa(query)
        # Append the user message and the model's answer to the visible history
        chat_history.append((query, result["answer"]))
        print("Updated chat history:", chat_history)
        # Clear the textbox and refresh the chat window
        return gr.update(value=""), chat_history
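
    # Clearing only the chat window would leave the chain's memory populated,
    # so stale context could leak into the next answer. This helper (a sketch;
    # `clear_chat` is not in the original code) resets both.
    def clear_chat():
        memory.clear()
        return None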

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False)
    clear.click(clear_chat, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch()
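
# On Hugging Face Spaces this file runs automatically as app.py; to run it
# locally, use `python app.py` (assumes OPENAI_API_KEY, and the LangSmith
# credentials used by @traceable, are set in the environment).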