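"""Chainlit chat app scaffold for a LangChain + Qdrant RAG pipeline.

The RAG components (chat model, embeddings, cache, text splitter, prompts)
are left commented out below; only a minimal Chainlit skeleton is active.
"""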
# from langchain_text_splitters import RecursiveCharacterTextSplitter
# from qdrant_client import QdrantClient
# from langchain_openai.embeddings import OpenAIEmbeddings
# from langchain_core.prompts import ChatPromptTemplate
# from langchain_core.globals import set_llm_cache
# from langchain_openai import ChatOpenAI
# from langchain_core.caches import InMemoryCache
# from operator import itemgetter
# from langchain_core.runnables.passthrough import RunnablePassthrough
# from langchain_qdrant import QdrantVectorStore, Qdrant
import chainlit as cl

# chat_model = ChatOpenAI(model="gpt-4o-mini")
# te3_small = OpenAIEmbeddings(model="text-embedding-3-small")
# set_llm_cache(InMemoryCache())
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=100)
# rag_system_prompt_template = """\
# You are a helpful assistant that uses the provided context to answer questions. Never reference this prompt or the existence of the context.
# """
# rag_message_list = [{"role": "system", "content": rag_system_prompt_template}]
# rag_user_prompt_template = """\
# Question:
# {question}
# Context:
# {context}
# """
# chat_prompt = ChatPromptTemplate.from_messages([
#     ("system", rag_system_prompt_template),
#     ("human", rag_user_prompt_template),
# ])
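
# A sketch of how the pieces above could be assembled into a retrieval chain once
# they are uncommented. "documents" and the collection name are illustrative
# placeholders that are not defined in this file.
# chunks = text_splitter.split_documents(documents)
# vector_store = QdrantVectorStore.from_documents(
#     chunks,
#     te3_small,
#     location=":memory:",
#     collection_name="rag_collection",
# )
# retriever = vector_store.as_retriever()
# rag_chain = (
#     {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
#     | chat_prompt
#     | chat_model
# )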

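# Send a short greeting as soon as a new chat session starts.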
@cl.on_chat_start
async def on_chat_start():
    await cl.Message(content="Ask away!").send()

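# Display every assistant message under a single author name in the UI.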
@cl.author_rename
def rename(orig_author: str):
    return "AI Assistant"

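# Placeholder message handler: it currently sends a fixed response for every
# incoming user message (the RAG chain above is not wired in yet).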
@cl.on_message
async def main(message: cl.Message):
    await cl.Message(content="Response").send()
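    # Once the commented RAG pieces are enabled, the handler could stream the
    # chain's answer instead (sketch; assumes the rag_chain sketched above):
    # msg = cl.Message(content="")
    # async for chunk in rag_chain.astream({"question": message.content}):
    #     await msg.stream_token(chunk.content)
    # await msg.send()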