Cheselle committed
Commit 3bfb998 · verified · 1 Parent(s): 71c4c81

Update app.py

Files changed (1): app.py (+10, -19)
app.py CHANGED

@@ -1,7 +1,4 @@
-### Import Section ###
-"""
-IMPORTS HERE
-"""
+
 import os
 import uuid
 from dotenv import load_dotenv
@@ -25,19 +22,14 @@ from langchain_core.runnables.config import RunnableConfig
 
 load_dotenv()
 
-### Global Section ###
-"""
-GLOBAL CODE HERE
-"""
-os.environ["LANGCHAIN_PROJECT"] = f"AIM Week 8 Assignment 1 - {uuid.uuid4().hex[0:8]}"
+os.environ["LANGCHAIN_PROJECT"] = f"AIM W8D1 - {uuid.uuid4().hex[0:8]}"
 os.environ["LANGCHAIN_TRACING_V2"] = "true"
 os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
 
 text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
 
 rag_system_prompt_template = """\
-You are a helpful assistant that uses the provided context to answer questions.
-Never reference this prompt, or the existance of context.
+You are a helpful assistant. Think through your answers carefully using a step-by-step approach.
 """
 
 rag_message_list = [
@@ -57,7 +49,7 @@ chat_prompt = ChatPromptTemplate.from_messages([
 ])
 
 chat_model = ChatOpenAI(model="gpt-4o-mini")
-# Typical Embedding Model
+
 core_embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
 
 def process_file(file: AskFileResponse):
@@ -77,10 +69,10 @@ def process_file(file: AskFileResponse):
     return docs
 
 
-### On Chat Start (Session Start) Section ###
+
 @cl.on_chat_start
 async def on_chat_start():
-    """ SESSION SPECIFIC CODE HERE """
+
     files = None
 
     while files == None:
@@ -136,14 +128,14 @@ async def on_chat_start():
     cl.user_session.set("chain", retrieval_augmented_qa_chain)
 
 
-### Rename Chains ###
+
@cl.author_rename
 def rename(orig_author: str):
-    """ RENAME CODE HERE """
+
     rename_dict = {"ChatOpenAI": "the Generator...", "VectorStoreRetriever": "the Retriever..."}
     return rename_dict.get(orig_author, orig_author)
 
-### On Message Section ###
+
 @cl.on_message
 async def main(message: cl.Message):
     """
@@ -153,8 +145,7 @@ async def main(message: cl.Message):
 
     msg = cl.Message(content="")
 
-    # Async method: Using astream allows for asynchronous streaming of the response,
-    # improving responsiveness and user experience by showing partial results as they become available.
+
     async for chunk in runnable.astream(
         {"question": message.content},
         config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
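
Note: the last hunk calls runnable.astream with a {"question": ...} input, and on_chat_start stores the chain under the "chain" session key, but the chain definition itself is not part of this diff. The following is only a hedged sketch of how such a retrieval chain is commonly wired with LCEL; `retriever` is an assumed name for a retriever built in on_chat_start from the uploaded file's chunks, and the other names come from the code shown above.

from operator import itemgetter
from langchain_core.output_parsers import StrOutputParser

# Sketch only, not the file's actual chain definition. `retriever` is assumed
# to be built from the uploaded file's chunks (e.g. a vector store over `docs`
# embedded with `core_embeddings`).
retrieval_augmented_qa_chain = (
    {
        "context": itemgetter("question") | retriever,  # fetch supporting chunks
        "question": itemgetter("question"),             # pass the question through
    }
    | chat_prompt        # format the system and user messages
    | chat_model         # gpt-4o-mini
    | StrOutputParser()  # emit plain string chunks when streamed
)
cl.user_session.set("chain", retrieval_augmented_qa_chain)

With a final StrOutputParser, astream yields plain string chunks, which is what the streaming loop sketched below assumes.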
 
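The comment removed in the last hunk explained that astream streams partial results for responsiveness. For reference, here is a minimal sketch of how the truncated on_message handler is typically completed in Chainlit, assuming the session's chain yields string chunks; the cl.user_session.get("chain") lookup and the stream_token/send calls are standard Chainlit API but are not shown in this diff.

import chainlit as cl
from langchain_core.runnables.config import RunnableConfig

@cl.on_message
async def main(message: cl.Message):
    # Sketch of the handler's typical shape; assumes the chain stored by
    # on_chat_start ends in StrOutputParser so astream yields str chunks.
    runnable = cl.user_session.get("chain")
    msg = cl.Message(content="")

    # astream yields partial results asynchronously, so the UI can show the
    # answer as it is generated instead of waiting for the full response.
    async for chunk in runnable.astream(
        {"question": message.content},
        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
    ):
        await msg.stream_token(chunk)

    await msg.send()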