niting089 committed
Commit 67a340d
1 Parent(s): f1ce08d

Add Application File

Files changed (1)
  1. app.py +25 -20
app.py CHANGED
@@ -1,41 +1,45 @@
+# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)
+
 # OpenAI Chat completion
 import os
-import openai
-import chainlit as cl
-from chainlit.prompt import Prompt, PromptMessage
-from chainlit.playground.providers import ChatOpenAI
+from openai import AsyncOpenAI  # importing openai for API usage
+import chainlit as cl  # importing chainlit for our app
+from chainlit.prompt import Prompt, PromptMessage  # importing prompt tools
+from chainlit.playground.providers import ChatOpenAI  # importing ChatOpenAI tools
 from dotenv import load_dotenv
 
 load_dotenv()
 
 # ChatOpenAI Templates
-system_template = """You are a highly analytical assistant who has in-depth knowledge of science!
+system_template = """You are a helpful assistant who always speaks in a pleasant tone!
 """
 
 user_template = """{input}
 Think through your response step by step.
-Be concise in your responses
-Do not make up stuff. If you don't know something, let the user know.
 """
 
-@cl.on_chat_start
+
+@cl.on_chat_start  # marks a function that will be executed at the start of a user session
 async def start_chat():
     settings = {
         "model": "gpt-3.5-turbo",
-        "temperature": 0.7,  # Adjust to a valid range
+        "temperature": 0,
         "max_tokens": 500,
         "top_p": 1,
         "frequency_penalty": 0,
         "presence_penalty": 0,
     }
+
     cl.user_session.set("settings", settings)
 
-@cl.on_message
+
+@cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
 async def main(message: cl.Message):
     settings = cl.user_session.get("settings")
 
-    # Set up the OpenAI client with the API key
-    openai.api_key = os.getenv("OPENAI_API_KEY")
+    client = AsyncOpenAI()
+
+    print(message.content)
 
     prompt = Prompt(
         provider=ChatOpenAI.id,
@@ -55,16 +59,17 @@ async def main(message: cl.Message):
         settings=settings,
     )
 
+    print([m.to_openai() for m in prompt.messages])
+
     msg = cl.Message(content="")
 
-    # Call OpenAI and stream response
-    async for stream_resp in await openai.ChatCompletion.acreate(
-        model=settings['model'],
-        messages=[m.to_openai() for m in prompt.messages],
-        stream=True,
-        **settings
+    # Call OpenAI
+    async for stream_resp in await client.chat.completions.create(
+        messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
    ):
-        token = stream_resp.choices[0].delta.get("content", "")
+        token = stream_resp.choices[0].delta.content
+        if not token:
+            token = ""
         await msg.stream_token(token)
 
     # Update the prompt object with the completion
@@ -72,4 +77,4 @@ async def main(message: cl.Message):
     msg.prompt = prompt
 
     # Send and close the message stream
-    await msg.send()
+    await msg.send()
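
The main functional change in this commit is the move from the pre-1.0 openai.ChatCompletion.acreate call to the openai>=1.0 AsyncOpenAI client, whose streamed chunks expose delta.content (which may be None) instead of a dict. The snippet below is a minimal, standalone sketch of that streaming pattern outside Chainlit, not part of the commit; it assumes openai>=1.0 is installed, OPENAI_API_KEY is set in the environment, and the stream_reply helper name is purely illustrative.

# Standalone sketch of the AsyncOpenAI streaming pattern used in the new app.py.
import asyncio

from openai import AsyncOpenAI


async def stream_reply(prompt: str) -> str:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    reply = ""
    # stream=True makes the API yield ChatCompletionChunk objects as tokens arrive
    stream = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        stream=True,
    )
    async for chunk in stream:
        token = chunk.choices[0].delta.content
        if token:  # delta.content can be None on the opening/closing chunks
            reply += token
    return reply


if __name__ == "__main__":
    print(asyncio.run(stream_reply("Say hello in one short sentence.")))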