GianJSX committed on
Commit
2366517
Β·
1 Parent(s): ebe3bf0

Upload 5 files

Browse files
Files changed (4) hide show
  1. .chainlit/config.toml +37 -16
  2. app.py +3 -18
  3. chainlit.md +3 -0
  4. tools.py +27 -3
.chainlit/config.toml CHANGED
@@ -1,18 +1,4 @@
1
  [project]
2
- # If true (default), the app will be available to anonymous users.
3
- # If false, users will need to authenticate and be part of the project to use the app.
4
- public = true
5
-
6
- # The project ID (found on https://cloud.chainlit.io).
7
- # The project ID is required when public is set to false or when using the cloud database.
8
- #id = ""
9
-
10
- # Uncomment if you want to persist the chats.
11
- # local will create a database in your .chainlit directory (requires node.js installed).
12
- # cloud will use the Chainlit cloud database.
13
- # custom will load use your custom client.
14
- # database = "local"
15
-
16
  # Whether to enable telemetry (default: true). No personal data is collected.
17
  enable_telemetry = false
18
 
@@ -22,13 +8,44 @@ user_env = []
22
  # Duration (in seconds) during which the session is saved when the connection is lost
23
  session_timeout = 3600
24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  [UI]
26
  # Name of the app and chatbot.
27
  name = "Chatbot"
28
 
 
 
 
29
  # Description of the app and chatbot. This is used for HTML tags.
30
  # description = ""
31
 
 
 
 
32
  # The default value for the expand messages settings.
33
  default_expand_messages = false
34
 
@@ -36,7 +53,11 @@ default_expand_messages = false
36
  hide_cot = false
37
 
38
  # Link to your github repo. This will add a github button in the UI's header.
39
- # github = ""
 
 
 
 
40
 
41
  # Override default MUI light theme. (Check theme.ts)
42
  [UI.theme.light]
@@ -60,4 +81,4 @@ hide_cot = false
60
 
61
 
62
  [meta]
63
- generated_by = "0.6.2"
 
1
  [project]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  # Whether to enable telemetry (default: true). No personal data is collected.
3
  enable_telemetry = false
4
 
 
8
  # Duration (in seconds) during which the session is saved when the connection is lost
9
  session_timeout = 3600
10
 
11
+ # Enable third parties caching (e.g LangChain cache)
12
+ cache = false
13
+
14
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
15
+ # follow_symlink = false
16
+
17
+ [features]
18
+ # Show the prompt playground
19
+ prompt_playground = true
20
+
21
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
22
+ unsafe_allow_html = false
23
+
24
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
25
+ latex = false
26
+
27
+ # Authorize users to upload files with messages
28
+ multi_modal = true
29
+
30
+ # Allows user to use speech to text
31
+ [features.speech_to_text]
32
+ enabled = false
33
+ # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
34
+ # language = "en-US"
35
+
36
  [UI]
37
  # Name of the app and chatbot.
38
  name = "Chatbot"
39
 
40
+ # Show the readme while the conversation is empty.
41
+ show_readme_as_default = true
42
+
43
  # Description of the app and chatbot. This is used for HTML tags.
44
  # description = ""
45
 
46
+ # Large size content are by default collapsed for a cleaner ui
47
+ default_collapse_content = true
48
+
49
  # The default value for the expand messages settings.
50
  default_expand_messages = false
51
 
 
53
  hide_cot = false
54
 
55
  # Link to your github repo. This will add a github button in the UI's header.
56
+ # github = "https://github.com/GianfrancoCorrea/GPT-Vision-DALLE3-Chat"
57
+
58
+ # Specify a CSS file that can be used to customize the user interface.
59
+ # The CSS file can be served from the public directory or via an external link.
60
+ # custom_css = "/public/test.css"
61
 
62
  # Override default MUI light theme. (Check theme.ts)
63
  [UI.theme.light]
 
81
 
82
 
83
  [meta]
84
+ generated_by = "0.7.700"
app.py CHANGED
@@ -2,30 +2,13 @@ from langchain.agents import AgentExecutor, AgentType, initialize_agent
2
  from langchain.agents.structured_chat.prompt import SUFFIX
3
  from langchain.chat_models import ChatOpenAI
4
  from langchain.memory import ConversationBufferMemory
5
- from tools import generate_image_tool, describe_image_tool, gpt_vision_call, process_images, handle_image_history
6
 
7
  import chainlit as cl
8
  from chainlit.action import Action
9
  from chainlit.input_widget import Select, Switch, Slider
10
 
11
 
12
- #@cl.author_rename
13
- def rename(orig_author):
14
- """
15
- Rename the author of messages as displayed in the "Thinking" section.
16
-
17
- This is useful to make the chat look more natural, or add some fun to it!
18
- """
19
- mapping = {
20
- "AgentExecutor": "The LLM Brain",
21
- "LLMChain": "The Assistant",
22
- "GenerateImage": "DALL-E 3",
23
- "ChatOpenAI": "GPT-4 Turbo",
24
- "Chatbot": "Coolest App",
25
- }
26
- return mapping.get(orig_author, orig_author)
27
-
28
-
29
  @cl.cache
30
  def get_memory():
31
  """
@@ -64,6 +47,7 @@ async def start():
64
  ]
65
  ).send()
66
  await setup_agent(settings)
 
67
 
68
 
69
  @cl.on_settings_update
@@ -75,6 +59,7 @@ async def setup_agent(settings):
75
  temperature=settings["Temperature"],
76
  streaming=settings["Streaming"],
77
  model=settings["Model"],
 
78
  )
79
 
80
  # We get our memory here, which is used to track the conversation history.
 
2
  from langchain.agents.structured_chat.prompt import SUFFIX
3
  from langchain.chat_models import ChatOpenAI
4
  from langchain.memory import ConversationBufferMemory
5
+ from tools import generate_image_tool, describe_image_tool, handle_image_history, wait_for_key
6
 
7
  import chainlit as cl
8
  from chainlit.action import Action
9
  from chainlit.input_widget import Select, Switch, Slider
10
 
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  @cl.cache
13
  def get_memory():
14
  """
 
47
  ]
48
  ).send()
49
  await setup_agent(settings)
50
+ await wait_for_key()
51
 
52
 
53
  @cl.on_settings_update
 
59
  temperature=settings["Temperature"],
60
  streaming=settings["Streaming"],
61
  model=settings["Model"],
62
+ api_key=cl.user_session.get("api_key"),
63
  )
64
 
65
  # We get our memory here, which is used to track the conversation history.
chainlit.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Welcome to a GPT-4 Turbo application with DALL-E 3 Image Generation capabilities!
2
+
3
+ [Github repo](https://github.com/GianfrancoCorrea/GPT-Vision-DALLE3-Chat)
tools.py CHANGED
@@ -34,7 +34,7 @@ def _generate_image(prompt: str):
34
  We use the OpenAI API to generate the image, and then store it in our
35
  user session so we can reference it later.
36
  """
37
- client = OpenAI()
38
 
39
  response = client.images.generate(
40
  model="dall-e-3",
@@ -76,7 +76,7 @@ generate_image_tool = Tool.from_function(
76
  def gpt_vision_call(image_id: str):
77
  #cl.user_session.set("image_id", image_id)
78
  print("image_id", image_id)
79
- client = OpenAI()
80
  image_history = cl.user_session.get("image_history")
81
  stream = client.chat.completions.create(
82
  model="gpt-4-vision-preview",
@@ -129,4 +129,28 @@ describe_image_tool = Tool.from_function(
129
  name="DescribeImage",
130
  description=f"Useful to describe an image. Input should be a single string strictly in the following JSON format: {describe_image_format}",
131
  return_direct=False,
132
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  We use the OpenAI API to generate the image, and then store it in our
35
  user session so we can reference it later.
36
  """
37
+ client = OpenAI(api_key=cl.user_session.get("api_key"))
38
 
39
  response = client.images.generate(
40
  model="dall-e-3",
 
76
  def gpt_vision_call(image_id: str):
77
  #cl.user_session.set("image_id", image_id)
78
  print("image_id", image_id)
79
+ client = OpenAI(api_key=cl.user_session.get("api_key"))
80
  image_history = cl.user_session.get("image_history")
81
  stream = client.chat.completions.create(
82
  model="gpt-4-vision-preview",
 
129
  name="DescribeImage",
130
  description=f"Useful to describe an image. Input should be a single string strictly in the following JSON format: {describe_image_format}",
131
  return_direct=False,
132
+ )
133
+
134
+
135
+
136
+ async def wait_for_key():
137
+ res = await cl.AskUserMessage(content="Send an Openai API KEY to start. [https://platform.openai.com/api-keys](https://platform.openai.com/api-keys). e.g. sk-IY8Wl.....1cXD8", timeout=600).send()
138
+ if res:
139
+ await cl.Message(content="setting up...", indent=1).send()
140
+ # check if the key is valid
141
+ client = OpenAI(api_key=res["content"])
142
+ try:
143
+ stream = client.chat.completions.create(
144
+ model="gpt-3.5-turbo-1106",
145
+ messages=[{"role": "system", "content": "test"}],
146
+ max_tokens=1,
147
+ )
148
+ if stream:
149
+ await cl.Message(content="API_KEY setted, you can start chatting!", indent=1).send()
150
+ cl.user_session.set("api_key", res["content"])
151
+ except Exception as e:
152
+ await cl.Message(content=f"{e}", indent=1).send()
153
+ return await wait_for_key()
154
+ return await cl.Message(content="βœ… API KEY works! βœ… you can start chatting! πŸ’¬").send()
155
+ else:
156
+ return await wait_for_key()