Upload 2 files
app.py
CHANGED
```diff
@@ -2,7 +2,8 @@ from langchain.agents import AgentExecutor, AgentType, initialize_agent
 from langchain.agents.structured_chat.prompt import SUFFIX
 from langchain.chat_models import ChatOpenAI
 from langchain.memory import ConversationBufferMemory
-from tools import generate_image_tool, describe_image_tool, handle_image_history
+from tools import generate_image_tool, describe_image_tool, handle_image_history
+from openai import OpenAI
 
 import chainlit as cl
 from chainlit.action import Action
@@ -18,6 +19,30 @@ def get_memory():
     return ConversationBufferMemory(memory_key="chat_history")
 
 
+async def wait_for_key():
+    res = await cl.AskUserMessage(content="Send an OpenAI API key to start. [https://platform.openai.com/api-keys](https://platform.openai.com/api-keys). e.g. sk-IY8Wl.....1cXD8", timeout=600).send()
+    if res:
+        await cl.Message(content="setting up...", indent=1).send()
+        # check if the key is valid
+        client = OpenAI(api_key=res["content"])
+        try:
+            stream = client.chat.completions.create(
+                model="gpt-3.5-turbo-1106",
+                messages=[{"role": "system", "content": "test"}],
+                max_tokens=1,
+            )
+            if stream:
+                await cl.Message(content="API key set, you can start chatting!", indent=1).send()
+                cl.user_session.set("api_key", res["content"])
+        except Exception as e:
+            await cl.Message(content=f"{e}", indent=1).send()
+            return await wait_for_key()
+        await cl.Message(content="✅ API KEY works! ✅ you can start chatting! 💬").send()
+        return res["content"]
+    else:
+        return await wait_for_key()
+
+
 @cl.on_chat_start
 async def start():
     """
@@ -26,7 +51,7 @@ async def start():
     We can add some settings to our application to allow users to select the appropriate model, and more!
     """
     cl.user_session.set("image_history", [{"role": "system", "content": "You are a helpful assistant. You are developed with GPT-4-vision-preview, if the user uploads an image, you have the ability to understand it."}])
-
+
     settings = await cl.ChatSettings(
         [
            Select(
@@ -46,8 +71,9 @@ async def start():
            ),
        ]
    ).send()
-    await setup_agent(settings)
-
+    api_key = await wait_for_key()
+    if api_key:
+        await setup_agent(settings)
 
 
 @cl.on_settings_update
```
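The diff gates agent setup on a validated key, but `setup_agent` itself is not shown. Purely as a hedged sketch, this is one way the key stored in `cl.user_session` by `wait_for_key()` could be consumed when the LangChain chat model is built; the function body and the settings key name are assumptions, not the repository's code.

```python
# Hypothetical sketch only: setup_agent is not part of this diff, so the body
# below is an assumption about how the stored per-session key could be used.
import chainlit as cl
from langchain.chat_models import ChatOpenAI


async def setup_agent_sketch(settings):
    api_key = cl.user_session.get("api_key")  # set by wait_for_key()
    if not api_key:
        return  # no validated key in this session yet
    llm = ChatOpenAI(
        model_name=settings.get("model", "gpt-3.5-turbo-1106"),  # settings key name is assumed
        openai_api_key=api_key,  # pass the session key instead of relying on an env var
        temperature=0,
        streaming=True,
    )
    cl.user_session.set("llm", llm)
```

Passing the key explicitly keeps it scoped to the user's session rather than depending on a process-wide OPENAI_API_KEY environment variable.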
tools.py
CHANGED
```diff
@@ -130,27 +130,3 @@ describe_image_tool = Tool.from_function(
     description=f"Useful to describe an image. Input should be a single string strictly in the following JSON format: {describe_image_format}",
     return_direct=False,
 )
-
-
-
-async def wait_for_key():
-    res = await cl.AskUserMessage(content="Send an OpenAI API key to start. [https://platform.openai.com/api-keys](https://platform.openai.com/api-keys). e.g. sk-IY8Wl.....1cXD8", timeout=600).send()
-    if res:
-        await cl.Message(content="setting up...", indent=1).send()
-        # check if the key is valid
-        client = OpenAI(api_key=res["content"])
-        try:
-            stream = client.chat.completions.create(
-                model="gpt-3.5-turbo-1106",
-                messages=[{"role": "system", "content": "test"}],
-                max_tokens=1,
-            )
-            if stream:
-                await cl.Message(content="API key set, you can start chatting!", indent=1).send()
-                cl.user_session.set("api_key", res["content"])
-        except Exception as e:
-            await cl.Message(content=f"{e}", indent=1).send()
-            return await wait_for_key()
-        return await cl.Message(content="✅ API KEY works! ✅ you can start chatting! 💬").send()
-    else:
-        return await wait_for_key()
```
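The function removed here is the one added to app.py above; the commit simply relocates it. Its validation trick is a one-token chat completion: if the request authenticates, the key is treated as usable. A minimal standalone version of that check, with a helper name of my own choosing rather than anything from the repository, could look like this:

```python
# Standalone sketch of the key check used in wait_for_key(): spend one token
# on a cheap chat completion and treat an authentication failure as a bad key.
from openai import OpenAI, AuthenticationError


def is_valid_openai_key(api_key: str) -> bool:
    client = OpenAI(api_key=api_key)
    try:
        client.chat.completions.create(
            model="gpt-3.5-turbo-1106",
            messages=[{"role": "system", "content": "test"}],
            max_tokens=1,
        )
        return True
    except AuthenticationError:
        # other errors (network, rate limits) are left to propagate
        return False
```

In the app the same check runs inside `wait_for_key()`, which re-prompts recursively until a key passes.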