simonlee-cb committed
Commit c16bc85 · 1 Parent(s): d50deb1

refactor: clean up frontend files

README.md CHANGED
@@ -5,6 +5,6 @@ colorFrom: yellow
 colorTo: purple
 sdk: gradio
 sdk_version: 5.16.1
-app_file: gradio_app.py
+app_file: app.py
 pinned: false
 ---
agent.py DELETED
@@ -1,158 +0,0 @@
-import streamlit as st
-from typing import TypedDict, Literal
-from pydantic_ai.messages import (
-    ModelRequest,
-    ModelResponse,
-    UserPromptPart,
-    TextPart,
-    ToolCallPart,
-    ToolReturnPart,
-)
-import asyncio
-from src.agents.mask_generation_agent import mask_generation_agent, ImageEditDeps
-from src.hopter.client import Hopter, Environment
-import os
-from src.services.generate_mask import GenerateMaskService
-from dotenv import load_dotenv
-from src.utils import image_path_to_uri
-load_dotenv()
-
-st.set_page_config(
-    page_title="Conversational Image Editor",
-    page_icon="🧊",
-    layout="wide",
-    initial_sidebar_state="collapsed"
-)
-
-hopter = Hopter(
-    api_key=os.getenv("HOPTER_API_KEY"),
-    environment=Environment.STAGING
-)
-mask_service = GenerateMaskService(hopter=hopter)
-user_msg_input_key = "input_user_msg"
-
-class ChatMessage(TypedDict):
-    """Format of messages sent to the browser/API."""
-
-    role: Literal['user', 'model']
-    timestamp: str
-    content: str
-
-
-def display_message_part(part):
-    """
-    Display a single part of a message in the Streamlit UI.
-    Customize how you display system prompts, user prompts,
-    tool calls, tool returns, etc.
-    """
-    # system-prompt
-    if part.part_kind == 'system-prompt':
-        with st.chat_message("system"):
-            st.markdown(f"**System**: {part.content}")
-    # user-prompt
-    elif part.part_kind == 'user-prompt':
-        with st.chat_message("user"):
-            st.markdown(part.content)
-    # text
-    elif part.part_kind == 'text':
-        with st.chat_message("assistant"):
-            st.markdown(part.content)
-
-    # tool call
-    elif part.part_kind == 'tool-call':
-        with st.chat_message("assistant"):
-            st.markdown(f"**{part.tool_name}**: {part.args}")
-
-    # tool return
-    elif part.part_kind == 'tool-return':
-        with st.chat_message("assistant"):
-            st.markdown(f"**{part.tool_name}**: {part.content}")
-
-async def run_agent(user_input: str, image_b64: str):
-    messages = [
-        {
-            "type": "text",
-            "text": user_input
-        },
-        {
-            "type": "image_url",
-            "image_url": {
-                "url": image_b64
-            }
-        }
-    ]
-    deps = ImageEditDeps(
-        edit_instruction=user_input,
-        image_url=image_b64,
-        hopter_client=hopter,
-        mask_service=mask_service
-    )
-    async with mask_generation_agent.run_stream(
-        messages,
-        deps=deps
-    ) as result:
-        partial_text = ""
-        message_placeholder = st.empty()
-
-        # Render partial text as it arrives
-        async for chunk in result.stream_text(delta=True):
-            partial_text += chunk
-            message_placeholder.markdown(partial_text)
-
-        # Now that the stream is finished, we have a final result.
-        # Add new messages from this run, excluding user-prompt messages
-        filtered_messages = [msg for msg in result.new_messages()
-                             if not (hasattr(msg, 'parts') and
-                                     any(part.part_kind == 'user-prompt' for part in msg.parts))]
-        st.session_state.messages.extend(filtered_messages)
-
-        # Add the final response to the messages
-        st.session_state.messages.append(
-            ModelResponse(parts=[TextPart(content=partial_text)])
-        )
-        st.rerun()
-
-async def main():
-    st.title("Conversational Image Editor")
-
-    if "openai_model" not in st.session_state:
-        st.session_state["openai_model"] = "gpt-4o"
-
-    if "messages" not in st.session_state:
-        st.session_state.messages = []
-
-    if "image" not in st.session_state:
-        st.session_state.image = None
-
-    chat_col, image_col = st.columns(2)
-    with chat_col:
-        # Display all messages from the conversation so far
-        # Each message is either a ModelRequest or ModelResponse.
-        # We iterate over their parts to decide how to display them.
-        for msg in st.session_state.messages:
-            if isinstance(msg, ModelRequest) or isinstance(msg, ModelResponse) or isinstance(msg, ToolCallPart) or isinstance(msg, ToolReturnPart):
-                for part in msg.parts:
-                    display_message_part(part)
-
-    with image_col:
-        st.session_state.image = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
-        if st.session_state.image:
-            st.image(st.session_state.image)
-        else:
-            st.write("Upload an image to get started")
-
-    # Chat input for the user
-    user_input = st.chat_input("What would you like to edit your image?", disabled=not st.session_state.image)
-    if user_input and st.session_state.image:
-        st.session_state.messages.append(
-            ModelRequest(parts=[UserPromptPart(content=user_input)])
-        )
-
-        # Display the assistant's partial response while streaming
-        with st.chat_message("assistant"):
-            # Actually run the agent now, streaming the text
-            image_url = image_path_to_uri(st.session_state.image)
-            await run_agent(user_input, image_url)
-
-if __name__ == "__main__":
-    asyncio.run(main())
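Editor's note: the deleted agent.py above was the Streamlit front end that streamed mask_generation_agent output into a chat. For orientation only, here is a hedged sketch, not the repo's actual image_edit_chat.py, of how the same pydantic_ai streaming loop can back a Gradio chat callback instead. The dependency wiring is copied from the deleted file; image handling is deliberately omitted and marked as an assumption.

```python
# Hedged sketch only -- NOT the repo's image_edit_chat.py. It reuses the agent and
# dependency wiring from the deleted agent.py to illustrate the Streamlit -> Gradio move.
import os

import gradio as gr
from dotenv import load_dotenv

from src.agents.mask_generation_agent import mask_generation_agent, ImageEditDeps
from src.hopter.client import Hopter, Environment
from src.services.generate_mask import GenerateMaskService

load_dotenv()

hopter = Hopter(api_key=os.getenv("HOPTER_API_KEY"), environment=Environment.STAGING)
mask_service = GenerateMaskService(hopter=hopter)


async def respond(message, history):
    # Same dependencies the Streamlit app built, minus the uploaded image
    # (assumption: image upload is handled elsewhere in the real chat module).
    deps = ImageEditDeps(
        edit_instruction=message,
        image_url=None,
        hopter_client=hopter,
        mask_service=mask_service,
    )
    async with mask_generation_agent.run_stream(message, deps=deps) as result:
        partial = ""
        async for chunk in result.stream_text(delta=True):
            partial += chunk
            yield partial  # Gradio re-renders the growing assistant reply


demo = gr.ChatInterface(respond, type="messages")

if __name__ == "__main__":
    demo.launch()
```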
app.py CHANGED
@@ -1,59 +1,11 @@
-from openai import OpenAI
-import streamlit as st
-from src.utils import image_path_to_uri
-
-st.title("ChatGPT-like clone")
-
-client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])
-image = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
-
-if "openai_model" not in st.session_state:
-    st.session_state["openai_model"] = "gpt-4o"
-
-if "messages" not in st.session_state:
-    st.session_state.messages = []
-
-if "image" not in st.session_state:
-    st.session_state.image = None
-
-for message in st.session_state.messages:
-    with st.chat_message(message["role"]):
-        for content in message["content"]:
-            if content["type"] == "text":
-                st.markdown(content["text"])
-            elif content["type"] == "image_url":
-                st.image(content["image_url"]["url"])
-
-if prompt := st.chat_input("What is up?"):
-    # Add image to the message if it exists
-    if image is not None:
-        st.session_state.image = image
-    if st.session_state.image:
-        image_url = image_path_to_uri(st.session_state.image)
-        st.session_state.messages.append({"role": "user", "content": [
-            {"type": "text", "text": prompt},
-            {"type": "image_url", "image_url": {"url": image_url}}
-        ]})
-    else:
-        st.session_state.messages.append({"role": "user", "content": [
-            {"type": "text", "text": prompt}
-        ]})
-
-    with st.chat_message("user"):
-        if st.session_state.image:
-            st.image(st.session_state.image)
-        st.markdown(prompt)
-
-    with st.chat_message("assistant"):
-        stream = client.chat.completions.create(
-            model=st.session_state["openai_model"],
-            messages=[
-                {"role": m["role"], "content": m["content"]}
-                for m in st.session_state.messages
-            ],
-            stream=True,
-        )
-        response = st.write_stream(stream)
-        st.session_state.messages.append({"role": "assistant", "content": [
-            {"type": "text", "text": response}
-        ]})
+import gradio as gr
+import image_edit_demo
+import image_edit_chat
+
+with gr.Blocks() as demo:
+    image_edit_chat.demo.render()
+with demo.route("PicEdit"):
+    image_edit_demo.demo.render()
+
+if __name__ == "__main__":
+    demo.launch()
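Editor's note: the new app.py only composes pages; it assumes image_edit_chat.py and image_edit_demo.py each define a module-level gr.Blocks object named demo that app.py can render() into a page, with demo.route("PicEdit") registering the second one as a separate page of the Gradio 5 multipage app. A minimal, hypothetical module satisfying that contract (not the repo's actual code) looks like this:

```python
# Hypothetical stand-in for image_edit_demo.py / image_edit_chat.py: the only contract
# app.py relies on is a module-level gr.Blocks named `demo` that it can render().
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("## Placeholder page")
    inp = gr.Textbox(label="Edit instruction")
    out = gr.Textbox(label="Result")

    # Echo handler purely for illustration; the real modules drive the editing agent.
    inp.submit(lambda text: f"received: {text}", inputs=inp, outputs=out)
```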
gradio_app.py DELETED
@@ -1,11 +0,0 @@
-import gradio as gr
-import gradio_demo
-import gradio_chat
-
-with gr.Blocks() as demo:
-    gradio_chat.demo.render()
-with demo.route("PicEdit"):
-    gradio_demo.demo.render()
-
-if __name__ == "__main__":
-    demo.launch()
gradio_chat.py β†’ image_edit_chat.py RENAMED
@@ -4,7 +4,7 @@ import os
 from src.hopter.client import Hopter, Environment
 from src.services.generate_mask import GenerateMaskService
 from dotenv import load_dotenv
-from src.utils import image_path_to_uri, upload_image
+from src.utils import upload_image
 from pydantic_ai.messages import (
     ToolCallPart,
     ToolReturnPart
gradio_demo.py β†’ image_edit_demo.py RENAMED
File without changes