danielrosehill committed · Commit 41c6f7c · Parent(s): 459e37c

updated

Browse files:
- app/app.py +129 -137
- requirements.txt +2 -2
app/app.py
CHANGED
@@ -38,22 +38,15 @@ def count_text_stats(text):
 def analyze_with_llm(text):
     if not st.session_state.openai_api_key:
         st.error("Please provide an OpenAI API key in the sidebar")
-        return None, None
-
+        return None, None, None
     try:
         client = OpenAI(api_key=st.session_state.openai_api_key)
-
         response = client.chat.completions.create(
             model="gpt-3.5-turbo-1106",
             messages=[
                 {
                     "role": "system",
-                    "content": """You are a text analysis expert. Your task is to separate a conversation into the prompt/question and the response/answer.
-                    Return ONLY a JSON object with three fields:
-                    - title: a short, descriptive title for the conversation (max 6 words)
-                    - prompt: the user's question or prompt
-                    - output: the response or answer
-                    If you cannot clearly identify any part, set it to null."""
+                    "content": """You are a text analysis expert. Your task is to separate a conversation into the prompt/question and the response/answer. Return ONLY a JSON object with three fields: - title: a short, descriptive title for the conversation (max 6 words) - prompt: the user's question or prompt - output: the response or answer If you cannot clearly identify any part, set it to null."""
                 },
                 {
                     "role": "user",
@@ -61,26 +54,22 @@ def analyze_with_llm(text):
                 }
             ],
             temperature=0,
-            response_format={
+            response_format={"type": "json_object"}
         )
-
         result = response.choices[0].message.content
         parsed = json.loads(result)
         return parsed.get("title"), parsed.get("prompt"), parsed.get("output")
-
     except Exception as e:
-        …
+        st.error(f"Error analyzing text: {str(e)}. The error was: {e}")
+        return None, None, None
 
 def separate_prompt_output(text):
     if not text:
         return "", "", ""
-
     if st.session_state.openai_api_key:
         title, prompt, output = analyze_with_llm(text)
         if all(v is not None for v in [title, prompt, output]):
             return title, prompt, output
-
     parts = text.split('\n\n', 1)
     if len(parts) == 2:
         return "Untitled Conversation", parts[0].strip(), parts[1].strip()
@@ -101,25 +90,16 @@ with st.sidebar:
     if api_key:
         st.session_state.openai_api_key = api_key
 
-    # Dark mode toggle
+    # Dark mode toggle using checkbox
     st.markdown("---")
     st.markdown("## 🎨 Appearance")
-    dark_mode = st.
+    dark_mode = st.checkbox("Dark Mode", value=st.session_state.mode == 'dark')
     st.session_state.mode = 'dark' if dark_mode else 'light'
-
-    # Settings section
-    st.markdown("---")
-    st.markdown("## ⚙️ Settings")
-    auto_copy = st.checkbox("Auto-copy results to clipboard", value=False)
-
-    if st.session_state.openai_api_key:
-        st.success("✅ API Key configured")
-    else:
-        st.warning("⚠️ No API Key provided - using basic separation")
 
 # Main interface
 st.title("✂️ Prompt Output Separator")
-st.markdown(
+st.markdown(
+    "Utility to assist with separating prompts and outputs when they are recorded in a unified block of text. For cost-optimisation, uses GPT 3.5.")
 
 # Tabs with icons
 tabs = st.tabs(["π Paste Text", "π File Processing", "π History"])
@@ -130,6 +110,7 @@ with tabs[0]:
 
     # Input area with placeholder
     input_container = st.container()
+
     with input_container:
         input_text = st.text_area(
             "Paste your conversation here...",
@@ -137,16 +118,16 @@ with tabs[0]:
             placeholder="Paste your conversation here. The tool will automatically separate the prompt from the output.",
             help="Enter the text you want to separate into prompt and output."
         )
-    …
+
+    # Process button
+    if st.button("π Process", use_container_width=True) and input_text:
+        with st.spinner("Processing..."):
+            title, prompt, output = separate_prompt_output(input_text)
+            st.session_state.title = title
+            st.session_state.prompt = prompt
+            st.session_state.output = output
+            st.session_state.history.append(input_text)
+
     # Suggested Title Section
     st.markdown("### π Suggested Title")
     title_area = st.text_area(
@@ -194,34 +175,35 @@ with tabs[0]:
 # File Processing Tab
 with tabs[1]:
     st.subheader("File Processing")
-    …
-                        st.dataframe(
-                            processed_df,
-                            use_container_width=True,
-                            hide_index=True
+    uploaded_file = st.file_uploader("Choose a file", type=['txt', 'csv'])
+
+    if uploaded_file is not None:
+        try:
+            if uploaded_file.type == "text/csv":
+                df = pd.read_csv(uploaded_file)
+                column = st.selectbox("Select column to process", df.columns)
+                if st.button("Process CSV"):
+                    with st.spinner("Processing..."):
+                        processed_df = process_column(df[column])
+                        st.write(processed_df)
+                        st.download_button(
+                            "Download Processed CSV",
+                            processed_df.to_csv(index=False),
+                            "processed_data.csv",
+                            "text/csv"
                         )
-    …
+            else:
+                content = uploaded_file.getvalue().decode("utf-8")
+                if st.button("Process Text File"):
+                    with st.spinner("Processing..."):
+                        title, prompt, output = separate_prompt_output(content)
+                        st.session_state.title = title
+                        st.session_state.prompt = prompt
+                        st.session_state.output = output
+                        st.session_state.history.append(content)
+                        st.experimental_rerun()
+        except Exception as e:
+            st.error(f"Error processing file: {str(e)}")
 
 # History Tab
 with tabs[2]:
@@ -254,79 +236,89 @@ st.markdown(
     unsafe_allow_html=True
 )
 
-#
+# Custom CSS for stats text to prevent them from overlapping
+st.markdown("""
+    <style>
+    .stats-text {
+        text-align: left;
+        font-size: 0.8em;
+        color: #888; /* Darker gray to fit the style */
+        margin-top: -10px; /* push the stats closer to the textarea */
+        margin-bottom: 10px;
+    }
+    </style>
+""", unsafe_allow_html=True)
+
+# Custom CSS to style dark mode
 if st.session_state.mode == 'dark':
     st.markdown("""
     <style>
-    …
-        font-size: 0.8em;
-        margin-top: 5px;
-    }
+    body {
+        color: #fff;
+        background-color: #262730;
+    }
+    .stTextInput, .stTextArea, .stNumberInput, .stSelectbox, .stRadio, .stCheckbox, .stSlider, .stDateInput, .stTimeInput {
+        background-color: #3d3d4d; /* Darker background for input widgets */
+        color: #fff; /* White text for better contrast */
+    }
+    .stButton>button {
+        background-color: #5c5c7a; /* Adjust button color */
+        color: white;
+    }
+    .stButton>button:hover {
+        background-color: #6e6e8a;
+        color: white;
+    }
+
+    .streamlit-expanderHeader {
+        background-color: #3d3d4d !important;
+        color: #fff !important;
+    }
+
+    .streamlit-expanderContent {
+        background-color: #3d3d4d !important;
+    }
+
+    .streamlit-container {
+        background-color: #262730;
+    }
+
+    .stAlert {
+        background-color: #3d3d4d !important;
+        color: #fff !important;
+    }
+
+    .st-ba {
+        background-color: #3d3d4d; /* Makes the body background dark */
+        color: #fff;
+    }
+
+    .css-10trblm {
+        background-color: #3d3d4d;
+        color: #fff;
+    }
+
+    .css-qbe2hs {
+        color: #fff;
+    }
+
+    .css-1wtrr7o {
+        color: #fff;
+    }
+
+    .css-103n16l {
+        color: #fff;
+    }
+
+    .css-10pw50 {
+        color: #fff;
+    }
+
+    .css-z5fcl4 {
+        color: #fff;
+    }
+    .css-1d391kg {
+        color: #fff;
+    }
     </style>
     """, unsafe_allow_html=True)
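For context, the core change in analyze_with_llm is switching the request to JSON mode via response_format={"type": "json_object"}. Below is a minimal standalone sketch of that call outside Streamlit (assuming openai>=1.x and an OPENAI_API_KEY environment variable; the sample text and variable names are illustrative, not part of this commit). Note that JSON mode requires the messages themselves to mention JSON, which the system prompt above already does.

# Standalone sketch of the JSON-mode request introduced in this commit (illustrative only).
import json
import os

from openai import OpenAI

# Assumes OPENAI_API_KEY is set in the environment; the app instead takes the key from the sidebar.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

sample_text = "How do I reverse a list in Python?\n\nUse my_list[::-1] or my_list.reverse()."

response = client.chat.completions.create(
    model="gpt-3.5-turbo-1106",
    messages=[
        {"role": "system", "content": "Separate the conversation into a JSON object with title, prompt and output fields."},
        {"role": "user", "content": sample_text},
    ],
    temperature=0,
    # JSON mode: the model is constrained to return a single valid JSON object.
    response_format={"type": "json_object"},
)

parsed = json.loads(response.choices[0].message.content)
print(parsed.get("title"), parsed.get("prompt"), parsed.get("output"))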
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
 streamlit==1.24.1
 pandas>=2.0
-numpy
+numpy
 openai==1.3.0
-pyperclip==1.8.2
+pyperclip==1.8.2
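pyperclip==1.8.2 stays pinned in requirements.txt; for reference, a minimal sketch of the clipboard API that dependency provides (illustrative, not code from this commit):

# Illustrative use of the pinned pyperclip dependency.
import pyperclip

pyperclip.copy("separated prompt text")   # place text on the system clipboard
print(pyperclip.paste())                  # read it back to confirm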