danielrosehill committed on
Commit
5c2a16c
Β·
1 Parent(s): ed80909
Files changed (2) hide show
  1. app/app.py +90 -137
  2. versions/v3.py +129 -137
app/app.py CHANGED
@@ -5,20 +5,13 @@ import time
5
  import os
6
  from io import StringIO
7
  import pyperclip
8
- from openai import OpenAI
9
  import json
 
10
 
11
- # Page Configuration
12
- st.set_page_config(
13
- page_title="Prompt Output Separator",
14
- page_icon="βœ‚οΈ",
15
- layout="wide",
16
- initial_sidebar_state="expanded"
17
- )
18
 
19
- # Initialize session state variables
20
- if 'openai_api_key' not in st.session_state:
21
- st.session_state.openai_api_key = None
22
  if 'history' not in st.session_state:
23
  st.session_state.history = []
24
  if 'prompt' not in st.session_state:
@@ -36,37 +29,77 @@ def count_text_stats(text):
36
  return words, chars
37
 
38
  def analyze_with_llm(text):
39
- if not st.session_state.openai_api_key:
40
  st.error("Please provide an OpenAI API key in the sidebar")
41
  return None, None, None
42
  try:
43
- client = OpenAI(api_key=st.session_state.openai_api_key)
44
- response = client.chat.completions.create(
45
- model="gpt-3.5-turbo-1106",
46
- messages=[
 
 
 
 
47
  {
48
  "role": "system",
49
- "content": """You are a text analysis expert. Your task is to separate a conversation into the prompt/question and the response/answer. Return ONLY a JSON object with three fields: - title: a short, descriptive title for the conversation (max 6 words) - prompt: the user's question or prompt - output: the response or answer If you cannot clearly identify any part, set it to null."""
 
 
 
 
 
 
 
 
 
 
 
50
  },
51
  {
52
  "role": "user",
53
- "content": f"Please analyze this text and separate it into title, prompt and output: {text}"
54
  }
55
  ],
56
- temperature=0,
57
- response_format={"type": "json_object"}
 
 
 
 
 
58
  )
59
- result = response.choices[0].message.content
60
- parsed = json.loads(result)
61
- return parsed.get("title"), parsed.get("prompt"), parsed.get("output")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  except Exception as e:
63
- st.error(f"Error analyzing text: {str(e)}. The error was: {e}")
64
- return None, None, None
65
 
66
  def separate_prompt_output(text):
67
  if not text:
68
  return "", "", ""
69
- if st.session_state.openai_api_key:
70
  title, prompt, output = analyze_with_llm(text)
71
  if all(v is not None for v in [title, prompt, output]):
72
  return title, prompt, output
@@ -82,44 +115,31 @@ def process_column(column):
82
  processed_data.append({"Title": title, "Prompt": prompt, "Output": output})
83
  return pd.DataFrame(processed_data)
84
 
85
- # Sidebar configuration
86
  with st.sidebar:
87
  st.image("https://img.icons8.com/color/96/000000/chat.png", width=50)
88
  st.markdown("## πŸ› οΈ Configuration")
89
- api_key = st.text_input("Enter OpenAI API Key", type="password")
90
  if api_key:
91
- st.session_state.openai_api_key = api_key
92
 
93
- # Dark mode toggle using checkbox
94
  st.markdown("---")
95
  st.markdown("## 🎨 Appearance")
96
  dark_mode = st.checkbox("Dark Mode", value=st.session_state.mode == 'dark')
97
  st.session_state.mode = 'dark' if dark_mode else 'light'
98
 
99
- # Main interface
100
  st.title("βœ‚οΈ Prompt Output Separator")
101
- st.markdown(
102
- "Utility to assist with separating prompts and outputs when they are recorded in a unified block of text. For cost-optimisation, uses GPT 3.5.")
103
 
104
- # Tabs with icons
105
  tabs = st.tabs(["πŸ“ Paste Text", "πŸ“ File Processing", "πŸ“Š History"])
106
 
107
- # Paste Text Tab
108
  with tabs[0]:
109
  st.subheader("Paste Prompt and Output")
110
 
111
- # Input area with placeholder
112
  input_container = st.container()
113
 
114
  with input_container:
115
- input_text = st.text_area(
116
- "Paste your conversation here...",
117
- height=200,
118
- placeholder="Paste your conversation here. The tool will automatically separate the prompt from the output.",
119
- help="Enter the text you want to separate into prompt and output."
120
- )
121
 
122
- # Process button
123
  if st.button("πŸ”„ Process", use_container_width=True) and input_text:
124
  with st.spinner("Processing..."):
125
  title, prompt, output = separate_prompt_output(input_text)
@@ -128,26 +148,11 @@ with tabs[0]:
128
  st.session_state.output = output
129
  st.session_state.history.append(input_text)
130
 
131
- # Suggested Title Section
132
  st.markdown("### πŸ“Œ Suggested Title")
133
- title_area = st.text_area(
134
- "",
135
- value=st.session_state.get('title', ""),
136
- height=70,
137
- key="title_area",
138
- help="AI-generated title based on the conversation content"
139
- )
140
 
141
- # Prompt Section
142
  st.markdown("### πŸ“ Prompt")
143
- prompt_area = st.text_area(
144
- "",
145
- value=st.session_state.get('prompt', ""),
146
- height=200,
147
- key="prompt_area",
148
- help="The extracted prompt will appear here"
149
- )
150
- # Display prompt stats
151
  prompt_words, prompt_chars = count_text_stats(st.session_state.get('prompt', ""))
152
  st.markdown(f"<p class='stats-text'>Words: {prompt_words} | Characters: {prompt_chars}</p>", unsafe_allow_html=True)
153
 
@@ -155,16 +160,8 @@ with tabs[0]:
155
  pyperclip.copy(st.session_state.get('prompt', ""))
156
  st.success("Copied prompt to clipboard!")
157
 
158
- # Output Section
159
  st.markdown("### πŸ€– Output")
160
- output_area = st.text_area(
161
- "",
162
- value=st.session_state.get('output', ""),
163
- height=200,
164
- key="output_area",
165
- help="The extracted output will appear here"
166
- )
167
- # Display output stats
168
  output_words, output_chars = count_text_stats(st.session_state.get('output', ""))
169
  st.markdown(f"<p class='stats-text'>Words: {output_words} | Characters: {output_chars}</p>", unsafe_allow_html=True)
170
 
@@ -172,28 +169,29 @@ with tabs[0]:
172
  pyperclip.copy(st.session_state.get('output', ""))
173
  st.success("Copied output to clipboard!")
174
 
175
- # File Processing Tab
176
  with tabs[1]:
177
- st.subheader("File Processing")
178
  uploaded_file = st.file_uploader("Choose a file", type=['txt', 'csv'])
179
 
180
  if uploaded_file is not None:
181
  try:
182
  if uploaded_file.type == "text/csv":
183
  df = pd.read_csv(uploaded_file)
184
- column = st.selectbox("Select column to process", df.columns)
 
185
  if st.button("Process CSV"):
186
  with st.spinner("Processing..."):
187
- processed_df = process_column(df[column])
188
- st.write(processed_df)
189
  st.download_button(
190
  "Download Processed CSV",
191
- processed_df.to_csv(index=False),
192
- "processed_data.csv",
193
- "text/csv"
 
194
  )
195
  else:
196
- content = uploaded_file.getvalue().decode("utf-8")
197
  if st.button("Process Text File"):
198
  with st.spinner("Processing..."):
199
  title, prompt, output = separate_prompt_output(content)
@@ -202,10 +200,10 @@ with tabs[1]:
202
  st.session_state.output = output
203
  st.session_state.history.append(content)
204
  st.experimental_rerun()
 
205
  except Exception as e:
206
  st.error(f"Error processing file: {str(e)}")
207
 
208
- # History Tab
209
  with tabs[2]:
210
  st.subheader("Processing History")
211
  if st.session_state.history:
@@ -215,41 +213,13 @@ with tabs[2]:
215
 
216
  for idx, item in enumerate(reversed(st.session_state.history)):
217
  with st.expander(f"Entry {len(st.session_state.history) - idx}", expanded=False):
218
- st.text_area(
219
- "Content",
220
- value=item,
221
- height=150,
222
- key=f"history_{idx}",
223
- disabled=True
224
- )
225
  else:
226
  st.info("πŸ’‘ No processing history available yet. Process some text to see it here.")
227
 
228
- # Footer
229
  st.markdown("---")
230
- st.markdown(
231
- """
232
- <div style='text-align: center'>
233
- <p>Created by <a href="https://github.com/danielrosehill/Prompt-And-Output-Separator">Daniel Rosehill</a></p>
234
- </div>
235
- """,
236
- unsafe_allow_html=True
237
- )
238
-
239
- # Custom CSS for stats text to prevent them from overlapping
240
- st.markdown("""
241
- <style>
242
- .stats-text {
243
- text-align: left;
244
- font-size: 0.8em;
245
- color: #888; /* Darker gray to fit the style */
246
- margin-top: -10px; /* push the stats closer to the textarea */
247
- margin-bottom: 10px;
248
- }
249
- </style>
250
- """, unsafe_allow_html=True)
251
 
252
- # Custom CSS to style dark mode
253
  if st.session_state.mode == 'dark':
254
  st.markdown("""
255
  <style>
@@ -258,11 +228,11 @@ if st.session_state.mode == 'dark':
258
  background-color: #262730;
259
  }
260
  .stTextInput, .stTextArea, .stNumberInput, .stSelectbox, .stRadio, .stCheckbox, .stSlider, .stDateInput, .stTimeInput {
261
- background-color: #3d3d4d; /* Darker background for input widgets */
262
- color: #fff; /* White text for better contrast */
263
  }
264
  .stButton>button {
265
- background-color: #5c5c7a; /* Adjust button color */
266
  color: white;
267
  }
268
  .stButton>button:hover {
@@ -287,38 +257,21 @@ if st.session_state.mode == 'dark':
287
  background-color: #3d3d4d !important;
288
  color: #fff !important;
289
  }
290
-
291
- .st-ba {
292
- background-color: #3d3d4d; /* Makes the body background dark */
293
- color: #fff;
294
- }
295
 
296
- .css-10trblm {
297
- background-color: #3d3d4d;
298
- color: #fff;
299
- }
300
-
301
- .css-qbe2hs {
302
- color: #fff;
303
- }
304
-
305
- .css-1wtrr7o {
306
- color: #fff;
307
  }
308
 
309
- .css-103n16l {
310
- color: #fff;
311
  }
312
 
313
- .css-10pw50 {
314
- color: #fff;
315
  }
316
 
317
- .css-z5fcl4 {
318
- color: #fff;
319
- }
320
- .css-1d391kg {
321
- color: #fff;
322
  }
323
  </style>
324
  """, unsafe_allow_html=True)
 
5
  import os
6
  from io import StringIO
7
  import pyperclip
 
8
  import json
9
+ import requests
10
 
11
+ st.set_page_config(page_title="Prompt Output Separator", page_icon="βœ‚οΈ", layout="wide", initial_sidebar_state="expanded")
 
 
 
 
 
 
12
 
13
+ if 'api_key' not in st.session_state:
14
+ st.session_state.api_key = None
 
15
  if 'history' not in st.session_state:
16
  st.session_state.history = []
17
  if 'prompt' not in st.session_state:
 
29
  return words, chars
30
 
31
  def analyze_with_llm(text):
32
+ if not st.session_state.api_key:
33
  st.error("Please provide an OpenAI API key in the sidebar")
34
  return None, None, None
35
  try:
36
+ headers = {
37
+ "Authorization": f"Bearer {st.session_state.api_key}",
38
+ "Content-Type": "application/json"
39
+ }
40
+
41
+ data = {
42
+ "model": "gpt-4",
43
+ "messages": [
44
  {
45
  "role": "system",
46
+ "content": """You are a text separator. Your ONLY job is to split the input text into its original prompt and response components.
47
+
48
+ CRITICAL RULES:
49
+ - DO NOT summarize or modify ANY text
50
+ - Return the EXACT original text split into two parts
51
+ - Make NO changes to the content
52
+ - Preserve ALL formatting and whitespace
53
+
54
+ Return ONLY a JSON object with these fields:
55
+ - title: brief descriptive title (max 6 words)
56
+ - prompt: the EXACT, COMPLETE first part of the conversation
57
+ - output: the EXACT, COMPLETE response/answer part"""
58
  },
59
  {
60
  "role": "user",
61
+ "content": f"Split this text into its original parts with NO modifications: {text}"
62
  }
63
  ],
64
+ "temperature": 0
65
+ }
66
+
67
+ response = requests.post(
68
+ "https://api.openai.com/v1/chat/completions",
69
+ headers=headers,
70
+ json=data
71
  )
72
+
73
+ if response.status_code == 200:
74
+ result = response.json()['choices'][0]['message']['content']
75
+ try:
76
+ parsed = json.loads(result)
77
+ # Verify no content was lost
78
+ original_words = len(text.split())
79
+ result_words = len((parsed.get("prompt", "") + parsed.get("output", "")).split())
80
+ if result_words < original_words * 0.9: # Allow for 10% difference due to splitting
81
+ st.error("Content was modified during processing. Using basic split instead.")
82
+ parts = text.split('\n\n', 1)
83
+ if len(parts) == 2:
84
+ return "Untitled Conversation", parts[0].strip(), parts[1].strip()
85
+ return "Untitled Conversation", text.strip(), ""
86
+ return parsed.get("title"), parsed.get("prompt"), parsed.get("output")
87
+ except json.JSONDecodeError:
88
+ st.error("Failed to parse LLM response as JSON")
89
+ return None, None, None
90
+ else:
91
+ st.error(f"API request failed with status code: {response.status_code}")
92
+ st.error(f"Response: {response.text}")
93
+ return None, None, None
94
+
95
  except Exception as e:
96
+ st.error(f"Error analyzing text: {str(e)}")
97
+ return None, None, None
98
 
99
  def separate_prompt_output(text):
100
  if not text:
101
  return "", "", ""
102
+ if st.session_state.api_key:
103
  title, prompt, output = analyze_with_llm(text)
104
  if all(v is not None for v in [title, prompt, output]):
105
  return title, prompt, output
 
115
  processed_data.append({"Title": title, "Prompt": prompt, "Output": output})
116
  return pd.DataFrame(processed_data)
117
 
 
118
  with st.sidebar:
119
  st.image("https://img.icons8.com/color/96/000000/chat.png", width=50)
120
  st.markdown("## πŸ› οΈ Configuration")
121
+ api_key = st.text_input("Enter OpenAI API Key", type="password", help="Get your API key from platform.openai.com")
122
  if api_key:
123
+ st.session_state.api_key = api_key
124
 
 
125
  st.markdown("---")
126
  st.markdown("## 🎨 Appearance")
127
  dark_mode = st.checkbox("Dark Mode", value=st.session_state.mode == 'dark')
128
  st.session_state.mode = 'dark' if dark_mode else 'light'
129
 
 
130
  st.title("βœ‚οΈ Prompt Output Separator")
131
+ st.markdown("Utility to assist with separating prompts and outputs when they are recorded in a unified block of text.")
 
132
 
 
133
  tabs = st.tabs(["πŸ“ Paste Text", "πŸ“ File Processing", "πŸ“Š History"])
134
 
 
135
  with tabs[0]:
136
  st.subheader("Paste Prompt and Output")
137
 
 
138
  input_container = st.container()
139
 
140
  with input_container:
141
+ input_text = st.text_area("Paste your conversation here...", height=200, placeholder="Paste your conversation here. The tool will automatically separate the prompt from the output.", help="Enter the text you want to separate into prompt and output.")
 
 
 
 
 
142
 
 
143
  if st.button("πŸ”„ Process", use_container_width=True) and input_text:
144
  with st.spinner("Processing..."):
145
  title, prompt, output = separate_prompt_output(input_text)
 
148
  st.session_state.output = output
149
  st.session_state.history.append(input_text)
150
 
 
151
  st.markdown("### πŸ“Œ Suggested Title")
152
+ title_area = st.text_area("", value=st.session_state.get('title', ""), height=70, key="title_area", help="AI-generated title based on the conversation content")
 
 
 
 
 
 
153
 
 
154
  st.markdown("### πŸ“ Prompt")
155
+ prompt_area = st.text_area("", value=st.session_state.get('prompt', ""), height=200, key="prompt_area", help="The extracted prompt will appear here")
 
 
 
 
 
 
 
156
  prompt_words, prompt_chars = count_text_stats(st.session_state.get('prompt', ""))
157
  st.markdown(f"<p class='stats-text'>Words: {prompt_words} | Characters: {prompt_chars}</p>", unsafe_allow_html=True)
158
 
 
160
  pyperclip.copy(st.session_state.get('prompt', ""))
161
  st.success("Copied prompt to clipboard!")
162
 
 
163
  st.markdown("### πŸ€– Output")
164
+ output_area = st.text_area("", value=st.session_state.get('output', ""), height=200, key="output_area", help="The extracted output will appear here")
 
 
 
 
 
 
 
165
  output_words, output_chars = count_text_stats(st.session_state.get('output', ""))
166
  st.markdown(f"<p class='stats-text'>Words: {output_words} | Characters: {output_chars}</p>", unsafe_allow_html=True)
167
 
 
169
  pyperclip.copy(st.session_state.get('output', ""))
170
  st.success("Copied output to clipboard!")
171
 
 
172
  with tabs[1]:
173
+ st.subheader("Process File")
174
  uploaded_file = st.file_uploader("Choose a file", type=['txt', 'csv'])
175
 
176
  if uploaded_file is not None:
177
  try:
178
  if uploaded_file.type == "text/csv":
179
  df = pd.read_csv(uploaded_file)
180
+ st.write("Select the column containing the conversations:")
181
+ column = st.selectbox("Column", df.columns.tolist())
182
  if st.button("Process CSV"):
183
  with st.spinner("Processing..."):
184
+ result_df = process_column(df[column])
185
+ st.write(result_df)
186
  st.download_button(
187
  "Download Processed CSV",
188
+ result_df.to_csv(index=False).encode('utf-8'),
189
+ "processed_conversations.csv",
190
+ "text/csv",
191
+ key='download-csv'
192
  )
193
  else:
194
+ content = StringIO(uploaded_file.getvalue().decode("utf-8")).read()
195
  if st.button("Process Text File"):
196
  with st.spinner("Processing..."):
197
  title, prompt, output = separate_prompt_output(content)
 
200
  st.session_state.output = output
201
  st.session_state.history.append(content)
202
  st.experimental_rerun()
203
+
204
  except Exception as e:
205
  st.error(f"Error processing file: {str(e)}")
206
 
 
207
  with tabs[2]:
208
  st.subheader("Processing History")
209
  if st.session_state.history:
 
213
 
214
  for idx, item in enumerate(reversed(st.session_state.history)):
215
  with st.expander(f"Entry {len(st.session_state.history) - idx}", expanded=False):
216
+ st.text_area("Content", value=item, height=150, key=f"history_{idx}", disabled=True)
 
 
 
 
 
 
217
  else:
218
  st.info("πŸ’‘ No processing history available yet. Process some text to see it here.")
219
 
 
220
  st.markdown("---")
221
+ st.markdown("<div style='text-align: center'><p>Created by <a href='https://github.com/danielrosehill/Prompt-And-Output-Separator'>Daniel Rosehill</a></p></div>", unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
222
 
 
223
  if st.session_state.mode == 'dark':
224
  st.markdown("""
225
  <style>
 
228
  background-color: #262730;
229
  }
230
  .stTextInput, .stTextArea, .stNumberInput, .stSelectbox, .stRadio, .stCheckbox, .stSlider, .stDateInput, .stTimeInput {
231
+ background-color: #3d3d4d;
232
+ color: #fff;
233
  }
234
  .stButton>button {
235
+ background-color: #5c5c7a;
236
  color: white;
237
  }
238
  .stButton>button:hover {
 
257
  background-color: #3d3d4d !important;
258
  color: #fff !important;
259
  }
 
 
 
 
 
260
 
261
+ .stats-text {
262
+ color: #aaa !important;
 
 
 
 
 
 
 
 
 
263
  }
264
 
265
+ .css-10trblm {
266
+ color: #fff !important;
267
  }
268
 
269
+ .css-16idsys {
270
+ color: #fff !important;
271
  }
272
 
273
+ .css-1vq4p4l {
274
+ color: #fff !important;
 
 
 
275
  }
276
  </style>
277
  """, unsafe_allow_html=True)
versions/v3.py CHANGED
@@ -38,22 +38,15 @@ def count_text_stats(text):
38
  def analyze_with_llm(text):
39
  if not st.session_state.openai_api_key:
40
  st.error("Please provide an OpenAI API key in the sidebar")
41
- return None, None
42
-
43
  try:
44
  client = OpenAI(api_key=st.session_state.openai_api_key)
45
-
46
  response = client.chat.completions.create(
47
  model="gpt-3.5-turbo-1106",
48
  messages=[
49
  {
50
  "role": "system",
51
- "content": """You are a text analysis expert. Your task is to separate a conversation into the prompt/question and the response/answer.
52
- Return ONLY a JSON object with three fields:
53
- - title: a short, descriptive title for the conversation (max 6 words)
54
- - prompt: the user's question or prompt
55
- - output: the response or answer
56
- If you cannot clearly identify any part, set it to null."""
57
  },
58
  {
59
  "role": "user",
@@ -61,26 +54,22 @@ def analyze_with_llm(text):
61
  }
62
  ],
63
  temperature=0,
64
- response_format={ "type": "json_object" }
65
  )
66
-
67
  result = response.choices[0].message.content
68
  parsed = json.loads(result)
69
  return parsed.get("title"), parsed.get("prompt"), parsed.get("output")
70
-
71
  except Exception as e:
72
- st.error(f"Error analyzing text: {str(e)}")
73
- return None, None, None
74
 
75
  def separate_prompt_output(text):
76
  if not text:
77
  return "", "", ""
78
-
79
  if st.session_state.openai_api_key:
80
  title, prompt, output = analyze_with_llm(text)
81
  if all(v is not None for v in [title, prompt, output]):
82
  return title, prompt, output
83
-
84
  parts = text.split('\n\n', 1)
85
  if len(parts) == 2:
86
  return "Untitled Conversation", parts[0].strip(), parts[1].strip()
@@ -101,25 +90,16 @@ with st.sidebar:
101
  if api_key:
102
  st.session_state.openai_api_key = api_key
103
 
104
- # Dark mode toggle
105
  st.markdown("---")
106
  st.markdown("## 🎨 Appearance")
107
- dark_mode = st.toggle("Dark Mode", value=st.session_state.mode == 'dark')
108
  st.session_state.mode = 'dark' if dark_mode else 'light'
109
-
110
- # Settings section
111
- st.markdown("---")
112
- st.markdown("## βš™οΈ Settings")
113
- auto_copy = st.checkbox("Auto-copy results to clipboard", value=False)
114
-
115
- if st.session_state.openai_api_key:
116
- st.success("βœ“ API Key configured")
117
- else:
118
- st.warning("⚠ No API Key provided - using basic separation")
119
 
120
  # Main interface
121
  st.title("βœ‚οΈ Prompt Output Separator")
122
- st.markdown("Utility to assist with separating prompts and outputs when they are recorded in a unified block of text. For cost-optimisation, uses GPT 3.5.")
 
123
 
124
  # Tabs with icons
125
  tabs = st.tabs(["πŸ“ Paste Text", "πŸ“ File Processing", "πŸ“Š History"])
@@ -130,6 +110,7 @@ with tabs[0]:
130
 
131
  # Input area with placeholder
132
  input_container = st.container()
 
133
  with input_container:
134
  input_text = st.text_area(
135
  "Paste your conversation here...",
@@ -137,16 +118,16 @@ with tabs[0]:
137
  placeholder="Paste your conversation here. The tool will automatically separate the prompt from the output.",
138
  help="Enter the text you want to separate into prompt and output."
139
  )
140
-
141
- # Process button
142
- if st.button("πŸ”„ Process", use_container_width=True) and input_text:
143
- with st.spinner("Processing..."):
144
- title, prompt, output = separate_prompt_output(input_text)
145
- st.session_state.title = title
146
- st.session_state.prompt = prompt
147
- st.session_state.output = output
148
- st.session_state.history.append(input_text)
149
-
150
  # Suggested Title Section
151
  st.markdown("### πŸ“Œ Suggested Title")
152
  title_area = st.text_area(
@@ -194,34 +175,35 @@ with tabs[0]:
194
  # File Processing Tab
195
  with tabs[1]:
196
  st.subheader("File Processing")
197
- uploaded_files = st.file_uploader(
198
- "Upload files",
199
- type=["txt", "md", "csv"],
200
- accept_multiple_files=True,
201
- help="Upload text files to process multiple conversations at once"
202
- )
203
-
204
- if uploaded_files:
205
- for file in uploaded_files:
206
- with st.expander(f"πŸ“„ {file.name}", expanded=True):
207
- file_content = file.read().decode("utf-8")
208
- if file.name.endswith(".csv"):
209
- df = pd.read_csv(StringIO(file_content))
210
- for col in df.columns:
211
- processed_df = process_column(df[col])
212
- st.write(f"Processed column: {col}")
213
- st.dataframe(
214
- processed_df,
215
- use_container_width=True,
216
- hide_index=True
217
  )
218
- else:
219
- title, prompt, output = separate_prompt_output(file_content)
220
- st.json({
221
- "Title": title,
222
- "Prompt": prompt,
223
- "Output": output
224
- })
 
 
 
 
 
225
 
226
  # History Tab
227
  with tabs[2]:
@@ -254,79 +236,89 @@ st.markdown(
254
  unsafe_allow_html=True
255
  )
256
 
257
- # Apply theme
 
 
 
 
 
 
 
 
 
 
 
 
 
258
  if st.session_state.mode == 'dark':
259
  st.markdown("""
260
  <style>
261
- body {
262
- color: #fff;
263
- background-color: #0e1117;
264
- }
265
- .stTextInput > div > div > input, .stTextArea > div > div > textarea {
266
- color: #fff;
267
- background-color: #262730;
268
- border-radius: 8px;
269
- border: 1px solid #464646;
270
- padding: 15px;
271
- font-family: 'Courier New', monospace;
272
- }
273
- .stButton > button {
274
- color: #fff;
275
- background-color: #4c4cff;
276
- border-radius: 8px;
277
- padding: 10px 20px;
278
- transition: all 0.3s ease;
279
- border: none;
280
- width: 100%;
281
- }
282
- .stButton > button:hover {
283
- background-color: #6b6bff;
284
- transform: translateY(-2px);
285
- box-shadow: 0 4px 12px rgba(76, 76, 255, 0.3);
286
- }
287
- .stMarkdown {
288
- color: #fff;
289
- }
290
- .stats-text {
291
- color: #999;
292
- font-size: 0.8em;
293
- margin-top: 5px;
294
- }
295
- </style>
296
- """, unsafe_allow_html=True)
297
- else:
298
- st.markdown("""
299
- <style>
300
- body {
301
- color: #333;
302
- background-color: #fff;
303
- }
304
- .stTextInput > div > div > input, .stTextArea > div > div > textarea {
305
- color: #333;
306
- background-color: #f8f9fa;
307
- border-radius: 8px;
308
- border: 1px solid #e0e0e0;
309
- padding: 15px;
310
- font-family: 'Courier New', monospace;
311
- }
312
- .stButton > button {
313
- color: #fff;
314
- background-color: #4c4cff;
315
- border-radius: 8px;
316
- padding: 10px 20px;
317
- transition: all 0.3s ease;
318
- border: none;
319
- width: 100%;
320
- }
321
- .stButton > button:hover {
322
- background-color: #6b6bff;
323
- transform: translateY(-2px);
324
- box-shadow: 0 4px 12px rgba(76, 76, 255, 0.3);
325
- }
326
- .stats-text {
327
- color: #666;
328
- font-size: 0.8em;
329
- margin-top: 5px;
330
- }
331
  </style>
332
  """, unsafe_allow_html=True)
 
38
  def analyze_with_llm(text):
39
  if not st.session_state.openai_api_key:
40
  st.error("Please provide an OpenAI API key in the sidebar")
41
+ return None, None, None
 
42
  try:
43
  client = OpenAI(api_key=st.session_state.openai_api_key)
 
44
  response = client.chat.completions.create(
45
  model="gpt-3.5-turbo-1106",
46
  messages=[
47
  {
48
  "role": "system",
49
+ "content": """You are a text analysis expert. Your task is to separate a conversation into the prompt/question and the response/answer. Return ONLY a JSON object with three fields: - title: a short, descriptive title for the conversation (max 6 words) - prompt: the user's question or prompt - output: the response or answer If you cannot clearly identify any part, set it to null."""
 
 
 
 
 
50
  },
51
  {
52
  "role": "user",
 
54
  }
55
  ],
56
  temperature=0,
57
+ response_format={"type": "json_object"}
58
  )
 
59
  result = response.choices[0].message.content
60
  parsed = json.loads(result)
61
  return parsed.get("title"), parsed.get("prompt"), parsed.get("output")
 
62
  except Exception as e:
63
+ st.error(f"Error analyzing text: {str(e)}. The error was: {e}")
64
+ return None, None, None
65
 
66
  def separate_prompt_output(text):
67
  if not text:
68
  return "", "", ""
 
69
  if st.session_state.openai_api_key:
70
  title, prompt, output = analyze_with_llm(text)
71
  if all(v is not None for v in [title, prompt, output]):
72
  return title, prompt, output
 
73
  parts = text.split('\n\n', 1)
74
  if len(parts) == 2:
75
  return "Untitled Conversation", parts[0].strip(), parts[1].strip()
 
90
  if api_key:
91
  st.session_state.openai_api_key = api_key
92
 
93
+ # Dark mode toggle using checkbox
94
  st.markdown("---")
95
  st.markdown("## 🎨 Appearance")
96
+ dark_mode = st.checkbox("Dark Mode", value=st.session_state.mode == 'dark')
97
  st.session_state.mode = 'dark' if dark_mode else 'light'
 
 
 
 
 
 
 
 
 
 
98
 
99
  # Main interface
100
  st.title("βœ‚οΈ Prompt Output Separator")
101
+ st.markdown(
102
+ "Utility to assist with separating prompts and outputs when they are recorded in a unified block of text. For cost-optimisation, uses GPT 3.5.")
103
 
104
  # Tabs with icons
105
  tabs = st.tabs(["πŸ“ Paste Text", "πŸ“ File Processing", "πŸ“Š History"])
 
110
 
111
  # Input area with placeholder
112
  input_container = st.container()
113
+
114
  with input_container:
115
  input_text = st.text_area(
116
  "Paste your conversation here...",
 
118
  placeholder="Paste your conversation here. The tool will automatically separate the prompt from the output.",
119
  help="Enter the text you want to separate into prompt and output."
120
  )
121
+
122
+ # Process button
123
+ if st.button("πŸ”„ Process", use_container_width=True) and input_text:
124
+ with st.spinner("Processing..."):
125
+ title, prompt, output = separate_prompt_output(input_text)
126
+ st.session_state.title = title
127
+ st.session_state.prompt = prompt
128
+ st.session_state.output = output
129
+ st.session_state.history.append(input_text)
130
+
131
  # Suggested Title Section
132
  st.markdown("### πŸ“Œ Suggested Title")
133
  title_area = st.text_area(
 
175
  # File Processing Tab
176
  with tabs[1]:
177
  st.subheader("File Processing")
178
+ uploaded_file = st.file_uploader("Choose a file", type=['txt', 'csv'])
179
+
180
+ if uploaded_file is not None:
181
+ try:
182
+ if uploaded_file.type == "text/csv":
183
+ df = pd.read_csv(uploaded_file)
184
+ column = st.selectbox("Select column to process", df.columns)
185
+ if st.button("Process CSV"):
186
+ with st.spinner("Processing..."):
187
+ processed_df = process_column(df[column])
188
+ st.write(processed_df)
189
+ st.download_button(
190
+ "Download Processed CSV",
191
+ processed_df.to_csv(index=False),
192
+ "processed_data.csv",
193
+ "text/csv"
 
 
 
 
194
  )
195
+ else:
196
+ content = uploaded_file.getvalue().decode("utf-8")
197
+ if st.button("Process Text File"):
198
+ with st.spinner("Processing..."):
199
+ title, prompt, output = separate_prompt_output(content)
200
+ st.session_state.title = title
201
+ st.session_state.prompt = prompt
202
+ st.session_state.output = output
203
+ st.session_state.history.append(content)
204
+ st.experimental_rerun()
205
+ except Exception as e:
206
+ st.error(f"Error processing file: {str(e)}")
207
 
208
  # History Tab
209
  with tabs[2]:
 
236
  unsafe_allow_html=True
237
  )
238
 
239
+ # Custom CSS for stats text to prevent them from overlapping
240
+ st.markdown("""
241
+ <style>
242
+ .stats-text {
243
+ text-align: left;
244
+ font-size: 0.8em;
245
+ color: #888; /* Darker gray to fit the style */
246
+ margin-top: -10px; /* push the stats closer to the textarea */
247
+ margin-bottom: 10px;
248
+ }
249
+ </style>
250
+ """, unsafe_allow_html=True)
251
+
252
+ # Custom CSS to style dark mode
253
  if st.session_state.mode == 'dark':
254
  st.markdown("""
255
  <style>
256
+ body {
257
+ color: #fff;
258
+ background-color: #262730;
259
+ }
260
+ .stTextInput, .stTextArea, .stNumberInput, .stSelectbox, .stRadio, .stCheckbox, .stSlider, .stDateInput, .stTimeInput {
261
+ background-color: #3d3d4d; /* Darker background for input widgets */
262
+ color: #fff; /* White text for better contrast */
263
+ }
264
+ .stButton>button {
265
+ background-color: #5c5c7a; /* Adjust button color */
266
+ color: white;
267
+ }
268
+ .stButton>button:hover {
269
+ background-color: #6e6e8a;
270
+ color: white;
271
+ }
272
+
273
+ .streamlit-expanderHeader {
274
+ background-color: #3d3d4d !important;
275
+ color: #fff !important;
276
+ }
277
+
278
+ .streamlit-expanderContent {
279
+ background-color: #3d3d4d !important;
280
+ }
281
+
282
+ .streamlit-container {
283
+ background-color: #262730;
284
+ }
285
+
286
+ .stAlert {
287
+ background-color: #3d3d4d !important;
288
+ color: #fff !important;
289
+ }
290
+
291
+ .st-ba {
292
+ background-color: #3d3d4d; /* Makes the body background dark */
293
+ color: #fff;
294
+ }
295
+
296
+ .css-10trblm {
297
+ background-color: #3d3d4d;
298
+ color: #fff;
299
+ }
300
+
301
+ .css-qbe2hs {
302
+ color: #fff;
303
+ }
304
+
305
+ .css-1wtrr7o {
306
+ color: #fff;
307
+ }
308
+
309
+ .css-103n16l {
310
+ color: #fff;
311
+ }
312
+
313
+ .css-10pw50 {
314
+ color: #fff;
315
+ }
316
+
317
+ .css-z5fcl4 {
318
+ color: #fff;
319
+ }
320
+ .css-1d391kg {
321
+ color: #fff;
322
+ }
 
 
 
323
  </style>
324
  """, unsafe_allow_html=True)