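"""Prompt Output Separator: a Streamlit utility that splits a pasted LLM
conversation into a prompt, an output, and a suggested title."""
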
import streamlit as st
import pandas as pd
from io import StringIO
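# Note: pyperclip uses the clipboard of the machine running this script,
# so the copy buttons only work when the app is run locally.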
import pyperclip
import openai

# Initialize session state variables
if 'history' not in st.session_state:
    st.session_state.history = []
if 'prompt' not in st.session_state:
    st.session_state.prompt = ""
if 'output' not in st.session_state:
    st.session_state.output = ""
if 'title' not in st.session_state:
    st.session_state.title = ""

# Custom CSS
st.markdown("""
    <style>
    .stats-text {
        font-size: 0.8rem;
        color: #666;
    }
    </style>
""", unsafe_allow_html=True)

def count_text_stats(text):
    words = len(text.split())
    chars = len(text)
    return words, chars
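
# A minimal sketch of the LLM-based splitter used by separate_prompt_output().
# It assumes the same pre-1.0 openai.ChatCompletion interface used elsewhere in
# this file; the "---" delimiter protocol is an assumption, not a fixed format.
def analyze_with_llm(text):
    """Ask the model to split pasted text into (prompt, output).

    Returns (None, None) on any failure so the caller can fall back to the
    plain blank-line split.
    """
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "system",
                    "content": (
                        "Separate the following text into the user's prompt and "
                        "the assistant's output. Reply with the prompt, then a "
                        "line containing only ---, then the output."
                    )
                },
                {"role": "user", "content": text}
            ],
            temperature=0
        )
        content = response.choices[0].message['content']
        if '---' in content:
            prompt, output = content.split('---', 1)
            return prompt.strip(), output.strip()
    except Exception:
        pass
    return None, None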

def separate_prompt_output(text):
    if not text:
        return "", "", ""

    api_key = st.session_state.get('openai_api_key')
    if api_key:
        # Pass the user-supplied key to the openai client before any calls.
        openai.api_key = api_key
        prompt, output = analyze_with_llm(text)
        if prompt is not None and output is not None:
            suggested_title = generate_title_with_llm(prompt, output)
            return suggested_title, prompt, output

    # Fallback: treat everything before the first blank line as the prompt.
    parts = text.split('\n\n', 1)
    if len(parts) == 2:
        return "Untitled Conversation", parts[0].strip(), parts[1].strip()
    return "Untitled Conversation", text.strip(), ""

def generate_title_with_llm(prompt, output):
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "system",
                    "content": "Generate a short, concise title (max 6 words) that captures the main topic of this conversation."
                },
                {
                    "role": "user",
                    "content": f"Prompt: {prompt}\nOutput: {output}"
                }
            ],
            max_tokens=20,
            temperature=0.7
        )
        return response.choices[0].message['content'].strip()
    except Exception:
        # Fall back to a generic title if the API call fails for any reason.
        return "Untitled Conversation"

def process_column(column):
    processed_data = []
    for item in column:
        title, prompt, output = separate_prompt_output(str(item))
        processed_data.append({"Title": title, "Prompt": prompt, "Output": output})
    return pd.DataFrame(processed_data)

# Main interface
st.title("βœ‚οΈ Prompt Output Separator")
st.markdown("Utility to assist with separating prompts and outputs when they are recorded in a unified block of text. For cost-optimisation, uses GPT 3.5.")

# Tabs with icons
tabs = st.tabs(["πŸ“ Paste Text", "πŸ“ File Processing", "πŸ“Š History"])

# Paste Text Tab
with tabs[0]:
    st.subheader("Paste Prompt and Output")
    
    # Settings
    with st.expander("βš™οΈ Settings", expanded=False):
        auto_copy = st.checkbox("Automatically copy prompt to clipboard", value=False)
        st.text_input("OpenAI API Key (optional)", type="password", key="openai_api_key")
    
    # Input area with placeholder
    input_container = st.container()
    with input_container:
        input_text = st.text_area(
            "Paste your conversation here...",
            height=200,
            placeholder="Paste your conversation here. The tool will automatically separate the prompt from the output.",
            help="Enter the text you want to separate into prompt and output."
        )

    # Action buttons
    col1, col2 = st.columns(2)
    with col1:
        if st.button("πŸ”„ Separate Now", use_container_width=True):
            if input_text:
                with st.spinner("Processing..."):
                    st.session_state.history.append(input_text)
                    title, prompt, output = separate_prompt_output(input_text)
                    st.session_state.title = title
                    st.session_state.prompt = prompt
                    st.session_state.output = output
                    if auto_copy:
                        pyperclip.copy(prompt)
            else:
                st.error("Please enter some text")
    
    with col2:
        if st.button("πŸ—‘οΈ Clear All", use_container_width=True):
            st.session_state.title = ""
            st.session_state.prompt = ""
            st.session_state.output = ""
            input_text = ""

    # Suggested Title Section
    st.markdown("### πŸ“Œ Suggested Title")
    title_area = st.text_area(
        "",
        value=st.session_state.get('title', ""),
        height=50,
        key="title_area",
        help="A suggested title based on the content"
    )

    # Prompt Section
    st.markdown("### πŸ“ Prompt")
    prompt_area = st.text_area(
        "",
        value=st.session_state.get('prompt', ""),
        height=200,
        key="prompt_area",
        help="The extracted prompt will appear here"
    )
    # Display prompt stats
    prompt_words, prompt_chars = count_text_stats(st.session_state.get('prompt', ""))
    st.markdown(f"<p class='stats-text'>Words: {prompt_words} | Characters: {prompt_chars}</p>", unsafe_allow_html=True)
    
    if st.button("πŸ“‹ Copy Prompt", use_container_width=True):
        pyperclip.copy(st.session_state.get('prompt', ""))
        st.success("Copied prompt to clipboard!")

    # Output Section
    st.markdown("### πŸ€– Output")
    output_area = st.text_area(
        "",
        value=st.session_state.get('output', ""),
        height=200,
        key="output_area",
        help="The extracted output will appear here"
    )
    # Display output stats
    output_words, output_chars = count_text_stats(st.session_state.get('output', ""))
    st.markdown(f"<p class='stats-text'>Words: {output_words} | Characters: {output_chars}</p>", unsafe_allow_html=True)
    
    if st.button("πŸ“‹ Copy Output", use_container_width=True):
        pyperclip.copy(st.session_state.get('output', ""))
        st.success("Copied output to clipboard!")

# File Processing Tab
with tabs[1]:
    st.subheader("File Processing")
    uploaded_files = st.file_uploader(
        "Upload files",
        type=["txt", "md", "csv"],
        accept_multiple_files=True,
        help="Upload text files to process multiple conversations at once"
    )

    if uploaded_files:
        for file in uploaded_files:
            with st.expander(f"πŸ“„ {file.name}", expanded=True):
                file_content = file.read().decode("utf-8")
                if file.name.endswith(".csv"):
                    df = pd.read_csv(StringIO(file_content))
                    for col in df.columns:
                        processed_df = process_column(df[col])
                        st.write(f"Processed column: {col}")
                        st.dataframe(
                            processed_df,
                            use_container_width=True,
                            hide_index=True
                        )
                else:
                    title, prompt, output = separate_prompt_output(file_content)
                    st.json({
                        "Title": title,
                        "Prompt": prompt,
                        "Output": output
                    })

# History Tab
with tabs[2]:
    st.subheader("Processing History")
    if st.session_state.history:
        if st.button("πŸ—‘οΈ Clear History", type="secondary"):
            st.session_state.history = []
            st.experimental_rerun()
            
        for idx, item in enumerate(reversed(st.session_state.history)):
            with st.expander(f"Entry {len(st.session_state.history) - idx}", expanded=False):
                st.text_area(
                    "Content",
                    value=item,
                    height=150,
                    key=f"history_{idx}",
                    disabled=True
                )
    else:
        st.info("πŸ’‘ No processing history available yet. Process some text to see it here.")

# Footer
st.markdown("---")
st.markdown(
    """
    <div style='text-align: center'>
        <p>Created by <a href="https://github.com/danielrosehill/Prompt-And-Output-Separator">Daniel Rosehill</a> and Claude Sonnet 3.5</p>
        <p><a href="https://github.com/danielrosehill/Prompt-And-Output-Separator" target="_blank">
            <img src="https://img.shields.io/github/stars/danielrosehill/Prompt-And-Output-Separator?style=social" alt="GitHub stars">
        </a></p>
    </div>
    """,
    unsafe_allow_html=True
)