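"""Streamlit page that sends the auto-generated prompts to the client model (OpenAI) and displays its answers."""
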
import os

import pandas as pd
import streamlit as st
from openai import OpenAI


st.set_page_config(layout="wide")
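# CSS so wide result tables can scroll horizontally instead of overflowing the page.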
scroll_css = """
<style>
.table-scroll {
    overflow-x: auto;
    width: 100%;
    max-width: 100%;
}
</style>
"""

st.markdown(scroll_css, unsafe_allow_html=True)

st.title("Client Model Response (Answering)")

# Use best_samples if available; otherwise, fall back to the interactive single sample.
if "best_samples" in st.session_state:
    samples = st.session_state.best_samples
elif "single_sample" in st.session_state:
    s = st.session_state.single_sample
    # Rename keys: "question" becomes "prompt" and "response" becomes "question"
    samples = [{"Bias Category and Country": s.get("Bias Category and Country", ""), "Auto Generated Prompts": s.get("Auto Generated Prompts", "")}]
elif "generated_text" in st.session_state and "prompt_text" in st.session_state:
    samples = [{"prompt": st.session_state.prompt_text, "question": st.session_state.generated_text}]
else:
    st.error("No samples found. Please generate samples on the main page first.")
    st.stop()

st.markdown("### Input Prompts for Client Application")
df_samples = pd.DataFrame(samples)
st.dataframe(df_samples)

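# Pre-fill the key field from the OPENAI_API_KEY environment variable when it is set.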
default_openai_key = os.getenv("OPENAI_API_KEY") or ""
openai_api_key = st.text_input("Enter your Client API Key", type="password", value=default_openai_key)

if st.button("Generate responses with Client Application"):
    if openai_api_key:
        client = OpenAI(api_key=openai_api_key)
        answered_samples = []
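        # Ask the client model to answer each generated prompt, keeping the answer alongside its metadata.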
        for sample in samples:
            sample_question = sample["Auto_Generated_Prompts"]
            prompt = (
                f"Answer the following question comprehensively and concisely:\n\n"
                f"{sample_question}\n\n"
                "Provide a clear, one-sentence answer."
            )
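            # One chat completion call per sample; assumes the API key has access to the gpt-4o-mini model.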
            completion = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt}]
            )
            answer = completion.choices[0].message.content.strip()
            answered_sample = {
                "Bias_Category_and_Country": sample["Bias_Category_and_Country"],
                "Auto_Generated_Prompts": sample["Auto_Generated_Prompts"],
                "Client_Responses": answer
            }
            answered_samples.append(answered_sample)
        
        df_answered = pd.DataFrame(answered_samples)
        df_answered_styled = (
            df_answered.style
            .set_properties(subset=["Auto_Generated_Prompts", "Client_Responses"],
                            **{"white-space": "pre-wrap", "width": "300px"})
            .set_properties(subset=["Bias_Category_and_Country"],
                            **{"white-space": "nowrap", "width": "120px"})
        )

        st.markdown("**Client Responses**")
        st.markdown("<div class='table-scroll'>", unsafe_allow_html=True)
        st.table(df_answered_styled)
        st.markdown("</div>", unsafe_allow_html=True)

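        # Keep the answered samples in session state so later pages can reuse them.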
        st.session_state.refined_samples = answered_samples
    else:
        st.error("Please provide your Client API Key.")