import os

import pandas as pd
import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file, if present.
load_dotenv()

st.title("Client Response (Answering)")

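# The main page can hand off samples in three forms, checked in priority order:
# a "best_samples" list, a "single_sample" dict, or a raw "prompt_text"/"generated_text" pair.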
if "best_samples" in st.session_state: |
|
samples = st.session_state.best_samples |
|
elif "single_sample" in st.session_state: |
|
s = st.session_state.single_sample |
|
|
|
samples = [{"prompt": s.get("question", ""), "question": s.get("response", "")}] |
|
elif "generated_text" in st.session_state and "prompt_text" in st.session_state: |
|
samples = [{"prompt": st.session_state.prompt_text, "question": st.session_state.generated_text}] |
|
else: |
|
st.error("No samples found. Please generate samples on the main page first.") |
|
st.stop() |
|
|
|
st.markdown("### Samples for Answering") |
|
df_samples = pd.DataFrame(samples) |
|
st.dataframe(df_samples) |
|
|
|
default_openai_key = os.getenv("OPENAI_API_KEY") or "" |
|
openai_api_key = st.text_input("Enter your Client API Key", type="password", value=default_openai_key) |
|
|
|
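# Each sample's question is sent to the chat model as its own request, and the
# reply is stored alongside the original prompt and question.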
if st.button("Answer Samples with Client Model"):
    if openai_api_key:
        client = OpenAI(api_key=openai_api_key)
        answered_samples = []
        for sample in samples:
            sample_question = sample["question"]
            prompt = (
                "Answer the following question comprehensively and concisely:\n\n"
                f"{sample_question}\n\n"
                "Provide a clear, one-sentence answer."
            )
            completion = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt}],
            )
            answer = completion.choices[0].message.content.strip()
            answered_samples.append({
                "prompt": sample["prompt"],
                "question": sample["question"],
                "answer": answer,
            })

        st.markdown("**Answered Samples:**")
        df_answered = pd.DataFrame(answered_samples)
        st.dataframe(df_answered)

        # Keep the answered samples in session state for use by later pages.
        st.session_state.refined_samples = answered_samples
    else:
        st.error("Please provide your Client API Key.")