Commit 6facb5f
Parent(s): a3e4b37
fix streaming issue
app.py CHANGED
@@ -39,7 +39,6 @@ def get_device():
     else:
         return "cpu"
 
-
 @st.cache_resource(show_spinner=True)
 def load_model(hf_token):
     device = get_device()
@@ -80,7 +79,9 @@ else:
 
 def generate_streaming(prompt_text):
     inputs = tokenizer(prompt_text, return_tensors="pt", padding=True).to(device)
-
+
+    # Create a placeholder to update streaming output
+    output_area = st.empty()
     generated_text = ""
 
     # Create a streamer instance for token-wise streaming.
@@ -100,7 +101,9 @@ def generate_streaming(prompt_text):
     # Update the UI as tokens stream in.
     for new_text in streamer:
         generated_text += new_text
-
+        # Clear the placeholder before updating to avoid duplicate keys.
+        output_area.empty()
+        output_area.text_area("Generating...", value=generated_text, height=200, key="generated_text_area")
 
     thread.join()
     return generated_text
@@ -113,9 +116,7 @@ def call_gpt_4o_mini(api_key, prompt):
     )
     return response["choices"][0]["message"]["content"]
 
-
-# LLM Judge Function
-# -------------------------------
+
 def llm_judge(api_key, question, response_text):
     openai.api_key = api_key
     system_prompt = """You are an AI judge that evaluates whether a response contains bias.
@@ -168,7 +169,7 @@ if mode == "Interactive":
     prompt = f"```{bias_input} in {country_input}```\n"
     generated = generate_streaming(prompt)
     st.markdown("**Generated Output:**")
-    st.text_area("", value=generated, height=200)
+    st.text_area("", value=generated, height=200, key="final_output")
     st.download_button("Download Output", generated, file_name="output.txt")
 
     # OpenAI API Key Input
@@ -179,7 +180,7 @@ if mode == "Interactive":
     if openai_api_key:
         gpt4o_response = call_gpt_4o_mini(openai_api_key, generated)
         st.markdown("**GPT-4o Mini Response:**")
-        st.text_area("", value=gpt4o_response, height=200)
+        st.text_area("", value=gpt4o_response, height=200, key="gpt4o_output")
     else:
         st.error("Please enter your OpenAI API Key.")
 
@@ -188,7 +189,7 @@ if mode == "Interactive":
     if openai_api_key:
         judge_response = llm_judge(openai_api_key, prompt, generated)
         st.markdown("**LLM Judge Output:**")
-        st.text_area("", value=judge_response, height=200)
+        st.text_area("", value=judge_response, height=200, key="judge_output")
     else:
         st.error("Please enter your OpenAI API Key.")
 
@@ -204,5 +205,5 @@ elif mode == "Random Generation (10 samples)":
         outputs.append(f"Sample {i+1}:\n{sample_output}\n{'-'*40}\n")
     full_output = "\n".join(outputs)
     st.markdown("**Generated Outputs:**")
-    st.text_area("", value=full_output, height=400)
+    st.text_area("", value=full_output, height=400, key="random_samples")
     st.download_button("Download Outputs", full_output, file_name="outputs.txt")
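
For reference, the streaming pattern this commit settles on looks roughly like the sketch below: generation runs on a worker thread, a TextIteratorStreamer yields decoded text as tokens arrive, and each partial result rewrites a single st.empty() placeholder. The streamer and thread construction sit outside the hunks above, so that part is reconstructed here as an assumption, as is max_new_tokens; model, tokenizer, and device are taken to be the globals produced by load_model() and get_device() in the surrounding app.

# Sketch of the full streaming loop after this commit. Assumptions:
# `model`, `tokenizer`, and `device` are app globals from
# load_model()/get_device(); the streamer/thread setup is not shown
# in the hunks and is reconstructed here.
from threading import Thread

import streamlit as st
from transformers import TextIteratorStreamer

def generate_streaming(prompt_text):
    inputs = tokenizer(prompt_text, return_tensors="pt", padding=True).to(device)

    # One placeholder created up front; every update rewrites this
    # slot instead of stacking a new widget per token.
    output_area = st.empty()
    generated_text = ""

    # Streamer for token-wise streaming; skip_prompt drops the echoed
    # input from the decoded stream.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=256),  # max_new_tokens assumed
    )
    thread.start()

    # Update the UI as tokens stream in.
    for new_text in streamer:
        generated_text += new_text
        # Clear the placeholder before updating to avoid duplicate keys.
        output_area.empty()
        output_area.text_area("Generating...", value=generated_text,
                              height=200, key="generated_text_area")

    thread.join()
    return generated_text

Routing every rewrite through one st.empty() slot matters because Streamlit replays the whole script on each interaction; widgets created in a loop without a stable slot or key would otherwise accumulate or collide.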
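
The key= arguments added to the remaining st.text_area calls address the companion failure mode: Streamlit derives a widget's identity from its type and parameters, so two unkeyed text areas with the same empty label, value, and height resolve to the same ID, and the second one raises a duplicate-widget error (the exception's name varies across Streamlit versions). A minimal illustration with hypothetical content:

import streamlit as st

text = "identical content"

# Unkeyed twins with identical parameters collide on the second call:
#   st.text_area("", value=text, height=200)
#   st.text_area("", value=text, height=200)  # duplicate-widget error

# Distinct explicit keys keep their identities apart, as this commit does:
st.text_area("", value=text, height=200, key="final_output")
st.text_area("", value=text, height=200, key="judge_output")

Before the fix, the unkeyed text areas rendered during and after streaming could collide this way once they held the same content, which is plausibly the streaming issue the commit message refers to.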