Spaces:
Running
on
Zero
Running
on
Zero
Martín Santillán Cooper
committed on
Minor code refactor
Browse files- src/app.py +1 -1
- src/model.py +6 -5
src/app.py
CHANGED
@@ -165,7 +165,7 @@ def on_submit(criteria, context, user_message, assistant_message_text, assistant
|
|
165 |
|
166 |
result = get_guardian_response(messages=messages, criteria_name=criteria_name)
|
167 |
result_label = result["assessment"] # Yes or No
|
168 |
-
result_confidence_score = round(result["certainty"]
|
169 |
|
170 |
html_str = f"<p><strong>{result_label}</strong> <span style='padding-left: 0.25rem; color: gray; font-style: italic'>(Confidence Score: {result_confidence_score})</span></p>"
|
171 |
# html_str = f"{get_result_description(state['selected_sub_catalog'], state['selected_criteria_name'])} {result_label}"
|
|
|
165 |
|
166 |
result = get_guardian_response(messages=messages, criteria_name=criteria_name)
|
167 |
result_label = result["assessment"] # Yes or No
|
168 |
+
result_confidence_score = round(result["certainty"], 3)
|
169 |
|
170 |
html_str = f"<p><strong>{result_label}</strong> <span style='padding-left: 0.25rem; color: gray; font-style: italic'>(Confidence Score: {result_confidence_score})</span></p>"
|
171 |
# html_str = f"{get_result_description(state['selected_sub_catalog'], state['selected_criteria_name'])} {result_label}"
|
src/model.py
CHANGED
@@ -202,16 +202,17 @@ def get_guardian_response(messages, criteria_name):
|
|
202 |
# logger.debug(f"model output is:\n{output}")
|
203 |
|
204 |
label, prob_of_risk = parse_output(output, input_len)
|
205 |
-
|
206 |
-
logger.debug(f"Prob_of_risk is: {prob_of_risk}")
|
207 |
else:
|
208 |
raise Exception("Environment variable 'INFERENCE_ENGINE' must be one of [WATSONX, MOCK, TORCH]")
|
209 |
|
210 |
-
|
211 |
-
|
|
|
|
|
212 |
|
213 |
end = time()
|
214 |
total = end - start
|
215 |
logger.debug(f"The evaluation took {total} secs")
|
216 |
|
217 |
-
return {"assessment": label, "certainty":
|
|
|
202 |
# logger.debug(f"model output is:\n{output}")
|
203 |
|
204 |
label, prob_of_risk = parse_output(output, input_len)
|
205 |
+
|
|
|
206 |
else:
|
207 |
raise Exception("Environment variable 'INFERENCE_ENGINE' must be one of [WATSONX, MOCK, TORCH]")
|
208 |
|
209 |
+
certainty = prob_of_risk if prob_of_risk > 0.5 else 1 - prob_of_risk
|
210 |
+
|
211 |
+
logger.debug(f"Label: {label}")
|
212 |
+
logger.debug(f"Probabiliy of risk: {prob_of_risk}")
|
213 |
|
214 |
end = time()
|
215 |
total = end - start
|
216 |
logger.debug(f"The evaluation took {total} secs")
|
217 |
|
218 |
+
return {"assessment": label, "prob_of_risk": prob_of_risk, "certainty": certainty}
|