smoke test showed previous MC model performed better, commenting out 1on1
Files changed:
- app.py (+11, -10)
- predictors.py (+7, -7)
app.py CHANGED

@@ -12,13 +12,13 @@ np.set_printoptions(suppress=True)
 
 def ai_generated_test(option, input):
     if option == "Human vs AI":
         return predict_bc_scores(input), None
     elif option == "Human vs AI Source Models":
         return predict_bc_scores(input), predict_mc_scores(input)
-    elif option == "Human vs AI Source Models (1 on 1)":
-        return predict_bc_scores(input), None, predict_1on1_scores(input)
+    # elif option == "Human vs AI Source Models (1 on 1)":
+    #     return predict_bc_scores(input), None, predict_1on1_scores(input)
 
     return None, None
 
 
 # COMBINED

@@ -111,7 +111,7 @@ with gr.Blocks() as demo:
                 [
                     "Human vs AI",
                     "Human vs AI Source Models",
-                    "Human vs AI Source Models (1 on 1)",
+                    # "Human vs AI Source Models (1 on 1)",
                 ],
                 label="Choose an option please.",
             )

@@ -154,8 +154,8 @@ with gr.Blocks() as demo:
                 bcLabel = gr.Label(label="Source")
             with gr.Column():
                 mcLabel = gr.Label(label="Creator")
-            with gr.Column():
-                mc1on1Label = gr.Label(label="Creator(1 on 1 Approach)")
+            # with gr.Column():
+            #     mc1on1Label = gr.Label(label="Creator(1 on 1 Approach)")
         with gr.Row():
             QLabel = gr.Label(label="Humanized")
         with gr.Group():

@@ -222,7 +222,7 @@ with gr.Blocks() as demo:
         outputs=[
             bcLabel,
             mcLabel,
-            mc1on1Label,
+            # mc1on1Label,
             sentenceBreakdown,
             writing_analysis_plot,
             QLabel,

@@ -233,7 +233,8 @@ with gr.Blocks() as demo:
     only_ai_btn.click(
         fn=ai_generated_test,
         inputs=[ai_option, input_text],
-        outputs=[bcLabel, mcLabel, mc1on1Label],
+        # outputs=[bcLabel, mcLabel, mc1on1Label],
+        outputs=[bcLabel, mcLabel],
         api_name="ai_check",
     )
 
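The app.py change hinges on how Gradio wires a click event: the handler's return tuple is mapped positionally onto the outputs list, so once mc1on1Label is commented out of outputs, ai_generated_test also has to stop returning a third value. The sketch below illustrates that wiring only; the component names, option strings, and hard-coded scores are placeholders, not the Space's real predictors.

# Minimal sketch (not the Space's actual code): the tuple returned by the
# click handler must line up, position by position, with the outputs list.
import gradio as gr

def fake_ai_check(option, text):
    # Stand-ins for predict_bc_scores / predict_mc_scores.
    bc = {"HUMAN": 0.3, "AI": 0.7}
    mc = {"model-a": 0.5, "model-b": 0.3, "model-c": 0.2}
    if option == "Human vs AI":
        return bc, None
    elif option == "Human vs AI Source Models":
        return bc, mc
    return None, None

with gr.Blocks() as demo:
    option = gr.Radio(["Human vs AI", "Human vs AI Source Models"], label="Mode")
    text = gr.Textbox(label="Input text")
    bc_label = gr.Label(label="Source")
    mc_label = gr.Label(label="Creator")
    gr.Button("Check").click(
        fn=fake_ai_check,
        inputs=[option, text],
        outputs=[bc_label, mc_label],  # two outputs, so the handler returns a 2-tuple
    )

if __name__ == "__main__":
    demo.launch()

The constraint is purely positional: with two components in outputs, every branch of the handler should return two values, which is why the third "1 on 1" slot disappears from both sides at once.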
predictors.py CHANGED

@@ -46,13 +46,13 @@ quillbot_tokenizer = AutoTokenizer.from_pretrained(text_quillbot_model_path)
 quillbot_model = AutoModelForSequenceClassification.from_pretrained(
     text_quillbot_model_path
 ).to(device)
-tokenizers_1on1 = {}
-models_1on1 = {}
-for model in text_1on1_models:
-    tokenizers_1on1[model] = AutoTokenizer.from_pretrained(model)
-    models_1on1[model] = AutoModelForSequenceClassification.from_pretrained(
-        model
-    ).to(device)
+# tokenizers_1on1 = {}
+# models_1on1 = {}
+# for model in text_1on1_models:
+#     tokenizers_1on1[model] = AutoTokenizer.from_pretrained(model)
+#     models_1on1[model] = AutoModelForSequenceClassification.from_pretrained(
+#         model
+#     ).to(device)
 
 
 def split_text_allow_complete_sentences_nltk(
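For reference, the block commented out of predictors.py follows the usual transformers pattern of eagerly loading one tokenizer/model pair per 1-on-1 checkpoint into dicts keyed by model name. A minimal sketch of that pattern follows; the single public checkpoint is only a stand-in for whatever text_1on1_models actually contains in the Space.

# Sketch of the eager-loading pattern used by the now commented-out 1-on-1 code.
# The checkpoint below is a placeholder, not the Space's real model list.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
text_1on1_models = ["distilbert-base-uncased-finetuned-sst-2-english"]  # placeholder

tokenizers_1on1 = {}
models_1on1 = {}
for model_name in text_1on1_models:
    tokenizers_1on1[model_name] = AutoTokenizer.from_pretrained(model_name)
    models_1on1[model_name] = AutoModelForSequenceClassification.from_pretrained(
        model_name
    ).to(device)

Keeping every pair resident in memory makes per-request inference fast but adds startup time and RAM/VRAM cost, which presumably is part of why disabling the 1-on-1 path also disables this loop rather than leaving the models loaded.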