import gradio as gr
import spaces


# TOKENIZER = 
# MINIMUM_TOKENS = 64

# def count_tokens(text):
#     return len(TOKENIZER(text).input_ids)
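
# Illustrative sketch of how the commented-out token counting above could be
# wired in once the real detector is enabled. The checkpoint name
# ("roberta-base") and the helper names below are assumptions, not part of
# the original mock.
_TOKENIZER = None
MINIMUM_TOKENS = 64


def get_tokenizer():
    # Load the tokenizer lazily, on first use, so the mocked layout still
    # starts without downloading any model files.
    global _TOKENIZER
    if _TOKENIZER is None:
        from transformers import AutoTokenizer

        _TOKENIZER = AutoTokenizer.from_pretrained("roberta-base")
    return _TOKENIZER


def count_tokens(text):
    return len(get_tokenizer()(text).input_ids)


def check_minimum_tokens(text, minimum_tokens=MINIMUM_TOKENS):
    # Surface a Gradio error in the UI when the input is too short to score.
    if count_tokens(text) < minimum_tokens:
        raise gr.Error(f"Input must be at least {minimum_tokens} tokens long.")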


# Mock function for testing layout
def run_test_power(model_name, real_text, generated_text, N=10):
    return f"Prediction: Human (Mocked for {model_name})"



# Switch the detector model based on the selected mode (disabled while the layout is mocked)
#def change_mode(mode):
#    if mode == "Faster Model":
#        .change_mode("t5-small")
#    elif mode == "Medium Model":
#        .change_mode("roberta-base-openai-detector")
#    elif mode == "Powerful Model":
#        .change_mode("falcon-rw-1b")
#    else:
#        raise gr.Error(f"Invalid mode selected.")
#    return mode
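
# Illustrative sketch of the mode-to-checkpoint mapping hinted at in the
# commented-out change_mode above. The mapping values come from those
# comments; the dict and the helper name `resolve_checkpoint` are assumptions.
MODE_TO_CHECKPOINT = {
    "Faster Model": "t5-small",
    "Medium Model": "roberta-base-openai-detector",
    "Powerful Model": "falcon-rw-1b",
}


def resolve_checkpoint(mode):
    # Map the dropdown label to a model checkpoint and show a Gradio error
    # in the UI when the label is unknown.
    if mode not in MODE_TO_CHECKPOINT:
        raise gr.Error(f"Invalid mode selected: {mode}")
    return MODE_TO_CHECKPOINT[mode]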



css = """
#header { text-align: center; font-size: 1.5em; margin-bottom: 20px; }
#output-text { font-weight: bold; font-size: 1.2em; }
.links { 
    display: flex; 
    justify-content: flex-end; 
    gap: 10px; 
    margin-right: 10px; 
    align-items: center;
}
.separator {
    margin: 0 5px;
    color: black;
}
.row {
    display: flex;
    justify-content: center;
    width: 100%;
}
.gradio-row input, .gradio-row select, .gradio-row button {
    width: 250px;  /* Set all elements to the same width */
    margin: 5px;
}
/* Adjusting layout for Input Text and Inference Result */
.input-row {
    display: flex;
    width: 100%;
}

.input-text {
    flex: 3;  /* 3 parts of the row */
}

.output-text {
    flex: 1;  /* 1 part of the row */
}
"""

# Gradio App
with gr.Blocks(css=css) as app:
    with gr.Row():
        gr.HTML('<div id="header">R-detect On HuggingFace</div>')
    with gr.Row():
        gr.HTML("""
        <div class="links">
            <a href="https://openreview.net/forum?id=z9j7wctoGV" target="_blank">Paper</a>
            <span class="separator">|</span>
            <a href="https://github.com/xLearn-AU/R-Detect" target="_blank">Code</a>
            <span class="separator">|</span>
            <a href="mailto:[email protected]" target="_blank">Contact</a>
        </div>
        """)
    with gr.Row():
        input_text = gr.Textbox(
            label="Input Text",
            placeholder="Enter Text Here",
            lines=8,
            elem_classes=["input-text"],
        )
        output = gr.Textbox(
            label="Inference Result",
            placeholder="Made by Human or AI",
            elem_id="output-text",
            elem_classes=["output-text"]
        )
    with gr.Row():
        model_name = gr.Dropdown(
            [
                "Faster Model",
                "Medium Model",
                "Powerful Model",
            ],
            label="Select Model",
            value="Medium Model",
        )
        submit_button = gr.Button("Run Detection", variant="primary")
        clear_button = gr.Button("Clear", variant="secondary")
    
    # Run the (mocked) detection on submit; input_text is passed as both
    # real_text and generated_text while the layout is being tested.
    submit_button.click(run_test_power, inputs=[model_name, input_text, input_text], outputs=output)
    clear_button.click(lambda: ("", ""), inputs=[], outputs=[input_text, output])
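
    # Illustrative: once the real detector replaces the mock, the submit
    # handler could validate length and resolve the checkpoint first, e.g.
    #   def detect(mode, text):
    #       check_minimum_tokens(text)
    #       return run_test_power(resolve_checkpoint(mode), text, text)
    #   submit_button.click(detect, inputs=[model_name, input_text], outputs=output)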

    with gr.Accordion("Disclaimer", open=False):
        gr.Markdown("""
        - **Disclaimer**: This tool is for demonstration purposes only. It is not a foolproof AI detector.
        - **Accuracy**: Results may vary based on input length and quality.
        """)

    with gr.Accordion("Citations", open=False):
        gr.Markdown("""
        ```
        @inproceedings{zhangs2024MMDMP,
            title={Detecting Machine-Generated Texts by Multi-Population Aware Optimization for Maximum Mean Discrepancy},
            author={Zhang, Shuhai and Song, Yiliao and Yang, Jiahao and Li, Yuanqing and Han, Bo and Tan, Mingkui},
            booktitle = {International Conference on Learning Representations (ICLR)},
            year={2024}
        }
        ```
        """)

app.launch()