import pandas as pd
import warnings
from ast import literal_eval
import gradio as gr

warnings.filterwarnings('ignore')

# Load the CSV file.
# For Hugging Face Spaces, place the CSV file in your repository (or update the path accordingly).
df = pd.read_csv('df_chatbot_response.csv')
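# Assumed schema (inferred from the columns used below, not verified against the file):
# 'history_conversation' holds a stringified list of single-key dicts such as
# "[{'human': 'Hi'}, {'AI': 'Hello!'}]", while 'existing_answer' and 'new_answer'
# hold the two candidate final replies being compared.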

# Keep only conversations whose history contains more than one turn.
df['len_history'] = df['history_conversation'].apply(lambda x: len(literal_eval(x)))
df = df[df['len_history'] > 1].reset_index(drop=True)

# Create new conversation columns for variants A and B.
df['full_conversation_a'] = ''
df['full_conversation_b'] = ''

for i in range(len(df)):
    # Parse the history twice so each variant gets its own independent copy of the list.
    a = literal_eval(df['history_conversation'][i])
    a.append({'AI': df['existing_answer'][i]})
    b = literal_eval(df['history_conversation'][i])
    b.append({'AI': df['new_answer'][i]})
    df.at[i, 'full_conversation_a'] = a
    df.at[i, 'full_conversation_b'] = b

# Store conversation rounds as a list of dictionaries, one per row of the DataFrame.
conversation_rounds = []
for i in range(len(df)):
    a = df['full_conversation_a'][i]
    b = df['full_conversation_b'][i]
    # Replace literal "\n" sequences with actual newline characters in every message.
    for conv in (a, b):
        for d in conv:
            for key, value in d.items():
                if isinstance(value, str):
                    d[key] = value.replace('\\n', '\n')
    conversation_rounds.append({"a": a, "b": b})
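# Illustrative shape of a single round (example values, not taken from the real data):
# {"a": [{'human': 'Hi'}, {'AI': '<existing_answer>'}],
#  "b": [{'human': 'Hi'}, {'AI': '<new_answer>'}]}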

# --- Helper Function to Format Conversations ---
def format_conversation(conv):
    """
    Convert a list of dictionaries (with 'human' and 'AI' keys) into a list of tuples,
    pairing each human message with its subsequent AI response.
    """
    pairs = []
    i = 0
    while i < len(conv):
        if 'human' in conv[i]:
            human_msg = conv[i]['human']
            # If the next message exists and is from AI, pair them
            if i + 1 < len(conv) and 'AI' in conv[i+1]:
                ai_msg = conv[i+1]['AI']
                pairs.append((human_msg, ai_msg))
                i += 2
            else:
                pairs.append((human_msg, ""))
                i += 1
        else:
            # An AI message with no preceding human message is shown with an empty user turn.
            if 'AI' in conv[i]:
                pairs.append(("", conv[i]['AI']))
            i += 1
    return pairs
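# Example (illustrative):
#   format_conversation([{'human': 'Hi'}, {'AI': 'Hello!'}, {'human': 'Bye'}])
#   -> [('Hi', 'Hello!'), ('Bye', '')]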

def get_conversation(round_idx):
    """Return formatted conversation pairs for a given round index."""
    if round_idx < len(conversation_rounds):
        conv = conversation_rounds[round_idx]
        existing_pairs = format_conversation(conv["a"])
        new_pairs = format_conversation(conv["b"])
        return existing_pairs, new_pairs
    else:
        # End-of-conversation message if no more rounds.
        return [("End of conversation.", "")], [("End of conversation.", "")]

def update_conversation(choice, current_idx, choices_list):
    """
    Advance to the next conversation round when the user clicks the button.
    - 'choice' is the user's selection ("A" or "B") for the current round.
    - 'current_idx' is the current conversation round index.
    - 'choices_list' accumulates all the user's choices.
    """
    # Record "-" if the button is clicked without a selection, so the summary join never sees None.
    choices_list = choices_list + [choice if choice else "-"]
    new_idx = current_idx + 1

    if new_idx >= len(conversation_rounds):
        summary_text = "All conversations finished.\nUser choice per round: " + ", ".join(choices_list)
        # Display the summary in both chatbots when done.
        return new_idx, [("End of conversation.", summary_text)], [("End of conversation.", summary_text)], choices_list
    else:
        existing_pairs, new_pairs = get_conversation(new_idx)
        return new_idx, existing_pairs, new_pairs, choices_list
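# The returned tuple maps positionally onto the click() outputs wired up below:
# (conversation_index, existing_chat, new_chat, user_choices).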

# --- Gradio UI Setup ---
with gr.Blocks() as demo:
    gr.Markdown(f"## Blind Test 2 Model LLM with Total Conversation {len(df)}")
    
    # States to track the current round and store user choices.
    conversation_index = gr.State(0)
    user_choices = gr.State([])

    # Display the two conversation variants side by side, pre-filled with the first round.
    initial_existing, initial_new = get_conversation(0)
    with gr.Row():
        existing_chat = gr.Chatbot(label="A", value=initial_existing)
        new_chat = gr.Chatbot(label="B", value=initial_new)
    
    # Radio button for the user to choose their preferred conversation variant.
    flag_choice = gr.Radio(choices=["A", "B"], label="Choose the conversation you prefer:")
    next_button = gr.Button("Next Conversation")
    
    # Textbox to display the final summary of user choices.
    summary_output = gr.Textbox(label="Choice Summary", interactive=False)
    
    next_button.click(
        update_conversation,
        inputs=[flag_choice, conversation_index, user_choices],
        outputs=[conversation_index, existing_chat, new_chat, user_choices]
    )

    def show_summary(choices):
        """Count how many rounds each variant was preferred."""
        choices_a = choices.count("A")
        choices_b = choices.count("B")
        return f"A: {choices_a}\nB: {choices_b}"
    
    gr.Button("Tampilkan Ringkasan").click(
        show_summary,
        inputs=[user_choices],
        outputs=[summary_output]
    )

# Launch the Gradio app.
if __name__ == "__main__":
    demo.launch()
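# Note: on Hugging Face Spaces the script is run and served automatically; launched
# locally, Gradio serves the app at http://127.0.0.1:7860 by default.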