import pandas as pd
import warnings
from ast import literal_eval
import gradio as gr

warnings.filterwarnings('ignore')

# Load the CSV file.
# For Hugging Face Spaces, place the CSV file in your repository (or update the path accordingly).
df = pd.read_csv('df_chatbot_response.csv')
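
# Expected columns (inferred from the code below; adjust if your file differs):
#   - history_conversation: stringified list of dicts with 'human'/'AI' keys, e.g.
#     "[{'human': 'Hi'}, {'AI': 'Hello, how can I help?'}, {'human': 'What is the price?'}]"
#   - existing_answer: the final reply from the existing model (shown as variant A)
#   - new_answer: the final reply from the new model (shown as variant B)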

# Count conversation turns and keep only rows with more than one message in the history.
df['len_history'] = df['history_conversation'].apply(lambda x: len(literal_eval(x)))
df = df[df['len_history'] > 1].reset_index(drop=True)

# Create new conversation columns for variants A and B.
df['full_conversation_a'] = ''
df['full_conversation_b'] = ''

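# For each row, build variant A by appending the existing model's answer to a fresh
# copy of the parsed history, and variant B by appending the new model's answer.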
for i in range(len(df)):
    a = literal_eval(df['history_conversation'][i])
    a.append({'AI': df['existing_answer'][i]})
    b = literal_eval(df['history_conversation'][i])
    b.append({'AI': df['new_answer'][i]})
    df.at[i, 'full_conversation_a'] = a
    df.at[i, 'full_conversation_b'] = b

# Store conversation rounds as a list of dictionaries.
conversation_rounds = []

def _unescape_newlines(conv):
    """Replace literal "\\n" sequences in message strings with real newlines (in place)."""
    for d in conv:
        for key, value in d.items():
            if isinstance(value, str):
                d[key] = value.replace('\\n', '\n')
    return conv

for i in range(len(df)):
    a = _unescape_newlines(df['full_conversation_a'][i])
    b = _unescape_newlines(df['full_conversation_b'][i])
    conversation_rounds.append({"a": a, "b": b})
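
# Each element of conversation_rounds now looks roughly like this (illustrative values):
#   {"a": [{'human': '...'}, {'AI': '<existing_answer>'}],
#    "b": [{'human': '...'}, {'AI': '<new_answer>'}]}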

# --- Helper Function to Format Conversations ---
def format_conversation(conv):
    """
    Convert a list of dictionaries (with 'human' and 'AI' keys) into a list of tuples,
    pairing each human message with its subsequent AI response.
    """
    pairs = []
    i = 0
    while i < len(conv):
        if 'human' in conv[i]:
            human_msg = conv[i]['human']
            # If the next message exists and is from AI, pair them
            if i + 1 < len(conv) and 'AI' in conv[i+1]:
                ai_msg = conv[i+1]['AI']
                pairs.append((human_msg, ai_msg))
                i += 2
            else:
                pairs.append((human_msg, ""))
                i += 1
        else:
            # If conversation starts with an AI message
            if 'AI' in conv[i]:
                pairs.append(("", conv[i]['AI']))
            i += 1
    return pairs
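
# Illustrative example (not part of the app flow):
#   format_conversation([{'human': 'Hi'}, {'AI': 'Hello!'}])  ->  [('Hi', 'Hello!')]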

def get_conversation(round_idx):
    """Return formatted conversation pairs for a given round index."""
    if round_idx < len(conversation_rounds):
        conv = conversation_rounds[round_idx]
        existing_pairs = format_conversation(conv["a"])
        new_pairs = format_conversation(conv["b"])
        return existing_pairs, new_pairs
    else:
        # End-of-conversation message if no more rounds.
        return [("End of conversation.", "")], [("End of conversation.", "")]

def update_conversation(choice, current_idx, choices_list):
    """
    Update the conversation round when the user clicks the button.
    - 'choice' is the user's selection ("A" or "B") for the current round; it is None when nothing is selected.
    - 'current_idx' is the current conversation round index.
    - 'choices_list' accumulates all the user's choices.
    """
    if choice is None:
        # Nothing selected yet; redisplay the current round instead of recording an empty choice.
        existing_pairs, new_pairs = get_conversation(current_idx)
        return current_idx, existing_pairs, new_pairs, choices_list

    choices_list = choices_list + [choice]
    new_idx = current_idx + 1

    if new_idx >= len(conversation_rounds):
        summary_text = "All conversations are finished.\nUser choices per round: " + ", ".join(choices_list)
        # Display the summary in both chatbots when done.
        return new_idx, [("End of conversation.", summary_text)], [("End of conversation.", summary_text)], choices_list
    else:
        existing_pairs, new_pairs = get_conversation(new_idx)
        return new_idx, existing_pairs, new_pairs, choices_list
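
# The four returned values map, in order, to the click handler's outputs below:
# [conversation_index, existing_chat, new_chat, user_choices].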

# --- Gradio UI Setup ---
with gr.Blocks() as demo:
    gr.Markdown(f"## Blind Test 2 Model LLM with Total Conversation {len(df)}")
    
    # States to track the current round and store user choices.
    conversation_index = gr.State(0)
    user_choices = gr.State([])

    # Initialize both chatbots with the first conversation round and
    # display the two conversation variants side by side.
    initial_existing, initial_new = get_conversation(0)
    with gr.Row():
        existing_chat = gr.Chatbot(label="A", value=initial_existing)
        new_chat = gr.Chatbot(label="B", value=initial_new)
    
    # Radio button for the user to choose their preferred conversation variant.
    flag_choice = gr.Radio(choices=["A", "B"], label="Choose the conversation you prefer:")
    next_button = gr.Button("Next Conversation")
    
    # Textbox to display the final summary of user choices.
    summary_output = gr.Textbox(label="Choice Summary", interactive=False)
    
    next_button.click(
        update_conversation,
        inputs=[flag_choice, conversation_index, user_choices],
        outputs=[conversation_index, existing_chat, new_chat, user_choices]
    )

    def show_summary(choices):
        """Summarize how many rounds each variant (A or B) was preferred."""
        choices_a = choices.count("A")
        choices_b = choices.count("B")
        return f"A: {choices_a}\nB: {choices_b}"
    
    gr.Button("Tampilkan Ringkasan").click(
        show_summary,
        inputs=[user_choices],
        outputs=[summary_output]
    )

# Launch the Gradio app.
if __name__ == "__main__":
    demo.launch()