File size: 2,024 Bytes
201dd80
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
# sandbox_runner.py

import gradio as gr
from data_handler import upload_test_data
from criteria_handler import select_evaluation_criteria
from model_handler import select_evaluators
from score_handler import handle_analysis
from random_sample_tab import random_sample_tab

def run_sandbox():
    """Build and launch the Atla Testing Sandbox Gradio app.

    Wires four pipeline stages (data upload -> criteria selection ->
    evaluator selection -> result analysis) into a tabbed Blocks UI,
    then starts the Gradio server (blocking call).
    """
    # CSS applied to the Blocks app: truncates long dataframe cell
    # contents with an ellipsis so tables stay readable.
    truncate_css = """
    .truncate_cells table {
        table-layout: fixed !important;
        width: 100% !important;
    }
    .truncate_cells table td,
    .truncate_cells table th {
        white-space: nowrap !important;
        overflow: hidden !important;
        text-overflow: ellipsis !important;
        max-width: 200px !important;
        text-align: left !important;
        vertical-align: top !important;
    }
    """

    with gr.Blocks(css=truncate_css) as demo:
        gr.Markdown("# Atla Testing Sandbox")

        with gr.Tabs():
            # Pre-built tab with random sample evaluation.
            random_sample_tab()

            # Custom-dataset workflow tab.
            with gr.TabItem("Custom Dataset"):
                # Shared state threaded through the pipeline stages:
                # the uploaded DataFrame, the evaluation prompt, and a
                # flag marking evaluation completion.
                df_state = gr.State(value=None)
                prompt_state = gr.State(value=None)
                evaluation_complete = gr.State(value=None)

                # Stage 1: data upload (returns the group and the state
                # object used by subsequent stages).
                data_upload_group, df_state = upload_test_data(df_state)

                # Stage 2: evaluation criteria selection.
                (criteria_group,
                 df_state,
                 prompt_state,
                 save_prompt_button) = select_evaluation_criteria(
                    data_upload_group, df_state, prompt_state)

                # Stage 3: evaluator model selection.
                (model_selection_group,
                 df_state,
                 analyze_results_button) = select_evaluators(
                    criteria_group, df_state, prompt_state, save_prompt_button)

                # Stage 4: score analysis of the evaluation results.
                handle_analysis(df_state, model_selection_group,
                                analyze_results_button)

    # Start the Gradio server (blocks until the app is stopped).
    demo.launch()

# Script entry point: build and launch the sandbox UI when executed
# directly (importing this module has no side effects).
if __name__ == "__main__":
    run_sandbox()