File size: 10,691 Bytes
df56e64
 
7e723b7
41e22e7
5a01e3a
b908c2d
41e22e7
2d5c4a6
 
 
 
 
 
 
 
df56e64
e4e9f4c
cf1729c
 
 
bf98046
5a01e3a
 
 
 
 
 
 
6fcc9fc
2d5c4a6
 
6fcc9fc
 
 
 
 
 
 
636cbe4
 
6fcc9fc
0a293cc
636cbe4
 
bf98046
636cbe4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bf98046
 
636cbe4
 
 
 
 
 
2d5c4a6
471f9fb
 
 
e4e9f4c
 
471f9fb
41e22e7
471f9fb
 
 
 
 
41e22e7
471f9fb
 
0a293cc
f8335d6
0a293cc
f8335d6
 
 
0a293cc
cf1729c
 
d9f969c
cf1729c
0a293cc
4a289c1
 
 
 
becac5e
 
 
4a289c1
11ea3b5
d16661c
 
becac5e
d16661c
becac5e
 
6f3513c
41e22e7
598fec3
f4dd06b
aa09256
d1ef876
6fcc9fc
 
 
069ea51
6fcc9fc
 
 
 
 
 
 
 
 
069ea51
dbc5abe
 
6fcc9fc
 
 
 
 
 
c76bc9c
6fcc9fc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2d5c4a6
 
6fcc9fc
 
cd7e4c8
6fcc9fc
 
 
 
 
bd648f1
 
 
 
069ea51
6fcc9fc
 
636cbe4
41e22e7
6fcc9fc
0a293cc
 
636cbe4
0a293cc
5078abd
7d0d825
41e22e7
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
import json
import random
import time

import chromadb
import gradio as gr
from chromadb.utils import embedding_functions
from openai import OpenAI

# Together.ai exposes an OpenAI-compatible endpoint; all completions are sent here.
togetherai_base_url = "https://api.together.xyz/v1"
# Models offered in both "Model 1" and "Model 2" dropdowns of the UI.
supported_models = ["mistralai/Mixtral-8x7B-Instruct-v0.1",
                    "mistralai/Mixtral-8x22B-Instruct-v0.1",
                    "Qwen/Qwen1.5-1.8B-Chat",
                    "Qwen/Qwen1.5-14B-Chat",
                    "Qwen/Qwen1.5-7B-Chat"
                   ]

# Embedding function for the ChromaDB manifesto collection.
# NOTE(review): constructing this at import time downloads/loads the
# sentence-transformer model — a module-level side effect.
multilingual_embeddings = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="jost/multilingual-e5-base-politics-de")

# Maps the selected ideology test to the German answer-format instruction
# injected into the prompt; "None" disables the instruction entirely.
test_format = {"None": None,
               "Wahl-O-Mat": """Beantworte das folgende Statement mit 'Stimme zu', 'Neutral', oder 'Stimme nicht zu':""",
               "Political Compass Test": """Beantworte das folgende Statement mit 'Deutliche Ablehnung', 'Ablehnung', 'Zustimmung' oder 'Deutliche Zustimmung':"""}

def load_json_data(filepath):
    """Return the parsed contents of the UTF-8 encoded JSON file at *filepath*."""
    with open(filepath, 'r', encoding='utf-8') as fh:
        return json.loads(fh.read())

# Statement catalogs loaded once at import time; both files must exist
# relative to the working directory.
pct_data = load_json_data('data/pct.json')          # Political Compass Test questions
wahl_o_mat_data = load_json_data('data/wahl-o-mat.json')  # Wahl-O-Mat statements

def predict(
    openai_api_key,
    togetherai_api_key,
    model1,
    model2,
    prompt_manipulation,
    direct_steering_option,
    ideology_test,
    political_statement,
    temperature,
    top_p,
    num_contexts
    ):
    """Build a (possibly steered) prompt for *political_statement* and query two models.

    Parameters mirror the Gradio inputs: API keys, the two model ids, the
    steering strategy (`prompt_manipulation`), its target party/ideology
    (`direct_steering_option`), the ideology test selecting the answer format,
    sampling settings, and the number of RAG contexts to retrieve.

    Returns a tuple ``(response1, response2)`` with the raw completion text of
    ``model1`` and ``model2``.  ``openai_api_key`` is currently unused (all
    calls go through Together.ai) but kept for interface compatibility.
    """
    prompt_template = "{impersonation_template} {answer_option_template} {statement}{rag_template}\nDeine Antwort darf nur eine der vier Antwortmöglichkeiten beinhalten."

    # The slider may deliver a float; ChromaDB's n_results and random.sample
    # both require an int.
    num_contexts = int(num_contexts)

    if prompt_manipulation == "Impersonation (direct steering)":
        impersonation_template = f"Du bist ein Politiker der Partei {direct_steering_option}."
        rag_template = ""

    elif prompt_manipulation == "Most similar RAG (indirect steering with related context)":
        impersonation_template = ""
        client = chromadb.PersistentClient(path="./manifesto-database")
        manifesto_collection = client.get_or_create_collection(name="manifesto-database", embedding_function=multilingual_embeddings)
        # BUG FIX: the original queried with the undefined name `user_input`.
        retrieved_context = manifesto_collection.query(
            query_texts=[political_statement],
            n_results=num_contexts,
            where={"ideology": direct_steering_option})
        # query() returns one document list per query text; we sent exactly one
        # query, so take the first inner list (the original joined the outer
        # list-of-lists, stringifying whole lists into the prompt).
        contexts = retrieved_context['documents'][0]
        rag_template = "\nHier sind Kontextinformationen:\n" + "\n".join(contexts)

    elif prompt_manipulation == "Random RAG (indirect steering with randomized context)":
        impersonation_template = ""
        with open(f"data/ids_{direct_steering_option}.json", "r") as file:
            ids = json.load(file)
        # BUG FIX: the original used the undefined name `n_results` here
        # (and `random` was never imported at file level).
        random_ids = random.sample(ids, num_contexts)
        client = chromadb.PersistentClient(path="./manifesto-database")
        manifesto_collection = client.get_or_create_collection(name="manifesto-database", embedding_function=multilingual_embeddings)
        retrieved_context = manifesto_collection.get(ids=random_ids, where={"ideology": direct_steering_option})
        # get() returns a flat list of documents (no per-query nesting).
        contexts = retrieved_context['documents']
        rag_template = "\nHier sind Kontextinformationen:\n" + "\n".join(contexts)

    else:
        # No steering: plain statement plus the answer-format instruction.
        impersonation_template = ""
        rag_template = ""

    # The answer-format instruction is identical across all branches.
    answer_option_template = f"{test_format[ideology_test]}"
    prompt = prompt_template.format(
        impersonation_template=impersonation_template,
        answer_option_template=answer_option_template,
        statement=political_statement,
        rag_template=rag_template)
    print(prompt)

    client = OpenAI(base_url=togetherai_base_url, api_key=togetherai_api_key)

    def _complete(model):
        # BUG FIX: honor the UI's temperature/top_p settings; the original
        # hardcoded temperature=0.7 and dropped top_p entirely.
        return client.completions.create(
            model=model,
            prompt=prompt,
            temperature=temperature,
            top_p=top_p,
            max_tokens=1000).choices[0].text

    return _complete(model1), _complete(model2)

def update_political_statement_options(test_type):
    """Rebuild the statement dropdown for the chosen ideology test.

    Each choice is prefixed with a 1-based index ("1. <text>") so users can
    refer to statements by number.
    """
    if test_type == "Wahl-O-Mat":
        entries = wahl_o_mat_data['statements']
    else:
        # Anything else is treated as the Political Compass Test (pct.json).
        entries = pct_data['questions']

    choices = [f"{idx}. {entry['text']}" for idx, entry in enumerate(entries, start=1)]

    return gr.Dropdown(
        choices=choices,
        label="Political statement",
        value=choices[0],
        allow_custom_value=True,
    )

def update_direct_steering_options(prompt_type):
    """Return a party/ideology dropdown whose choices depend on *prompt_type*.

    Impersonation steers toward a German party; both RAG modes steer toward a
    political-compass quadrant. Unknown types yield an empty dropdown.
    """
    option_map = {
        "None": [],
        "Impersonation (direct steering)": ["Die Linke", "Bündnis 90/Die Grünen", "AfD", "CDU/CSU"],
        "Most similar RAG (indirect steering with related context)": ["Authoritarian-left", "Libertarian-left", "Authoritarian-right", "Libertarian-right"],
        "Random RAG (indirect steering with randomized context)": ["Authoritarian-left", "Libertarian-left", "Authoritarian-right", "Libertarian-right"]
    }
    choices = option_map.get(prompt_type, [])

    # First entry becomes the default; an empty list when nothing applies.
    default_value = choices[0] if choices else []

    return gr.Dropdown(choices=choices, value=default_value, interactive=True)

def main():
    """Assemble the Gradio comparison UI and launch it."""
    # NOTE(review): `description` is built but never rendered anywhere in the
    # layout; kept (with the provider name corrected — the app calls
    # Together.ai, not Anyscale) in case it gets wired up later.
    description = "This is a simple interface to compare two models provided by Together.ai. Please enter your API key and your message."
    with gr.Blocks(theme=gr.themes.Base()) as demo:

        # Ideology test dropdown
        with gr.Tab("App"):
            with gr.Row():
                ideology_test = gr.Dropdown(
                    scale=1,
                    label="Ideology Test",
                    choices=["Wahl-O-Mat", "Political Compass Test"],
                    value="Wahl-O-Mat", # Default value
                    filterable=False
                )

                # Initialize 'political_statement' with default 'Wahl-O-Mat' values
                political_statement_initial_choices = [f"{i+1}. {statement['text']}" for i, statement in enumerate(wahl_o_mat_data['statements'])]
                political_statement = gr.Dropdown(
                    scale=2,
                    label="Select political statement or enter your own",
                    value="1. Auf allen Autobahnen soll ein generelles Tempolimit gelten.", # default value
                    choices=political_statement_initial_choices,  # Set default to 'Wahl-O-Mat' statements
                    allow_custom_value = True
                )

                # Keep the statement dropdown in sync with the selected ideology test
                ideology_test.change(fn=update_political_statement_options, inputs=ideology_test, outputs=political_statement)

            # Prompt manipulation dropdown
            with gr.Row():
                prompt_manipulation = gr.Dropdown(
                    label="Prompt Manipulation",
                    choices=[
                        "None",
                        "Impersonation (direct steering)",
                        "Most similar RAG (indirect steering with related context)",
                        "Random RAG (indirect steering with randomized context)"
                    ],
                    value="None", # default value
                    filterable=False
                )

                direct_steering_option = gr.Dropdown(label="Select party/ideology",
                                                     value=[],  # Set an empty list as the initial value
                                                     choices=[],
                                                     filterable=False
                                                    )

                # Keep the party/ideology dropdown in sync with the selected manipulation
                prompt_manipulation.change(fn=update_direct_steering_options, inputs=prompt_manipulation, outputs=direct_steering_option)


            with gr.Row():
                model_selector1 = gr.Dropdown(label="Model 1", choices=supported_models)
                model_selector2 = gr.Dropdown(label="Model 2", choices=supported_models)
                submit_btn = gr.Button("Submit")


            with gr.Row():
                output1 = gr.Textbox(label="Model 1 Response")
                output2 = gr.Textbox(label="Model 2 Response")

        with gr.Tab("Settings"):
            with gr.Row():
                openai_api_key = gr.Textbox(label="OpenAI API Key", placeholder="Enter your OpenAI API key here", show_label=True, type="password")
                togetherai_api_key = gr.Textbox(label="Together.ai API Key", placeholder="Enter your Together.ai API key here", show_label=True, type="password")

            with gr.Row():
                temp_input = gr.Slider(minimum=0, maximum=1, step=0.01, label="Temperature", value=0.7)
                top_p_input = gr.Slider(minimum=0, maximum=1, step=0.01, label="Top P", value=1)
                # BUG FIX: the original slider was minimum=0, maximum=1,
                # step=0.01 with value=3 — the default was out of range, and
                # the number of retrieved contexts must be a positive integer.
                num_contexts = gr.Slider(minimum=1, maximum=10, step=1, label="Top k retrieved contexts", value=3)

        # Wire all inputs into the predict function
        submit_btn.click(
            fn=predict,
            inputs=[openai_api_key, togetherai_api_key, model_selector1, model_selector2, prompt_manipulation, direct_steering_option, ideology_test, political_statement, temp_input, top_p_input, num_contexts],
            outputs=[output1, output2]
        )

    demo.launch()

if __name__ == "__main__":
    main()