Update app.py
app.py
CHANGED
@@ -46,7 +46,7 @@ def predict(
         impersonation_template = f"Du bist ein Politiker der Partei {direct_steering_option}."
         answer_option_template = f"{test_format[ideology_test]}"
         rag_template = ""
-        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement, rag_template=rag_template)
+        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement[3:], rag_template=rag_template)
         print(prompt)
 
     elif prompt_manipulation == "Most similar RAG (indirect steering with related context)":
@@ -59,7 +59,7 @@ def predict(
         contexts = [context for context in retrieved_context['documents']]
         rag_template = f"\nHier sind Kontextinformationen:\n" + "\n".join([f"{context}" for context in contexts])
 
-        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement, rag_template=rag_template)
+        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement[3:], rag_template=rag_template)
         print(prompt)
 
     elif prompt_manipulation == "Random RAG (indirect steering with randomized context)":
@@ -76,7 +76,7 @@ def predict(
         contexts = [context for context in retrieved_context['documents']]
         rag_template = f"\nHier sind Kontextinformationen:\n" + "\n".join([f"{context}" for context in contexts])
 
-        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement, rag_template=rag_template)
+        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement[3:], rag_template=rag_template)
         print(prompt)
 
     else:
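For context, a minimal, self-contained sketch (not taken from the Space itself) of what the changed lines do. It assumes a prompt_template containing the four placeholders named in the .format() call, and it assumes each political_statement string starts with a 3-character index prefix such as "1. ", which the new [3:] slice strips before the statement is inserted into the prompt; all example values below are hypothetical.

# Minimal sketch of the prompt assembly; all values here are hypothetical.
prompt_template = (
    "{impersonation_template}\n"
    "{rag_template}\n"
    "Statement: {statement}\n"
    "{answer_option_template}"
)

# Assumed data shape: statements carry a 3-character index prefix like "1. ".
political_statement = "1. Die Steuern sollten gesenkt werden."
impersonation_template = "Du bist ein Politiker der Partei XYZ."
answer_option_template = "Antworte mit: stimme zu / stimme nicht zu"
rag_template = ""

# The [3:] slice drops the leading index prefix before formatting.
prompt = prompt_template.format(
    impersonation_template=impersonation_template,
    answer_option_template=answer_option_template,
    statement=political_statement[3:],
    rag_template=rag_template,
)
print(prompt)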