Update app.py
app.py CHANGED
@@ -7,7 +7,107 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 import gradio as gr
 from threading import Thread
 
-
+TRUMP_MODEL = "nawhgnuj/DonaldTrump-Llama3.1-8B-Chat"
+HARRIS_MODEL = "nawhgnuj/KamalaHarris-Llama-3.1-8B-Chat"
+HF_TOKEN = os.environ.get("HF_TOKEN", None)
+
+TITLE = "<h1 style='text-align: center;'>Trump vs Harris Debate Chatbot</h1>"
+
+TRUMP_AVATAR = "https://upload.wikimedia.org/wikipedia/commons/5/56/Donald_Trump_official_portrait.jpg"
+HARRIS_AVATAR = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/41/Kamala_Harris_Vice_Presidential_Portrait.jpg/640px-Kamala_Harris_Vice_Presidential_Portrait.jpg"
+
+CSS = """
+.chat-container {
+    height: 600px;
+    overflow-y: auto;
+    padding: 10px;
+    background-color: white;
+    border: 1px solid #ddd;
+    border-radius: 5px;
+}
+.message {
+    margin-bottom: 10px;
+    padding: 10px;
+    border-radius: 5px;
+    display: flex;
+    align-items: start;
+}
+.user-message {
+    background-color: #f0f0f0;
+    color: black;
+    justify-content: flex-end;
+}
+.trump-message {
+    background-color: #B71C1C;
+    color: white;
+}
+.harris-message {
+    background-color: #1565C0;
+    color: white;
+}
+.avatar {
+    width: 40px;
+    height: 40px;
+    border-radius: 50%;
+    object-fit: cover;
+    margin-right: 10px;
+}
+.message-content {
+    flex-grow: 1;
+}
+"""
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+quantization_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_compute_dtype=torch.bfloat16,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_quant_type="nf4")
+
+trump_tokenizer = AutoTokenizer.from_pretrained(TRUMP_MODEL)
+trump_model = AutoModelForCausalLM.from_pretrained(
+    TRUMP_MODEL,
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+    quantization_config=quantization_config)
+
+harris_tokenizer = AutoTokenizer.from_pretrained(HARRIS_MODEL)
+harris_model = AutoModelForCausalLM.from_pretrained(
+    HARRIS_MODEL,
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+    quantization_config=quantization_config)
+
+# Set pad_token_id for both tokenizers
+for tokenizer in [trump_tokenizer, harris_tokenizer]:
+    if tokenizer.pad_token is None:
+        tokenizer.pad_token = tokenizer.eos_token
+        tokenizer.pad_token_id = tokenizer.eos_token_id
+
+TRUMP_SYSTEM_PROMPT = """You are a Donald Trump chatbot participating in a debate. Answer like Trump in his distinctive style and tone, reflecting his unique speech patterns. In every response:
+1. Use strong superlatives like 'tremendous,' 'fantastic,' and 'the best.'
+2. Attack opponents where appropriate (e.g., 'fake news media,' 'radical left').
+3. Focus on personal successes ('nobody's done more than I have').
+4. Keep sentences short and impactful.
+5. Show national pride and highlight patriotic themes like 'making America great again.'
+6. Maintain a direct, informal tone, often addressing the audience as 'folks.'
+7. Dismiss opposing views bluntly.
+8. Repeat key phrases for emphasis.
+
+Importantly, always respond to and rebut the previous speaker's points in Trump's style. Keep responses concise and avoid unnecessary repetition. Remember, you're in a debate, so be assertive and challenge your opponent's views."""
+
+HARRIS_SYSTEM_PROMPT = """You are a Kamala Harris chatbot participating in a debate. Answer like Harris in her style and tone. In every response:
+1. Maintain a composed and professional demeanor.
+2. Use clear, articulate language to explain complex ideas.
+3. Emphasize your experience as a prosecutor and senator.
+4. Focus on policy details and their potential impact on Americans.
+5. Use personal anecdotes or stories to connect with the audience when appropriate.
+6. Stress the importance of unity and collaboration.
+7. Challenge your opponent's views firmly but respectfully.
+8. Use phrases like "Let me be clear" or "The American people deserve better" for emphasis.
+
+Crucially, always respond to and rebut the previous speaker's points in Harris's style. Keep responses concise and impactful. Remember, you're in a debate, so be assertive in presenting your views and questioning your opponent's statements."""
 
 @spaces.GPU()
 def generate_response(message: str, history: list, model, tokenizer, system_prompt):
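The body of generate_response (new lines 114-169) is unchanged by this commit and therefore collapsed between the two hunks. For orientation only, here is a minimal sketch of how a streaming generator of this shape is typically built from the pieces imported above (TextIteratorStreamer and Thread). The helper name stream_reply, the sampling settings, and the history handling are assumptions, not the app's actual code.

```python
# Hypothetical sketch only: not the app's real generate_response, whose body is
# not shown in this diff. Assumes the model/tokenizer objects loaded above.
from threading import Thread

from transformers import TextIteratorStreamer


def stream_reply(message, history, model, tokenizer, system_prompt, max_new_tokens=256):
    # Fold the system prompt and prior turns into a chat-template prompt.
    conversation = [{"role": "system", "content": system_prompt}]
    for sender, text in history:  # history appears to hold (sender, message) pairs
        role = "user" if sender == "User" else "assistant"
        conversation.append({"role": role, "content": text})
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    # TextIteratorStreamer yields decoded text while generate() runs in a
    # background thread, so the caller can update the UI token by token.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    Thread(
        target=model.generate,
        kwargs=dict(
            input_ids=input_ids,
            streamer=streamer,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.pad_token_id,
        ),
    ).start()

    partial = ""
    for new_text in streamer:
        partial += new_text
        yield partial
```
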
@@ -70,7 +170,18 @@ def debate(history):
 
     yield history
 
-
+def format_message(sender, message):
+    if sender == "User":
+        return f'<div class="message user-message"><div class="message-content">{message}</div></div>'
+    elif sender == "Trump":
+        return f'<div class="message trump-message"><img src="{TRUMP_AVATAR}" class="avatar" alt="Trump"><div class="message-content">{message}</div></div>'
+    elif sender == "Harris":
+        return f'<div class="message harris-message"><img src="{HARRIS_AVATAR}" class="avatar" alt="Harris"><div class="message-content">{message}</div></div>'
+
+def format_chat_history(history):
+    formatted = "".join([format_message(sender, message) for sender, message in history])
+    print(f"Formatted chat history: {formatted}")  # Debug output
+    return formatted
 
 with gr.Blocks(css=CSS, theme=gr.themes.Default()) as demo:
     gr.HTML(TITLE)
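The diff view cuts off right after gr.HTML(TITLE), so the rest of the Blocks layout and the event wiring that connect debate() and format_chat_history() to the page are not visible here. Below is a minimal sketch of how those pieces could fit together; beyond the names already defined above (CSS, TITLE, debate, format_chat_history), every component name and call in it is an assumption, not the app's actual code.

```python
# Hypothetical sketch only: the real layout/event wiring is not shown in this diff.
import gradio as gr

with gr.Blocks(css=CSS, theme=gr.themes.Default()) as demo:
    gr.HTML(TITLE)
    chat_html = gr.HTML(elem_classes="chat-container")  # styled by the CSS block above
    topic = gr.Textbox(label="Debate topic", placeholder="Give the candidates something to argue about")
    start = gr.Button("Start debate")

    def run_debate(user_topic):
        history = [("User", user_topic)]
        # debate() is a generator that yields the growing history; re-render it
        # as HTML after every yielded update.
        for updated_history in debate(history):
            yield format_chat_history(updated_history)

    start.click(run_debate, inputs=topic, outputs=chat_html)

demo.launch()
```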