nawhgnuj committed on
Commit
4111717
·
verified ·
1 Parent(s): 5917ab1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -140
app.py CHANGED
@@ -7,110 +7,10 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStream
7
  import gradio as gr
8
  from threading import Thread
9
 
10
- TRUMP_MODEL = "nawhgnuj/DonaldTrump-Llama3.1-8B-Chat"
11
- HARRIS_MODEL = "nawhgnuj/KamalaHarris-Llama-3.1-8B-Chat"
12
- HF_TOKEN = os.environ.get("HF_TOKEN", None)
13
-
14
- TITLE = "<h1 style='text-align: center;'>Trump vs Harris Debate Chatbot</h1>"
15
-
16
- TRUMP_AVATAR = "https://upload.wikimedia.org/wikipedia/commons/5/56/Donald_Trump_official_portrait.jpg"
17
- HARRIS_AVATAR = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/41/Kamala_Harris_Vice_Presidential_Portrait.jpg/640px-Kamala_Harris_Vice_Presidential_Portrait.jpg"
18
-
19
- CSS = """
20
- .chat-container {
21
- height: 600px;
22
- overflow-y: auto;
23
- padding: 10px;
24
- background-color: white;
25
- border: 1px solid #ddd;
26
- border-radius: 5px;
27
- }
28
- .message {
29
- margin-bottom: 10px;
30
- padding: 10px;
31
- border-radius: 5px;
32
- display: flex;
33
- align-items: start;
34
- }
35
- .user-message {
36
- background-color: #f0f0f0;
37
- color: black;
38
- justify-content: flex-end;
39
- }
40
- .trump-message {
41
- background-color: #B71C1C;
42
- color: white;
43
- }
44
- .harris-message {
45
- background-color: #1565C0;
46
- color: white;
47
- }
48
- .avatar {
49
- width: 40px;
50
- height: 40px;
51
- border-radius: 50%;
52
- object-fit: cover;
53
- margin-right: 10px;
54
- }
55
- .message-content {
56
- flex-grow: 1;
57
- }
58
- """
59
-
60
- device = "cuda" if torch.cuda.is_available() else "cpu"
61
-
62
- quantization_config = BitsAndBytesConfig(
63
- load_in_4bit=True,
64
- bnb_4bit_compute_dtype=torch.bfloat16,
65
- bnb_4bit_use_double_quant=True,
66
- bnb_4bit_quant_type="nf4")
67
-
68
- trump_tokenizer = AutoTokenizer.from_pretrained(TRUMP_MODEL)
69
- trump_model = AutoModelForCausalLM.from_pretrained(
70
- TRUMP_MODEL,
71
- torch_dtype=torch.bfloat16,
72
- device_map="auto",
73
- quantization_config=quantization_config)
74
-
75
- harris_tokenizer = AutoTokenizer.from_pretrained(HARRIS_MODEL)
76
- harris_model = AutoModelForCausalLM.from_pretrained(
77
- HARRIS_MODEL,
78
- torch_dtype=torch.bfloat16,
79
- device_map="auto",
80
- quantization_config=quantization_config)
81
-
82
- # Set pad_token_id for both tokenizers
83
- for tokenizer in [trump_tokenizer, harris_tokenizer]:
84
- if tokenizer.pad_token is None:
85
- tokenizer.pad_token = tokenizer.eos_token
86
- tokenizer.pad_token_id = tokenizer.eos_token_id
87
-
88
- TRUMP_SYSTEM_PROMPT = """You are a Donald Trump chatbot participating in a debate. Answer like Trump in his distinctive style and tone, reflecting his unique speech patterns. In every response:
89
- 1. Use strong superlatives like 'tremendous,' 'fantastic,' and 'the best.'
90
- 2. Attack opponents where appropriate (e.g., 'fake news media,' 'radical left').
91
- 3. Focus on personal successes ('nobody's done more than I have').
92
- 4. Keep sentences short and impactful.
93
- 5. Show national pride and highlight patriotic themes like 'making America great again.'
94
- 6. Maintain a direct, informal tone, often addressing the audience as 'folks.'
95
- 7. Dismiss opposing views bluntly.
96
- 8. Repeat key phrases for emphasis.
97
-
98
- Importantly, always respond to and rebut the previous speaker's points in Trump's style. Keep responses concise and avoid unnecessary repetition. Remember, you're in a debate, so be assertive and challenge your opponent's views."""
99
-
100
- HARRIS_SYSTEM_PROMPT = """You are a Kamala Harris chatbot participating in a debate. Answer like Harris in her style and tone. In every response:
101
- 1. Maintain a composed and professional demeanor.
102
- 2. Use clear, articulate language to explain complex ideas.
103
- 3. Emphasize your experience as a prosecutor and senator.
104
- 4. Focus on policy details and their potential impact on Americans.
105
- 5. Use personal anecdotes or stories to connect with the audience when appropriate.
106
- 6. Stress the importance of unity and collaboration.
107
- 7. Challenge your opponent's views firmly but respectfully.
108
- 8. Use phrases like "Let me be clear" or "The American people deserve better" for emphasis.
109
-
110
- Crucially, always respond to and rebut the previous speaker's points in Harris's style. Keep responses concise and impactful. Remember, you're in a debate, so be assertive in presenting your views and questioning your opponent's statements."""
111
 
112
  @spaces.GPU()
113
- def stream_chat(message: str, history: list, model, tokenizer, system_prompt):
114
  conversation = [
115
  {"role": "system", "content": system_prompt}
116
  ]
@@ -125,29 +25,21 @@ def stream_chat(message: str, history: list, model, tokenizer, system_prompt):
125
  input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)
126
  attention_mask = torch.ones_like(input_ids)
127
 
128
- streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
129
-
130
- generate_kwargs = dict(
131
- input_ids=input_ids,
132
- attention_mask=attention_mask,
133
- max_new_tokens=1024,
134
- do_sample=True,
135
- top_p=1.0,
136
- top_k=20,
137
- temperature=0.8,
138
- pad_token_id=tokenizer.pad_token_id,
139
- eos_token_id=tokenizer.eos_token_id,
140
- streamer=streamer,
141
- )
142
-
143
  with torch.no_grad():
144
- thread = Thread(target=model.generate, kwargs=generate_kwargs)
145
- thread.start()
146
-
147
- buffer = ""
148
- for new_text in streamer:
149
- buffer += new_text
150
- yield buffer
 
 
 
 
 
 
 
151
 
152
  def add_text(history, text):
153
  history.append(("User", text))
@@ -166,32 +58,19 @@ def debate(history):
166
  if debater == "Trump":
167
  opponent_message = harris_history[-1][1] if harris_history else ""
168
  debate_context = f"Your opponent, Kamala Harris, said: '{opponent_message}'. Respond to this and address the original question: {user_message}"
169
- trump_response = stream_chat(debate_context, trump_history, trump_model, trump_tokenizer, TRUMP_SYSTEM_PROMPT)
170
- response = next(trump_response)
171
  history.append(("Trump", response))
172
  print(f"Trump response added: {response}") # Debug output
173
  else:
174
  opponent_message = trump_history[-1][1] if trump_history else ""
175
  debate_context = f"Your opponent, Donald Trump, said: '{opponent_message}'. Respond to this and address the original question: {user_message}"
176
- harris_response = stream_chat(debate_context, harris_history, harris_model, harris_tokenizer, HARRIS_SYSTEM_PROMPT)
177
- response = next(harris_response)
178
  history.append(("Harris", response))
179
  print(f"Harris response added: {response}") # Debug output
180
 
181
  yield history
182
 
183
- def format_message(sender, message):
184
- if sender == "User":
185
- return f'<div class="message user-message"><div class="message-content">{message}</div></div>'
186
- elif sender == "Trump":
187
- return f'<div class="message trump-message"><img src="{TRUMP_AVATAR}" class="avatar" alt="Trump"><div class="message-content">{message}</div></div>'
188
- elif sender == "Harris":
189
- return f'<div class="message harris-message"><img src="{HARRIS_AVATAR}" class="avatar" alt="Harris"><div class="message-content">{message}</div></div>'
190
-
191
- def format_chat_history(history):
192
- formatted = "".join([format_message(sender, message) for sender, message in history])
193
- print(f"Formatted chat history: {formatted}") # Debug output
194
- return formatted
195
 
196
  with gr.Blocks(css=CSS, theme=gr.themes.Default()) as demo:
197
  gr.HTML(TITLE)
 
7
  import gradio as gr
8
  from threading import Thread
9
 
10
+ # ... (preceding code unchanged)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  @spaces.GPU()
13
+ def generate_response(message: str, history: list, model, tokenizer, system_prompt):
14
  conversation = [
15
  {"role": "system", "content": system_prompt}
16
  ]
 
25
  input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)
26
  attention_mask = torch.ones_like(input_ids)
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  with torch.no_grad():
29
+ output = model.generate(
30
+ input_ids=input_ids,
31
+ attention_mask=attention_mask,
32
+ max_new_tokens=1024,
33
+ do_sample=True,
34
+ top_p=1.0,
35
+ top_k=20,
36
+ temperature=0.8,
37
+ pad_token_id=tokenizer.pad_token_id,
38
+ eos_token_id=tokenizer.eos_token_id,
39
+ )
40
+
41
+ response = tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
42
+ return response.strip()
43
 
44
  def add_text(history, text):
45
  history.append(("User", text))
 
58
  if debater == "Trump":
59
  opponent_message = harris_history[-1][1] if harris_history else ""
60
  debate_context = f"Your opponent, Kamala Harris, said: '{opponent_message}'. Respond to this and address the original question: {user_message}"
61
+ response = generate_response(debate_context, trump_history, trump_model, trump_tokenizer, TRUMP_SYSTEM_PROMPT)
 
62
  history.append(("Trump", response))
63
  print(f"Trump response added: {response}") # Debug output
64
  else:
65
  opponent_message = trump_history[-1][1] if trump_history else ""
66
  debate_context = f"Your opponent, Donald Trump, said: '{opponent_message}'. Respond to this and address the original question: {user_message}"
67
+ response = generate_response(debate_context, harris_history, harris_model, harris_tokenizer, HARRIS_SYSTEM_PROMPT)
 
68
  history.append(("Harris", response))
69
  print(f"Harris response added: {response}") # Debug output
70
 
71
  yield history
72
 
73
+ # ... (remaining code unchanged)
 
 
 
 
 
 
 
 
 
 
 
74
 
75
  with gr.Blocks(css=CSS, theme=gr.themes.Default()) as demo:
76
  gr.HTML(TITLE)