import os
import random

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import gradio as gr
import spaces

TRUMP_MODEL = "nawhgnuj/DonaldTrump-Llama3.1-8B-Chat"
HARRIS_MODEL = "nawhgnuj/KamalaHarris-Llama-3.1-8B-Chat"

# Optional token for gated/private Hub repos; forwarded to from_pretrained below.
HF_TOKEN = os.environ.get("HF_TOKEN", None)

TITLE = "<h1 style='text-align: center;'>Trump vs Harris Debate Chatbot</h1>"
" TRUMP_AVATAR = "https://upload.wikimedia.org/wikipedia/commons/5/56/Donald_Trump_official_portrait.jpg" HARRIS_AVATAR = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/41/Kamala_Harris_Vice_Presidential_Portrait.jpg/640px-Kamala_Harris_Vice_Presidential_Portrait.jpg" CSS = """ .chat-container { height: 600px; overflow-y: auto; padding: 10px; background-color: white; border: 1px solid #ddd; border-radius: 5px; } .message { margin-bottom: 10px; padding: 10px; border-radius: 5px; display: flex; align-items: start; } .user-message { background-color: #f0f0f0; color: black; justify-content: flex-end; } .trump-message { background-color: #B71C1C; color: white; } .harris-message { background-color: #1565C0; color: white; } .avatar { width: 40px; height: 40px; border-radius: 50%; object-fit: cover; margin-right: 10px; } .message-content { flex-grow: 1; } """ device = "cuda" if torch.cuda.is_available() else "cpu" quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4") trump_tokenizer = AutoTokenizer.from_pretrained(TRUMP_MODEL) trump_model = AutoModelForCausalLM.from_pretrained( TRUMP_MODEL, torch_dtype=torch.bfloat16, device_map="auto", quantization_config=quantization_config) harris_tokenizer = AutoTokenizer.from_pretrained(HARRIS_MODEL) harris_model = AutoModelForCausalLM.from_pretrained( HARRIS_MODEL, torch_dtype=torch.bfloat16, device_map="auto", quantization_config=quantization_config) # Set pad_token_id for both tokenizers for tokenizer in [trump_tokenizer, harris_tokenizer]: if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token tokenizer.pad_token_id = tokenizer.eos_token_id TRUMP_SYSTEM_PROMPT = """You are a Donald Trump chatbot participating in a debate. Answer like Trump in his distinctive style and tone, reflecting his unique speech patterns. In every response: 1. Use strong superlatives like 'tremendous,' 'fantastic,' and 'the best.' 2. Attack opponents where appropriate (e.g., 'fake news media,' 'radical left'). 3. Focus on personal successes ('nobody's done more than I have'). 4. Keep sentences short and impactful. 5. Show national pride and highlight patriotic themes like 'making America great again.' 6. Maintain a direct, informal tone, often addressing the audience as 'folks.' 7. Dismiss opposing views bluntly. 8. Repeat key phrases for emphasis. Importantly, always respond to and rebut the previous speaker's points in Trump's style. Keep responses concise and avoid unnecessary repetition. Remember, you're in a debate, so be assertive and challenge your opponent's views.""" HARRIS_SYSTEM_PROMPT = """You are a Kamala Harris chatbot participating in a debate. Answer like Harris in her style and tone. In every response: 1. Maintain a composed and professional demeanor. 2. Use clear, articulate language to explain complex ideas. 3. Emphasize your experience as a prosecutor and senator. 4. Focus on policy details and their potential impact on Americans. 5. Use personal anecdotes or stories to connect with the audience when appropriate. 6. Stress the importance of unity and collaboration. 7. Challenge your opponent's views firmly but respectfully. 8. Use phrases like "Let me be clear" or "The American people deserve better" for emphasis. Crucially, always respond to and rebut the previous speaker's points in Harris's style. Keep responses concise and impactful. 
Remember, you're in a debate, so be assertive in presenting your views and questioning your opponent's statements.""" @spaces.GPU() def generate_response(message: str, history: list, model, tokenizer, system_prompt): conversation = [ {"role": "system", "content": system_prompt} ] for prompt, answer in history: conversation.extend([ {"role": "user", "content": prompt}, {"role": "assistant", "content": answer}, ]) conversation.append({"role": "user", "content": message}) input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device) attention_mask = torch.ones_like(input_ids) with torch.no_grad(): output = model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=1024, do_sample=True, top_p=1.0, top_k=20, temperature=0.8, pad_token_id=tokenizer.pad_token_id, eos_token_id=tokenizer.eos_token_id, ) response = tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True) return response.strip() def add_text(history, text): history.append(("User", text)) print(f"User input added: {text}") # Debug output return history, "" def debate(history): user_message = history[-1][1] trump_history = [(msg, resp) for sender, msg in history[:-1] for resp in [msg] if sender == "Trump"] harris_history = [(msg, resp) for sender, msg in history[:-1] for resp in [msg] if sender == "Harris"] debaters = ["Trump", "Harris"] random.shuffle(debaters) for debater in debaters: if debater == "Trump": opponent_message = harris_history[-1][1] if harris_history else "" debate_context = f"Your opponent, Kamala Harris, said: '{opponent_message}'. Respond to this and address the original question: {user_message}" response = generate_response(debate_context, trump_history, trump_model, trump_tokenizer, TRUMP_SYSTEM_PROMPT) history.append(("Trump", response)) print(f"Trump response added: {response}") # Debug output else: opponent_message = trump_history[-1][1] if trump_history else "" debate_context = f"Your opponent, Donald Trump, said: '{opponent_message}'. Respond to this and address the original question: {user_message}" response = generate_response(debate_context, harris_history, harris_model, harris_tokenizer, HARRIS_SYSTEM_PROMPT) history.append(("Harris", response)) print(f"Harris response added: {response}") # Debug output yield history def format_message(sender, message): if sender == "User": return f'
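
# `state` (defined in the UI section below) holds the running transcript as
# flat (sender, message) tuples, e.g. (illustrative values, not model output):
#   [("User", "What's your stance on immigration?"),
#    ("Trump", "Folks, nobody's done more for the border than I have..."),
#    ("Harris", "Let me be clear: the American people deserve better...")]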

def build_model_history(history, speaker):
    """Rebuild the (prompt, answer) pairs that generate_response expects
    from the flat (sender, message) transcript: each user question is
    paired with the given speaker's reply that followed it."""
    pairs = []
    last_user = None
    for sender, message in history:
        if sender == "User":
            last_user = message
        elif sender == speaker and last_user is not None:
            pairs.append((last_user, message))
    return pairs


def debate(history):
    user_message = history[-1][1]
    debaters = ["Trump", "Harris"]
    random.shuffle(debaters)

    for debater in debaters:
        if debater == "Trump":
            # Look up the opponent's most recent reply from the live history,
            # so the second speaker can rebut a reply made earlier this round.
            harris_replies = [msg for sender, msg in history if sender == "Harris"]
            opponent_message = harris_replies[-1] if harris_replies else ""
            debate_context = (
                f"Your opponent, Kamala Harris, said: '{opponent_message}'. "
                f"Respond to this and address the original question: {user_message}"
            )
            response = generate_response(
                debate_context,
                build_model_history(history, "Trump"),
                trump_model, trump_tokenizer, TRUMP_SYSTEM_PROMPT,
            )
            history.append(("Trump", response))
            print(f"Trump response added: {response}")  # Debug output
        else:
            trump_replies = [msg for sender, msg in history if sender == "Trump"]
            opponent_message = trump_replies[-1] if trump_replies else ""
            debate_context = (
                f"Your opponent, Donald Trump, said: '{opponent_message}'. "
                f"Respond to this and address the original question: {user_message}"
            )
            response = generate_response(
                debate_context,
                build_model_history(history, "Harris"),
                harris_model, harris_tokenizer, HARRIS_SYSTEM_PROMPT,
            )
            history.append(("Harris", response))
            print(f"Harris response added: {response}")  # Debug output
        yield history


def format_message(sender, message):
    if sender == "User":
        return f'''<div class="message user-message">
    <div class="message-content">{message}</div>
</div>'''
    elif sender == "Trump":
        return f'''<div class="message trump-message">
    <img src="{TRUMP_AVATAR}" class="avatar" alt="Donald Trump">
    <div class="message-content"><strong>Trump</strong><br>{message}</div>
</div>'''
    elif sender == "Harris":
        return f'''<div class="message harris-message">
    <img src="{HARRIS_AVATAR}" class="avatar" alt="Kamala Harris">
    <div class="message-content"><strong>Harris</strong><br>{message}</div>
</div>'''


def format_chat_history(history):
    formatted = "".join([format_message(sender, message) for sender, message in history])
    print(f"Formatted chat history: {formatted}")  # Debug output
    return formatted


with gr.Blocks(css=CSS, theme=gr.themes.Default()) as demo:
    gr.HTML(TITLE)
    chat_interface = gr.HTML('<div class="chat-container"></div>')
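    # A plain gr.HTML panel is used instead of gr.Chatbot so the custom CSS
    # above can style each speaker's bubble and avatar; update_chat below
    # re-renders the whole transcript into this panel after each round.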
    msg = gr.Textbox(
        placeholder="Enter a topic for Trump and Harris to debate",
        container=False,
        scale=7,
    )
    with gr.Row():
        submit = gr.Button("Submit", scale=1, variant="primary")
        clear = gr.Button("Clear", scale=1)

    gr.Examples(
        examples=[
            ["What's your stance on immigration?"],
            ["How would you address climate change?"],
            ["What's your plan for healthcare reform?"],
        ],
        inputs=msg,
    )

    state = gr.State([])

    def update_chat(history):
        formatted_history = format_chat_history(history)
        print(f"Updating chat with: {formatted_history}")  # Debug output
        return gr.update(value=f'<div class="chat-container">{formatted_history}</div>')

    submit.click(add_text, [state, msg], [state, msg]).then(
        debate, state, state
    ).then(
        update_chat, state, chat_interface
    )
    clear.click(lambda: [], outputs=[state]).then(
        update_chat, state, chat_interface
    )
    msg.submit(add_text, [state, msg], [state, msg]).then(
        debate, state, state
    ).then(
        update_chat, state, chat_interface
    )

if __name__ == "__main__":
    demo.launch()
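
# Note: `debate` is a generator, yielding once per debater. On Gradio 3.x,
# generator event handlers only work with request queuing enabled; a minimal
# sketch, assuming such a version (queuing is on by default in Gradio 4+):
#
#   if __name__ == "__main__":
#       demo.queue().launch()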