"""Streamlit chat app: generates a 'Trump-style' reply with DialoGPT and
speaks it aloud via Google text-to-speech (gTTS)."""

import os

import streamlit as st
import torch
from gtts import gTTS
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline


@st.cache_resource
def load_generator():
    """Build the DialoGPT text-generation pipeline once per server process.

    Streamlit re-runs the whole script on every widget interaction; without
    caching, the model would be reloaded on each rerun, which is
    prohibitively slow for a ~350M-parameter model.
    """
    model_name = "microsoft/DialoGPT-medium"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        device=0 if torch.cuda.is_available() else -1,
    )


def generate_response(prompt):
    """Return a 'Trump-style' text reply (str) for the user's *prompt* (str)."""
    generator = load_generator()
    # Craft the prompt to encourage a Trump-like response.
    trump_prompt = f"Imagine you are Donald Trump. Respond in your style to: {prompt}"
    response = generator(
        trump_prompt,
        # max_new_tokens bounds only the reply; the old max_length=50 counted
        # the prompt too, so long inputs produced truncated or empty output.
        max_new_tokens=50,
        num_return_sequences=1,
        # temperature only takes effect under sampling; without do_sample=True
        # decoding is greedy and temperature is silently ignored.
        do_sample=True,
        temperature=0.6,
        # DialoGPT defines no pad token; reuse EOS to avoid a per-call warning.
        pad_token_id=generator.tokenizer.eos_token_id,
    )
    generated = response[0]["generated_text"]
    # The pipeline echoes the prompt at the start of generated_text; strip it
    # so only the model's actual reply is displayed and spoken.
    reply = generated[len(trump_prompt):].strip()
    return reply if reply else generated


def generate_audio(text):
    """Synthesize *text* to an MP3 file with gTTS and return the file path."""
    tts = gTTS(text=text, lang="en")
    audio_path = "output.mp3"
    tts.save(audio_path)
    return audio_path


# --- Streamlit app UI ---
st.title("Trump-like Chat Assistant")
st.write("Type in a question or statement, and receive a 'Trump-style' response in both text and audio!")

# placeholder= (not a default value) prevents the app from generating a reply
# to the literal string "Enter a message here" on first page load.
user_input = st.text_input("Your message:", placeholder="Enter a message here")

if user_input:
    # Generate and show the text reply.
    trump_response = generate_response(user_input)
    st.subheader("Trump-like Assistant (Text Response):")
    st.write(trump_response)

    # Convert the reply to audio and play it back.
    audio_output_path = generate_audio(trump_response)
    if os.path.exists(audio_output_path):
        with open(audio_output_path, "rb") as audio_file:
            audio_bytes = audio_file.read()
        st.subheader("Trump-like Assistant (Audio Response):")
        st.audio(audio_bytes, format="audio/mp3")
    else:
        st.error("Failed to generate audio. Please try again.")