HassanDataSci committed on
Commit
f159a3c
·
verified ·
1 Parent(s): f20b251

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -50
app.py CHANGED
@@ -1,57 +1,30 @@
1
  import streamlit as st
2
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
- from gtts import gTTS
4
- import torch
5
- import os
6
 
7
- # Load DialoGPT model for conversational style
8
- model_name = "microsoft/DialoGPT-medium"
9
- tokenizer = AutoTokenizer.from_pretrained(model_name)
10
- model = AutoModelForCausalLM.from_pretrained(model_name)
11
 
12
- # Set up a text generation pipeline with the conversational model
13
- generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
 
14
 
15
def generate_response(prompt):
    """Return a Trump-style reply to *prompt*.

    Wraps the user message in a persona instruction, runs the module-level
    text-generation pipeline, and strips the instruction prefix so only the
    model's continuation is returned.

    Args:
        prompt: The user's raw message.

    Returns:
        The generated continuation text (prompt echo removed when present).
    """
    trump_prompt = f"Imagine you are Donald Trump. Respond in your style to: {prompt}"

    response = generator(
        trump_prompt,
        max_length=50,
        num_return_sequences=1,
        temperature=0.6,
        # BUG FIX: temperature is ignored under greedy decoding; sampling
        # must be enabled for it to have any effect.
        do_sample=True,
    )
    generated = response[0]["generated_text"]

    # BUG FIX: text-generation pipelines echo the input prompt, so the
    # persona instruction leaked into the displayed reply. Return only the
    # newly generated text.
    if generated.startswith(trump_prompt):
        return generated[len(trump_prompt):].lstrip()
    return generated
23
-
24
def generate_audio(text, lang="en", audio_path="output.mp3"):
    """Synthesize *text* to speech and save it as an MP3 file.

    Generalized: language and output path are now parameters whose defaults
    reproduce the original behavior (English, "output.mp3"), so existing
    callers are unaffected.

    Args:
        text: The text to speak.
        lang: gTTS language code (default "en").
        audio_path: Destination file path for the MP3 (default "output.mp3").

    Returns:
        The path the audio file was written to.
    """
    tts = gTTS(text=text, lang=lang)
    tts.save(audio_path)
    return audio_path
30
-
31
# --- Streamlit UI ---
st.title("Trump-like Chat Assistant")
st.write(
    "Type in a question or statement, and receive a 'Trump-style' "
    "response in both text and audio!"
)

# Prompt box, pre-filled with placeholder text.
user_input = st.text_input("Your message:", "Enter a message here")
37
 
38
if user_input:
    # Produce the text reply first, then a spoken version of the same text.
    trump_response = generate_response(user_input)

    st.subheader("Trump-like Assistant (Text Response):")
    st.write(trump_response)

    audio_output_path = generate_audio(trump_response)

    # Guard clause: bail out with an error if synthesis produced no file.
    if not os.path.exists(audio_output_path):
        st.error("Failed to generate audio. Please try again.")
    else:
        st.subheader("Trump-like Assistant (Audio Response):")
        with open(audio_output_path, "rb") as audio_file:
            audio_bytes = audio_file.read()
        st.audio(audio_bytes, format="audio/mp3")
 
1
import streamlit as st
from transformers import pipeline


@st.cache_resource
def _load_generator():
    """Load the text-generation pipeline once per server process.

    st.cache_resource prevents Streamlit from re-downloading and re-loading
    the model on every script rerun.
    """
    # BUG FIX: the Hub model id is "gpt2"; "gpt-2" does not exist and
    # raises OSError at load time.
    return pipeline("text-generation", model="gpt2")


# Module-level handle kept so the rest of the script is unchanged.
generator = _load_generator()
 
 
6
 
7
# --- Streamlit UI ---
st.title("Donald Trump Style Text Generator")
st.write(
    "Enter a prompt, and the model will respond in a manner inspired "
    "by Donald Trump."
)

# Free-text prompt (empty string until the user types something).
user_input = st.text_input("Enter your prompt:")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
if user_input:
    # Steer the model by prefixing a persona instruction to the user prompt.
    trump_prompt = f"In the style of Donald Trump: {user_input}"

    # Sampled decoding: do_sample=True is required for temperature/top_p
    # to take effect.
    response = generator(
        trump_prompt,
        max_length=100,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.8,
        top_p=0.9,
    )

    generated_text = response[0]["generated_text"]
    # BUG FIX: text-generation pipelines echo the input prompt, so the
    # steering prefix ("In the style of Donald Trump: ...") was shown to
    # the user. Display only the model's continuation.
    if generated_text.startswith(trump_prompt):
        generated_text = generated_text[len(trump_prompt):].lstrip()

    st.write("Response:", generated_text)