Commit bd3c6d1
Parent(s): ae0bbea
Update app.py
app.py CHANGED
@@ -1,61 +1,99 @@
Old version (removed lines are marked "-"; several are truncated in the diff view):

- from transformers import AutoTokenizer
- import transformers
  import torch
  import streamlit as st

-
- tokenizer = AutoTokenizer.from_pretrained(

-
      "text-generation",
-     model=
      tokenizer=tokenizer,
-
-     trust_remote_code=True,
-     device_map="auto"
  )

- def generate_text(prompt,
-
-
          max_length=max_length,
          do_sample=True,
          top_k=top_k,
          temperature=temperature,
          num_return_sequences=1,
          eos_token_id=tokenizer.eos_token_id,
-     )
-

- # Streamlit app
  st.title("AI-Generated Blog Post")

-
- keywords_input = st.text_input("
  keywords = [word.strip() for word in keywords_input.split(',')]

- # Display generated content on button click
  if st.button('Generate Article'):
-     if
-
          generated_text = " ".join(keywords)
-         intro_text = generate_text(generated_text, "Introduction", max_length=200, top_k=50, temperature=0.7)
-         body_text = generate_text(generated_text, "Body", max_length=500, top_k=50, temperature=0.7)
-         conclusion_text = generate_text(generated_text, "Conclusion", max_length=150, top_k=50, temperature=0.7)

-
          st.header("Introduction")
          st.write(intro_text)

-         st.
-         st.write(body_text)

          st.header("Conclusion")
          st.write(conclusion_text)
      else:
          st.warning("Please input keywords to generate content.")

- # Sidebar with instructions
  st.sidebar.title("Instructions")
  st.sidebar.write(
      "1. Enter keywords related to the topic you want to generate content about."
New version (added lines are marked "+"):

  import torch
  import streamlit as st
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+ from diffusers import StableDiffusionPipeline
+ from PIL import Image

+ text_model = "gpt2"
+ tokenizer = AutoTokenizer.from_pretrained(text_model)
+ model = AutoModelForCausalLM.from_pretrained(text_model)

+ image_model = "runwayml/stable-diffusion-v1-5"
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ generator = pipeline(
      "text-generation",
+     model=model,
      tokenizer=tokenizer,
+     device=0 if torch.cuda.is_available() else -1
  )

+ def generate_text(prompt, temperature=0.7, top_k=50, repetition_penalty=1.2, max_length=None, min_length=10):
+     return generator(
+         prompt,
          max_length=max_length,
+         min_length=min_length,
          do_sample=True,
          top_k=top_k,
          temperature=temperature,
+         repetition_penalty=repetition_penalty,
          num_return_sequences=1,
          eos_token_id=tokenizer.eos_token_id,
+     )[0]["generated_text"]
+
+ def generate_image(prompt):
+     pipe = StableDiffusionPipeline.from_pretrained(image_model, torch_dtype=torch.float32)
+     pipe = pipe.to(device)
+     image = pipe(prompt).images[0]
+     return image

  st.title("AI-Generated Blog Post")

+ title = st.text_input("Topic of the Article")
+ keywords_input = st.text_input("Enter Some Keywords About The Topic (Separate keywords with commas)")
  keywords = [word.strip() for word in keywords_input.split(',')]
+ keywords.append(title)

  if st.button('Generate Article'):
+     if keywords:
          generated_text = " ".join(keywords)

+         formatted_title = title.capitalize()
+
+         st.markdown(
+             f"<h1 style='text-align: center; color: blue; font-size: 70px;'>{formatted_title}</h1>",
+             unsafe_allow_html=True
+         )
+
+         generated_image1 = generate_image(generated_text)
+
+         col1, col2, col3 = st.columns([1, 2, 1])
+         with col2:
+             new_image1 = generated_image1.resize((700, 300))  # Resize the image here
+             st.image(new_image1, use_column_width=True)
+
+         intro_text = generate_text(generated_text, min_length=100, max_length=200)
+         body_text1 = generate_text(generated_text, min_length=200, max_length=250)
+         body_text2 = generate_text(generated_text, min_length=300, max_length=400)
+         conclusion_text = generate_text(generated_text, min_length=100, max_length=200)
+
          st.header("Introduction")
          st.write(intro_text)
+         modified_prompt = generated_text + ' bright'
+         generated_image2 = generate_image(modified_prompt)
+
+         new_image2 = generated_image2.resize((700, 300))  # Resize the image here
+         st.image(new_image2, use_column_width=True)
+
+
+         col1, col2 = st.columns(2)
+         with col1:
+             st.header("Body")
+             st.write(body_text1)
+
+         with col2:
+             modified_prompt2 = generated_text + ' shade'
+             generated_image3 = generate_image(modified_prompt2)
+             st.markdown("<br><br><br><br>", unsafe_allow_html=True)  # Add vertical space
+             st.image(generated_image3, use_column_width=True)

+         st.write(body_text2)

          st.header("Conclusion")
          st.write(conclusion_text)
      else:
          st.warning("Please input keywords to generate content.")

  st.sidebar.title("Instructions")
  st.sidebar.write(
      "1. Enter keywords related to the topic you want to generate content about."
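
For reference, the two helpers introduced by this commit can be smoke-tested outside Streamlit. The sketch below is not part of the commit: it rebuilds the same gpt2 text pipeline and Stable Diffusion pipeline with the settings used by generate_text() and generate_image(); the example prompt and output filename are hypothetical placeholders.

# Standalone sketch (assumes torch, transformers and diffusers are installed and the
# "gpt2" and "runwayml/stable-diffusion-v1-5" checkpoints can be downloaded).
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
from diffusers import StableDiffusionPipeline

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,
)

# Same sampling settings as generate_text() in app.py.
text = generator(
    "solar energy, home installation",  # hypothetical example prompt
    max_length=200,
    min_length=100,
    do_sample=True,
    top_k=50,
    temperature=0.7,
    repetition_penalty=1.2,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
)[0]["generated_text"]
print(text)

# Same checkpoint and dtype as generate_image() in app.py.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
pipe("solar energy, home installation").images[0].save("preview.png")  # hypothetical output path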