from diffusers import StableDiffusionPipeline
import torch
from langchain.chains import LLMChain
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
import streamlit as st
import json
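
# This Streamlit app writes a blog post with a Hugging Face LLM (via LangChain)
# and generates a matching banner image with Stable Diffusion, persisting the
# results to ideas.json so previous blogs can be revisited from the sidebar.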

# Load existing ideas from a file
def load_ideas():
    try:
        with open("ideas.json", "r") as file:
            ideas = json.load(file)
    except FileNotFoundError:
        ideas = []
    return ideas

# Save ideas to a file
def save_ideas(ideas):
    with open("ideas.json", "w") as file:
        json.dump(ideas, file)

# Create the Stable Diffusion pipeline and the LangChain text-generation chain.
# st.cache_resource keeps these heavy models in memory across Streamlit reruns
# instead of reloading them on every interaction.
@st.cache_resource
def load_models():
    with torch.no_grad():
        model_id = "CompVis/stable-diffusion-v1-4"
        device = "cuda"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe = pipe.to(device)
        # HuggingFaceHub reads the HUGGINGFACEHUB_API_TOKEN environment variable
        hub_llm = HuggingFaceHub(repo_id="HuggingFaceH4/zephyr-7b-beta")
        prompt = PromptTemplate(
            input_variables=["keyword"],
            template="""
            Write a comprehensive article about {keyword} covering the following aspects:
            Introduction, History and Background, Key Concepts and Terminology, Use Cases and Applications, Benefits and Drawbacks, Future Outlook, Conclusion
            Ensure that the article is well-structured, informative, and at least 1500 words long. Use SEO best practices for content optimization.
            """,
        )
        hub_chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)
        return hub_chain, pipe


# Wait for the models to be created
with st.spinner("Creating generation pipelines. Please Wait:)..."):
    hub_chain, pipe = load_models()


# Function to generate content
@torch.no_grad()
def generate_content(topic):
    content = hub_chain.run(topic)

    subheadings = [
        "Introduction",
        "History and Background",
        "Key Concepts and Terminology",
        "Use Cases and Applications",
        "Benefits and Drawbacks",
        "Future Outlook",
        "Conclusion",
    ]

    # Turn plain "Heading:" / "Heading" markers in the model output into Markdown H2 headings
    for subheading in subheadings:
        if (subheading + ":") in content:
            content = content.replace(subheading + ":", "## " + subheading + "\n")
        elif subheading in content:
            content = content.replace(subheading, "## " + subheading + "\n")

    return content



# Generate a banner image for the blog and save it to disk
@torch.no_grad()
def generate_image(topic):
    prompt = f"blog banner about {topic}"
    image = pipe(prompt).images[0]
    # Store the image as a file so only its path is written to ideas.json
    # (PIL images are not JSON-serializable)
    image_path = f"{topic}.png"
    image.save(image_path)
    return image_path


# Streamlit app
st.title("Blog Generator")

# Input and button
topic = st.text_input("Enter Title for the blog")
button_clicked = st.button("Create blog!")
st.subheader(topic)
# Load existing ideas
existing_ideas = load_ideas()
st.sidebar.header("Previous Ideas:")

# Display existing ideas in the sidebar
keys = list({key for idea in existing_ideas for key in idea.keys()})
if topic in keys:
    index = keys.index(topic)
    selected_idea = st.sidebar.selectbox("Select Idea", keys, key=f"selectbox{topic}", index=index)
    # Display the stored content and image for the selected idea
    selected_idea_from_list = next((idea for idea in existing_ideas if selected_idea in idea), None)
    if selected_idea_from_list is not None:
        st.markdown(selected_idea_from_list[selected_idea]["content"])
        st.image(selected_idea_from_list[selected_idea]["image_path"])
# Handle button click
if button_clicked:
    # Generate the article and banner image, then persist them
    content = generate_content(topic)
    image_path = generate_image(topic)
    existing_ideas.append({topic: {"content": content, "image_path": image_path}})
    save_ideas(existing_ideas)
    # Refresh the sidebar with the new idea; a distinct widget key avoids a
    # duplicate-key error when a selectbox for this topic is already rendered
    keys = list({key for idea in existing_ideas for key in idea.keys()})
    selected_idea = st.sidebar.selectbox("Select Idea", keys, key=f"selectbox_new_{topic}", index=keys.index(topic))
    st.markdown(content)
    st.image(image_path)