File size: 1,232 Bytes
e2ccc4b
 
175ea08
e34a9a0
e7567a7
e34a9a0
 
e7567a7
 
 
 
db7ea0e
 
175ea08
 
 
db7ea0e
175ea08
 
 
db7ea0e
175ea08
 
 
 
 
 
db7ea0e
 
 
 
 
 
 
 
175ea08
 
2de0bdd
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import os
# NOTE(review): installing dependencies at runtime via os.system is fragile and
# a security risk — they should be declared in requirements.txt. Kept as-is so
# existing deployments that rely on it keep working.
os.system('pip install streamlit transformers torch')

import streamlit as st
from transformers import BartForConditionalGeneration, AutoTokenizer
import torch

# Load the fine-tuned BART summarization model and its tokenizer.
model_name = "llmahmad/facebook_BART_summary"
# BUG FIX: the original swapped these two assignments — the model class was
# assigned to `tokenizer` and the tokenizer to `model`, so `tokenizer.encode`
# and `model.generate` in generate_blog_post() would fail at runtime.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)

def generate_blog_post(topic):
    """Generate a blog post for *topic* with the loaded BART model.

    Returns the decoded text, or an empty string (after showing a
    Streamlit error) if anything goes wrong during generation.
    """
    try:
        # Tokenize the topic into model-ready input IDs (PyTorch tensor).
        input_ids = tokenizer.encode(topic, return_tensors='pt')

        # Sample one continuation using top-k / nucleus sampling.
        generated = model.generate(
            input_ids,
            max_length=500,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.9,
        )

        # Convert the generated token IDs back into plain text.
        return tokenizer.decode(generated[0], skip_special_tokens=True)
    except Exception as e:
        st.error(f"Error: {e}")
        return ""

# --- Streamlit UI ---
st.title("Blog Post Generator")
st.write("Enter a topic to generate a blog post.")

user_topic = st.text_input("Topic:")

# On click, generate the post behind a spinner and render the result.
if st.button("Generate"):
    with st.spinner('Generating...'):
        result = generate_blog_post(user_topic)
    st.write(result)