import streamlit as st
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from summarizer import Summarizer

# Load the fine-tuned GPT-2 model and tokenizer, plus an extractive summarizer.
model = GPT2LMHeadModel.from_pretrained("DemocracyStudio/generate_nft_content")
tokenizer = GPT2Tokenizer.from_pretrained("DemocracyStudio/generate_nft_content")
summarize = Summarizer()

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

st.title("Text generation for the marketing content of NFTs")
st.subheader("Course project 'NLP with transformers' at opencampus.sh, Spring 2022")
st.sidebar.image("bayc crown.png", use_column_width=True)

topics = ["NFT", "Blockchain", "Metaverse"]
choice = st.sidebar.selectbox("Select one topic", topics)

if choice == 'NFT':
    keywords = st.text_area("Input 4 keywords here: (optional)")
    length = st.text_area("How long should your text be? (default: 512 words)")
    if st.button("Generate"):
        # Encode the start-of-text token as the generation prompt.
        prompt = "<|startoftext|>"
        generated = torch.tensor(tokenizer.encode(prompt)).unsqueeze(0)
        generated = generated.to(device)
        # Sample one sequence of up to 512 tokens with top-k / nucleus sampling.
        sample_outputs = model.generate(
            generated,
            do_sample=True,
            top_k=50,
            max_length=512,
            top_p=0.95,
            num_return_sequences=1,
        )
        for i, sample_output in enumerate(sample_outputs):
            generated_text = tokenizer.decode(sample_output, skip_special_tokens=True)
            # st.text("Keywords: {}\n".format(keywords))
            # st.text("Length in number of words: {}\n".format(length))
            st.text(f"This is your tailored blog article: {generated_text}")
            # Condense the article into a one-sentence, tweet-sized summary.
            summary = summarize(generated_text, num_sentences=1)
            st.text(f"This is a tweet-sized summary of your article: {summary}")
else:
    st.write("Topic not available yet")