import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Use a GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = AutoTokenizer.from_pretrained("salesken/content_generation_from_phrases")
model = AutoModelForCausalLM.from_pretrained("salesken/content_generation_from_phrases").to(device)
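
# A minimal caching sketch (an assumption: it requires a Streamlit version
# that provides st.cache_resource). Wrapping the loaders this way keeps
# Streamlit from re-initialising the model on every widget interaction;
# to use it, replace the two from_pretrained() calls above with:
#
# @st.cache_resource
# def load_model():
#     tok = AutoTokenizer.from_pretrained("salesken/content_generation_from_phrases")
#     mdl = AutoModelForCausalLM.from_pretrained("salesken/content_generation_from_phrases").to(device)
#     return tok, mdl
#
# tokenizer, model = load_model()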


input_query = st.text_input("Enter the Blog Title")

# Only generate once the user has entered a title; otherwise the model
# would run on an empty prompt on every rerun of the script.
if input_query:
    # The model expects a lower-cased prompt wrapped in its
    # "<|startoftext|> ... ~~" markers.
    query = "<|startoftext|> " + "create a blog about " + input_query + " ~~"
    input_ids = tokenizer.encode(query.lower(), return_tensors="pt").to(device)

    sample_outputs = model.generate(input_ids,
                                    do_sample=True,
                                    num_beams=1,
                                    max_length=1024,  # GPT-2-style models support at most 1024 positions
                                    temperature=0.9,
                                    top_k=30,
                                    num_return_sequences=1)

    # Keep the text between the " ~~ " marker and the "||" terminator;
    # fall back to the full decode if the markers are missing.
    decoded = tokenizer.decode(sample_outputs[0], skip_special_tokens=True).split("||")[0]
    parts = decoded.split(" ~~ ")
    st.write(parts[1] if len(parts) > 1 else parts[0])
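
# To try the app locally (assuming this file is saved as app.py):
#   pip install streamlit torch transformers
#   streamlit run app.py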