Chillyblast committed on
Commit
0b6d7d2
·
verified ·
1 Parent(s): 1c11816

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -0
app.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Initialize the tokenizer and model.
# NOTE(review): 'gpt2-large' (~3 GB of weights) is downloaded/loaded at import
# time — every run pays this cost before any generation happens.
tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
model = AutoModelForCausalLM.from_pretrained('gpt2-large')
7
+
8
def generate_blog(topic, max_length=500, num_return_sequences=1):
    """Generate blog-style continuations of *topic* with the GPT-2 model.

    Args:
        topic: Prompt string the generation is conditioned on.
        max_length: Maximum total length (prompt + generated) in tokens.
        num_return_sequences: Number of independent continuations to return.

    Returns:
        list[str]: One decoded text per generated sequence.
    """
    # Encode the topic as input IDs; return_tensors='pt' yields a (1, n) tensor.
    input_ids = tokenizer.encode(topic, return_tensors='pt')

    # Inference only — no_grad avoids building the autograd graph.
    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            max_length=max_length,
            num_return_sequences=num_return_sequences,
            # Greedy search can only produce a single sequence; sample when the
            # caller asks for more than one (otherwise generate() raises).
            # The default call (num_return_sequences=1) stays greedy/deterministic.
            do_sample=num_return_sequences > 1,
            no_repeat_ngram_size=2,
            # GPT-2 has no pad token; use EOS to silence the per-call warning.
            pad_token_id=tokenizer.eos_token_id,
        )
        # Dropped early_stopping=True: it applies only to beam search and was a
        # no-op (plus a UserWarning) under the decoding modes used here.

    # Decode each generated ID sequence back to text.
    return [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
24
+
25
# Example usage — guarded so importing this module doesn't block on input().
if __name__ == "__main__":
    # input() already returns a str; the original str(...) wrapper was redundant.
    topic = input("Enter the topic:")
    generated_blogs = generate_blog(topic)

    # Print each generated blog with a 1-based index.
    for i, blog in enumerate(generated_blogs, start=1):
        print(f"Blog {i}:\n{blog}\n")