ms1449 committed (verified)
Commit c6b9e67 · 1 Parent(s): 5f61159

Update app.py

Files changed (1): app.py (+5 -7)
app.py CHANGED
@@ -1,6 +1,7 @@
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
+
 # Load the model and tokenizer
 model_name = "gpt2-large"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -18,17 +19,14 @@ if st.button("Generate Blog Post"):
     # Prepare the prompt
     prompt = f"Write a blog post about {topic}:\n\n"
 
-    # Generate text
-    generation_config = GenerationConfig(max_new_tokens=50, do_sample=True, temperature=0.7)
-
     # Tokenize the input
     inputs_encoded = tokenizer.encode(prompt, return_tensors="pt")
 
-    # Model output
-    model_output = model.generate(inputs_encoded["input_ids"], generation_config=generation_config)[0]
+    # Generate text
+    model_output = model.generate(inputs_encoded, max_new_tokens=50, do_sample=True, temperature=0.7)
 
     # Decode the output
-    output = tokenizer.decode(model_output, skip_special_tokens=True)
+    output = tokenizer.decode(model_output[0], skip_special_tokens=True)
 
     # Display the generated blog post
     st.subheader("Generated Blog Post:")
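
The likely reason for the change: tokenizer.encode(..., return_tensors="pt") returns a plain token-ID tensor rather than a dict-like BatchEncoding, so the removed inputs_encoded["input_ids"] lookup would fail at runtime; the new code passes the tensor straight to generate() and inlines the sampling settings as keyword arguments, making the GenerationConfig import unnecessary. Below is a minimal sketch of the fixed generation flow as a standalone script (not part of the commit; the prompt topic is illustrative only), mirroring the committed hyperparameters:

# Hypothetical standalone sketch of the committed generation flow.
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "gpt2-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

prompt = "Write a blog post about open-source LLMs:\n\n"  # illustrative topic

# encode() returns a token-ID tensor, so it is passed to generate() directly
inputs_encoded = tokenizer.encode(prompt, return_tensors="pt")
model_output = model.generate(inputs_encoded, max_new_tokens=50, do_sample=True, temperature=0.7)
output = tokenizer.decode(model_output[0], skip_special_tokens=True)
print(output)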