sabssag committed on
Commit 7550359 · verified · 1 Parent(s): 83b3714

Update app.py

Files changed (1)
  1. app.py +33 -29
app.py CHANGED
@@ -1,38 +1,42 @@
- from transformers import GPT2Tokenizer, GPT2LMHeadModel
  import streamlit as st

  # Initialize the tokenizer and model
  model_name = 'gpt2-large'
  tokenizer = GPT2Tokenizer.from_pretrained(model_name)
  model = GPT2LMHeadModel.from_pretrained(model_name)

- text= st.text_area("Enter your Topic: ")
-

  if text:
-     # Encode input text
-     encoded_input = tokenizer(text, return_tensors='pt')
-
-     # Generate text
-     output = model.generate(
-         input_ids=encoded_input['input_ids'],
-         max_length=50,
-         num_return_sequences=1,
-         no_repeat_ngram_size=2,
-         top_p=0.95,
-         top_k=50
-     )
-
-     # Decode generated text
-     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
-
-     st.json(generated_text)
-
- # import streamlit as st
- # from transformers import pipeline
- # pipe = pipeline("sentiment-analysis")
- # text= st.text_area("Enter your text")
-
- # if text:
- # output = pipe(text)
- # st.json(output)
 
 
 
  import streamlit as st
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
+
+ # Set the title of the Streamlit app
+ st.title("GPT-2 Blog Post Generator")

  # Initialize the tokenizer and model
  model_name = 'gpt2-large'
  tokenizer = GPT2Tokenizer.from_pretrained(model_name)
  model = GPT2LMHeadModel.from_pretrained(model_name)

+ # Text input for the user
+ text = st.text_area("Enter your Topic: ")

  if text:
+     try:
+         # Encode input text
+         encoded_input = tokenizer(text, return_tensors='pt')
+
+         # Generate text
+         output = model.generate(
+             input_ids=encoded_input['input_ids'],
+             max_length=200,  # Adjust length as needed
+             num_return_sequences=1,
+             no_repeat_ngram_size=2,
+             top_p=0.95,
+             top_k=50
+         )
+
+         # Decode generated text
+         generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+
+         # Display generated text
+         st.subheader("Generated Blog Post")
+         st.write(generated_text)
+     except Exception as e:
+         st.error(f"An error occurred: {e}")
+
+ # Add instructions
+ st.write("""
+ Enter a topic or a starting sentence in the text area above, and the GPT-2 model will generate a blog post for you.
+ """)