ahmadmac committed on
Commit
1140203
·
verified ·
1 Parent(s): fb37ea7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -8
app.py CHANGED
@@ -1,18 +1,18 @@
1
  import streamlit as st
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
- # Load the tokenizer and model
 
5
  tokenizer = AutoTokenizer.from_pretrained("ahmadmac/Pretrained-GPT2")
6
  model = AutoModelForCausalLM.from_pretrained("ahmadmac/Pretrained-GPT2")
7
 
8
- # Function to generate text
9
  def generate_text(prompt):
10
  inputs = tokenizer(prompt, return_tensors="pt")
11
- outputs = model.generate(**inputs, max_length=150)
12
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
13
  return generated_text
14
 
15
- st.title("GPT-2 Shakespear Text Generator")
16
  st.write("Enter a prompt to generate text using GPT-2")
17
 
18
  user_input = st.text_input("Prompt")
@@ -24,7 +24,3 @@ if st.button("Generate"):
24
  st.write(generated_text)
25
  else:
26
  st.warning("Please enter a prompt")
27
-
28
- if __name__ == "__main__":
29
- st.set_page_config(page_title="GPT-2 Text Generator", layout="centered")
30
- st.write("GPT-2 Text Generator")
 
1
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# set_page_config must be the first Streamlit call in the script,
# so it runs before any other st.* statement below.
st.set_page_config(page_title="GPT-2 Text Generator", layout="centered")

# Load the fine-tuned GPT-2 checkpoint once at module import.
# NOTE(review): each Streamlit rerun re-executes this script, so the
# model is reloaded on every interaction — consider wrapping the loads
# in @st.cache_resource to keep one copy in memory.
tokenizer = AutoTokenizer.from_pretrained("ahmadmac/Pretrained-GPT2")
model = AutoModelForCausalLM.from_pretrained("ahmadmac/Pretrained-GPT2")
9
def generate_text(prompt):
    """Generate a text continuation for *prompt* with the loaded GPT-2 model.

    Uses the module-level ``tokenizer`` and ``model``. Decoding runs with
    the library defaults; output is capped at 50 tokens total.

    Parameters:
        prompt: str, the user-supplied seed text.

    Returns:
        str, the decoded model output (prompt included), with special
        tokens stripped.
    """
    # Tokenize to PyTorch tensors, the format model.generate() expects.
    inputs = tokenizer(prompt, return_tensors="pt")
    # max_length counts the prompt tokens too, so a long prompt leaves
    # little room for new text — NOTE(review): max_new_tokens=50 would
    # cap only the continuation; confirm intended behavior.
    outputs = model.generate(**inputs, max_length=50)
    # Drop special tokens (e.g. the end-of-text marker) when decoding.
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text
14
 
15
# --- UI: page heading and prompt input ---
st.title("GPT-2 Text Generator")
st.write("Enter a prompt to generate text using GPT-2")

# Empty string until the user types something; the button handler
# below treats the empty value as "no prompt yet".
user_input = st.text_input("Prompt")
 
24
  st.write(generated_text)
25
  else:
26
  st.warning("Please enter a prompt")