sabssag committed
Commit 83b3714 · verified · 1 Parent(s): 50250c7

Update app.py

Files changed (1)
  1. app.py +33 -17
app.py CHANGED
@@ -1,22 +1,38 @@
- # import streamlit as st
- # from transformers import pipeline
- # generator= pipeline("text_generation", model="gpt2-large")
- # def generate_blog(topic):
- # res= generator(max_length=400, num_return_sequences=3)
- # return res[0]["generate_text"]

- # text = st.text_area("Enter a topic")
- # if text:
- # out=generate_blog(text)
- # st.json(out)
-
- import streamlit as st
- from transformers import pipeline
- pipe = pipeline("sentiment-analysis")
- text= st.text_area("Enter your text")

  if text:
-     output = pipe(text)
-     st.json(output)
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
+ import streamlit as st

+ # Initialize the tokenizer and model
+ model_name = 'gpt2-large'
+ tokenizer = GPT2Tokenizer.from_pretrained(model_name)
+ model = GPT2LMHeadModel.from_pretrained(model_name)

+ text= st.text_area("Enter your Topic: ")

  if text:
+     # Encode input text
+     encoded_input = tokenizer(text, return_tensors='pt')
+
+     # Generate text
+     output = model.generate(
+         input_ids=encoded_input['input_ids'],
+         max_length=50,
+         num_return_sequences=1,
+         no_repeat_ngram_size=2,
+         top_p=0.95,
+         top_k=50
+     )
+
+     # Decode generated text
+     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+
+     st.json(generated_text)
+
+ # import streamlit as st
+ # from transformers import pipeline
+ # pipe = pipeline("sentiment-analysis")
+ # text= st.text_area("Enter your text")
+
+ # if text:
+ # output = pipe(text)
+ # st.json(output)
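
A note on the generation call above (an editorial sketch, not part of the commit): in Hugging Face Transformers, top_k and top_p only influence decoding when sampling is enabled, so as committed model.generate() decodes greedily and effectively ignores those two arguments. A sampled variant might look like the following; the attention_mask and pad_token_id arguments are additions here, and the parameter values are illustrative.

output = model.generate(
    input_ids=encoded_input['input_ids'],
    attention_mask=encoded_input['attention_mask'],
    max_length=50,
    do_sample=True,                       # required for top_p / top_k to take effect
    top_p=0.95,
    top_k=50,
    no_repeat_ngram_size=2,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; avoids a generate() warning
)
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
st.write(generated_text)                  # plain-text display; st.json would show a quoted string

Since generated_text is a plain string rather than a JSON object, st.write (or st.markdown) is the more natural display call than st.json.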