GSridhar1982 committed (verified)
Commit 14aa639 · Parent(s): 620f6d6

Gradio app file

Files changed (1)
app.py +35 -0
app.py ADDED
@@ -0,0 +1,35 @@
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ import nltk
+ nltk.download('punkt')
+
+ tokenizer = AutoTokenizer.from_pretrained("anukvma/bart-base-medium-email-subject-generation-v5")
+ model = AutoModelForSeq2SeqLM.from_pretrained("anukvma/bart-base-medium-email-subject-generation-v5")
+
+ text = """
+ Harry - I got kicked out of the system, so I'm sending this from Tom's account.
+ He can fill you in on the potential deal with STEAG.
+ I left my resume on your chair.
+ I'll e-mail a copy when I have my home account running.
+ My contact info is:
+ """
+
+ inputs = ["provide email subject: " + text]
+
+ inputs = tokenizer(inputs, max_length=512, truncation=True, return_tensors="pt")
+ output = model.generate(**inputs, num_beams=8, do_sample=True, min_length=1, max_length=10)
+ decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
+ predicted_title = nltk.sent_tokenize(decoded_output.strip())[0]
+
+ print(predicted_title)
+
+ def generate_subject(text):
+     inputs = ["provide email subject: " + text]
+
+     inputs = tokenizer(inputs, max_length=512, truncation=True, return_tensors="pt")
+     output = model.generate(**inputs, num_beams=8, do_sample=True, min_length=1, max_length=10)
+     decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
+     predicted_title = nltk.sent_tokenize(decoded_output.strip())[0]
+     return predicted_title
+
+ import gradio as gr
+ gr.Interface(fn=generate_subject, inputs="text", outputs="text").launch()
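
Once this app is running as a Gradio Space, the same generate_subject function can also be called remotely. Below is a minimal sketch using gradio_client, assuming the Space is published under a name like GSridhar1982/email-subject-generator (the actual Space ID is not part of this commit; a gr.Interface exposes its function at the default "/predict" endpoint):

from gradio_client import Client

# Hypothetical Space ID for illustration only; replace with the real "user/space" name.
client = Client("GSridhar1982/email-subject-generator")

# The interface takes a single text input (the email body) and returns the predicted subject line.
subject = client.predict(
    "Harry - I got kicked out of the system, so I'm sending this from Tom's account.",
    api_name="/predict",
)
print(subject)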