GSridhar1982 committed
Commit e4e56de · verified · 1 Parent(s): e6b9ea8

Updated the gradio app interface

Files changed (1)
  1. app.py +25 -35
app.py CHANGED
@@ -1,35 +1,25 @@
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
- import nltk
- nltk.download('punkt')
-
- tokenizer = AutoTokenizer.from_pretrained("anukvma/bart-base-medium-email-subject-generation-v5")
- model = AutoModelForSeq2SeqLM.from_pretrained("anukvma/bart-base-medium-email-subject-generation-v5")
-
- text = """
- Harry - I got kicked out of the system, so I'm sending this from Tom's account.
- He can fill you in on the potential deal with STEAG.
- I left my resume on your chair.
- I'll e-mail a copy when I have my home account running.
- My contact info is:
- """
-
- inputs = ["provide email subject: " + text]
-
- inputs = tokenizer(inputs, max_length=512, truncation=True, return_tensors="pt")
- output = model.generate(**inputs, num_beams=8, do_sample=True, min_length=1, max_length=10)
- decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
- predicted_title = nltk.sent_tokenize(decoded_output.strip())[0]
-
- print(predicted_title)
-
- def generate_subject(text):
-     inputs = ["provide email subject: " + text]
-
-     inputs = tokenizer(inputs, max_length=512, truncation=True, return_tensors="pt")
-     output = model.generate(**inputs, num_beams=8, do_sample=True, min_length=1, max_length=10)
-     decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
-     predicted_title = nltk.sent_tokenize(decoded_output.strip())[0]
-     return predicted_title
-
- import gradio as gr
- gr.Interface(fn = generate_subject, inputs="text",outputs="text").launch()
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ import nltk
+ nltk.download('punkt')
+
+ def generate_subject(model_name, email_body):
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+     inputs = ["provide email subject: " + email_body]
+     inputs = tokenizer(inputs, max_length=512, truncation=True, return_tensors="pt")
+     output = model.generate(**inputs, num_beams=8, do_sample=True, min_length=1, max_length=10)
+     decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
+     predicted_title = nltk.sent_tokenize(decoded_output.strip())[0]
+     return predicted_title
+
+ import gradio as gr
+ iface = gr.Interface(
+     fn=generate_subject,
+     inputs=[
+         gr.Dropdown(choices=["anukvma/t5-base-medium-email-subject-generation-v2", "anukvma/bart-base-medium-email-subject-generation-v5"], label="Select Model"),
+         gr.Textbox(lines=5, label="Email Body")
+     ],
+     outputs=gr.Textbox(label="Email Summary")
+ )
+
+ iface.launch()
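
With this change the checkpoint is loaded inside generate_subject rather than once at module import, so the same logic can be exercised on its own, outside the Gradio UI. Below is a minimal sketch (not part of the commit); it assumes the two checkpoints offered in the dropdown are publicly reachable and that the NLTK punkt data downloads successfully, and it reuses the sample email that the old app.py hard-coded.

# Sketch only: mirrors the committed generate_subject function and calls it directly.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import nltk

nltk.download('punkt')

def generate_subject(model_name, email_body):
    # Load the selected checkpoint, prefix the body with the task prompt,
    # and beam-search a short (<= 10 token) subject line.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    inputs = tokenizer(["provide email subject: " + email_body],
                       max_length=512, truncation=True, return_tensors="pt")
    output = model.generate(**inputs, num_beams=8, do_sample=True,
                            min_length=1, max_length=10)
    decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
    # Keep only the first sentence of the decoded text as the subject.
    return nltk.sent_tokenize(decoded_output.strip())[0]

email_body = (
    "Harry - I got kicked out of the system, so I'm sending this from Tom's account. "
    "He can fill you in on the potential deal with STEAG. "
    "I left my resume on your chair."
)
print(generate_subject("anukvma/bart-base-medium-email-subject-generation-v5", email_body))

Note that loading the tokenizer and model on every call is what lets the dropdown switch checkpoints freely, at the cost of re-initialising the model per request; caching the pair per model_name would avoid that overhead.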