johngoad committed on
Commit
7a38ebc
1 Parent(s): e6984a1

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +119 -0
app.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import re
3
+
4
+ from gradio.mix import Parallel
5
+ from transformers import (
6
+ AutoTokenizer,
7
+ AutoModelForSeq2SeqLM,
8
+ )
9
+
10
def clean_text(text):
    """Normalize raw article text before feeding it to the headline models.

    Drops non-ASCII characters (e.g. Chinese text), replaces newlines and
    tabs with spaces, and collapses runs of spaces into a single space.

    Args:
        text: Raw input string; may contain unicode, newlines, and tabs.

    Returns:
        A cleaned, single-line ASCII string with no leading/trailing spaces.
    """
    # Remove non-ASCII (e.g. Chinese characters) the models can't handle.
    text = text.encode("ascii", errors="ignore").decode("ascii")
    # Replace newlines and tabs with spaces in a single pass.
    # NOTE: the original also substituted r"\n\n" *after* r"\n" — dead code,
    # since no newline can remain once every "\n" has been replaced.
    text = re.sub(r"[\n\t]", " ", text)
    # Collapse multiple spaces into one and trim the ends.
    return re.sub(" +", " ", text).strip()
22
+
23
modchoice_1 = "chinhon/headline_writer"

def headline_writer1(text):
    """Generate one suggested headline for *text* via the headline_writer model.

    Args:
        text: Raw story text; cleaned with clean_text() before encoding.

    Returns:
        The first decoded headline string produced by the model.
    """
    cleaned = clean_text(text)

    # NOTE(review): tokenizer and model are re-loaded on every call;
    # consider caching at module level if latency matters.
    tokenizer_1 = AutoTokenizer.from_pretrained(modchoice_1)
    model_1 = AutoModelForSeq2SeqLM.from_pretrained(modchoice_1)

    # NOTE(review): as_target_tokenizer() is normally used when encoding
    # *labels*, not source text — preserved as-is; confirm intent.
    with tokenizer_1.as_target_tokenizer():
        encoded = tokenizer_1(
            cleaned,
            truncation=True,
            padding="longest",
            return_tensors="pt",
        )

    generated = model_1.generate(**encoded)
    decoded = tokenizer_1.batch_decode(generated, skip_special_tokens=True)
    return decoded[0]
42
+
43
+
44
# Gradio wrapper for the first model: plain textbox in, unlabeled textbox out.
_inbox_1 = gr.inputs.Textbox()
_outbox_1 = gr.outputs.Textbox(label="")
headline1 = gr.Interface(fn=headline_writer1, inputs=_inbox_1, outputs=_outbox_1)
49
+
50
+
51
modchoice_2 = "chinhon/pegasus-multi_news-headline"

def headline_writer2(text):
    """Generate one suggested headline for *text* via the pegasus-multi_news model.

    Args:
        text: Raw story text; cleaned with clean_text() before encoding.

    Returns:
        The first decoded headline string produced by the model.
    """
    cleaned = clean_text(text)

    # NOTE(review): tokenizer and model are re-loaded on every call;
    # consider caching at module level if latency matters.
    tokenizer_2 = AutoTokenizer.from_pretrained(modchoice_2)
    model_2 = AutoModelForSeq2SeqLM.from_pretrained(modchoice_2)

    # NOTE(review): as_target_tokenizer() is normally used when encoding
    # *labels*, not source text — preserved as-is; confirm intent.
    with tokenizer_2.as_target_tokenizer():
        encoded = tokenizer_2(
            cleaned,
            truncation=True,
            padding="longest",
            return_tensors="pt",
        )

    generated = model_2.generate(**encoded)
    decoded = tokenizer_2.batch_decode(generated, skip_special_tokens=True)
    return decoded[0]
70
+
71
+
72
# Gradio wrapper for the second model: plain textbox in, unlabeled textbox out.
_inbox_2 = gr.inputs.Textbox()
_outbox_2 = gr.outputs.Textbox(label="")
headline2 = gr.Interface(fn=headline_writer2, inputs=_inbox_2, outputs=_outbox_2)
77
+
78
+
79
modchoice_3 = "chinhon/pegasus-newsroom-headline_writer"

def headline_writer3(text):
    """Generate one suggested headline for *text* via the pegasus-newsroom model.

    Args:
        text: Raw story text; cleaned with clean_text() before encoding.

    Returns:
        The first decoded headline string produced by the model.
    """
    input_text = clean_text(text)

    # NOTE(review): tokenizer and model are re-loaded on every call;
    # consider caching at module level if latency matters.
    tokenizer_3 = AutoTokenizer.from_pretrained(modchoice_3)
    model_3 = AutoModelForSeq2SeqLM.from_pretrained(modchoice_3)

    # NOTE(review): as_target_tokenizer() is normally used when encoding
    # *labels*, not source text — preserved as-is; confirm intent.
    with tokenizer_3.as_target_tokenizer():
        batch = tokenizer_3(
            input_text, truncation=True, padding="longest", return_tensors="pt"
        )

    # FIX: the original passed max_length=100 to batch_decode(), where it is
    # silently ignored — decoding has no max_length; output length is
    # controlled by generate(). Removed the dead argument so the code no
    # longer implies a truncation that never happens. If a 100-token cap was
    # intended, pass max_length=100 to model_3.generate() instead.
    translated = model_3.generate(**batch)

    summary_3 = tokenizer_3.batch_decode(translated, skip_special_tokens=True)

    return summary_3[0]
100
+
101
+
102
# Gradio wrapper for the third model: plain textbox in, unlabeled textbox out.
_inbox_3 = gr.inputs.Textbox()
_outbox_3 = gr.outputs.Textbox(label="")
headline3 = gr.Interface(fn=headline_writer3, inputs=_inbox_3, outputs=_outbox_3)
107
+
108
+
109
# Show all three model interfaces side-by-side on one page and serve the app.
story_box = gr.inputs.Textbox(
    lines=20,
    label="Paste the first few paragraphs of your story here, and choose from 3 suggested headlines",
)
app = Parallel(
    headline1,
    headline2,
    headline3,
    title="AI Headlines Generator",
    inputs=story_box,
    theme="darkhuggingface",
)
app.launch(enable_queue=True)