Delete app.py
app.py
DELETED
@@ -1,194 +0,0 @@
from __future__ import unicode_literals
import re
import unicodedata
import torch
import streamlit as st
from transformers import T5ForConditionalGeneration, T5Tokenizer


def load_model():
    # Download the trained model from the Hugging Face model hub
    model_dir_name = "sonoisa/t5-qiita-title-generation"

    # Tokenizer (SentencePiece)
    tokenizer = T5Tokenizer.from_pretrained(model_dir_name, is_fast=True)

    # Trained model
    trained_model = T5ForConditionalGeneration.from_pretrained(model_dir_name)

    # Move the model to the GPU when one is available
    USE_GPU = torch.cuda.is_available()
    if USE_GPU:
        trained_model.cuda()

    return trained_model, tokenizer


def unicode_normalize(cls, s):
    pt = re.compile("([{}]+)".format(cls))

    def norm(c):
        return unicodedata.normalize("NFKC", c) if pt.match(c) else c

    s = "".join(norm(x) for x in re.split(pt, s))
    s = re.sub("－", "-", s)  # fullwidth hyphen-minus to ASCII hyphen
    return s


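# Example (illustrative, not from the original file): unicode_normalize applies
# NFKC normalization only to characters inside the given class and leaves the
# rest untouched, e.g. with a class of fullwidth digits:
#   unicode_normalize("０-９", "ＡＢＣ１２３")  ->  "ＡＢＣ123"
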
def remove_extra_spaces(s):
    s = re.sub("[ 　]+", " ", s)  # collapse runs of ASCII and fullwidth spaces
    blocks = "".join(
        (
            "\u4E00-\u9FFF",  # CJK UNIFIED IDEOGRAPHS
            "\u3040-\u309F",  # HIRAGANA
            "\u30A0-\u30FF",  # KATAKANA
            "\u3000-\u303F",  # CJK SYMBOLS AND PUNCTUATION
            "\uFF00-\uFFEF",  # HALFWIDTH AND FULLWIDTH FORMS
        )
    )
    basic_latin = "\u0000-\u007F"

    def remove_space_between(cls1, cls2, s):
        p = re.compile("([{}]) ([{}])".format(cls1, cls2))
        while p.search(s):
            s = p.sub(r"\1\2", s)
        return s

    s = remove_space_between(blocks, blocks, s)
    s = remove_space_between(blocks, basic_latin, s)
    s = remove_space_between(basic_latin, blocks, s)
    return s


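# Example (illustrative): spaces touching Japanese characters are removed,
# while spaces between two Basic Latin characters are kept:
#   remove_extra_spaces("吾輩 は 猫 で ある")  ->  "吾輩は猫である"
#   remove_extra_spaces("Python で 開発した Web アプリ")  ->  "Pythonで開発したWebアプリ"
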
def normalize_neologd(s):
    s = s.strip()
    s = unicode_normalize("０-９Ａ-Ｚａ-ｚ｡-ﾟ", s)

    def maketrans(f, t):
        return {ord(x): ord(y) for x, y in zip(f, t)}

    s = re.sub("[˗֊‐‑‒–⁃⁻₋−]+", "-", s)  # normalize hyphens
    s = re.sub("[﹣－ｰ—―─━ー]+", "ー", s)  # normalize choonpus
    s = re.sub("[~∼∾〜〰～]+", "〜", s)  # normalize tildes (modified by Isao Sonobe)
    s = s.translate(
        maketrans(
            "!\"#$%&'()*+,-./:;<=>?@[¥]^_`{|}~。、・「」",
            "！”＃＄％＆’（）＊＋，－．／：；＜＝＞？＠［￥］＾＿｀｛｜｝〜。、・「」",
        )
    )

    s = remove_extra_spaces(s)
    s = unicode_normalize("！”＃＄％＆’（）＊＋，－．／：；＜＞？＠［￥］＾＿｀｛｜｝〜", s)  # keep ＝,・,「,」
    s = re.sub("[’]", "'", s)
    s = re.sub("[”]", '"', s)
    return s


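# Example (illustrative), matching the mecab-ipadic-neologd normalization spec
# this function follows:
#   normalize_neologd("検索 エンジン 自作 入門 を 買い ました!!!")
#   ->  "検索エンジン自作入門を買いました!!!"
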
CODE_PATTERN = re.compile(r"```.*?```", re.MULTILINE | re.DOTALL)
LINK_PATTERN = re.compile(r"!?\[([^\]\)]+)\]\([^\)]+\)")
IMG_PATTERN = re.compile(r"<img[^>]*>")
URL_PATTERN = re.compile(r"(http|ftp)s?://[^\s]+")
NEWLINES_PATTERN = re.compile(r"(\s*\n\s*)+")


def clean_markdown(markdown_text):
    markdown_text = CODE_PATTERN.sub(r"", markdown_text)
    markdown_text = LINK_PATTERN.sub(r"\1", markdown_text)
    markdown_text = IMG_PATTERN.sub(r"", markdown_text)
    markdown_text = URL_PATTERN.sub(r"", markdown_text)
    markdown_text = NEWLINES_PATTERN.sub(r"\n", markdown_text)
    markdown_text = markdown_text.replace("`", "")
    return markdown_text


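# Example (illustrative): fenced code blocks and bare URLs are dropped, links
# keep only their anchor text, and newline runs collapse to one newline:
#   clean_markdown("See [the docs](https://example.com) for details.\n\n```\nprint('hi')\n```")
#   ->  "See the docs for details.\n"
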
def normalize_text(markdown_text):
    markdown_text = clean_markdown(markdown_text)
    markdown_text = markdown_text.replace("\t", " ")
    markdown_text = normalize_neologd(markdown_text).lower()
    markdown_text = markdown_text.replace("\n", " ")
    return markdown_text


def preprocess_qiita_body(markdown_text):
    return "body: " + normalize_text(markdown_text)[:4000]


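# Example (illustrative): the model input is the normalized, lowercased body
# with a "body: " task prefix, truncated to 4,000 characters:
#   preprocess_qiita_body("Streamlitで作る")  ->  "body: streamlitで作る"
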
def postprocess_title(title):
    return re.sub(r"^title: ", "", title)


st.title("Qiita記事タイトル案生成")  # "Qiita article title suggestion generator"

description_text = st.empty()

if "trained_model" not in st.session_state:
    description_text.text("...モデル読み込み中...")  # "loading the model..."

    trained_model, tokenizer = load_model()
    trained_model.eval()

    # Cache the model and tokenizer in the session state across reruns
    st.session_state.trained_model = trained_model
    st.session_state.tokenizer = tokenizer

trained_model = st.session_state.trained_model
tokenizer = st.session_state.tokenizer

# Whether a GPU is available
USE_GPU = torch.cuda.is_available()

# "Paste in the body of an article and press the title-generation button to
# get 10 title suggestions. Since no GPU is available, generation takes a few
# dozen seconds."
description_text.text("記事の本文をコピペ入力して、タイトル生成ボタンを押すと、タイトル案が10個生成されます。\nGPUが使えないため生成に数十秒かかります。")
qiita_body = st.text_area(label="記事の本文", value="", height=300, max_chars=4000)  # label: "article body"
answer = st.button("タイトル生成")  # "generate titles"

if answer:
    title_fields = st.empty()
    title_fields.markdown("...生成中...")  # "generating..."

    MAX_SOURCE_LENGTH = 512  # maximum number of tokens in the input article body
    MAX_TARGET_LENGTH = 64  # maximum number of tokens in a generated title

    # Preprocess and tokenize the input
    inputs = [preprocess_qiita_body(qiita_body)]
    batch = tokenizer.batch_encode_plus(
        inputs,
        max_length=MAX_SOURCE_LENGTH,
        truncation=True,
        padding="longest",
        return_tensors="pt",
    )

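    # Note (illustrative): with a single input text and return_tensors="pt",
    # batch["input_ids"] and batch["attention_mask"] are tensors of shape
    # (1, source_len) with source_len <= MAX_SOURCE_LENGTH.
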
    input_ids = batch["input_ids"]
    input_mask = batch["attention_mask"]
    if USE_GPU:
        input_ids = input_ids.cuda()
        input_mask = input_mask.cuda()

    # Run generation
    outputs = trained_model.generate(
        input_ids=input_ids,
        attention_mask=input_mask,
        max_length=MAX_TARGET_LENGTH,
        return_dict_in_generate=True,
        output_scores=True,
        temperature=1.0,  # temperature parameter that adds randomness to generation
        num_beams=10,  # beam-search width
        diversity_penalty=1.0,  # penalty that encourages diversity in the outputs
        num_beam_groups=10,  # number of beam-search groups
        num_return_sequences=10,  # number of sequences to generate
        repetition_penalty=1.5,  # penalty against repeating the same text (mode collapse)
    )

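    # Note (illustrative): with num_beams == num_beam_groups ==
    # num_return_sequences == 10, this is diverse beam search: each group
    # advances a single beam, and diversity_penalty penalizes a group for
    # picking tokens other groups already chose at the same step, so the ten
    # returned titles tend to differ from one another.
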
    # Convert the generated token sequences back into strings
    generated_titles = [
        tokenizer.decode(
            ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        for ids in outputs.sequences
    ]

    # Display the generated titles
    titles = "## タイトル案:\n\n"  # "## Title suggestions:"

    for title in generated_titles:
        titles += f"1. {postprocess_title(title)}\n"  # Markdown renumbers the list automatically

    title_fields.markdown(titles)
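
# Usage note (illustrative; the dependency list is an assumption, not from the
# original file): this script is a Streamlit app, so it is launched with the
# Streamlit CLI rather than plain Python, e.g.:
#
#   pip install streamlit torch transformers sentencepiece
#   streamlit run app.py
#
# sentencepiece is assumed here because T5Tokenizer needs it to load this
# model's SentencePiece vocabulary.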