code (string, 1–5.19M chars) | package (string, 1–81 chars) | path (string, 9–304 chars) | filename (string, 4–145 chars)
---|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : ltp_demo
# @Author : LiuYan
# @Time : 2021/3/29 15:23
from ltp import LTP
ltp = LTP()
seg, hidden = ltp.seg(["他叫汤姆去拿外衣。"])
pos = ltp.pos(hidden)
ner = ltp.ner(hidden)
srl = ltp.srl(hidden)
dep = ltp.dep(hidden)
sdp = ltp.sdp(hidden)
sentences = ltp.sent_split(["他叫汤姆去拿外衣。", "汤姆生病了。他去了医院。"])
print(sentences)
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/sequence/regular/ltp_demo.py
|
ltp_demo.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/29 15:23
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/sequence/regular/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : jieba_demo
# @Author : LiuYan
# @Time : 2021/3/29 18:44
import jieba
import jieba.posseg as posseg
txt1 = '''
文本一:
人民网华盛顿3月28日电(记者郑琪)据美国约翰斯·霍普金斯大学疫情实时监测系统显示,截至美东时间3月28日下午6时,
美国已经至少有新冠病毒感染病例121117例,其中包括死亡病例2010例。
与大约24小时前相比,美国确诊病例至少增加了20400例,死亡病例至少增加了466例。
目前美国疫情最为严重的仍是纽约州,共有确诊病例至少52410例。此外,新泽西州有确诊病例11124例,加利福尼亚州有5065例,
密歇根州有4650例,马塞诸塞州有4257例,华盛顿州有4008例。
'''
# accurate mode
seg_list = jieba.cut(txt1, cut_all=False)
# seg_list = jieba.cut_for_search(txt1)
print("jieba分词:" + "/ ".join(seg_list))  # accurate mode
pair_list = posseg.cut(txt1)
tag_list = []
for tag in pair_list:
    pos_word = {}
    pos_word[1] = tag.word
    pos_word[2] = tag.flag
    tag_list.append(pos_word)
print('jieba词性标注:', tag_list)
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/sequence/regular/jieba_demo.py
|
jieba_demo.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : digital_recognition_app
# @Author : LiuYan
# @Time : 2021/4/20 21:00
from base.app.base_app import *
from sequence.model.model_pyltp import LTP
from sequence.regular.regular_digital_recognition import RDR
seq_digital_recognition = Blueprint('/sequence/digital_recognition', __name__)
# model_path = '/data/zutnlp/model/pyltp/ltp_data_v3.4.0'
model_path = '/home/zzsn/liuyan/model/ltp/ltp_data_v3.4.0'
ltp = LTP(model_path=model_path)
rdr = RDR()
@seq_digital_recognition.route('/test', methods=('GET', 'POST'))
def test():
app.logger.info('test -> sequence -> digital_recognition success!')
# logger.info('test -> sequence -> digital_recognition success!')
return 'test -> sequence -> digital_recognition success!'
@seq_digital_recognition.route('/dr/', methods=['POST'])
def digital_recognition():
data = request.get_json()
sentence = data['title'] + data['article']
word_list, tag_list = ltp.pos(sentence=sentence)
number_list, money_list = rdr.digital_sorting(word_list=word_list, tag_list=tag_list)
print('\n数字个数为: {} \n分别是: {}'.format(len(number_list), ' '.join(number_list)))
print('\n金额个数为: {} \n分别是: {}'.format(len(money_list), ' '.join(money_list)))
result_dict = {
# 'title': data['title'],
# 'article': data['article'],
'number_sum': len(number_list),
'number_list': number_list,
'money_sum': len(money_list),
'money_list': money_list
}
app.logger.info(result_dict)
return json.dumps(result_dict, ensure_ascii=False)
@seq_digital_recognition.route('/release/', methods=('GET', 'POST'))
def release():
ltp.release()
app.logger.info('Success release model!')
return 'Success release model!'
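# --- Hedged client sketch (not part of the original file) ---
# Assuming this blueprint is registered with its '/sequence/digital_recognition' prefix
# and the Flask app listens on localhost:5000 (both assumptions), a call could look like:
#
#   import requests
#   resp = requests.post(
#       'http://localhost:5000/sequence/digital_recognition/dr/',
#       json={'title': '标题', 'article': '正文内容'}
#   )
#   print(resp.json())  # {'number_sum': ..., 'number_list': [...], 'money_sum': ..., 'money_list': [...]}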
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/sequence/app/digital_recognition_app.py
|
digital_recognition_app.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/21 10:02
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/sequence/app/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : model_pyltp
# @Author : LiuYan
# @version : v3.4.0
# @Time : 2021/3/31 10:00
import os
from pyltp import Segmentor, Postagger, NamedEntityRecognizer, Parser, SementicRoleLabeller
from pyltp import SentenceSplitter
class LTP(object):
def __init__(self, model_path: str):
super(LTP, self).__init__()
self._model_path = model_path
self._build_model()
def _build_model(self):
self._cws = Segmentor()
self._pos = Postagger()
self._ner = NamedEntityRecognizer()
self._parser = Parser()
self._role_label = SementicRoleLabeller()
self._cws.load(os.path.join(self._model_path, 'cws.model'))
self._pos.load(os.path.join(self._model_path, 'pos.model'))
self._ner.load(os.path.join(self._model_path, 'ner.model'))
self._parser.load(os.path.join(self._model_path, 'parser.model'))
self._role_label.load(os.path.join(self._model_path, 'pisrl.model'))
pass
def split(self, sentence: str) -> list: # 分句
sents = SentenceSplitter.split(sentence)
sents_list = list(sents)
return sents_list
def cws(self, sentence: str) -> list:
word_list = list(self._cws.segment(sentence))
return word_list
def pos(self, sentence: str) -> [list, list]:
word_list = self.cws(sentence=sentence)
tag_list = list(self._pos.postag(word_list))
return word_list, tag_list
def ner(self, sentence: str) -> [list, list]:
word_list, tag_list = self.pos(sentence=sentence)
tag_list = list(self._ner.recognize(word_list, tag_list))
return word_list, tag_list
def parse(self, sentence: str) -> [list, list, list]:
word_list, tag_list = self.pos(sentence=sentence)
arc_list = list(self._parser.parse(word_list, tag_list))
return word_list, tag_list, arc_list
def role_label(self, sentence: str) -> [list, list, list, list]:
word_list, tag_list, arc_list = self.parse(sentence=sentence)
role_list = list(self._role_label.label(word_list, tag_list, arc_list))
return word_list, tag_list, arc_list, role_list
def release(self):
self._cws.release()
self._pos.release()
self._ner.release()
self._parser.release()
self._role_label.release()
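# --- Hedged usage sketch (not part of the original file) ---
# The model directory below is an assumption; it must hold the pyltp v3.4.0 files
# (cws.model, pos.model, ner.model, parser.model, pisrl.model).
if __name__ == '__main__':
    ltp = LTP(model_path='/home/zzsn/liuyan/model/ltp/ltp_data_v3.4.0')
    words, tags, arcs, roles = ltp.role_label(sentence='他叫汤姆去拿外衣。')
    print(words, tags)
    ltp.release()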
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/sequence/model/model_pyltp.py
|
model_pyltp.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/31 9:58
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/sequence/model/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : utils
# @Author : LiuYan
# @Time : 2021/4/16 17:54
from __future__ import unicode_literals, print_function, division
import time
import xlsxwriter
def timeit(f):
def timed(*args, **kw):
ts = time.time()
print('......begin {0:8s}......'.format(f.__name__))
result = f(*args, **kw)
te = time.time()
print('......finish {0:8s}, took:{1:.4f} sec......'.format(f.__name__, te - ts))
return result
return timed
def list2xlsx(result_list=None, xlsx_path=None):
"""
:param result_list: [
{
'id': 1,
'title': 't',
...
}
...
]
:param xlsx_path: '/home/zzsn/liuyan/result/result.xlsx'
:return:
"""
workbook = xlsxwriter.Workbook(xlsx_path)
worksheet = workbook.add_worksheet('sheet1')
worksheet.write_row(row=0, col=0, data=list(result_list[0].keys()))
for row_index, result_dict in enumerate(result_list):
worksheet.write_row(row=row_index + 1, col=0, data=list(result_dict.values()))
workbook.close()
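# --- Hedged usage sketch (not part of the original file) ---
if __name__ == '__main__':
    @timeit
    def build_rows():
        # toy rows; every dict must share the same keys, since the header row
        # is taken from result_list[0].keys()
        return [{'id': 1, 'title': 't1'}, {'id': 2, 'title': 't2'}]

    # the output path is an assumption; any writable .xlsx location works
    list2xlsx(result_list=build_rows(), xlsx_path='result_demo.xlsx')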
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/utils/utils.py
|
utils.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/9 10:45
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/utils/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : log
# @Author : LiuYan
# @Time : 2020/6/21 21:08
import os
import logging
import logging.handlers
from pathlib import Path
__all__ = ['logger']
# User configuration section ↓
import tqdm
LEVEL_COLOR = {
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white',
}
STDOUT_LOG_FMT = '%(log_color)s[%(asctime)s] [%(levelname)s] [%(threadName)s] [%(filename)s:%(lineno)d] %(message)s'
STDOUT_DATE_FMT = '%Y-%m-%d %H:%M:%S'
FILE_LOG_FMT = '[%(asctime)s] [%(levelname)s] [%(threadName)s] [%(filename)s:%(lineno)d] %(message)s'
FILE_DATE_FMT = '%Y-%m-%d %H:%M:%S'
# User configuration section ↑
class ColoredFormatter(logging.Formatter):
COLOR_MAP = {
'black': '30',
'red': '31',
'green': '32',
'yellow': '33',
'blue': '34',
'magenta': '35',
'cyan': '36',
'white': '37',
'bg_black': '40',
'bg_red': '41',
'bg_green': '42',
'bg_yellow': '43',
'bg_blue': '44',
'bg_magenta': '45',
'bg_cyan': '46',
'bg_white': '47',
'light_black': '1;30',
'light_red': '1;31',
'light_green': '1;32',
'light_yellow': '1;33',
'light_blue': '1;34',
'light_magenta': '1;35',
'light_cyan': '1;36',
'light_white': '1;37',
'light_bg_black': '100',
'light_bg_red': '101',
'light_bg_green': '102',
'light_bg_yellow': '103',
'light_bg_blue': '104',
'light_bg_magenta': '105',
'light_bg_cyan': '106',
'light_bg_white': '107',
}
def __init__(self, fmt, datefmt):
super(ColoredFormatter, self).__init__(fmt, datefmt)
def parse_color(self, level_name):
color_name = LEVEL_COLOR.get(level_name, '')
if not color_name:
return ""
color_value = []
color_name = color_name.split(',')
for _cn in color_name:
color_code = self.COLOR_MAP.get(_cn, '')
if color_code:
color_value.append(color_code)
return '\033[' + ';'.join(color_value) + 'm'
def format(self, record):
record.log_color = self.parse_color(record.levelname)
message = super(ColoredFormatter, self).format(record) + '\033[0m'
return message
class TqdmLoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def _get_logger(log_to_file=True, log_filename='default.log', log_level='DEBUG'):
_logger = logging.getLogger(__name__)
stdout_handler = logging.StreamHandler()
stdout_handler.setFormatter(
ColoredFormatter(
fmt=STDOUT_LOG_FMT,
datefmt=STDOUT_DATE_FMT,
)
)
_logger.addHandler(stdout_handler)
# _logger.setLevel(logging.INFO)
# _logger.addHandler(TqdmLoggingHandler())
if log_to_file:
# _tmp_path = os.path.dirname(os.path.abspath(__file__))
# _tmp_path = os.path.join(_tmp_path, '../logs/{}'.format(log_filename))
_project_path = os.path.dirname(os.getcwd())
_tmp_path = os.path.join(_project_path, 'logs')
Path(_tmp_path).mkdir(parents=True, exist_ok=True)
_tmp_path = os.path.join(_tmp_path, log_filename)
file_handler = logging.handlers.TimedRotatingFileHandler(_tmp_path, when='midnight', backupCount=30)
file_formatter = logging.Formatter(
fmt=FILE_LOG_FMT,
datefmt=FILE_DATE_FMT,
)
file_handler.setFormatter(file_formatter)
_logger.addHandler(file_handler)
_logger.setLevel(log_level)
return _logger
logger = _get_logger(log_to_file=False)
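# --- Hedged usage sketch (not part of the original file) ---
# Typical use elsewhere in the package:
#   from utils.log import logger
#   logger.info('goes to stdout via ColoredFormatter')
#   logger.error('rendered in red; pass log_to_file=True for a rotating file log as well')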
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/utils/log.py
|
log.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/13 16:59
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : fast_text_config
# @Author : LiuYan
# @Time : 2021/4/19 10:46
import dynamic_yaml
import torch
from base.config.base_config import BaseConfig
class FastTextConfig(BaseConfig):
def __init__(self, config_path):
super(FastTextConfig, self).__init__()
self._config_path = config_path
pass
def load_config(self):
with open(self._config_path, mode='r', encoding='UTF-8') as f:
config = dynamic_yaml.load(f)
config.device = torch.device(config.device if torch.cuda.is_available() else 'cpu')
return config
pass
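# --- Hedged config sketch (not part of the original file) ---
# fast_text_config.yml is not included in this listing; the keys below are inferred
# from how FastTextRunner reads the loaded config, so names and paths are assumptions.
#
#   status: train                # or 'test' / 'pred'
#   device: cuda:0               # falls back to 'cpu' when CUDA is unavailable
#   data:
#     train_path: /home/zzsn/liuyan/data/f_zp_gp/train.txt
#     valid_path: /home/zzsn/liuyan/data/f_zp_gp/valid.txt
#     test_path: /home/zzsn/liuyan/data/f_zp_gp/valid.txt
#   learn:
#     dir:
#       saved: /home/zzsn/liuyan/model/f_zp_gp
#       save_model: /home/zzsn/liuyan/model/f_zp_gp/fast_text.bin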
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/config/fast_text_config.py
|
fast_text_config.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/15 10:31
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/config/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : runner_fast_text
# @Author : LiuYan
# @Time : 2021/4/15 16:44
import fasttext
from pathlib import Path
from utils.log import logger
from utils.utils import timeit
from base.runner.base_runner import BaseRunner
from classification.config.fast_text_config import FastTextConfig
from classification.evaluation.classify_evaluator import ClassifyEvaluator
from classification.utils.utils import *
class FastTextRunner(BaseRunner):
def __init__(self, config_path: str):
super(FastTextRunner, self).__init__()
self._config_path = config_path
self._config = None
self._train_dataloader = None
self._valid_dataloader = None
self._test_dataloader = None
self._model = None
self._loss = None
self._optimizer = None
self._evaluator = None
self._build()
@timeit
def _build(self):
self._build_config()
self._build_data()
self._build_model()
self._build_loss()
self._build_optimizer()
self._build_evaluator()
pass
def _build_config(self):
self._config = FastTextConfig(config_path=self._config_path).load_config()
pass
def _build_data(self):
self._train_path = self._config.data.train_path
self._valid_path = self._config.data.valid_path
self._test_path = self._config.data.test_path
pass
def _build_model(self):
if self._config.status in ('test', 'pred'):
self._load_checkpoint()
pass
def _build_loss(self):
pass
def _build_optimizer(self):
pass
def _build_evaluator(self):
self._evaluator = ClassifyEvaluator()
pass
@timeit
def train(self):
self._model = fasttext.train_supervised(
input=self._train_path, autotuneValidationFile=self._test_path,
autotuneDuration=3000, autotuneModelSize='200M'
)
self._save_checkpoint(epoch=100)
self._valid(epoch=100)
pass
def _train_epoch(self, epoch: int):
pass
def _valid(self, epoch: int):
with open(self._valid_path, encoding='utf-8') as file:
self._valid_dataloader = file.readlines()
labels = []
pre_labels = []
for text in self._valid_dataloader:
label = text.replace('__label__', '')[0]
text = text.replace('__label__', '')[1:-1]
labels.append(int(label))
# print(model.predict(text))
pre_label = self._model.predict(text)[0][0].replace('__label__', '')
# print(pre_label)
pre_labels.append(int(pre_label))
# print(model.predict(text))
# p = precision_score(labels, pre_labels)
# r = recall_score(labels, pre_labels)
# f1 = f1_score(labels, pre_labels)
p, r, f1 = self._evaluator.evaluate(true_list=labels, pred_list=pre_labels)
logger.info('P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(p, r, f1))
pass
def test(self):
self._valid(epoch=100)
pass
def pred(self, id: int, title: str, content: str):
text = (title + '。') * 2 + content
text = clean_txt(raw=clean_tag(text=text))
if type(text) is str:
text = text.replace('\n', '').replace('\r', '').replace('\t', '')
pre_label = self._model.predict(text)[0][0].replace('__label__', '')
if pre_label == '0':
label = '非招聘股票'
elif pre_label == '1':
label = '招聘信息'
else:
label = '股票信息'
return {
'handleMsg': 'success',
'isHandleSuccess': True,
'logs': None,
'resultData': {
'id': id,
'label': label
}
}
pass
def _display_result(self, epoch: int):
pass
@timeit
def _save_checkpoint(self, epoch: int):
Path(self._config.learn.dir.saved).mkdir(parents=True, exist_ok=True)
self._model.save_model(self._config.learn.dir.save_model)
pass
def _load_checkpoint(self):
self._model = fasttext.load_model(self._config.learn.dir.save_model)
pass
if __name__ == '__main__':
ft_config_path = '../config/fast_text_config.yml'
runner = FastTextRunner(config_path=ft_config_path)
# runner.train()
runner.test()
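# --- Hedged usage sketch (not part of the original file) ---
# A single prediction (id/title/content below are placeholder values):
# result = runner.pred(id=1, title='某公司发布招聘启事', content='岗位职责与任职要求……')
# print(result['resultData']['label'])  # '非招聘股票' / '招聘信息' / '股票信息'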
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/runner/runner_fast_text.py
|
runner_fast_text.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/15 10:31
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/runner/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/15 10:33
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/evaluation/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : eval_classification
# @Author : LiuYan
# @Time : 2021/4/20 21:19
from base.evaluation.base_evaluator import BaseEvaluator
class ClassifyEvaluator(BaseEvaluator):
def __init__(self):
super(ClassifyEvaluator, self).__init__()
pass
def evaluate(self, true_list: list, pred_list: list) -> tuple:
TP, FP = 0, 0
TP_FN = len(true_list)
for true, pred in zip(true_list, pred_list):
if true == pred:
TP += 1
else:
FP += 1
p = TP / (TP + FP) if (TP + FP) != 0 else 0
r = TP / TP_FN if TP_FN != 0 else 0
f1 = (2 * p * r) / (p + r) if (p + r) != 0 else 0
return p, r, f1
pass
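# --- Hedged usage sketch (not part of the original file) ---
# Note: with this definition p == r whenever the two lists have equal length,
# since TP + FP == len(pred_list) == len(true_list) == TP_FN.
if __name__ == '__main__':
    evaluator = ClassifyEvaluator()
    p, r, f1 = evaluator.evaluate(true_list=[0, 1, 2, 1], pred_list=[0, 1, 1, 1])
    print('P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(p, r, f1))  # 0.7500 each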
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/evaluation/classify_evaluator.py
|
classify_evaluator.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/21 9:59
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/app/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : f_zp_gp_app
# @Author : LiuYan
# @Time : 2021/4/20 20:51
from base.app.base_app import *
from classification.runner.runner_fast_text import FastTextRunner
classification_f_zp_gp = Blueprint('/classification/f_zp_gp', __name__)
ft_config_path = '../classification/config/fast_text_config.yml'
runner = FastTextRunner(config_path=ft_config_path)
@classification_f_zp_gp.route('/test', methods=('GET', 'POST'))
def test():
logger.info('test -> classify -> f_zp_gp success!')
return 'test -> classify -> f_zp_gp success!'
@classification_f_zp_gp.route('/classify/', methods=['POST'])
def classify():
"""
-> data:
:return:
"""
data = request.get_json()
id = data['id']
title = data['title']
content = data['content']
result_dict = runner.pred(
id=id,
title=title,
content=content,
)
# logger.info(result_dict)
app.logger.info(result_dict)
return json.dumps(result_dict, ensure_ascii=False)
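# --- Hedged client sketch (not part of the original file) ---
# Assuming the blueprint is registered with its '/classification/f_zp_gp' prefix and the
# app listens on localhost:5000 (both assumptions):
#
#   import requests
#   resp = requests.post(
#       'http://localhost:5000/classification/f_zp_gp/classify/',
#       json={'id': 1, 'title': '标题', 'content': '正文'}
#   )
#   print(resp.json()['resultData']['label'])  # '非招聘股票' / '招聘信息' / '股票信息'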
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/app/f_zp_gp_app.py
|
f_zp_gp_app.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : data_stats
# @Author : LiuYan
# @Time : 2021/4/15 16:52
import pandas as pd
from collections import Counter
def stat_fx():
"""
:return:
"""
data_list = pd.read_excel('sample/风险训练集.xlsx')
label_list = data_list['label']
print(Counter(label_list))
if __name__ == '__main__':
stat_fx()
pass
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/data/data_stats.py
|
data_stats.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/15 10:31
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/data/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : data_process
# @Author : LiuYan
# @Time : 2021/4/15 17:39
import os
import random
import pandas as pd
from pandas import DataFrame
from sklearn.model_selection import train_test_split
from classification.utils.utils import *
from doc_similarity.model.cosine_similarity import CosineSimilarity
from doc_similarity.model.jaccard import JaccardSimilarity
from doc_similarity.model.levenshtein import LevenshteinSimilarity
from doc_similarity.model.min_hash import MinHashSimilarity
from doc_similarity.model.sim_hash import OldSimHashSimilarity
from utils.utils import *
root_path = '/home/zzsn/liuyan/word2vec/doc_similarity'
stop_words_path = os.path.join(root_path, 'stop_words.txt')
cos_sim = CosineSimilarity(stop_words_path=stop_words_path)
jac_sim = JaccardSimilarity(stop_words_path=stop_words_path)
lev_sim = LevenshteinSimilarity(stop_words_path=stop_words_path)
min_hash_sim = MinHashSimilarity(stop_words_path=stop_words_path)
old_sim_hash_sim = OldSimHashSimilarity()
def remove_repetition(path: str) -> list:
"""
根据文章标题进行数据去重清洗
:param path:
:return:
"""
data_loader = pd.read_excel(path)
delete_num = 0
article_list = []
for index in range(len(data_loader['id'])):
title = data_loader['title'][index].replace('\n', '').replace('\r', '').replace('\t', '')
if judge_sim(article_list=article_list, title=title):
print('Add : \tindex: {} \t id: {} \t title: {}'.format(
index, data_loader['id'][index], data_loader['title'][index])
)
article_list.append({
'id': data_loader['id'][index],
'title': title,
'content': data_loader['content'][index].replace(
'\n', ''
).replace('\r', '').replace('\t', ''),
'origin': data_loader['origin'][index],
'source_address': data_loader['sourceaddress'][index]
})
else:
delete_num += 1
print('Delete: \tindex: {} \t id: {} \t title: {}'.format(
index, data_loader['id'][index], data_loader['title'][index])
)
print('Delete: \t{}'.format(delete_num))
return article_list
pass
def judge_sim(article_list: list, title: str) -> bool:
if len(article_list) < 1:
return True
if len(article_list) > 100:
article_list = article_list[-100:]  # compare only against the 100 most recent articles
for article in article_list:
if cos_sim.calculate(article['title'], title) > 0.9:
print('{} --- {}'.format(title, article['title']))
return False
return True
pass
def process_txt(data_loader: DataFrame, train_file_path: str, valid_file_path: str):
articles = data_loader['article']
labels = data_loader['label']
article_list = []
for article, label in zip(articles, labels):
if type(article) is str:
text = article.replace('\n', '').replace('\r', '').replace('\t', '')
else:
print('{} is not str!'.format(article))
continue
text = seg(text=text, sw=stop_words(path='sample/stop_words.txt'))
text = '__label__{} {}'.format(label, text)
article_list.append(text)
# for index in range(len(data_loader['article'])):
# content = data_loader['article'][index].replace('\n', '').replace('\r', '').replace('\t', '')
# # text = seg(content, NLPTokenizer, stop_words())
# text = seg(content, stop_words(path='sample/stop_words.txt'))
# text = '__label__1 {}'.format(text)
# # text = transform_data(text, data_loader['label'][index])
# article_list.append(text)
train_data, valid_data = train_test_split(
article_list, train_size=0.8, random_state=2021, shuffle=True
)
with open(
train_file_path, 'w', encoding='utf-8'
) as train_file, open(
valid_file_path, 'w', encoding='utf-8'
) as valid_file:
for train in train_data:
train_file.write(train + '\n')
for valid in valid_data:
valid_file.write(valid + '\n')
pass
def process_fx(path='sample/风险训练集.xlsx'):
data_list = pd.read_excel(path)
data_list['article'] = (data_list['title'] + '。') * 2 + data_list['content']
pass
def process_f_zp_gp(path: str, train_file_path: str, valid_file_path: str):
data_loader = pd.read_excel(path)
# data_loader['article'] = '{}。{}'.format(data_loader['title'] * 2, data_loader['content'])
data_loader['article'] = data_loader['title'] * 2 + '。' + data_loader['content']
data_loader['article'] = data_loader.article.apply(clean_tag).apply(clean_txt)
process_txt(
data_loader=data_loader,
train_file_path=train_file_path,
valid_file_path=valid_file_path
)
pass
def merge_f_zp_gp(f_path: str, zp_path: str, gp_path: str, result_path: str):
result_list = []
f_list = read_excel_random(f_path, label=0)
zp_list = read_excel_random(zp_path, label=1)
gp_list = read_excel_random(gp_path, label=2)
result_list.extend(f_list)
result_list.extend(zp_list)
result_list.extend(gp_list[:5000])
df = pd.DataFrame(result_list)
df.to_excel(result_path)
pass
def read_excel_random(path: str, label: int) -> list:
df = pd.read_excel(path)
result_list = []
titles, contents = df['title'], df['content']
for title, content in zip(titles, contents):
result_list.append({
'title': title,
'content': content,
'label': label
})
random.shuffle(result_list)
return result_list
# return result_list[:5000] if len(result_list) > 5000 else result_list
pass
if __name__ == '__main__':
# Deduplicate the source corpus
# article_list = remove_repetition(path='sample/股票信息.xlsx')
# df = pd.DataFrame(article_list)
# df.to_excel('sample/去重股票信息.xlsx')
# merge
# merge_f_zp_gp(
# f_path='sample/去重非招聘股票.xlsx',
# zp_path='sample/去重招聘信息.xlsx',
# gp_path='sample/去重股票信息.xlsx',
# result_path='sample/去重_F_ZP_GP.xlsx'
# )
# excel2txt: prepare the training data
# process_fx()
process_f_zp_gp(
path='sample/去重_F_ZP_GP.xlsx',
train_file_path='/home/zzsn/liuyan/data/f_zp_gp/train.txt',
valid_file_path='/home/zzsn/liuyan/data/f_zp_gp/valid.txt'
)
# list2xlsx(result_list=article_list, xlsx_path='sample/去重招聘信息.xlsx')
pass
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/data/data_process.py
|
data_process.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : utils
# @Author : LiuYan
# @Time : 2021/4/16 16:40
import re
import jieba
from bs4 import BeautifulSoup
def clean_tag(text):
"""
清除网页标签
:param text:
:return:
"""
# print(text)
bs = BeautifulSoup(text, 'html.parser')
# print(bs.text)
return bs.text
def clean_txt(raw):
"""
去除表情
:param raw:
:return:
"""
res = re.compile(u'[\U00010000-\U0010ffff\uD800-\uDBFF\uDC00-\uDFFF]')
return res.sub('', raw)
def seg(text, sw):
"""
分词,NLPTokenizer会基于全部命名实体识别和词性标注进行分词
:param text:
:param NLPTokenizer:
:param sw:
:return:
"""
# text = ' '.join([i.word for i in NLPTokenizer.segment(text) if i.word.strip() and i.word not in sw])
text = ' '.join([i.strip() for i in jieba.cut(text) if i.strip() and i not in sw])
return text
def stop_words(path: str) -> list:
"""
去除停用词
:return:
"""
with open(path, 'r', encoding='utf-8') as swf:
return [line.strip() for line in swf]
def segment_para(text):
"""
:param text:
:return:
"""
split_pattern = re.compile(r'\n|。|?|!|\?|\!|\s')
global_sentences = split_pattern.split(text)
global_sentences = ''.join([str(i).strip() + '。' for i in global_sentences if len(i) >= 13])
return global_sentences
def cut_sent(para):
"""
:param para:
:return:
"""
para = re.sub('([。!?\?])([^”’])', r"\1\n\2", para)  # single-character sentence terminators
para = re.sub('(\.{6})([^”’])', r"\1\n\2", para)  # English ellipsis
para = re.sub('(\…{2})([^”’])', r"\1\n\2", para)  # Chinese ellipsis
para = re.sub('([。!?\?][”’])([^,。!?\?])', r'\1\n\2', para)
# A closing quote ends the sentence only when a terminator precedes it, so the \n goes
# after the quote; note that the rules above carefully keep the quotes in place.
para = para.rstrip()  # strip any redundant trailing \n at the end of the paragraph
return para.split("\n")
def transform_data(text, label):
"""
:param text:
:param label:
:return:
"""
fasttext_line = "__label__{} {}".format(label, text)
return fasttext_line
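# --- Hedged usage sketch (not part of the original file) ---
if __name__ == '__main__':
    raw_html = '<p>某公司今日发布招聘启事,招聘算法工程师若干名。</p>'  # placeholder input
    text = clean_txt(clean_tag(raw_html))
    # 'sample/stop_words.txt' mirrors the path used in data_process.py; adjust as needed
    tokens = seg(text=text, sw=stop_words(path='sample/stop_words.txt'))
    print(transform_data(tokens, label=1))  # __label__1 <space-joined tokens>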
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/utils/utils.py
|
utils.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/16 16:40
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/utils/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : text_cnn
# @Author : LiuYan
# @Time : 2021/4/19 11:10
import torch
from torch import nn
from torch.nn import functional as F
from base.model.base_model import BaseModel
class TextCNN(BaseModel):
def __init__(self, model_config):
super(TextCNN, self).__init__()
self._config = model_config
self._device = self._config.device
self._num_vocab = self._config.data.num_vocab
self._num_category = self._config.data.num_category
self._dim_embed = self._config.model.dim_embed
self._dim_hidden = self._config.model.dim_hidden
self._size_kernel = self._config.model.size_kernel
self._rate_dropout = self._config.learn.rate_dropout
# for embedding
self.embed = nn.Embedding(self._num_vocab, self._dim_embed)
# for conv
self.conv = nn.Conv1d(self._dim_embed, self._dim_hidden, self._size_kernel)
self.dropout = nn.Dropout(p=self._rate_dropout)
# for pool
self.pool = nn.MaxPool1d(kernel_size=self._size_kernel)
# FC layer
self.fc = nn.Linear(self._dim_hidden, self._num_category)
pass
def forward(self, dict_inputs: dict) -> dict:
dict_outputs = dict()
(text, length), label = dict_inputs
# batch_size = len(label.T)
input_embed = torch.transpose(self.embed(text), 0, 1)
input_feature = input_embed.permute(0, 2, 1).contiguous()
# input_feature = self.dropout(input_feature)
output_conv = self.conv(input_feature)
output_conv = self.dropout(output_conv)
output_pool = torch.squeeze(F.max_pool1d(output_conv, output_conv.size(2)), dim=-1)
output_pool = self.dropout(output_pool)
outputs = self.fc(output_pool)
dict_outputs['outputs'] = outputs
output_predicts = torch.argmax(outputs, dim=-1)
dict_outputs['predicts'] = output_predicts
dict_outputs['labels'] = label
return dict_outputs
pass
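# --- Hedged smoke-test sketch (not part of the original file) ---
# The real config comes from dynamic_yaml; here a nested SimpleNamespace stands in,
# so every value below is an assumption (and BaseModel is assumed to be an nn.Module).
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(
        device=torch.device('cpu'),
        data=SimpleNamespace(num_vocab=1000, num_category=3),
        model=SimpleNamespace(dim_embed=64, dim_hidden=128, size_kernel=3),
        learn=SimpleNamespace(rate_dropout=0.5)
    )
    model = TextCNN(cfg)
    text = torch.randint(0, 1000, (20, 4))   # (seq_len, batch_size)
    label = torch.randint(0, 3, (4,))
    out = model(((text, None), label))       # forward unpacks ((text, length), label)
    print(out['outputs'].shape, out['predicts'])  # torch.Size([4, 3]), tensor of 4 class ids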
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/model/text_cnn.py
|
text_cnn.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/15 10:31
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/classification/model/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/11 10:52
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/relation_extraction/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/20 14:37
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/relation_extraction/config/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : re_demo
# @Author : LiuYan
# @Time : 2021/3/20 14:37
import opennre
model = opennre.get_model('wiki80_bert_softmax')
result = model.infer(
{
'text': 'He was the son of Máel Dúin mac Máele Fithrich, and grandson of the high king Áed Uaridnach (died 612).',
'h': {'pos': (18, 46)},
't': {'pos': (78, 91)}
}
)
print(result)
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/relation_extraction/runner/re_demo.py
|
re_demo.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/20 14:37
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/relation_extraction/runner/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : data_stats
# @Author : LiuYan
# @Time : 2021/3/11 11:12
import pandas as pd
total_list = ['资讯 研究热词', '研究领域 地域',
'研究领域 企业',
'研究领域 资讯',
'研究领域 专家',
'研究领域 领导',
'研究领域 研究领域',
'地域 资讯',
'专家 地域',
'专家 企业',
'企业 资讯',
'专家 专家',
'专家 资讯',
'领导 企业',
'领导 地域',
'领导 专家',
'领导 资讯',
'领导 领导',
'企业 企业']
def stat(data_path: str):
total_dict = {
'资讯 研究热词': [],
'研究领域 地域': [],
'研究领域 企业': [],
'研究领域 资讯': [],
'研究领域 专家': [],
'研究领域 领导': [],
'研究领域 研究领域': [],
'地域 资讯': [],
'专家 地域': [],
'专家 企业': [],
'企业 资讯': [],
'专家 专家': [],
'专家 资讯': [],
'领导 企业': [],
'领导 地域': [],
'领导 专家': [],
'领导 资讯': [],
'领导 领导': [],
'企业 企业': []
}
with open(data_path, 'r', encoding='utf-8') as f:
lines = f.readlines()
le_list, le_label_list, re_list, ri_list, ri_label_list, id_list, bo_list = [], [], [], [], [], [], []
data_list = []
double_list = []
for line in lines:
list_ = line.strip('\n').split(',')
left = list_[0].strip()[1: -1]
left_label = list_[1].strip()[1: -1]
relation = list_[2].strip()[1: -1]
right = list_[3].strip()[1: -1]
right_label = list_[4].strip()[1: -1]
corpusID = list_[5].strip()
bool_ = True if list_[6].strip()[1: -1] == '1' else False
le_list.append(left)
le_label_list.append(left_label)
re_list.append(relation)
ri_list.append(right)
ri_label_list.append(right_label)
# id_list.append(corpusID)
bo_list.append(bool_)
double_list.append(left_label + ' ' + right_label)
# data_list.append({
# 'left': left,
# 'left_label': left_label,
# 'relation': relation,
# 'right': right,
# 'right_label': right_label,
# 'corpusID': corpusID,
# 'valid': bool_
# })
bool_dt = False
for double_type in total_list:
if bool_dt:
break
type_L, type_R = double_type.split(' ')
if left_label == type_L and right_label == type_R:
total_dict[double_type].append({
'left': left,
'left_label': left_label,
'relation': relation,
'right': right,
'right_label': right_label,
'corpusID': corpusID,
'bool': bool_
})
bool_dt = True
elif left_label == type_R and right_label == type_L:
total_dict[double_type].append({
'left': left,
'left_label': left_label,
'relation': relation,
'right': right,
'right_label': right_label,
'corpusID': corpusID,
'bool': bool_
})
bool_dt = True
if not bool_dt:
print('Warning: label pair not covered by total_list: {} {}'.format(left_label, right_label))
# result_re = pd.value_counts(re_list)
result_bo = pd.value_counts(bo_list)
# result_double = pd.value_counts(double_list)
# print(result_re)
print(result_bo)
# print(result_double)
return total_dict
def stats_re(total_dict: dict):
for double_type in total_list:
type_list = total_dict[double_type]
re_list = []
for type_dict in type_list:
if type_dict['bool']:
re_list.append(type_dict['relation'])
print('{}: \n{}\n'.format(double_type, pd.value_counts(re_list)))
if __name__ == '__main__':
data_path = '/home/zutnlp/zutnlp_student_2017/liuyan/datasets/zzsn/re/实体标签.csv'
total_dict = stat(data_path=data_path)
stats_re(total_dict=total_dict)
pass
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/relation_extraction/data/data_stats.py
|
data_stats.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/11 11:12
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/relation_extraction/data/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/20 14:37
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/relation_extraction/model/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/11 17:22
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/11 19:25
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/config/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : runner
# @Author : LiuYan
# @Time : 2021/3/23 9:59
from __future__ import print_function
import os
import time
import sys
import random
import torch
import gc
import xlsxwriter
import torch.optim as optim
import numpy as np
from doc_event.model.seqlabel import SeqLabel
from doc_event.data.data_loader import Data
from doc_event.evaluate.eval_entity import eval_entity
try:
import cPickle as pickle
except ImportError:
import pickle
seed_num = 42
random.seed(seed_num)
torch.manual_seed(seed_num)
np.random.seed(seed_num)
torch.cuda.manual_seed_all(seed_num)
torch.backends.cudnn.deterministic = True
def data_initialization(data):
data.build_alphabet(data.train_dir)
data.build_alphabet(data.dev_dir)
data.build_alphabet(data.test_dir)
data.fix_alphabet()
def predict_check(pred_variable, gold_variable, mask_variable):
"""
input:
pred_variable (batch_size, sent_len): pred tag result, in numpy format
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
"""
pred = pred_variable.cpu().data.numpy()
gold = gold_variable.cpu().data.numpy()
mask = mask_variable.cpu().data.numpy()
overlaped = (pred == gold)
right_token = np.sum(overlaped * mask)
total_token = mask.sum()
# print("right: %s, total: %s"%(right_token, total_token))
return right_token, total_token
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover):
"""
input:
pred_variable (batch_size, sent_len): pred tag result
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
"""
pred_variable = pred_variable[word_recover]
gold_variable = gold_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = gold_variable.size(0)
seq_len = gold_variable.size(1)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
gold_tag = gold_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
gold_label = []
for idx in range(batch_size):
pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
assert (len(pred) == len(gold))
pred_label.append(pred)
gold_label.append(gold)
return pred_label, gold_label
def lr_decay(optimizer, epoch, decay_rate, init_lr):
lr = init_lr / (1 + decay_rate * epoch)
print(' Learning rate is set as:', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
def evaluate(data, model, name):
if name == 'train':
instance_texts, instances = data.train_texts, data.train_Ids  # train_texts needed below when recovering sequences
elif name == 'dev':
instance_texts, instances = data.dev_texts, data.dev_Ids
elif name == 'test':
instance_texts, instances = data.test_texts, data.test_Ids
else:
print('Error: wrong evaluate name,', name)
exit(1)
right_token = 0
whole_token = 0
pred_results = []
gold_results = []
sequences, doc_ids = [], []
# set model in eval model
model.eval()
batch_size = data.HP_batch_size
start_time = time.time()
train_num = len(instances)
total_batch = train_num // batch_size + 1
for batch_id in range(total_batch):
start = batch_id * batch_size
end = (batch_id + 1) * batch_size
if end > train_num:
end = train_num
instance = instances[start:end]
instance_text = instance_texts[start:end]
if not instance:
continue
batch_word, batch_word_len, batch_word_recover, list_sent_words_tensor, batch_label, mask = batchify_sequence_labeling_with_label(
instance, data.HP_gpu, False)
tag_seq = model(batch_word, batch_word_len, list_sent_words_tensor, mask)
pred_label, gold_label = recover_label(tag_seq, batch_label, mask, data.label_alphabet, batch_word_recover)
pred_results += pred_label
gold_results += gold_label
sequences += [item[0] for item in instance_text]
doc_ids += [item[-1] for item in instance_text]
# import ipdb; ipdb.set_trace()
decode_time = time.time() - start_time
speed = len(instances) / decode_time
# acc, p, r, f = get_ner_fmeasure(gold_results, pred_results, data.tagScheme)
# p, r, f = get_macro_avg(sequences, pred_results, doc_ids)
labels = list()
for label in data.label_alphabet.instances:
labels.append(label)
labels.remove('O')
from sklearn.metrics import classification_report
tag_true_all, tag_pred_all, text_all = list(), list(), list()
for gold_list, pred_list, seq_list in zip(gold_results, pred_results, sequences):
tag_true_all.extend(gold_list)
tag_pred_all.extend(pred_list)
text_all.extend(seq_list)
stat_info = classification_report(tag_true_all, tag_pred_all, labels=labels, output_dict=True)
# print(stat_info)
macro_avg = stat_info['macro avg']
p, r, f1 = macro_avg['precision'], macro_avg['recall'], macro_avg['f1-score']
print('macro avg precision: %.4f, recall: %.4f, f1-score: %.4f' % (p, r, f1))
# merge
result_true = merge(seq_lists=sequences, tag_lists=gold_results, doc_ids=doc_ids)
result_pred = merge(seq_lists=sequences, tag_lists=pred_results, doc_ids=doc_ids)
return speed, p, r, f1, pred_results, result_true, result_pred
def merge(seq_lists, tag_lists, doc_ids):
# merge the result [sequences, pred_results, doc_ids]
doc_id_ = None
text_all, tag_all = list(), list()
text, tag = [], []
for text_list, tag_list, doc_id in zip(seq_lists, tag_lists, doc_ids):
if doc_id_ is None or doc_id_ == doc_id:
doc_id_ = doc_id
text.extend(text_list)
tag.extend(tag_list)
else:
text_all.append(text)
tag_all.append(tag)
doc_id_ = doc_id
text = text_list
tag = tag_list
text_all.append(text)
tag_all.append(tag)
return [text_all, tag_all]
def batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train=True):
"""
input: list of words, chars and labels, various length. [[words, features, chars, labels],[words, features, chars,labels],...]
words: word ids for one sentence. (batch_size, sent_len)
labels: label ids for one sentence. (batch_size, sent_len)
output:
zero padding for word and char, with their batch length
word_seq_tensor: (batch_size, max_sent_len) Variable
word_seq_lengths: (batch_size,1) Tensor
label_seq_tensor: (batch_size, max_sent_len)
mask: (batch_size, max_sent_len)
"""
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
sent_words = [sent[1] for sent in input_batch_list]
labels = [sent[2] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
label_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
mask = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).bool()
for idx, (seq, label, seqlen) in enumerate(zip(words, labels, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
label_seq_tensor[idx, :seqlen] = torch.LongTensor(label)
mask[idx, :seqlen] = torch.Tensor([1] * seqlen)
word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)
word_seq_tensor = word_seq_tensor[word_perm_idx]
label_seq_tensor = label_seq_tensor[word_perm_idx]
mask = mask[word_perm_idx]
_, word_seq_recover = word_perm_idx.sort(0, descending=False)
list_sent_words_tensor = []
for sent_words_one_example in sent_words:
one_example_list = []
for sent in sent_words_one_example:
sent_tensor = torch.zeros((1, len(sent)), requires_grad=if_train).long()
sent_tensor[0, :len(sent)] = torch.LongTensor(sent)
if gpu:
one_example_list.append(sent_tensor.cuda())
else:
one_example_list.append(sent_tensor)
list_sent_words_tensor.append(one_example_list)
word_perm_idx = word_perm_idx.data.numpy().tolist()
list_sent_words_tensor_perm = []
for idx in word_perm_idx:
list_sent_words_tensor_perm.append(list_sent_words_tensor[idx])
if gpu:
word_seq_tensor = word_seq_tensor.cuda()
word_seq_lengths = word_seq_lengths.cuda()
word_seq_recover = word_seq_recover.cuda()
label_seq_tensor = label_seq_tensor.cuda()
mask = mask.cuda()
return word_seq_tensor, word_seq_lengths, word_seq_recover, list_sent_words_tensor_perm, label_seq_tensor, mask
def train(data):
print('Training model...')
data.show_data_summary()
save_data_name = data.model_dir + '.dset'
data.save(save_data_name)
model = SeqLabel(data)
if data.optimizer.lower() == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=data.HP_lr, momentum=data.HP_momentum, weight_decay=data.HP_l2)
elif data.optimizer.lower() == 'adagrad':
optimizer = optim.Adagrad(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == 'adadelta':
optimizer = optim.Adadelta(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "rmsprop":
optimizer = optim.RMSprop(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == 'adam':
optimizer = optim.Adam(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
else:
print('Optimizer illegal: %s' % (data.optimizer))
exit(1)
best_dev = -10
best_epoch = -10
# start training
for idx in range(data.HP_iteration):
epoch_start = time.time()
temp_start = epoch_start
print('\nEpoch: %s/%s' % (idx + 1, data.HP_iteration))
if data.optimizer == 'SGD':
optimizer = lr_decay(optimizer, idx, data.HP_lr_decay, data.HP_lr)
instance_count = 0
sample_id = 0
sample_loss = 0
total_loss = 0
right_token = 0
whole_token = 0
random.shuffle(data.train_Ids)
print('Shuffle: first input word list:', data.train_Ids[0][0])
# set model in train model
model.train()
model.zero_grad()
batch_size = data.HP_batch_size
train_num = len(data.train_Ids)
total_batch = train_num // batch_size + 1
for batch_id in range(total_batch):
start = batch_id * batch_size
end = (batch_id + 1) * batch_size
if end > train_num:
end = train_num
instance = data.train_Ids[start: end]
if not instance:
continue
batch_word, batch_word_len, batch_word_recover, list_sent_words_tensor, batch_label, mask = batchify_sequence_labeling_with_label(
instance, data.HP_gpu, True)
instance_count += 1
loss, tag_seq = model.calculate_loss(batch_word, batch_word_len, list_sent_words_tensor, batch_label, mask)
right, whole = predict_check(tag_seq, batch_label, mask)
right_token += right
whole_token += whole
# print("loss:",loss.item())
sample_loss += loss.item()
total_loss += loss.item()
if end % 500 == 0:
temp_time = time.time()
temp_cost = temp_time - temp_start
temp_start = temp_time
print(' Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f' % (
end, temp_cost, sample_loss, right_token, whole_token, (right_token + 0.) / whole_token))
if sample_loss > 1e8 or str(sample_loss) == 'nan':
print('ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....')
exit(1)
sys.stdout.flush()
sample_loss = 0
loss.backward()
optimizer.step()
model.zero_grad()
temp_time = time.time()
temp_cost = temp_time - temp_start
print(' Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f' % (
end, temp_cost, sample_loss, right_token, whole_token, (right_token + 0.) / whole_token))
epoch_finish = time.time()
epoch_cost = epoch_finish - epoch_start
print('Epoch: %s training finished. Time: %.2fs, speed: %.2fst/s, total loss: %s' % (
idx + 1, epoch_cost, train_num / epoch_cost, total_loss))
print('total_loss:', total_loss)
if total_loss > 1e8 or str(total_loss) == 'nan':
print('ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....')
exit(1)
# continue
speed, p, r, f, _, result_true, result_pred = evaluate(data, model, 'dev')
# generate results {true, pred}
result_true_lists, result_pred_lists = generate_result_lists(result_true, result_pred)
p, r, f1 = eval_entity(result_true_lists, result_pred_lists)
dev_finish = time.time()
dev_cost = dev_finish - epoch_finish
current_score = f1
print(
'Dev: time: %.2fs, speed: %.2fst/s; precision: %.4f, recall: %.4f, f1-score: %.4f' % (
dev_cost, speed, p, r, f1
)
)
if current_score > best_dev:
print('\n!!! Exceed previous best f1-score: {}'.format(best_dev))
model_name = data.model_dir + '.best.model'
print('Save current best model in file: {}\n'.format(model_name))
torch.save(model.state_dict(), model_name)
best_dev = current_score
best_epoch = idx + 1
else:
print('\nBest model in epoch: {}, f1-score: {}\n'.format(best_epoch, best_dev))
gc.collect()
def load_model_decode(data, name):
print('Load Model from file: ', data.model_dir)
model = SeqLabel(data)
if data.HP_gpu:
model.load_state_dict(torch.load(data.load_model_dir))
else:
model.load_state_dict(torch.load(data.load_model_dir, map_location=lambda storage, loc: storage))
start_time = time.time()
speed, p, r, f, pred_results, result_true, result_pred = evaluate(data, model, name)
end_time = time.time()
time_cost = end_time - start_time
# generate results {true, pred}
result_true_lists, result_pred_lists = generate_result_lists(result_true, result_pred)
p, r, f1 = eval_entity(result_true_lists, result_pred_lists)
print('\n{}: time_cost: {:.2f}s, speed: {:.2f}st/s, precision: {:.4f}, recall: {:.4f}, f1-score: {:.4f}'.format(
name, time_cost, speed, p, r, f1
))
list2xlsx(xlsx_path=data.result_true_path, result_lists=result_true_lists)
list2xlsx(xlsx_path=data.result_pred_path, result_lists=result_pred_lists)
return pred_results
def generate_result_lists(result_true, result_pred):
# generate results {true, pred}
result_true_lists, result_pred_lists = list(), list()
for word_true_list, tag_true_list, word_pred_list, tag_pred_list in zip(
result_true[0], result_true[1], result_pred[0], result_pred[1]
):
result_true_dict = build_list2dict(len(word_true_list), word_true_list, tag_true_list, typ='true')
result_pred_dict = build_list2dict(len(word_pred_list), word_pred_list, tag_pred_list, typ='pred')
result_true_lists.append(result_true_dict)
result_pred_lists.append(result_pred_dict)
return result_true_lists, result_pred_lists
def build_list2dict(_len, _word_list, _tag_list, typ):
ps_list = list()
result_dict = {
'content': ''.join(_word_list),
'amount_of_cooperation': set(),
'project_name': set(),
'state': set(),
'company_identification_Party_A': set(),
'company_identification_Party_B': set(),
'project_cycle': set(),
'project_status': set()
}
# tag_dict = {
# 'amount_of_cooperation': '合作金额',
# 'project_name': '项目名称',
# 'state': '国家',
# 'company_identification_Party_A': '企业识别甲方',
# 'company_identification_Party_B': '企业识别乙方',
# 'project_cycle': '项目周期',
# 'project_status': '项目状态'
# }
for index, word, tag in zip(range(_len), _word_list, _tag_list):
start_pos = index
end_pos = index + 1
label_type = tag[2:]
if tag[0] == 'B' and end_pos != _len:
# two !=
while _tag_list[end_pos][0] == 'I' and _tag_list[end_pos][2:] == label_type and end_pos + 1 != _len:
end_pos += 1
if _tag_list[end_pos][0] == 'E':
chunk = ''.join(_word_list[start_pos: end_pos + 1])
if label_type == 'project_status' and typ == 'pred':
ps_list.append(chunk)
else:
result_dict[label_type].add(chunk)
if typ == 'pred' and len(ps_list) > 0:
result_dict['project_status'] = [max(ps_list, key=ps_list.count)]
return result_dict
def list2xlsx(xlsx_path=None, result_lists=None):
# create the workbook
workbook = xlsxwriter.Workbook(xlsx_path)
# create the worksheet
worksheet = workbook.add_worksheet('sheet1')
# write row by row
worksheet.write_row(
0, 0, [
'合同金额',
'项目名称',
'国家',
'企业识别甲方',
'企业识别乙方',
'项目周期',
'项目状态'
]
)
for index, result in enumerate(result_lists):
worksheet.write_row(
index + 1, 0, [
','.join(result['amount_of_cooperation']),
','.join(result['project_name']),
','.join(result['state']),
','.join(result['company_identification_Party_A']),
','.join(result['company_identification_Party_B']),
','.join(result['project_cycle']),
','.join(result['project_status'])
]
)
workbook.close()
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
config_path = '../config/config'
data = Data()
data.read_config(config_file=config_path)
status = data.status.lower()
print('Seed num:', seed_num)
if status == 'train':
print('MODEL: train')
data_initialization(data)
data.generate_instance('train')
data.generate_instance('dev')
data.generate_instance('test')
data.build_pretrain_emb()
train(data)
print('\n\nMODEL: decode')
data.load(data.dset_dir)
decode_results = load_model_decode(data, 'test')
data.write_decoded_results(decode_results, 'test')
elif status == 'decode':
print('MODEL: decode')
data.load(data.dset_dir)
data.read_config(config_file=config_path)
print(data.test_dir)
data.generate_instance('test')
decode_results = load_model_decode(data, 'test')
data.write_decoded_results(decode_results, 'test')
else:
print('Invalid argument! Please use valid arguments! (train/decode)')
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/runner/runner.py
|
runner.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/23 9:59
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/runner/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : eval_entity
# @Author : LiuYan
# @Time : 2021/1/15 18:02
def eval_entity(true_lists, pred_lists):
TP, FN, FP = 0, 0, 0
for true_dict, pred_dict in zip(true_lists, pred_lists):
tp, fn, fp = compute_entity(true_dict, pred_dict)
TP += tp
FN += fn
FP += fp
p = TP / (TP + FP) if (TP + FP) != 0 else 0
r = TP / (TP + FN) if (TP + FN) != 0 else 0
f1 = (2 * p * r) / (p + r) if (p + r) != 0 else 0
return p, r, f1
def compute_entity(true_dict, pred_dict):
content_true, content_pred = true_dict['content'], pred_dict['content']
amount_of_cooperation_true, amount_of_cooperation_pred = true_dict['amount_of_cooperation'], pred_dict['amount_of_cooperation']
project_name_true, project_name_pred = true_dict['project_name'], pred_dict['project_name']
state_true, state_pred = true_dict['state'], pred_dict['state']
company_identification_Party_A_true, company_identification_Party_A_pred = true_dict['company_identification_Party_A'], pred_dict['company_identification_Party_A']
company_identification_Party_B_true, company_identification_Party_B_pred = true_dict['company_identification_Party_B'], pred_dict['company_identification_Party_B']
project_cycle_true, project_cycle_pred = true_dict['project_cycle'], pred_dict['project_cycle']
project_status_true, project_status_pred = true_dict['project_status'], pred_dict['project_status']
TP, FP = 0, 0
# compute TP + FN
TP_FN = len(amount_of_cooperation_true) + len(project_name_true) + len(state_true) + len(
company_identification_Party_A_true
) + len(company_identification_Party_B_true) + len(project_cycle_true) + len(
project_status_true
)
for aof_pred in amount_of_cooperation_pred:
if judge_exist(aof_pred, amount_of_cooperation_true):
TP += 1
else:
FP += 1
for pn_pred in project_name_pred:
if judge_exist(pn_pred, project_name_true):
TP += 1
else:
FP += 1
for s_pred in state_pred:
if judge_exist(s_pred, state_true):
TP += 1
else:
FP += 1
for ciPA_pred in company_identification_Party_A_pred:
if judge_exist(ciPA_pred, company_identification_Party_A_true):
TP += 1
else:
FP += 1
for ciPB_pred in company_identification_Party_B_pred:
if judge_exist(ciPB_pred, company_identification_Party_B_true):
TP += 1
else:
FP += 1
for pc_pred in project_cycle_pred:
if judge_exist(pc_pred, project_cycle_true):
TP += 1
else:
FP += 1
for ps_pred in project_status_pred:
if judge_exist(ps_pred, project_status_true):
TP += 1
else:
FP += 1
return TP, TP_FN - TP, FP
def judge_exist(pred, true_list):
for true in true_list:
if pred == true:
return True
return False
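# --- Hedged usage sketch (not part of the original file); the entity strings are placeholders ---
if __name__ == '__main__':
    true_lists = [{
        'content': '示例文本', 'amount_of_cooperation': {'1亿元'}, 'project_name': {'某铁路项目'},
        'state': {'肯尼亚'}, 'company_identification_Party_A': set(),
        'company_identification_Party_B': set(), 'project_cycle': set(), 'project_status': {'在建'}
    }]
    pred_lists = [{
        'content': '示例文本', 'amount_of_cooperation': {'1亿元'}, 'project_name': set(),
        'state': {'肯尼亚'}, 'company_identification_Party_A': set(),
        'company_identification_Party_B': set(), 'project_cycle': set(), 'project_status': {'在建'}
    }]
    p, r, f1 = eval_entity(true_lists, pred_lists)
    print(p, r, f1)  # 1.0 0.75 ~0.857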
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/evaluate/eval_entity.py
|
eval_entity.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/11 19:46
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/evaluate/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/21 10:00
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/app/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : the_belt_and_road_app
# @Author : LiuYan
# @Time : 2021/4/20 20:49
from base.app.base_app import *
doc_event_br = Blueprint('/doc_event/br', __name__)
@doc_event_br.route('/test', methods=('GET', 'POST'))
def test():
app.logger.info('test -> doc_event -> the_belt_and_road_app success!')
logger.info('test -> doc_event -> the_belt_and_road_app success!')
return 'test -> doc_event -> the_belt_and_road_app success!'
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/app/the_belt_and_road_app.py
|
the_belt_and_road_app.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : data_split
# @Author : LiuYan
# @Time : 2021/1/15 14:32
import xlsxwriter
from sklearn.model_selection import train_test_split
from data_process import *
def build_list2dict(_len, _word_list, _tag_list):
result_dict = {
'content': ''.join(_word_list),
'amount_of_cooperation': set(),
'project_name': set(),
'state': set(),
'company_identification_Party_A': set(),
'company_identification_Party_B': set(),
'project_cycle': set(),
'project_status': set()
}
# tag_dict = {
# 'amount_of_cooperation': '合作金额',
# 'project_name': '项目名称',
# 'state': '国家',
# 'company_identification_Party_A': '企业识别甲方',
# 'company_identification_Party_B': '企业识别乙方',
# 'project_cycle': '项目周期',
# 'project_status': '项目状态'
# }
for index, word, tag in zip(range(_len), _word_list, _tag_list):
start_pos = index
end_pos = index + 1
label_type = tag[2:]
if tag[0] == 'B' and end_pos != _len:
# two !=
while _tag_list[end_pos][0] == 'I' and _tag_list[end_pos][2:] == label_type and end_pos + 1 != _len:
end_pos += 1
if _tag_list[end_pos][0] == 'E':
result_dict[label_type].add(''.join(_word_list[start_pos: end_pos + 1]))
# build_list.append({'start_pos': start_pos,
# 'end_pos': end_pos + 1,
# 'label_type': tag_dict[label_type]})
return result_dict
def list2xlsx(xlsx_path=None, result_lists=None):
# create the workbook
workbook = xlsxwriter.Workbook(xlsx_path)
# create the worksheet
worksheet = workbook.add_worksheet('sheet1')
# write row by row
worksheet.write_row(
0, 0, [
'合同金额',
'项目名称',
'国家',
'企业识别甲方',
'企业识别乙方',
'项目周期',
'项目状态'
]
)
for index, result in enumerate(result_lists):
worksheet.write_row(
index + 1, 0, [
','.join(result['amount_of_cooperation']),
','.join(result['project_name']),
','.join(result['state']),
','.join(result['company_identification_Party_A']),
','.join(result['company_identification_Party_B']),
','.join(result['project_cycle']),
','.join(result['project_status'])
]
)
workbook.close()
def data_split(data_list):
# split_str = ',,、;;。' #
# split_str = ';;。' # 1
# split_str = ';;。!!' # 2
split_str = ';;。!!??' # 3
result_list = []
# A space ' ' could also be used as a split boundary, i.e. split_str = ',,、;;。 '
for word_list, tag_list in data_list:
length = 1
split_words, split_tags = [], []
split_list = []
for word, tag in zip(word_list, tag_list):
split_words.append(word)
split_tags.append(tag)
if length > 30 and tag[0] in ['O', 'E'] and word in split_str:
split_list.append([split_words, split_tags])
split_words, split_tags = [], []
length = 1
elif length > 120 and tag[0] in ['O', 'E']:
split_list.append([split_words, split_tags])
split_words, split_tags = [], []
length = 1
if length >= 200:
print('Warning: segment length reached {} without finding a split point'.format(length))
length += 1
merge_list = merge_seq(seq_list=split_list)
result_list.append(merge_list)
assert len(data_list) == len(result_list), 'data_list: {} != result_list: {} !'.format(
len(data_list), len(result_list)
)
return result_list
def merge_seq(seq_list):
i = 0
num_sent_to_include, max_length = 3, 200
merge_words, merge_tags = [], []
merge_list, stats_list = [], []
for word_list, tag_list in seq_list:
if i == 0:
merge_words.extend(word_list)
merge_tags.extend(tag_list)
i += 1
elif i == 3:
merge_list.append([merge_words, merge_tags])
stats_list.append(i)
merge_words = word_list
merge_tags = tag_list
i = 1
elif len(merge_words) + len(word_list) < max_length:
merge_words.append('#####')
merge_tags.append('O')
merge_words.extend(word_list)
merge_tags.extend(tag_list)
i += 1
else:
merge_list.append([merge_words, merge_tags])
stats_list.append(i)
merge_words = word_list
merge_tags = tag_list
i = 1
    print('Each merged segment consists of {} sentences on average'.format(sum(stats_list) / len(stats_list)))
return merge_list
pass
if __name__ == '__main__':
xlsx_path = './sample/total_datasets.xlsx'
total_list = xlsx2list(xlsx_path=xlsx_path)
data_list = list()
for sentence in total_list:
word_list, tag_list = sentence2tag(sentence)
data_list.append([word_list, tag_list])
result_list = data_split(data_list=data_list)
train_list, dev_list = train_test_split(
result_list, test_size=0.1, random_state=2021
)
write2txt(train_list, 'train_3.txt', 'train')
write2txt(dev_list, 'dev_3.txt', 'dev')
write2txt(dev_list, 'test_3.txt', 'test')
# test_data_path = 'test.txt'
# with open(test_data_path, 'r', encoding='utf-8') as f:
# file = f.readlines()
# doc_id = None
# word_list, tag_list = list(), list()
# for line in file:
# if doc_id is None:
# doc_id = line.strip('\n')
# else:
# word, tag = line.strip('\n').split('\t')
# pass
# result_lists = list()
# for word_list, tag_list in result_list:
# result_dict = build_list2dict(len(word_list), word_list, tag_list)
# result_lists.append(result_dict)
# list2xlsx(xlsx_path='test_result_true.xlsx', result_lists=result_lists)
pass
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/data/data_split.py
|
data_split.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : data_loader
# @Author : LiuYan
# @Time : 2021/3/23 9:36
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
from doc_event.utils.alphabet import Alphabet
from doc_event.utils.functions import *
try:
import cPickle as pickle
except ImportError:
import pickle as pickle
START = '</s>'
UNKNOWN = '</unk>'
PADDING = '</pad>'
class Data:
def __init__(self):
self.MAX_SENTENCE_LENGTH = 1000
self.number_normalized = True
self.norm_word_emb = False
self.word_alphabet = Alphabet('word')
self.label_alphabet = Alphabet('label', True)
self.tagScheme = 'NoSeg' # BMES/BIO
self.split_token = '\t'
self.seg = True
# I/O
self.train_dir = None
self.dev_dir = None
self.test_dir = None
self.decode_dir = None
self.dset_dir = None # data vocabulary related file
self.model_dir = None # model save file
self.load_model_dir = None # model load file
self.result_true_path = None
self.result_pred_path = None
self.word_emb_dir = None
self.train_texts = []
self.dev_texts = []
self.test_texts = []
self.train_Ids = []
self.dev_Ids = []
self.test_Ids = []
self.pretrain_word_embedding = None
self.pretrain_feature_embeddings = []
self.label_size = 0
self.word_alphabet_size = 0
self.label_alphabet_size = 0
self.feature_alphabet_sizes = []
self.feature_emb_dims = []
self.norm_feature_embs = []
self.word_emb_dim = 50
# Networks
self.use_crf = True
self.word_feature_extractor = 'LSTM' # 'LSTM'/'CNN'/'GRU'
self.use_bert = False
self.bert_dir = None
# Training
self.average_batch_loss = False
self.optimizer = 'SGD' # 'SGD'/'AdaGrad'/'AdaDelta'/'RMSProp'/'Adam'
self.status = 'train'
# Hyperparameters
self.HP_iteration = 100
self.HP_batch_size = 10
self.HP_hidden_dim = 200
self.HP_dropout = 0.5
self.HP_lstm_layer = 1
self.HP_bilstm = True
self.HP_gpu = False
self.HP_lr = 0.015
self.HP_lr_decay = 0.05
self.HP_clip = None
self.HP_momentum = 0
self.HP_l2 = 1e-8
def show_data_summary(self):
print('++' * 50)
print('DATA SUMMARY START:')
print(' I/O:')
        print(' Start Sequence Labeling task...')
print(' Tag scheme: %s' % (self.tagScheme))
print(' Split token: %s' % (self.split_token))
print(' MAX SENTENCE LENGTH: %s' % (self.MAX_SENTENCE_LENGTH))
print(' Number normalized: %s' % (self.number_normalized))
print(' Word alphabet size: %s' % (self.word_alphabet_size))
print(' Label alphabet size: %s' % (self.label_alphabet_size))
print(' Word embedding dir: %s' % (self.word_emb_dir))
print(' Word embedding size: %s' % (self.word_emb_dim))
print(' Norm word emb: %s' % (self.norm_word_emb))
print(' Train file directory: %s' % (self.train_dir))
print(' Dev file directory: %s' % (self.dev_dir))
print(' Test file directory: %s' % (self.test_dir))
print(' Dset file directory: %s' % (self.dset_dir))
print(' Model file directory: %s' % (self.model_dir))
        print(' Load model directory: %s' % (self.load_model_dir))
print(' Decode file directory: %s' % (self.decode_dir))
print(' Train instance number: %s' % (len(self.train_texts)))
print(' Dev instance number: %s' % (len(self.dev_texts)))
print(' Test instance number: %s' % (len(self.test_texts)))
print(' ' + '++' * 20)
print(' Model Network:')
print(' Model use_crf: %s' % (self.use_crf))
print(' Model word extractor: %s' % (self.word_feature_extractor))
print(' ' + '++' * 20)
print(' Training:')
print(' Optimizer: %s' % (self.optimizer))
print(' Iteration: %s' % (self.HP_iteration))
print(' BatchSize: %s' % (self.HP_batch_size))
print(' Average batch loss: %s' % (self.average_batch_loss))
print(' ' + '++' * 20)
print(' Hyperparameters:')
print(' Hyper lr: %s' % (self.HP_lr))
print(' Hyper lr_decay: %s' % (self.HP_lr_decay))
print(' Hyper HP_clip: %s' % (self.HP_clip))
print(' Hyper momentum: %s' % (self.HP_momentum))
print(' Hyper l2: %s' % (self.HP_l2))
print(' Hyper hidden_dim: %s' % (self.HP_hidden_dim))
print(' Hyper dropout: %s' % (self.HP_dropout))
print(' Hyper lstm_layer: %s' % (self.HP_lstm_layer))
print(' Hyper bilstm: %s' % (self.HP_bilstm))
print(' Hyper GPU: %s' % (self.HP_gpu))
print('DATA SUMMARY END.')
print('++' * 50)
sys.stdout.flush()
def build_alphabet(self, input_file):
in_lines = open(input_file, 'r').readlines()
for line in in_lines:
if len(line) > 2:
                # for sequence labeling data format, i.e. CoNLL 2003
pairs = line.strip('\n').split('\t')
if len(pairs) < 2:
continue
word = pairs[0]
if self.number_normalized:
word = normalize_word(word)
label = pairs[-1]
self.label_alphabet.add(label)
self.word_alphabet.add(word)
self.word_alphabet_size = self.word_alphabet.size()
self.label_alphabet_size = self.label_alphabet.size()
start_S = False
start_B = False
for label, _ in self.label_alphabet.iteritems():
if 'S-' in label.upper():
start_S = True
elif 'B-' in label.upper():
start_B = True
if start_B:
if start_S:
self.tagScheme = 'BMES'
else:
self.tagScheme = 'BIO'
def fix_alphabet(self):
self.word_alphabet.close()
self.label_alphabet.close()
def build_pretrain_emb(self):
if self.word_emb_dir:
print('Load pretrained word embedding, norm: %s, dir: %s' % (self.norm_word_emb, self.word_emb_dir))
self.pretrain_word_embedding, self.word_emb_dim = build_pretrain_embedding(self.word_emb_dir,
self.word_alphabet,
self.word_emb_dim,
self.norm_word_emb)
def generate_instance(self, name):
self.fix_alphabet()
if name == 'train':
self.train_texts, self.train_Ids = read_instance(self.train_dir, self.word_alphabet, self.label_alphabet,
self.number_normalized, self.MAX_SENTENCE_LENGTH,
self.split_token)
elif name == 'dev':
self.dev_texts, self.dev_Ids = read_instance(self.dev_dir, self.word_alphabet, self.label_alphabet,
self.number_normalized, self.MAX_SENTENCE_LENGTH,
self.split_token)
elif name == 'test':
self.test_texts, self.test_Ids = read_instance(self.test_dir, self.word_alphabet, self.label_alphabet,
self.number_normalized, self.MAX_SENTENCE_LENGTH,
self.split_token)
else:
print('Error: you can only generate train/dev/test instance! Illegal input:%s' % (name))
def write_decoded_results(self, predict_results, name):
sent_num = len(predict_results)
content_list = []
if name == 'train':
content_list = self.train_texts
elif name == 'dev':
content_list = self.dev_texts
elif name == 'test':
content_list = self.test_texts
else:
print('Error: illegal name during writing predict result, name should be within train/dev/test !')
assert (sent_num == len(content_list))
fout = open(self.decode_dir, 'w')
for idx in range(sent_num):
sent_length = len(predict_results[idx])
fout.write(content_list[idx][-1] + '\n')
for idy in range(sent_length):
# content_list[idx] is a list with [word, char, label]
try: # Will fail with python3
fout.write(content_list[idx][0][idy].encode('utf-8') + ' ' + predict_results[idx][idy] + '\n')
except:
fout.write(content_list[idx][0][idy] + ' ' + predict_results[idx][idy] + '\n')
fout.write('\n')
fout.close()
print('Predict %s result has been written into file. %s' % (name, self.decode_dir))
def load(self, data_file):
f = open(data_file, 'rb')
tmp_dict = pickle.load(f)
f.close()
self.__dict__.update(tmp_dict)
def save(self, save_file):
f = open(save_file, 'wb')
pickle.dump(self.__dict__, f, 2)
f.close()
def read_config(self, config_file):
project_root_path = os.path.dirname(os.getcwd())
config = config_file_to_dict(config_file)
# read data:
the_item = 'train_dir'
if the_item in config:
self.train_dir = os.path.join(project_root_path, config[the_item])
the_item = 'dev_dir'
if the_item in config:
self.dev_dir = os.path.join(project_root_path, config[the_item])
the_item = 'test_dir'
if the_item in config:
self.test_dir = os.path.join(project_root_path, config[the_item])
the_item = 'decode_dir'
if the_item in config:
self.decode_dir = os.path.join(project_root_path, config[the_item])
the_item = 'dset_dir'
if the_item in config:
self.dset_dir = os.path.join(project_root_path, config[the_item])
the_item = 'model_dir'
if the_item in config:
self.model_dir = os.path.join(project_root_path, config[the_item])
the_item = 'load_model_dir'
if the_item in config:
self.load_model_dir = os.path.join(project_root_path, config[the_item])
the_item = 'result_true_path'
if the_item in config:
self.result_true_path = os.path.join(project_root_path, config[the_item])
the_item = 'result_pred_path'
if the_item in config:
self.result_pred_path = os.path.join(project_root_path, config[the_item])
the_item = 'word_emb_dir'
if the_item in config:
self.word_emb_dir = config[the_item]
the_item = 'MAX_SENTENCE_LENGTH'
if the_item in config:
self.MAX_SENTENCE_LENGTH = int(config[the_item])
the_item = 'norm_word_emb'
if the_item in config:
self.norm_word_emb = str2bool(config[the_item])
the_item = 'number_normalized'
if the_item in config:
self.number_normalized = str2bool(config[the_item])
the_item = 'seg'
if the_item in config:
self.seg = str2bool(config[the_item])
the_item = 'word_emb_dim'
if the_item in config:
self.word_emb_dim = int(config[the_item])
# read network:
the_item = 'use_crf'
if the_item in config:
self.use_crf = str2bool(config[the_item])
the_item = 'word_seq_feature'
if the_item in config:
self.word_feature_extractor = config[the_item]
the_item = 'use_bert'
if the_item in config:
self.use_bert = str2bool(config[the_item])
the_item = 'bert_dir'
if the_item in config:
self.bert_dir = config[the_item]
# read training setting:
the_item = 'optimizer'
if the_item in config:
self.optimizer = config[the_item]
the_item = 'ave_batch_loss'
if the_item in config:
self.average_batch_loss = str2bool(config[the_item])
the_item = 'status'
if the_item in config:
self.status = config[the_item]
# read Hyperparameters:
the_item = 'iteration'
if the_item in config:
self.HP_iteration = int(config[the_item])
the_item = 'batch_size'
if the_item in config:
self.HP_batch_size = int(config[the_item])
the_item = 'hidden_dim'
if the_item in config:
self.HP_hidden_dim = int(config[the_item])
the_item = 'dropout'
if the_item in config:
self.HP_dropout = float(config[the_item])
the_item = 'lstm_layer'
if the_item in config:
self.HP_lstm_layer = int(config[the_item])
the_item = 'bilstm'
if the_item in config:
self.HP_bilstm = str2bool(config[the_item])
the_item = 'gpu'
if the_item in config:
self.HP_gpu = str2bool(config[the_item])
the_item = 'learning_rate'
if the_item in config:
self.HP_lr = float(config[the_item])
the_item = 'lr_decay'
if the_item in config:
self.HP_lr_decay = float(config[the_item])
the_item = 'clip'
if the_item in config:
self.HP_clip = float(config[the_item])
the_item = 'momentum'
if the_item in config:
self.HP_momentum = float(config[the_item])
the_item = 'l2'
if the_item in config:
self.HP_l2 = float(config[the_item])
def config_file_to_dict(input_file):
config = {}
fins = open(input_file, 'r').readlines()
for line in fins:
if len(line) > 0 and line[0] == '#':
continue
if '=' in line:
pair = line.strip().split('#', 1)[0].split('=', 1)
item = pair[0]
if item == 'feature':
if item not in config:
feat_dict = {}
config[item] = feat_dict
feat_dict = config[item]
new_pair = pair[-1].split()
feat_name = new_pair[0]
one_dict = {}
one_dict['emb_dir'] = None
one_dict['emb_size'] = 10
one_dict['emb_norm'] = False
if len(new_pair) > 1:
for idx in range(1, len(new_pair)):
conf_pair = new_pair[idx].split('=')
if conf_pair[0] == 'emb_dir':
one_dict['emb_dir'] = conf_pair[-1]
elif conf_pair[0] == 'emb_size':
one_dict['emb_size'] = int(conf_pair[-1])
elif conf_pair[0] == 'emb_norm':
one_dict['emb_norm'] = str2bool(conf_pair[-1])
feat_dict[feat_name] = one_dict
# print "feat",feat_dict
else:
if item in config:
print('Warning: duplicated config item found: %s, updated.' % (pair[0]))
config[item] = pair[-1]
return config
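# Sketch of the config format parsed above (hypothetical file contents):
#   # lines starting with '#' are ignored
#   train_dir=doc_event/data/train_3.txt
#   use_crf=True
#   feature=[POS] emb_size=20 emb_norm=False
# every 'key=value' pair becomes one dict entry; 'feature' lines are gathered
# into a nested dict keyed by the feature name (here '[POS]').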
def str2bool(string):
if string == 'True' or string == 'true' or string == 'TRUE':
return True
else:
return False
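# Usage sketch (assumption: a config file in the format shown above; the path
# below is hypothetical):
# >>> data = Data()
# >>> data.read_config('doc_event/config/demo.train.config')
# >>> data.build_alphabet(data.train_dir)
# >>> data.fix_alphabet()
# >>> data.generate_instance('train')
# >>> data.show_data_summary()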
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/data/data_loader.py
|
data_loader.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/11 19:27
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/data/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : data_process
# @Author : LiuYan
# @Time : 2021/1/14 20:37
import re
import xlrd
from sklearn.model_selection import train_test_split
def xlsx2list(xlsx_path=None) -> list:
    # open the excel file
wb = xlrd.open_workbook(xlsx_path)
    # locate the worksheet inside the workbook
sh = wb.sheet_by_name('Sheet1')
    print(sh.nrows)  # number of data rows
    print(sh.ncols)  # number of data columns
    print(sh.cell(0, 0).value)  # value of the first row, first column
    print(sh.row_values(0))  # all values of the first row
    # zip the header row with the second row into a dict (sanity check)
print(dict(zip(sh.row_values(0), sh.row_values(1))))
    # iterate over the sheet and collect every row
total_list = list()
for i in range(sh.nrows):
row = sh.row_values(i)
total_list.append({
'title': row[1].replace('\n', '').replace('\r', '').replace('\t', ''),
'content': row[2].replace('\n', '').replace('\r', '').replace('\t', ''),
'amount_of_cooperation': row[3].split(';') if len(row[3]) > 0 else None,
'project_name': row[4].split(',') if len(row[4]) > 0 else None,
'state': row[5].split(',') if len(row[5]) > 0 else None,
'company_identification_Party_A': row[6].split(',') if len(row[6]) > 0 else None,
'company_identification_Party_B': row[7].split(',') if len(row[7]) > 0 else None,
'project_cycle': row[8].split(',') if len(row[8]) > 0 else None,
'project_status': row[9].split(',') if len(row[9]) > 0 else None,
})
total_list = total_list[3:]
return total_list
def stats(content=None, com_list=None) -> list:
result_list = list()
for com in com_list:
pattern = re.compile(com)
result = pattern.findall(content)
result_list.append(len(result))
return result_list
def sentence2tag(sentence=None):
title, content = sentence['title'], sentence['content']
content = title + content
amount_of_cooperation = sentence['amount_of_cooperation']
project_name = sentence['project_name']
state = sentence['state']
company_identification_Party_A = sentence['company_identification_Party_A']
company_identification_Party_B = sentence['company_identification_Party_B']
project_cycle = sentence['project_cycle']
project_status = sentence['project_status']
word_list = list(content)
tag_list = ['O' for c in content]
if amount_of_cooperation is None:
pass
# print('None')
else:
for aoc in amount_of_cooperation:
index_list = find_all(content, aoc)
tag_list = tag_update(tag_list, index_list, aoc, 'amount_of_cooperation')
if project_name is None:
pass
# print('None')
else:
for pn in project_name:
index_list = find_all(content, pn)
tag_list = tag_update(tag_list, index_list, pn, 'project_name')
if state is None:
pass
# print('None')
else:
for s in state:
index_list = find_all(content, s)
tag_list = tag_update(tag_list, index_list, s, 'state')
if company_identification_Party_A is None:
pass
# print('None')
else:
for ciPA in company_identification_Party_A:
index_list = find_all(content, ciPA)
tag_list = tag_update(tag_list, index_list, ciPA, 'company_identification_Party_A')
if company_identification_Party_B is None:
pass
# print('None')
else:
for ciPB in company_identification_Party_B:
index_list = find_all(content, ciPB)
tag_list = tag_update(tag_list, index_list, ciPB, 'company_identification_Party_B')
if project_cycle is None:
# print('None')
pass
else:
for pc in project_cycle:
index_list = find_all(content, pc)
tag_list = tag_update(tag_list, index_list, pc, 'project_cycle')
if project_status is None:
pass
# print('None')
else:
for ps in project_status:
index_list = find_all(content, ps[0:2])
tag_list = tag_update(tag_list, index_list, ps[0:2], 'project_status')
s_word = ['', '\n', '\t']
s_tag = ['', ' ', '\n', '\t']
for word, tag in zip(word_list, tag_list):
        if word in s_word:
            print('Warning: empty or whitespace character found in word_list')
        if tag in s_tag:
            print('Warning: empty or whitespace tag found in tag_list')
return word_list, tag_list
# result_list = stats(content, amount_of_cooperation)
pass
def tag_update(tag_list, index_list, s, tag_name):
if index_list is False:
return tag_list
for index in index_list:
if judge_all_o(tag_list, index, index + len(s)):
tag_list[index] = 'B-' + tag_name
for i in range(index + 1, index + len(s) - 1):
tag_list[i] = 'I-' + tag_name
tag_list[index + len(s) - 1] = 'E-' + tag_name
return tag_list
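# Illustrative sketch (hypothetical inputs): tagging the span '肯尼亚' found at
# offset 4 of an all-'O' tag list yields B-/I-/E- labels for that chunk.
# >>> tags = ['O'] * 8
# >>> tag_update(tags, [4], '肯尼亚', 'state')[4:7]
# ['B-state', 'I-state', 'E-state']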
def judge_all_o(tag_list, index, index_end):
if tag_list[index][0] == 'O' or tag_list[index][0] == 'B':
if tag_list[index_end - 1][0] == 'O' or tag_list[index_end - 1][0] == 'E':
if tag_list[index][0] == 'B':
pass
return True
return False
def find_all(sub, s):
"""
从一篇文章(sub)中找到所有符合要素(s)的chunk,并返回起始下标
:param sub: role
:param s: doc
:return: index: list
"""
if len(s) < 2:
print('要素名过短: {}'.format(s)) # 要素名过短提示
index_list = []
index = sub.find(s)
while index != -1:
index_list.append(index)
index = sub.find(s, index + 1)
if len(index_list) > 0:
return index_list
else:
        print('Event element: {} could not be matched in the document!'.format(s))  # element not matched in the document
return False
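# Illustrative sketch (hypothetical strings, not from the dataset):
# >>> find_all('中老铁路项目开工,中老铁路全长414公里', '中老铁路')
# [0, 9]
# an element that never occurs prints a warning and returns False instead.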
def check_all(result_list):
for word_list, tag_list in result_list:
suffix = None
for word, tag in zip(word_list, tag_list):
if suffix is None:
if tag[0] == 'I' or tag[0] == 'E':
                    print('Warning: I/E tag appears without a preceding B tag')
if tag[0] == 'B':
suffix = tag[2:]
if suffix is not None:
if tag[0] == 'I' or tag[0] == 'E':
if tag[2:] != suffix:
                        print('Warning: I/E tag type does not match its opening B tag')
if tag[0] == 'O':
suffix = None
            if word == ' ':
                if tag[0] != 'O':
pass
pass
def write2txt(docs_list, txt_path, typ):
i = 0
with open(txt_path, 'w', encoding='utf-8') as f:
for doc_list in docs_list:
for word_list, tag_list in doc_list:
if len(word_list) >= 250:
print(len(word_list))
f.write(typ + '-' + str(i) + '\n')
for index, word, tag in zip(range(len(word_list)), word_list, tag_list):
# if word == ' '
f.write(word + '\t' + tag + '\n')
if index + 1 == len(word_list):
f.write('\n')
i += 1
def data_split_write2txt(result_list, txt_path, typ):
"""
data_split + write2txt
:param result_list: list
:param txt_path:
:param typ: train/dev/test
:return:
"""
i = 0
# split_str = ',,、;;。'
split_str = ';;。'
    # a space ' ' could also be used as a split boundary, i.e. split_str = ',,、;;。 '
with open(txt_path, 'w', encoding='utf-8') as f:
for word_list, tag_list in result_list:
f.write(typ + '-' + str(i) + '\n')
length = 1
for index, word, tag in zip(range(len(word_list)), word_list, tag_list):
f.write(word + '\t' + tag + '\n')
if index + 1 == len(word_list):
f.write('\n')
elif length > 30 and tag[0] in ['O', 'E'] and word in split_str:
f.write('\n' + typ + '-' + str(i) + '\n')
length = 1
elif length > 120 and tag[0] in ['O', 'E']:
f.write('\n' + typ + '-' + str(i) + '\n')
length = 1
if length >= 200:
                    print('Warning: segment length {} has reached 200 without finding a split point'.format(length))
length += 1
pass
i += 1
if __name__ == '__main__':
s = '27.52亿美元,2,436.03亿元'
s_list = re.split('元,|币,', s)
xlsx_path = './sample/total_datasets.xlsx'
total_list = xlsx2list(xlsx_path=xlsx_path)
result_list = list()
for sentence in total_list:
word_list, tag_list = sentence2tag(sentence)
result_list.append([word_list, tag_list])
check_all(result_list)
train_list, dev_list = train_test_split(
result_list, test_size=0.1, random_state=2021
)
data_split_write2txt(train_list, 'train_1_.txt', 'train')
data_split_write2txt(dev_list, 'dev_1_.txt', 'dev')
data_split_write2txt(dev_list, 'test_1_.txt', 'test')
pass
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/data/data_process.py
|
data_process.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : alphabet
# @Author : LiuYan
# @Time : 2021/3/23 9:55
"""
Alphabet maps objects to integer ids. It provides two way mapping from the index to the objects.
"""
from __future__ import print_function
import json
import os
class Alphabet:
def __init__(self, name, label=False, keep_growing=True):
self.name = name
self.UNKNOWN = '</unk>'
self.label = label
self.instance2index = {}
self.instances = []
self.keep_growing = keep_growing
# Index 0 is occupied by default, all else following.
self.default_index = 0
self.next_index = 1
if not self.label:
self.add(self.UNKNOWN)
def clear(self, keep_growing=True):
self.instance2index = {}
self.instances = []
self.keep_growing = keep_growing
# Index 0 is occupied by default, all else following.
self.default_index = 0
self.next_index = 1
def add(self, instance):
if instance not in self.instance2index:
self.instances.append(instance)
self.instance2index[instance] = self.next_index
self.next_index += 1
def get_index(self, instance):
try:
return self.instance2index[instance]
except KeyError:
if self.keep_growing:
index = self.next_index
self.add(instance)
return index
else:
return self.instance2index[self.UNKNOWN]
def get_instance(self, index):
if index == 0:
if self.label:
return self.instances[0]
# First index is occupied by the wildcard element.
return None
try:
return self.instances[index - 1]
except IndexError:
            print('WARNING: Alphabet get_instance, unknown instance, return the first label.')
return self.instances[0]
def size(self):
# if self.label:
# return len(self.instances)
# else:
return len(self.instances) + 1
def iteritems(self):
return self.instance2index.items()
def enumerate_items(self, start=1):
if start < 1 or start >= self.size():
raise IndexError('Enumerate is allowed between [1 : size of the alphabet)')
return zip(range(start, len(self.instances) + 1), self.instances[start - 1:])
def close(self):
self.keep_growing = False
def open(self):
self.keep_growing = True
def get_content(self):
return {'instance2index': self.instance2index, 'instances': self.instances}
def from_json(self, data):
self.instances = data['instances']
self.instance2index = data['instance2index']
def save(self, output_directory, name=None):
"""
        Save both alphabet records to the given directory.
:param output_directory: Directory to save model and weights.
:param name: The alphabet saving name, optional.
:return:
"""
        saving_name = name if name else self.name
try:
json.dump(self.get_content(), open(os.path.join(output_directory, saving_name + '.json'), 'w'))
except Exception as e:
            print('Exception: Alphabet is not saved: %s' % repr(e))
def load(self, input_directory, name=None):
"""
        Load the alphabet records from the given directory. This allows old models
        to be reused even if the structure changes.
:param input_directory: Directory to save model and weights
:return:
"""
        loading_name = name if name else self.name
self.from_json(json.load(open(os.path.join(input_directory, loading_name + '.json'))))
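# Usage sketch (assumption: a plain word alphabet as built in data_loader.py):
# >>> word_alphabet = Alphabet('word')   # index 1 is reserved for '</unk>'
# >>> word_alphabet.add('项目')
# >>> word_alphabet.get_index('项目')
# 2
# >>> word_alphabet.get_instance(2)
# '项目'
# >>> word_alphabet.close()              # stop growing before decoding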
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/utils/alphabet.py
|
alphabet.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/11 19:54
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/utils/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : functions
# @Author : LiuYan
# @Time : 2021/3/23 9:57
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
def normalize_word(word):
new_word = ''
for char in word:
if char.isdigit():
new_word += '0'
else:
new_word += char
return new_word
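# Illustrative example: every digit is mapped to '0' so that all numbers share
# the same embedding, everything else is left unchanged.
# >>> normalize_word('合同金额3.5亿元')
# '合同金额0.0亿元'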
def read_instance(input_file, word_alphabet, label_alphabet, number_normalized, max_sent_length, split_token='\t'):
in_lines = open(input_file, 'r', encoding='utf-8').readlines()
instance_texts, instance_Ids = [], []
doc_id = ''
words, labels = [], []
word_Ids, label_Ids = [], []
# for sequence labeling data format i.e. CoNLL 2003
for line in in_lines:
if not doc_id:
doc_id = line.strip()
continue
if len(line) > 2:
pairs = line.strip('\n').split(split_token)
word = pairs[0]
words.append(word)
if number_normalized:
word = normalize_word(word)
label = pairs[-1]
labels.append(label)
word_Ids.append(word_alphabet.get_index(word))
label_Ids.append(label_alphabet.get_index(label))
else:
if (len(words) > 0) and ((max_sent_length < 0) or (len(words) < max_sent_length)):
# get sent_word_Ids_list (split with '.')
period_id = word_alphabet.get_index('#####')
sent_word_Ids_list = []
idx = 0
sent_word_Ids = []
while idx <= len(word_Ids) - 1:
sent_word_Ids.append(word_Ids[idx])
if word_Ids[idx] == period_id:
sent_word_Ids_list.append(sent_word_Ids)
sent_word_Ids = []
idx += 1
if sent_word_Ids:
sent_word_Ids_list.append(sent_word_Ids)
instance_texts.append([words, labels, doc_id])
instance_Ids.append([word_Ids, sent_word_Ids_list, label_Ids])
doc_id = ''
words, labels = [], []
word_Ids, label_Ids = [], []
return instance_texts, instance_Ids
def build_pretrain_embedding(embedding_path, word_alphabet, embedd_dim=100, norm=True):
embedd_dict = dict()
if embedding_path != None:
embedd_dict, embedd_dim = load_pretrain_emb(embedding_path)
alphabet_size = word_alphabet.size()
scale = np.sqrt(3.0 / embedd_dim)
pretrain_emb = np.empty([word_alphabet.size(), embedd_dim])
perfect_match = 0
case_match = 0
not_match = 0
for word, index in word_alphabet.iteritems():
if word in embedd_dict:
if norm:
pretrain_emb[index, :] = norm2one(embedd_dict[word])
else:
pretrain_emb[index, :] = embedd_dict[word]
perfect_match += 1
elif word.lower() in embedd_dict:
if norm:
pretrain_emb[index, :] = norm2one(embedd_dict[word.lower()])
else:
pretrain_emb[index, :] = embedd_dict[word.lower()]
case_match += 1
else:
pretrain_emb[index, :] = np.random.uniform(-scale, scale, [1, embedd_dim])
not_match += 1
pretrained_size = len(embedd_dict)
    print('Embedding:\n pretrain word:%s, perfect match:%s, case_match:%s, oov:%s, oov%%:%s' % (
pretrained_size, perfect_match, case_match, not_match, (not_match + 0.) / alphabet_size))
return pretrain_emb, embedd_dim
def norm2one(vec):
root_sum_square = np.sqrt(np.sum(np.square(vec)))
return vec / root_sum_square
def load_pretrain_emb(embedding_path):
embedd_dim = -1
embedd_dict = dict()
with open(embedding_path, 'r', encoding='ISO-8859-1') as file:
for line in file:
line = line.strip()
if len(line) == 0:
continue
tokens = line.split()
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
elif embedd_dim + 1 != len(tokens):
# ignore illegal embedding line
continue
# assert (embedd_dim + 1 == len(tokens))
embedd = np.empty([1, embedd_dim])
embedd[:] = tokens[1:]
first_col = tokens[0]
embedd_dict[first_col] = embedd
return embedd_dict, embedd_dim
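# Sketch of the expected embedding file format (hypothetical token and values):
# each line holds a token followed by word_emb_dim floats, e.g.
#   项目 0.12 -0.03 0.44 ...
# lines whose column count differs from the first line are silently skipped.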
if __name__ == '__main__':
a = np.arange(9.0)
print(a)
print(norm2one(a))
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/utils/functions.py
|
functions.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : seqlabel
# @Author : LiuYan
# @Time : 2021/3/23 9:50
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
from doc_event.model.wordsequence import WordSequence
from doc_event.model.crf import CRF
class SeqLabel(nn.Module):
def __init__(self, data):
super(SeqLabel, self).__init__()
self.use_crf = data.use_crf
print('build sequence labeling network...')
print('word feature extractor: ', data.word_feature_extractor)
print('use crf: ', self.use_crf)
self.gpu = data.HP_gpu
self.average_batch = data.average_batch_loss
# add two more label for downlayer lstm, use original label size for CRF
label_size = data.label_alphabet_size
data.label_alphabet_size += 2
self.word_hidden = WordSequence(data)
if self.use_crf:
self.crf = CRF(label_size, self.gpu)
def calculate_loss(self, word_inputs, word_seq_lengths, list_sent_words_tensor, batch_label, mask):
outs = self.word_hidden(word_inputs, list_sent_words_tensor, word_seq_lengths)
batch_size = word_inputs.size(0)
seq_len = word_inputs.size(1)
if self.use_crf:
total_loss = self.crf.neg_log_likelihood_loss(outs, mask, batch_label)
scores, tag_seq = self.crf._viterbi_decode(outs, mask)
else:
loss_function = nn.NLLLoss(ignore_index=0, size_average=False)
outs = outs.view(batch_size * seq_len, -1)
score = F.log_softmax(outs, 1)
total_loss = loss_function(score, batch_label.view(batch_size * seq_len))
_, tag_seq = torch.max(score, 1)
tag_seq = tag_seq.view(batch_size, seq_len)
if self.average_batch:
total_loss = total_loss / batch_size
return total_loss, tag_seq
def forward(self, word_inputs, word_seq_lengths, list_sent_words_tensor, mask):
outs = self.word_hidden(word_inputs, list_sent_words_tensor, word_seq_lengths)
batch_size = word_inputs.size(0)
seq_len = word_inputs.size(1)
if self.use_crf:
scores, tag_seq = self.crf._viterbi_decode(outs, mask)
else:
outs = outs.view(batch_size * seq_len, -1)
_, tag_seq = torch.max(outs, 1)
tag_seq = tag_seq.view(batch_size, seq_len)
# filter padded position with zero
tag_seq = mask.long() * tag_seq
return tag_seq
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/model/seqlabel.py
|
seqlabel.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : wordsequence
# @Author : LiuYan
# @Time : 2021/3/23 9:53
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
import numpy as np
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from doc_event.model.wordrep import WordRep
seed_num = 42
torch.manual_seed(seed_num)
torch.cuda.manual_seed_all(seed_num)
torch.backends.cudnn.deterministic = True
class WordSequence(nn.Module):
def __init__(self, data):
super(WordSequence, self).__init__()
print('build word sequence feature extractor: %s...' % (data.word_feature_extractor))
self.gpu = data.HP_gpu
# self.batch_size = data.HP_batch_size
# self.hidden_dim = data.HP_hidden_dim
self.droplstm = nn.Dropout(data.HP_dropout)
self.droplstm_sent = nn.Dropout(data.HP_dropout - 0.1)
self.bilstm_flag = data.HP_bilstm
self.lstm_layer = data.HP_lstm_layer
self.wordrep = WordRep(data)
self.input_size = data.word_emb_dim
# bert fea size
if data.use_bert:
self.input_size += 768
# The LSTM takes word embeddings as inputs, and outputs hidden states
# with dimensionality hidden_dim.
if self.bilstm_flag:
self.lstm_hidden = data.HP_hidden_dim // 2
else:
self.lstm_hidden = data.HP_hidden_dim
self.lstm = nn.LSTM(self.input_size, self.lstm_hidden, num_layers=self.lstm_layer,
batch_first=True, bidirectional=self.bilstm_flag)
self.sent_lstm = nn.LSTM(self.input_size, self.lstm_hidden, num_layers=self.lstm_layer,
batch_first=True, bidirectional=self.bilstm_flag)
self.lstm2 = nn.LSTM(self.lstm_hidden * 2, self.lstm_hidden, num_layers=self.lstm_layer,
batch_first=True, bidirectional=self.bilstm_flag)
# The linear layer that maps from hidden state space to tag space
self.hidden2tag = nn.Linear(data.HP_hidden_dim, data.label_alphabet_size)
self.hidden2tag_sent_level = nn.Linear(data.HP_hidden_dim, data.label_alphabet_size)
self.gate = nn.Linear(data.HP_hidden_dim * 2, data.HP_hidden_dim)
self.sigmoid = nn.Sigmoid()
if self.gpu:
self.droplstm = self.droplstm.cuda()
self.droplstm_sent = self.droplstm_sent.cuda()
self.hidden2tag = self.hidden2tag.cuda()
self.hidden2tag_sent_level = self.hidden2tag_sent_level.cuda()
self.lstm = self.lstm.cuda()
self.sent_lstm = self.sent_lstm.cuda()
self.gate = self.gate.cuda()
self.sigmoid = self.sigmoid.cuda()
def get_sent_rep(self, sent, sent_length):
word_represent = self.wordrep(sent, sent_length)
packed_words = pack_padded_sequence(word_represent, sent_length, True)
hidden = None
lstm_out, hidden = self.sent_lstm(packed_words, hidden)
lstm_out, _ = pad_packed_sequence(lstm_out)
feature_out_sent = self.droplstm_sent(lstm_out.transpose(1, 0))
return feature_out_sent
def forward(self, word_inputs, list_sent_words_tensor, word_seq_lengths):
"""
input:
word_inputs: (batch_size, sent_len)
            list_sent_words_tensor: list (per document) of sentence word-id tensors
word_seq_lengths: list of batch_size, (batch_size,1)
output:
Variable(batch_size, sent_len, hidden_dim)
"""
# paragraph-level
word_represent = self.wordrep(word_inputs, word_seq_lengths) # [batch_size, seq_len, embed_size]
packed_words = pack_padded_sequence(word_represent, word_seq_lengths.cpu().numpy(), True)
hidden = None
lstm_out, hidden = self.lstm(packed_words, hidden)
lstm_out, _ = pad_packed_sequence(lstm_out) # [seq_len, batch_size, hidden_size]
feature_out = self.droplstm(lstm_out.transpose(1, 0)) # [batch_size, seq_len, hidden_size]
# sentence-level reps
feature_out_sents = torch.zeros(
(feature_out.size()[0], feature_out.size()[1], feature_out.size()[2]),
requires_grad=False
).float()
if self.gpu:
feature_out_sents = feature_out_sents.cuda()
for idx, seq in enumerate(list_sent_words_tensor):
feature_out_seq = []
for sent in seq:
feature_out_sent = self.get_sent_rep(sent, np.array([len(sent[0])]))
feature_out_seq.append(feature_out_sent.squeeze(0))
feature_out_seq = torch.cat(feature_out_seq, 0)
if self.gpu:
feature_out_seq.cuda()
feature_out_sents[idx][:len(feature_out_seq)][:] = feature_out_seq
gamma = self.sigmoid(self.gate(torch.cat((feature_out, feature_out_sents), 2)))
outputs_final = self.hidden2tag(gamma * feature_out + (1 - gamma) * feature_out_sents)
return outputs_final
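# Note (sketch of the fusion step in forward above): paragraph-level and
# sentence-level LSTM features are blended with a learned gate before the
# hidden2tag projection:
#   gamma = sigmoid(W [h_para ; h_sent])
#   h = gamma * h_para + (1 - gamma) * h_sent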
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/model/wordsequence.py
|
wordsequence.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : wordrep
# @Author : LiuYan
# @Time : 2021/3/23 9:52
from __future__ import print_function
from __future__ import absolute_import
import os
import torch
import torch.nn as nn
import numpy as np
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM, WordpieceTokenizer
seed_num = 42
torch.manual_seed(seed_num)
np.random.seed(seed_num)
torch.cuda.manual_seed_all(seed_num)
torch.backends.cudnn.deterministic = True
class WordRep(nn.Module):
def __init__(self, data):
super(WordRep, self).__init__()
print('build word representation...')
self.gpu = data.HP_gpu
self.batch_size = data.HP_batch_size
self.embedding_dim = data.word_emb_dim
self.drop = nn.Dropout(data.HP_dropout)
self.word_embedding = nn.Embedding(data.word_alphabet.size(), self.embedding_dim)
if data.pretrain_word_embedding is not None:
self.word_embedding.weight.data.copy_(torch.from_numpy(data.pretrain_word_embedding))
else:
self.word_embedding.weight.data.copy_(
torch.from_numpy(self.random_embedding(data.word_alphabet.size(), self.embedding_dim)))
# bert feature
self.word_alphabet = data.word_alphabet
self._use_bert = data.use_bert
self._bert_dir = data.bert_dir
if self._use_bert:
# Load pre-trained model (weights)
self.bert_model = BertModel.from_pretrained(self._bert_dir)
self.bert_model.eval()
# Load pre-trained model tokenizer (vocabulary)
self.tokenizer = BertTokenizer.from_pretrained(self._bert_dir)
self.wpiecetokenizer = WordpieceTokenizer(self.tokenizer.vocab)
self.vocab = self._read_vocab(path=self._bert_dir)
if self.gpu:
self.drop = self.drop.cuda()
self.word_embedding = self.word_embedding.cuda()
if self._use_bert:
self.bert_model = self.bert_model.cuda()
def random_embedding(self, vocab_size, embedding_dim):
pretrain_emb = np.empty([vocab_size, embedding_dim])
scale = np.sqrt(3.0 / embedding_dim)
for index in range(vocab_size):
pretrain_emb[index, :] = np.random.uniform(-scale, scale, [1, embedding_dim])
return pretrain_emb
def _read_vocab(self, path):
result_vocab = list()
vocab_path = os.path.join(path, 'vocab.txt')
with open(vocab_path, 'r') as f:
vocab = f.readlines()
for v in vocab:
result_vocab.append(v.strip())
return result_vocab
pass
def _is_vocab(self, token):
if token in self.vocab:
return False
return True
pass
def bert_fea(self, ids_batch):
tokens_tensor_batch = []
context_tokens_uncased_batch = []
for ids in ids_batch:
context_tokens_uncased = []
for i in ids:
token = self.word_alphabet.get_instance(i)
if token == '</unk>' or not token or self._is_vocab(token):
context_tokens_uncased.append('[UNK]')
elif token == '<PAD>':
context_tokens_uncased.append('[PAD]')
else:
context_tokens_uncased.append(token)
context_tokens_uncased_batch.append(context_tokens_uncased)
# Tokenized input
# Convert token to vocabulary indices
indexed_tokens = self.tokenizer.convert_tokens_to_ids(context_tokens_uncased)
tokens_tensor_batch.append(indexed_tokens)
# Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
tokens_tensor_batch = torch.tensor(tokens_tensor_batch)
if self.gpu:
tokens_tensor_batch = tokens_tensor_batch.to('cuda')
# Predict hidden states features for each layer
with torch.no_grad():
encoded_layers, _ = self.bert_model(tokens_tensor_batch)
        # average the hidden states over all encoded layers (for each token)
# batchsize * doc len * 768 (bert hidden size)
avg = sum(encoded_layers) / len(encoded_layers)
        # keep the averaged feature for every position (no special handling of [CLS] here)
context_bert_feature_batch = avg[:, :, :]
return context_bert_feature_batch
def forward(self, word_inputs, word_seq_lengths):
"""
input:
word_inputs: (batch_size, sent_len)
word_seq_lengths: list of batch_size, (batch_size,1)
output:
Variable(batch_size, sent_len, hidden_dim)
"""
word_embs = self.word_embedding(word_inputs)
word_list = [word_embs]
if self._use_bert:
context_bert_feature_batch = self.bert_fea(word_inputs)
word_list.append(context_bert_feature_batch)
word_embs = torch.cat(word_list, 2)
word_represent = self.drop(word_embs)
return word_represent
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/model/wordrep.py
|
wordrep.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : crf
# @Author : LiuYan
# @Time : 2021/3/23 9:46
from __future__ import print_function
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
START_TAG = -2
STOP_TAG = -1
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec, m_size):
"""
calculate log of exp sum
args:
vec (batch_size, vanishing_dim, hidden_dim) : input tensor
m_size : hidden_dim
return:
batch_size, hidden_dim
"""
_, idx = torch.max(vec, 1) # B * 1 * M
max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M
return max_score.view(-1, m_size) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)).view(-1,
m_size) # B * M
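# Minimal shape sketch (hypothetical tensor): log_sum_exp reduces dimension 1
# in a numerically stable way, so a (batch, vanishing_dim, hidden_dim) input
# yields a (batch, hidden_dim) output.
# >>> v = torch.zeros(1, 2, 3)
# >>> log_sum_exp(v, 3).shape
# torch.Size([1, 3])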
class CRF(nn.Module):
def __init__(self, tagset_size, gpu):
super(CRF, self).__init__()
print('build CRF...')
self.gpu = gpu
# Matrix of transition parameters. Entry i,j is the score of transitioning from i to j.
self.tagset_size = tagset_size
# # We add 2 here, because of START_TAG and STOP_TAG
# # transitions (f_tag_size, t_tag_size), transition value from f_tag to t_tag
init_transitions = torch.zeros(self.tagset_size + 2, self.tagset_size + 2)
init_transitions[:, START_TAG] = -10000.0
init_transitions[STOP_TAG, :] = -10000.0
init_transitions[:, 0] = -10000.0
init_transitions[0, :] = -10000.0
if self.gpu:
init_transitions = init_transitions.cuda()
self.transitions = nn.Parameter(init_transitions)
# self.transitions = nn.Parameter(torch.Tensor(self.tagset_size+2, self.tagset_size+2))
# self.transitions.data.zero_()
def _calculate_PZ(self, feats, mask):
"""
input:
feats: (batch, seq_len, self.tag_size+2)
masks: (batch, seq_len)
"""
batch_size = feats.size(0)
seq_len = feats.size(1)
tag_size = feats.size(2)
# print feats.view(seq_len, tag_size)
assert (tag_size == self.tagset_size + 2)
mask = mask.transpose(1, 0).contiguous()
ins_num = seq_len * batch_size
# be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1)
feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
# need to consider start
scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)
scores = scores.view(seq_len, batch_size, tag_size, tag_size)
# build iter
seq_iter = enumerate(scores)
_, inivalues = next(seq_iter) # bat_size * from_target_size * to_target_size
# only need start from start_tag
partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size, 1) # bat_size * to_target_size
# add start score (from start to all tag, duplicate to batch_size)
# partition = partition + self.transitions[START_TAG,:].view(1, tag_size, 1).expand(batch_size, tag_size, 1)
# iter over last scores
for idx, cur_values in seq_iter:
# previous to_target is current from_target
# partition: previous results log(exp(from_target)), #(batch_size * from_target)
# cur_values: bat_size * from_target * to_target
cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size,
tag_size)
cur_partition = log_sum_exp(cur_values, tag_size)
# print cur_partition.data
# (bat_size * from_target * to_target) -> (bat_size * to_target)
# partition = utils.switch(partition, cur_partition, mask[idx].view(bat_size, 1).expand(bat_size, self.tagset_size)).view(bat_size, -1)
mask_idx = mask[idx, :].view(batch_size, 1).expand(batch_size, tag_size)
# effective updated partition part, only keep the partition value of mask value = 1
masked_cur_partition = cur_partition.masked_select(mask_idx)
# let mask_idx broadcastable, to disable warning
mask_idx = mask_idx.contiguous().view(batch_size, tag_size, 1)
# replace the partition where the maskvalue=1, other partition value keeps the same
partition.masked_scatter_(mask_idx, masked_cur_partition)
# until the last state, add transition score for all partition (and do log_sum_exp) then select the value in STOP_TAG
cur_values = self.transitions.view(1, tag_size, tag_size).expand(batch_size, tag_size,
tag_size) + partition.contiguous().view(
batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
cur_partition = log_sum_exp(cur_values, tag_size)
final_partition = cur_partition[:, STOP_TAG]
return final_partition.sum(), scores
def _viterbi_decode(self, feats, mask):
"""
input:
feats: (batch, seq_len, self.tag_size+2)
mask: (batch, seq_len)
output:
decode_idx: (batch, seq_len) decoded sequence
                path_score: (batch, 1) corresponding score for each sequence (to be implemented)
"""
batch_size = feats.size(0)
seq_len = feats.size(1)
tag_size = feats.size(2)
assert (tag_size == self.tagset_size + 2)
# calculate sentence length for each sentence
length_mask = torch.sum(mask.long(), dim=1).view(batch_size, 1).long()
# mask to (seq_len, batch_size)
mask = mask.transpose(1, 0).contiguous()
ins_num = seq_len * batch_size
# be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1)
feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
# need to consider start
scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)
scores = scores.view(seq_len, batch_size, tag_size, tag_size)
# build iter
seq_iter = enumerate(scores)
# record the position of best score
back_points = list()
partition_history = list()
# reverse mask (bug for mask = 1- mask, use this as alternative choice)
# mask = 1 + (-1)*mask
mask = (1 - mask.long()).bool()
_, inivalues = next(seq_iter) # bat_size * from_target_size * to_target_size
# only need start from start_tag
partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size) # bat_size * to_target_size
# print "init part:",partition.size()
partition_history.append(partition)
# iter over last scores
for idx, cur_values in seq_iter:
# previous to_target is current from_target
# partition: previous results log(exp(from_target)), #(batch_size * from_target)
# cur_values: batch_size * from_target * to_target
cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size,
tag_size)
# forscores, cur_bp = torch.max(cur_values[:,:-2,:], 1) # do not consider START_TAG/STOP_TAG
# print "cur value:", cur_values.size()
partition, cur_bp = torch.max(cur_values, 1)
# print "partsize:",partition.size()
# exit(0)
# print partition
# print cur_bp
# print "one best, ",idx
partition_history.append(partition)
# cur_bp: (batch_size, tag_size) max source score position in current tag
# set padded label as 0, which will be filtered in post processing
cur_bp.masked_fill_(mask[idx].view(batch_size, 1).expand(batch_size, tag_size), 0)
back_points.append(cur_bp)
# exit(0)
# add score to final STOP_TAG
partition_history = torch.cat(partition_history, 0).view(seq_len, batch_size, -1).transpose(1,
0).contiguous() ## (batch_size, seq_len. tag_size)
        # get the last position for each sentence, and select the last partitions using gather()
last_position = length_mask.view(batch_size, 1, 1).expand(batch_size, 1, tag_size) - 1
last_partition = torch.gather(partition_history, 1, last_position).view(batch_size, tag_size, 1)
# calculate the score from last partition to end state (and then select the STOP_TAG from it)
last_values = last_partition.expand(batch_size, tag_size, tag_size) + self.transitions.view(1, tag_size,
tag_size).expand(
batch_size, tag_size, tag_size)
_, last_bp = torch.max(last_values, 1)
pad_zero = autograd.Variable(torch.zeros(batch_size, tag_size)).long()
if self.gpu:
pad_zero = pad_zero.cuda()
back_points.append(pad_zero)
back_points = torch.cat(back_points).view(seq_len, batch_size, tag_size)
# select end ids in STOP_TAG
pointer = last_bp[:, STOP_TAG]
insert_last = pointer.contiguous().view(batch_size, 1, 1).expand(batch_size, 1, tag_size)
back_points = back_points.transpose(1, 0).contiguous()
# move the end ids(expand to tag_size) to the corresponding position of back_points to replace the 0 values
# print "lp:",last_position
# print "il:",insert_last
back_points.scatter_(1, last_position, insert_last)
# print "bp:",back_points
# exit(0)
back_points = back_points.transpose(1, 0).contiguous()
# decode from the end, padded position ids are 0, which will be filtered if following evaluation
decode_idx = autograd.Variable(torch.LongTensor(seq_len, batch_size))
if self.gpu:
decode_idx = decode_idx.cuda()
decode_idx[-1] = pointer.detach()
for idx in range(len(back_points) - 2, -1, -1):
pointer = torch.gather(back_points[idx], 1, pointer.contiguous().view(batch_size, 1))
decode_idx[idx] = pointer.detach().view(batch_size)
path_score = None
decode_idx = decode_idx.transpose(1, 0)
return path_score, decode_idx
def forward(self, feats):
path_score, best_path = self._viterbi_decode(feats)
return path_score, best_path
def _score_sentence(self, scores, mask, tags):
"""
input:
scores: variable (seq_len, batch, tag_size, tag_size)
mask: (batch, seq_len)
tags: tensor (batch, seq_len)
output:
score: sum of score for gold sequences within whole batch
"""
# Gives the score of a provided tag sequence
batch_size = scores.size(1)
seq_len = scores.size(0)
tag_size = scores.size(2)
# convert tag value into a new format, recorded label bigram information to index
new_tags = autograd.Variable(torch.LongTensor(batch_size, seq_len))
if self.gpu:
new_tags = new_tags.cuda()
for idx in range(seq_len):
if idx == 0:
# start -> first score
new_tags[:, 0] = (tag_size - 2) * tag_size + tags[:, 0]
else:
new_tags[:, idx] = tags[:, idx - 1] * tag_size + tags[:, idx]
# transition for label to STOP_TAG
end_transition = self.transitions[:, STOP_TAG].contiguous().view(1, tag_size).expand(batch_size, tag_size)
# length for batch, last word position = length - 1
length_mask = torch.sum(mask.long(), dim=1).view(batch_size, 1).long()
# index the label id of last word
end_ids = torch.gather(tags, 1, length_mask - 1)
# index the transition score for end_id to STOP_TAG
end_energy = torch.gather(end_transition, 1, end_ids)
# convert tag as (seq_len, batch_size, 1)
new_tags = new_tags.transpose(1, 0).contiguous().view(seq_len, batch_size, 1)
# need convert tags id to search from 400 positions of scores
tg_energy = torch.gather(scores.view(seq_len, batch_size, -1), 2, new_tags).view(seq_len,
batch_size) # seq_len * bat_size
# mask transpose to (seq_len, batch_size)
tg_energy = tg_energy.masked_select(mask.transpose(1, 0))
# ## calculate the score from START_TAG to first label
# start_transition = self.transitions[START_TAG,:].view(1, tag_size).expand(batch_size, tag_size)
# start_energy = torch.gather(start_transition, 1, tags[0,:])
# add all score together
# gold_score = start_energy.sum() + tg_energy.sum() + end_energy.sum()
gold_score = tg_energy.sum() + end_energy.sum()
return gold_score
def neg_log_likelihood_loss(self, feats, mask, tags):
        # negative log likelihood
batch_size = feats.size(0)
forward_score, scores = self._calculate_PZ(feats, mask)
gold_score = self._score_sentence(scores, mask, tags)
# print "batch, f:", forward_score.data[0], " g:", gold_score.data[0], " dis:", forward_score.data[0] - gold_score.data[0]
# exit(0)
return forward_score - gold_score
def _viterbi_decode_nbest(self, feats, mask, nbest):
"""
input:
feats: (batch, seq_len, self.tag_size+2)
mask: (batch, seq_len)
output:
decode_idx: (batch, nbest, seq_len) decoded sequence
                path_score: (batch, nbest) corresponding score for each sequence (to be implemented)
nbest decode for sentence with one token is not well supported, to be optimized
"""
batch_size = feats.size(0)
seq_len = feats.size(1)
tag_size = feats.size(2)
assert (tag_size == self.tagset_size + 2)
# calculate sentence length for each sentence
length_mask = torch.sum(mask.long(), dim=1).view(batch_size, 1).long()
# mask to (seq_len, batch_size)
mask = mask.transpose(1, 0).contiguous()
ins_num = seq_len * batch_size
# be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1)
feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
# need to consider start
scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)
scores = scores.view(seq_len, batch_size, tag_size, tag_size)
# build iter
seq_iter = enumerate(scores)
# record the position of best score
back_points = list()
partition_history = list()
# reverse mask (bug for mask = 1- mask, use this as alternative choice)
# mask = 1 + (-1)*mask
mask = (1 - mask.long()).bool()
_, inivalues = next(seq_iter) # bat_size * from_target_size * to_target_size
# only need start from start_tag
partition = inivalues[:, START_TAG, :].clone() # bat_size * to_target_size
# initial partition [batch_size, tag_size]
partition_history.append(partition.view(batch_size, tag_size, 1).expand(batch_size, tag_size, nbest))
# iter over last scores
for idx, cur_values in seq_iter:
if idx == 1:
cur_values = cur_values.view(batch_size, tag_size, tag_size) + partition.contiguous().view(batch_size,
tag_size,
1).expand(
batch_size, tag_size, tag_size)
else:
# previous to_target is current from_target
# partition: previous results log(exp(from_target)), #(batch_size * nbest * from_target)
# cur_values: batch_size * from_target * to_target
cur_values = cur_values.view(batch_size, tag_size, 1, tag_size).expand(batch_size, tag_size, nbest,
tag_size) + partition.contiguous().view(
batch_size, tag_size, nbest, 1).expand(batch_size, tag_size, nbest, tag_size)
# compare all nbest and all from target
cur_values = cur_values.view(batch_size, tag_size * nbest, tag_size)
# print "cur size:",cur_values.size()
partition, cur_bp = torch.topk(cur_values, nbest, 1)
# cur_bp/partition: [batch_size, nbest, tag_size], id should be normize through nbest in following backtrace step
# print partition[:,0,:]
# print cur_bp[:,0,:]
# print "nbest, ",idx
if idx == 1:
cur_bp = cur_bp * nbest
partition = partition.transpose(2, 1)
cur_bp = cur_bp.transpose(2, 1)
# print partition
# exit(0)
# partition: (batch_size * to_target * nbest)
# cur_bp: (batch_size * to_target * nbest) Notice the cur_bp number is the whole position of tag_size*nbest, need to convert when decode
partition_history.append(partition)
# cur_bp: (batch_size,nbest, tag_size) topn source score position in current tag
# set padded label as 0, which will be filtered in post processing
# mask[idx] ? mask[idx-1]
cur_bp.masked_fill_(mask[idx].view(batch_size, 1, 1).expand(batch_size, tag_size, nbest), 0)
# print cur_bp[0]
back_points.append(cur_bp)
# add score to final STOP_TAG
partition_history = torch.cat(partition_history, 0).view(seq_len, batch_size, tag_size, nbest).transpose(1,
0).contiguous() ## (batch_size, seq_len, nbest, tag_size)
        # get the last position for each sentence, and select the last partitions using gather()
last_position = length_mask.view(batch_size, 1, 1, 1).expand(batch_size, 1, tag_size, nbest) - 1
last_partition = torch.gather(partition_history, 1, last_position).view(batch_size, tag_size, nbest, 1)
# calculate the score from last partition to end state (and then select the STOP_TAG from it)
last_values = last_partition.expand(batch_size, tag_size, nbest, tag_size) + self.transitions.view(1, tag_size,
1,
tag_size).expand(
batch_size, tag_size, nbest, tag_size)
last_values = last_values.view(batch_size, tag_size * nbest, tag_size)
end_partition, end_bp = torch.topk(last_values, nbest, 1)
# end_partition: (batch, nbest, tag_size)
end_bp = end_bp.transpose(2, 1)
# end_bp: (batch, tag_size, nbest)
pad_zero = autograd.Variable(torch.zeros(batch_size, tag_size, nbest)).long()
if self.gpu:
pad_zero = pad_zero.cuda()
back_points.append(pad_zero)
back_points = torch.cat(back_points).view(seq_len, batch_size, tag_size, nbest)
# select end ids in STOP_TAG
pointer = end_bp[:, STOP_TAG, :] ## (batch_size, nbest)
insert_last = pointer.contiguous().view(batch_size, 1, 1, nbest).expand(batch_size, 1, tag_size, nbest)
back_points = back_points.transpose(1, 0).contiguous()
# move the end ids(expand to tag_size) to the corresponding position of back_points to replace the 0 values
# print "lp:",last_position
# print "il:",insert_last[0]
# exit(0)
# copy the ids of last position:insert_last to back_points, though the last_position index
# last_position includes the length of batch sentences
# print "old:", back_points[9,0,:,:]
back_points.scatter_(1, last_position, insert_last)
# back_points: [batch_size, seq_length, tag_size, nbest]
# print "new:", back_points[9,0,:,:]
# exit(0)
# print pointer[2]
'''
back_points: in simple demonstratration
x,x,x,x,x,x,x,x,x,7
x,x,x,x,x,4,0,0,0,0
x,x,6,0,0,0,0,0,0,0
'''
back_points = back_points.transpose(1, 0).contiguous()
# print back_points[0]
# back_points: (seq_len, batch, tag_size, nbest)
# decode from the end, padded position ids are 0, which will be filtered in following evaluation
decode_idx = autograd.Variable(torch.LongTensor(seq_len, batch_size, nbest))
if self.gpu:
decode_idx = decode_idx.cuda()
decode_idx[-1] = pointer.data / nbest
# print "pointer-1:",pointer[2]
# exit(0)
# use old mask, let 0 means has token
for idx in range(len(back_points) - 2, -1, -1):
# print "pointer: ",idx, pointer[3]
# print "back:",back_points[idx][3]
# print "mask:",mask[idx+1,3]
new_pointer = torch.gather(back_points[idx].view(batch_size, tag_size * nbest), 1,
pointer.contiguous().view(batch_size, nbest))
decode_idx[idx] = new_pointer.data / nbest
# # use new pointer to remember the last end nbest ids for non longest
pointer = new_pointer + pointer.contiguous().view(batch_size, nbest) * mask[idx].view(batch_size, 1).expand(
batch_size, nbest).long()
# exit(0)
path_score = None
decode_idx = decode_idx.transpose(1, 0)
# decode_idx: [batch, seq_len, nbest]
# print decode_idx[:,:,0]
# print "nbest:",nbest
# print "diff:", decode_idx[:,:,0]- decode_idx[:,:,4]
# print decode_idx[:,0,:]
# exit(0)
# calculate probability for each sequence
scores = end_partition[:, :, STOP_TAG]
# scores: [batch_size, nbest]
max_scores, _ = torch.max(scores, 1)
minus_scores = scores - max_scores.view(batch_size, 1).expand(batch_size, nbest)
path_score = F.softmax(minus_scores, 1)
# path_score: [batch_size, nbest]
# exit(0)
return path_score, decode_idx
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/model/crf.py
|
crf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/11 19:38
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_event/model/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/3/22 14:04
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : test_levenshtein
# @Author : LiuYan
# @Time : 2021/4/9 10:15
from doc_similarity.model.levenshtein import LevenshteinSimilarity
# 测试
if __name__ == '__main__':
text_1 = 'simhash算法的主要思想是降维,将高维的特征向量映射成一个低维的特征向量,通过两个向量的Hamming Distance来确定文章是否重复或者高度近似。'
text_2 = '我们所用到的simhash算法的主要思想是降维,将高维特征向量映射成一个低维特征向量,再通过两个向量的Hamming Distance来确定文章是否重复或高度近似。'
# stop_words_path = '../data/stopwords.txt'
stop_words_path = '../data/stop_words.txt'
lev_sim = LevenshteinSimilarity(stop_words_path=stop_words_path)
with open('../data/sample_x.txt', 'r') as x, open('../data/sample_y.txt', 'r') as y:
content_x = x.read()
content_y = y.read()
distance = lev_sim.calculate(text_1, text_2)
# distance = lev_sim.calculate(content_x, content_y)
print('相似度: %.2f%%' % (distance * 100))
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/test/test_levenshtein.py
|
test_levenshtein.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : test_cosine_sim
# @Author : LiuYan
# @Time : 2021/4/9 10:25
from doc_similarity.model.cosine_similarity import CosineSimilarity
# 测试
if __name__ == '__main__':
text_1 = 'simhash算法的主要思想是降维,将高维的特征向量映射成一个低维的特征向量,通过两个向量的Hamming Distance来确定文章是否重复或者高度近似。'
text_2 = '我们所用到的simhash算法的主要思想是降维,将高维特征向量映射成一个低维特征向量,再通过两个向量的Hamming Distance来确定文章是否重复或高度近似。'
# stop_words_path = '../data/stopwords.txt'
stop_words_path = '../data/stop_words.txt'
cos_sim = CosineSimilarity(stop_words_path=stop_words_path)
with open('../data/sample_x.txt', 'r') as x, open('../data/sample_y.txt', 'r') as y:
content_x = x.read()
content_y = y.read()
similarity = cos_sim.calculate(text_1, text_2)
# similarity = cos_sim.calculate(content_x, content_y)
print('相似度: %.2f%%' % (similarity * 100))
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/test/test_cosine_sim.py
|
test_cosine_sim.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : test_total
# @Author : LiuYan
# @Time : 2021/4/9 10:27
import os
from utils.log import logger
from doc_similarity.model.cosine_similarity import CosineSimilarity
from doc_similarity.model.jaccard import JaccardSimilarity
from doc_similarity.model.levenshtein import LevenshteinSimilarity
from doc_similarity.model.min_hash import MinHashSimilarity
from doc_similarity.model.sim_hash import SimHashSimilarity, OldSimHashSimilarity
from doc_similarity.model.similarity_tx import Similarity
# 测试
if __name__ == '__main__':
text_1 = 'simhash算法的主要思想是降维,将高维的特征向量映射成一个低维的特征向量,通过两个向量的Hamming Distance来确定文章是否重复或者高度近似。'
text_2 = '我们所用到的simhash算法的主要思想是降维,将高维特征向量映射成一个低维特征向量,再通过两个向量的Hamming Distance来确定文章是否重复或高度近似。'
root_path = '/home/zzsn/liuyan/word2vec/doc_similarity'
stop_words_path = os.path.join(root_path, 'stop_words.txt')
cos_sim = CosineSimilarity(stop_words_path=stop_words_path)
jac_sim = JaccardSimilarity(stop_words_path=stop_words_path)
lev_sim = LevenshteinSimilarity(stop_words_path=stop_words_path)
min_hash_sim = MinHashSimilarity(stop_words_path=stop_words_path)
sim_hash_sim = SimHashSimilarity(stop_words_path=stop_words_path)
old_sim_hash_sim = OldSimHashSimilarity()
sim_tx = Similarity(
model_path=os.path.join(root_path, 'Tencent_AILab_ChineseEmbedding_Min.txt'),
stopword_path=os.path.join(root_path, 'stopwords.txt')
)
with open('../data/sample/sample_x.txt', 'r') as x, open('../data/sample/sample_y.txt', 'r') as y:
content_x = x.read()
content_y = y.read()
result_cos_sim = cos_sim.calculate(text_1, text_2)
# result_cos_sim = cos_sim.calculate(content_x, content_y)
result_jac_sim = jac_sim.calculate(text_1, text_2)
# result_jac_sim = jac_sim.calculate(content_x, content_y)
result_lev_sim = lev_sim.calculate(text_1, text_2)
# result_lev_sim = lev_sim.calculate(content_x, content_y)
result_min_hash_sim = min_hash_sim.calculate(text_1, text_2)
# result_min_hash_sim = min_hash_sim.calculate(content_x, content_y)
result_old_sim_hash_sim = old_sim_hash_sim.calculate(text_1=text_1, text_2=text_2)
# result_old_sim_hash_sim = old_sim_hash_sim.calculate(text_1=content_x, text_2=content_y)
result_new_sim_hash_sim = sim_hash_sim.calculate(text_1, text_2)
# result_new_sim_hash_sim = sim_hash_sim.calculate(content_x, content_y)
result_sim_tx = sim_tx.calculation_sim(text_1, text_2)
# result_sim_tx = sim_tx.calculation_sim(content_x, content_y)
logger.info('Cosine Similarity \t\t: {:.2f}%'.format(result_cos_sim * 100))
logger.info('Jaccard Similarity\t\t: {:.2f}%'.format(result_jac_sim * 100))
logger.info('Levenshtein Similarity\t: {:.2f}%'.format(result_lev_sim * 100))
logger.info('Min hash Similarity \t: {:.2f}%'.format(result_min_hash_sim * 100))
logger.info('Old sim hash Similarity: {:.2f}%'.format(result_old_sim_hash_sim * 100))
    threshold = 3  # Hamming-distance threshold for the new SimHash result
logger.info('New sim hash Similarity: 海明距离: {}, 阈值距离: {}, 是否相似: {}'.format(
result_new_sim_hash_sim, threshold, result_new_sim_hash_sim <= threshold
))
logger.info('CTT Similarity \t\t: {}'.format(result_sim_tx))
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/test/test_total.py
|
test_total.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : test_jaccard
# @Author : LiuYan
# @Time : 2021/4/8 19:57
from doc_similarity.model.jaccard import JaccardSimilarity
# 测试
if __name__ == '__main__':
text_1 = 'simhash算法的主要思想是降维,将高维的特征向量映射成一个低维的特征向量,通过两个向量的Hamming Distance来确定文章是否重复或者高度近似。'
text_2 = '我们所用到的simhash算法的主要思想是降维,将高维特征向量映射成一个低维特征向量,再通过两个向量的Hamming Distance来确定文章是否重复或高度近似。'
# stop_words_path = '../data/stopwords.txt'
stop_words_path = '../data/stop_words.txt'
jac_sim = JaccardSimilarity(stop_words_path=stop_words_path)
with open('../data/sample_x.txt', 'r') as x, open('../data/sample_y.txt', 'r') as y:
content_x = x.read()
content_y = y.read()
similarity = jac_sim.calculate(text_1, text_2)
# similarity = jac_sim.calculate(content_x, content_y)
print('相似度: %.2f%%' % (similarity * 100))
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/test/test_jaccard.py
|
test_jaccard.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : test_sim_hash
# @Author : LiuYan
# @Time : 2021/4/8 19:40
from doc_similarity.model.sim_hash import SimHashSimilarity, OldSimHashSimilarity
# 测试
if __name__ == '__main__':
text_1 = 'simhash算法的主要思想是降维,将高维的特征向量映射成一个低维的特征向量,通过两个向量的Hamming Distance来确定文章是否重复或者高度近似。'
text_2 = '我们所用到的simhash算法的主要思想是降维,将高维特征向量映射成一个低维特征向量,再通过两个向量的Hamming Distance来确定文章是否重复或高度近似。'
stop_words_path = '/home/zzsn/liuyan/word2vec/doc_similarity/stop_words.txt'
sim_hash_sim = SimHashSimilarity(stop_words_path=stop_words_path)
old_sim_hash_sim = OldSimHashSimilarity()
with open('../data/sample/sample_x.txt', 'r') as x, open('../data/sample/sample_y.txt', 'r') as y:
content_x = x.read()
content_y = y.read()
similar = old_sim_hash_sim.calculate(text_1=text_1, text_2=text_2)
# similar = sim_hash(text_1=content_x, text_2=content_y)
print(similar)
similarity = sim_hash_sim.calculate(text_1, text_2)
# similarity = sim_hash_sim.calculate(content_x, content_y)
    threshold = 3  # Hamming-distance threshold
print(f'海明距离:{similarity} 判定距离:{threshold} 是否相似:{similarity <= threshold}')
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/test/test_sim_hash.py
|
test_sim_hash.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/8 19:40
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/test/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : test_min_hash
# @Author : LiuYan
# @Time : 2021/4/9 9:10
from doc_similarity.model.min_hash import MinHashSimilarity
# 测试
if __name__ == '__main__':
text_1 = 'simhash算法的主要思想是降维,将高维的特征向量映射成一个低维的特征向量,通过两个向量的Hamming Distance来确定文章是否重复或者高度近似。'
text_2 = '我们所用到的simhash算法的主要思想是降维,将高维特征向量映射成一个低维特征向量,再通过两个向量的Hamming Distance来确定文章是否重复或高度近似。'
# stop_words_path = '../data/stopwords.txt'
stop_words_path = '../data/stop_words.txt'
min_hash_sim = MinHashSimilarity(stop_words_path=stop_words_path)
with open('../data/sample_x.txt', 'r') as x, open('../data/sample_y.txt', 'r') as y:
content_x = x.read()
content_y = y.read()
similarity = min_hash_sim.calculate(text_1, text_2)
# similarity = min_hash_sim.calculate(content_x, content_y)
print('相似度: %.2f%%' % (similarity * 100))
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/test/test_min_hash.py
|
test_min_hash.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : doc_sim_app
# @Author : LiuYan
# @Time : 2021/4/20 20:50
from base.app.base_app import *
from doc_similarity.data.compare import *
doc_sim = Blueprint('/doc_sim', __name__)
@doc_sim.route('/test', methods=('GET', 'POST'))
def test():
app.logger.info('test -> doc_sim_app success!')
# logger.info('test -> doc_sim_app success!')
return 'test -> doc_sim_app success!'
@doc_sim.route('/similarity/', methods=['POST'])
def similarity():
"""
-> data:
:return:
"""
data = request.get_json()
article_list = data['article_list']
title_sim_name = data['title_sim_name']
content_sim_name = data['content_sim_name']
result_dict = compare_single(
article_list=article_list,
title_sim_name=title_sim_name,
content_sim_name=content_sim_name
)
# logger.info(result_dict)
app.logger.info(result_dict)
return json.dumps(result_dict, ensure_ascii=False)
@doc_sim.route('/similarity_list/', methods=['POST'])
def similarity_list():
"""
-> data:
:return:
"""
data = request.get_json()
article_list = data['article_list']
title_sim_name = data['title_sim_name']
content_sim_name = data['content_sim_name']
result_dict = compare_many(
article_list=article_list,
title_sim_name=title_sim_name,
content_sim_name=content_sim_name
)
# logger.info(result_dict)
app.logger.info(result_dict)
return json.dumps(result_dict, ensure_ascii=False)
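# --- illustrative request (not part of the original file) ---
# A hedged sketch of the JSON payload the two endpoints above expect; the host,
# port and URL prefix are assumptions that depend on how base_app registers
# this blueprint.
# import requests
# payload = {
#     'article_list': [
#         {'id': 1, 'title': '标题一', 'content': '正文一'},
#         {'id': 2, 'title': '标题二', 'content': '正文二'}
#     ],
#     'title_sim_name': 'cos_sim',    # cos_sim / jac_sim / lev_sim / min_hash / sim_hash / false
#     'content_sim_name': 'sim_hash'
# }
# print(requests.post('http://127.0.0.1:5000/doc_sim/similarity/', json=payload).json())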
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/app/doc_sim_app.py
|
doc_sim_app.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/9 11:32
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/app/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : data_loader
# @Author : LiuYan
# @Time : 2021/4/9 15:01
from doc_similarity.data.data_process import xlsx2list, list2xlsx
from doc_similarity.model.total_sim import TotalSimilarity
def compare_list(sim: object, total_list: list) -> list:
result_list = []
total_len = len(total_list)
for index_x in range(total_len):
content_x = total_list[index_x]
for index_y in range(index_x + 1, total_len):
content_y = total_list[index_y]
result_dict_title = sim.calculate(content_x['title'], content_y['title'])
result_dict_content = sim.calculate(content_x['content'], content_y['content'])
result_list.append([
content_x['id'], content_y['id'],
result_dict_title, result_dict_content
])
return result_list
pass
if __name__ == '__main__':
stop_words_path = '../data/stop_words.txt'
xlsx_path = '../data/total_datasets.xlsx'
total_sim = TotalSimilarity(stop_words_path=stop_words_path)
total_list = xlsx2list(xlsx_path=xlsx_path)
result_list = compare_list(sim=total_sim, total_list=total_list)
list2xlsx(xlsx_path='../data/result/result_total_datasets.xlsx', result_lists=result_list)
pass
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/data/data_loader.py
|
data_loader.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : compare
# @Author : LiuYan
# @Time : 2021/4/10 14:58
import os
from doc_similarity.model.cosine_similarity import CosineSimilarity
from doc_similarity.model.jaccard import JaccardSimilarity
from doc_similarity.model.levenshtein import LevenshteinSimilarity
from doc_similarity.model.min_hash import MinHashSimilarity
from doc_similarity.model.sim_hash import SimHashSimilarity, OldSimHashSimilarity
# from doc_similarity.model.similarity_tx import Similarity
# from doc_similarity.model.total_sim import TotalSimilarity
root_path = '/home/zzsn/liuyan/word2vec/doc_similarity'
stop_words_path = os.path.join(root_path, 'stop_words.txt')
cos_sim = CosineSimilarity(stop_words_path=stop_words_path)
jac_sim = JaccardSimilarity(stop_words_path=stop_words_path)
lev_sim = LevenshteinSimilarity(stop_words_path=stop_words_path)
min_hash_sim = MinHashSimilarity(stop_words_path=stop_words_path)
sim_hash_sim = SimHashSimilarity(stop_words_path=stop_words_path)
old_sim_hash_sim = OldSimHashSimilarity()
# ctt_sim = Similarity(
# model_path=os.path.join(root_path, 'Tencent_AILab_ChineseEmbedding_Min.txt'),
# stopword_path=os.path.join(root_path, 'stopwords.txt')
# )
# total_sim = TotalSimilarity(root_path=root_path)
sim_dict = {
'cos_sim': cos_sim,
'jac_sim': jac_sim,
'lev_sim': lev_sim,
'min_hash': min_hash_sim,
'sim_hash': old_sim_hash_sim,
# 'ctt_sim': ctt_sim,
'false': False
}
# def compare_all(total_list: list) -> list:
# result_list = []
# total_len = len(total_list)
# for index_x in range(total_len):
# article_x = total_list[index_x]
# for index_y in range(index_x + 1, total_len):
# article_y = total_list[index_y]
# result_dict_title = total_sim.calculate(article_x['title'], article_y['title'])
# result_dict_content = total_sim.calculate(article_x['content'], article_y['content'])
# result_list.append([
# article_x['id'], article_y['id'],
# result_dict_title, result_dict_content
# ])
# return result_list
# pass
def compare_sim_name(title_sim_name: str or bool, content_sim_name: str or bool) -> dict or list:
if title_sim_name in sim_dict:
title_sim = sim_dict[title_sim_name]
else:
return {
'handleMsg': '所选标题相似度算法名称错误或不存在!请核查(cos_sim / jac_sim / lev_sim / min_hash / sim_hash / false)',
'isHandleSuccess': False,
'logs': None,
'resultData': None
}
if content_sim_name in sim_dict:
content_sim = sim_dict[content_sim_name]
else:
return {
'handleMsg': '所选正文相似度算法名称错误或不存在!请核查(cos_sim / jac_sim / lev_sim / min_hash / sim_hash / false)',
'isHandleSuccess': False,
'logs': None,
'resultData': None
}
return [title_sim, content_sim]
def compare_single(article_list: list, title_sim_name: str or bool, content_sim_name: str or bool) -> dict:
judge_sim_name = compare_sim_name(title_sim_name=title_sim_name, content_sim_name=content_sim_name)
if type(judge_sim_name) is dict:
return judge_sim_name
else:
title_sim, content_sim = judge_sim_name[0], judge_sim_name[1]
if len(article_list) == 2:
article_x, article_y = article_list[0], article_list[1]
title_similarity = title_sim.calculate(
article_x['title'], article_y['title']
) if title_sim else 0.0
content_similarity = content_sim.calculate(
article_x['content'], article_y['content']
) if content_sim else 0.0
result_dict = {
'id_x': article_x['id'],
'id_y': article_y['id'],
'title_sim': title_similarity,
'content_sim': content_similarity
}
else:
return {
'handleMsg': '所对比文章数量不是 2 篇,请核查!',
'isHandleSuccess': False,
'logs': None,
'resultData': None
}
return {
'handleMsg': 'success',
'isHandleSuccess': True,
'logs': None,
'resultData': result_dict
}
pass
def compare_many(article_list: list, title_sim_name: str or bool, content_sim_name: str or bool) -> dict:
judge_sim_name = compare_sim_name(title_sim_name=title_sim_name, content_sim_name=content_sim_name)
if type(judge_sim_name) is dict:
return judge_sim_name
else:
title_sim, content_sim = judge_sim_name[0], judge_sim_name[1]
result_list = []
total_len = len(article_list)
if total_len < 3:
return {
'handleMsg': '所对比文章数量少于 3 篇,请核查!(2 篇文章对比请使用接口1 similarity)',
'isHandleSuccess': False,
'logs': None,
'resultData': None
}
else:
for index in range(total_len):
article = article_list[index]
article_list[index]['title_transform'] = title_sim.transform(article['title']) if title_sim else None
article_list[index]['content_transform'] = content_sim.transform(
article['content']) if content_sim else None
for index_x in range(total_len):
article_x = article_list[index_x]
for index_y in range(index_x + 1, total_len):
article_y = article_list[index_y]
title_similarity = title_sim.calculate_transform(
article_x['title_transform'], article_y['title_transform']
) if title_sim else 0.0
content_similarity = content_sim.calculate_transform(
article_x['content_transform'], article_y['content_transform']
) if content_sim else 0.0
result_list.append({
'id_x': article_x['id'],
'id_y': article_y['id'],
'title_sim': title_similarity,
'content_sim': content_similarity
})
return {
'handleMsg': 'success',
'isHandleSuccess': True,
'logs': None,
'resultData': {
'sim_list': result_list
}
}
pass
if __name__ == '__main__':
result_dict = compare_single([
{
'id': 1,
'title': 'I love YanLiu',
'content': 'YingLiang love YanLiu'
},
{
'id': 2,
'title': 'I love YingLiang',
'content': 'YanLiu love YingLiang'
}
], 'cos_sim', 'sim_hash')
print(result_dict)
result_list = compare_many([
{
'id': 1,
'title': 'I love YanLiu',
'content': 'YingLiang love YanLiu'
},
{
'id': 2,
'title': 'I love YingLiang',
'content': 'YanLiu love YingLiang'
},
{
'id': 3,
'title': 'I love YingLiang',
'content': 'YanLiu love YingLiang'
}
], 'lev_sim', 'sim_hash')
print(result_list)
pass
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/data/compare.py
|
compare.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/8 19:43
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/data/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : data_process
# @Author : LiuYan
# @Time : 2021/4/9 15:01
import xlrd
import xlsxwriter
def xlsx2list(xlsx_path: str) -> list:
wb = xlrd.open_workbook(xlsx_path)
sh = wb.sheet_by_name('Sheet1')
total_list = list()
for i in range(sh.nrows):
if i < 3:
continue
row = sh.row_values(i)
total_list.append({
'id': int(row[0]),
'title': row[1].replace('\n', '').replace('\r', '').replace('\t', ''),
'content': row[2].replace('\n', '').replace('\r', '').replace('\t', '')
})
# row = sh.row_values(i)
# total_list.append({
# 'id': i,
# 'title': row[0].replace('\n', '').replace('\r', '').replace('\t', ''),
# 'content': row[1].replace('\n', '').replace('\r', '').replace('\t', '')
# })
return total_list
def list2xlsx(xlsx_path=None, result_lists=None):
workbook = xlsxwriter.Workbook(xlsx_path)
worksheet = workbook.add_worksheet('result')
worksheet.write_row(
0, 0, [
'content_id_x', 'content_id_y',
'cos_sim', 'jac_sim', 'lev_sim',
'min_hash', 'old_sim_hash', 'new_sim_hash',
'ctt_sim',
'cos_sim', 'jac_sim', 'lev_sim',
'min_hash', 'old_sim_hash', 'new_sim_hash',
'ctt_sim',
]
)
for index, result in enumerate(result_lists):
worksheet.write_row(
index + 1, 0, [
result[0],
result[1],
result[2]['result_cos_sim'],
result[2]['result_jac_sim'],
result[2]['result_lev_sim'],
result[2]['result_min_hash_sim'],
result[2]['result_old_sim_hash_sim'],
result[2]['result_new_sim_hash_sim'],
result[2]['result_sim_tx'],
result[3]['result_cos_sim'],
result[3]['result_jac_sim'],
result[3]['result_lev_sim'],
result[3]['result_min_hash_sim'],
result[3]['result_old_sim_hash_sim'],
result[3]['result_new_sim_hash_sim'],
result[3]['result_sim_tx'],
]
)
workbook.close()
if __name__ == '__main__':
xlsx_path = '../data/total_datasets.xlsx'
total_list = xlsx2list(xlsx_path=xlsx_path)
pass
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/data/data_process.py
|
data_process.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : tool
# @Author : LiuYan
# @Time : 2021/4/9 9:57
import re
import html
import jieba
import jieba.analyse
class Tool(object):
def __init__(self, stop_words_path: str):
jieba.analyse.set_stop_words(stop_words_path=stop_words_path) # 去除停用词
jieba.cut('北京天安门', cut_all=True) # 切割
@staticmethod
def extract_keyword(content: str, withWeigth: bool): # 提取关键词
re_exp = re.compile(r'(<style>.*?</style>)|(<[^>]+>)', re.S) # 正则过滤 html 标签
content = re_exp.sub(' ', content)
content = html.unescape(content) # html 转义符实体化
seg = [i for i in jieba.cut(content, cut_all=True) if i != ''] # 切割
keywords = jieba.analyse.extract_tags('|'.join(seg), topK=200, withWeight=withWeigth) # 提取关键词
return keywords
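# --- illustrative usage (not part of the original file) ---
# A minimal sketch; the stop-word file path below is an assumption.
if __name__ == '__main__':
    tool = Tool(stop_words_path='../data/stop_words.txt')
    print(tool.extract_keyword('<p>simhash算法的主要思想是降维。</p>', withWeigth=False))
    # withWeigth=True returns (keyword, weight) pairs instead of a plain keyword list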
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/utils/tool.py
|
tool.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/9 9:56
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/utils/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : min_hash
# @Author : LiuYan
# @Time : 2021/4/8 19:38
from datasketch import MinHash
from doc_similarity.model.base_similarity import BaseSimilarity
from doc_similarity.utils.tool import Tool
class MinHashSimilarity(BaseSimilarity):
"""
MinHash
在大数据集中求杰尔德相似度的解决方案,通过对数据文本的降维,大大提高计算速度。
"""
def __init__(self, stop_words_path: str):
super(MinHashSimilarity, self).__init__()
self._tool = Tool(stop_words_path=stop_words_path)
def calculate(self, content_x: str, content_y: str) -> float:
m1, m2 = MinHash(), MinHash() # MinHash计算
s1 = self._tool.extract_keyword(content_x, withWeigth=False) # 提取关键词
s2 = self._tool.extract_keyword(content_y, withWeigth=False)
for data in s1:
m1.update(data.encode('utf8'))
for data in s2:
m2.update(data.encode('utf8'))
return m1.jaccard(m2)
def transform(self, content: str) -> object:
minhash = MinHash()
keywords = self._tool.extract_keyword(content, withWeigth=False)
for keyword in keywords:
minhash.update(keyword.encode('utf-8'))
return minhash
pass
def calculate_transform(self, transform_x: MinHash, transform_y: MinHash) -> float:
"""
:param transform_x: minhash_1
:param transform_y: minhash_2
:return:
"""
return transform_x.jaccard(transform_y)
pass
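# --- illustrative usage (not part of the original file) ---
# A minimal sketch of the precompute-then-compare pattern used by compare_many;
# the stop-word file path is an assumption.
if __name__ == '__main__':
    min_hash = MinHashSimilarity(stop_words_path='../data/stop_words.txt')
    sig_x = min_hash.transform('simhash算法的主要思想是降维,将高维的特征向量映射成一个低维的特征向量。')
    sig_y = min_hash.transform('simhash的主要思想是降维,将高维特征向量映射成低维特征向量。')
    print(min_hash.calculate_transform(sig_x, sig_y))  # estimated Jaccard similarity in [0, 1]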
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/model/min_hash.py
|
min_hash.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : levenshtein
# @Author : LiuYan
# @Time : 2021/4/8 19:38
import Levenshtein
from doc_similarity.model.base_similarity import BaseSimilarity
from doc_similarity.utils.tool import Tool
class LevenshteinSimilarity(BaseSimilarity):
"""
编辑距离
"""
def __init__(self, stop_words_path: str):
super(LevenshteinSimilarity, self).__init__()
self._tool = Tool(stop_words_path=stop_words_path)
def calculate(self, content_x: str, content_y: str) -> float:
# 提取关键词
keywords_1 = ', '.join(self._tool.extract_keyword(content_x, withWeigth=False))
keywords_2 = ', '.join(self._tool.extract_keyword(content_y, withWeigth=False))
# ratio计算2个字符串的相似度,它是基于最小编辑距离
distances = Levenshtein.ratio(keywords_1, keywords_2)
return distances
def transform(self, content: str) -> object:
keywords = ', '.join(self._tool.extract_keyword(content, withWeigth=False))
return keywords
pass
def calculate_transform(self, transform_x: object, transform_y: object) -> float:
"""
:param transform_x: keywords_1
:param transform_y: keywords_2
:return:
"""
distances = Levenshtein.ratio(transform_x, transform_y)
return distances
pass
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/model/levenshtein.py
|
levenshtein.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : sim_hash
# @Author : LiuYan
# @Time : 2021/4/8 19:35
import re
import math
from simhash import Simhash
from doc_similarity.model.base_similarity import BaseSimilarity
from doc_similarity.utils.tool import Tool
class OldSimHashSimilarity(BaseSimilarity):
def __init__(self):
super(OldSimHashSimilarity, self).__init__()
@staticmethod
def _filter_html(html):
"""
:param html: html
:return: 返回去掉html的纯净文本
"""
dr = re.compile(r'<[^>]+>', re.S)
dd = dr.sub('', html).strip()
return dd
def calculate(self, text_1: str, text_2: str): # 求两篇文章相似度
"""
:param text_1: 文本_1
:param text_2: 文本_2
:return: 返回两篇文章的相似度
"""
simhash_1 = Simhash(text_1)
simhash_2 = Simhash(text_2)
# print(len(bin(simhash_1.value)), (len(bin(simhash_2.value))))
max_hash_bit = max(len(bin(simhash_1.value)), (len(bin(simhash_2.value))))
# print(max_hash_bit)
# 海明距离(Hamming distance)
hamming_distance = simhash_1.distance(simhash_2)
# print(hamming_distance)
similarity = 1 - hamming_distance / max_hash_bit
return similarity
def transform(self, content: str) -> object:
simhash = Simhash(content)
return simhash
pass
def calculate_transform(self, transform_x: Simhash, transform_y: Simhash) -> float:
"""
:param transform_x: simhash_1
:param transform_y: simhash_2
:return:
"""
max_hash_bit = max(len(bin(transform_x.value)), (len(bin(transform_y.value))))
hamming_distance = transform_x.distance(transform_y)
similarity = 1 - hamming_distance / max_hash_bit
return similarity
pass
class SimHashSimilarity(object):
"""
SimHash
对单词数量低于500的文章误差较大。
"""
def __init__(self, stop_words_path: str):
self._tool = Tool(stop_words_path=stop_words_path)
pass
@staticmethod
def get_bin_str(source): # 字符串转二进制
if source == '':
return 0
else:
t = ord(source[0]) << 7
m = 1000003
mask = 2 ** 128 - 1
for c in source:
t = ((t * m) ^ ord(c)) & mask
t ^= len(source)
if t == -1:
t = -2
t = bin(t).replace('0b', '').zfill(64)[-64:]
return str(t)
def _run(self, keywords):
ret = []
for keyword, weight in keywords:
bin_str = self.get_bin_str(keyword)
key_list = []
for c in bin_str:
weight = math.ceil(weight)
if c == '1':
key_list.append(int(weight))
else:
key_list.append(-int(weight))
ret.append(key_list)
# 对列表进行"降维"
rows = len(ret)
cols = len(ret[0])
result = []
for i in range(cols):
tmp = 0
for j in range(rows):
tmp += int(ret[j][i])
if tmp > 0:
tmp = '1'
elif tmp <= 0:
tmp = '0'
result.append(tmp)
return ''.join(result)
def calculate(self, content_x: str, content_y: str):
# 提取关键词
s1 = self._tool.extract_keyword(content_x, withWeigth=True)
s2 = self._tool.extract_keyword(content_y, withWeigth=True)
sim_hash_1 = self._run(s1)
sim_hash_2 = self._run(s2)
# print(f'相似哈希指纹1: {sim_hash1}\n相似哈希指纹2: {sim_hash2}')
length = 0
for index, char in enumerate(sim_hash_1):
if char == sim_hash_2[index]:
continue
else:
length += 1
return length
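# --- illustrative usage (not part of the original file) ---
# A minimal, self-contained sketch. Note that the two classes return different
# things: OldSimHashSimilarity.calculate gives a similarity ratio in [0, 1],
# while SimHashSimilarity.calculate gives a Hamming distance that callers
# compare against a small threshold (e.g. 3) to decide near-duplication.
if __name__ == '__main__':
    old_sim = OldSimHashSimilarity()
    print(old_sim.calculate('文本指纹去重测试,第一段文本。', '文本指纹去重测试,第二段文本。'))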
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/model/sim_hash.py
|
sim_hash.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : jaccard
# @Author : LiuYan
# @Time : 2021/4/8 19:37
from doc_similarity.model.base_similarity import BaseSimilarity
from doc_similarity.utils.tool import Tool
class JaccardSimilarity(BaseSimilarity):
"""
jaccard相似度
在产品描述中,很多运营人员为了偷懒,喜欢复制粘贴稍作修改,造成产品描述重复度高。通过提取产品描述的关键词,再计算两组关键词的交集并集非常适合在此场景下检测产品描述的重复度,即杰卡德相似度。
"""
def __init__(self, stop_words_path: str):
super(JaccardSimilarity, self).__init__()
self._tool = Tool(stop_words_path=stop_words_path)
pass
def calculate(self, content_x: str, content_y: str) -> float:
# 分词与关键词提取
keywords_x = self._tool.extract_keyword(content_x, withWeigth=False)
keywords_y = self._tool.extract_keyword(content_y, withWeigth=False)
# jaccard相似度计算
intersection = len(list(set(keywords_x).intersection(set(keywords_y))))
union = len(list(set(keywords_x).union(set(keywords_y))))
sim = float(intersection) / union if union != 0 else 0
return sim
def transform(self, content: str) -> object:
keywords = self._tool.extract_keyword(content, withWeigth=False)
return keywords
pass
def calculate_transform(self, transform_x: object, transform_y: object) -> float:
"""
:param transform_x: keywords_x
:param transform_y: keywords_y
:return:
"""
# jaccard相似度计算
intersection = len(list(set(transform_x).intersection(set(transform_y))))
union = len(list(set(transform_x).union(set(transform_y))))
sim = float(intersection) / union if union != 0 else 0
return sim
pass
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/model/jaccard.py
|
jaccard.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/2/7 14:10
# @Author : 程婷婷
# @FileName: similarity_tx.py
# @Software: PyCharm
import numpy as np
import gensim
import jieba
import re
from sklearn.metrics.pairwise import cosine_similarity
class Similarity(object):
def __init__(self, model_path, stopword_path):
self.Word2VecModel = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=False)
self.vocab_list = [word for word, vocab in self.Word2VecModel.wv.vocab.items()]
self.stopword_path = stopword_path
def stop_word_list(self, path):
'''
创建停用词list
:param path:
:return:
'''
stopwords = [line.strip() for line in open(path, 'r', encoding='utf-8').readlines()]
return stopwords
def remove_char(self, text):
'''
保留中文、英语字母、数字和标点
:param text:
:return:
'''
graph_filter = re.compile(r'[^\u4e00-\u9fa5a-zA-Z0-9\s,。\.,?\?!!;;]')
graph = graph_filter.sub('', text)
if len(graph) == 0:
return ''
else:
return graph
def preprocess(self, text):
'''
预处理文本
:param text:
:return:
'''
if isinstance(text, str):
text = self.remove_char(text)
textcut = jieba.cut(text.strip())
stopwords = self.stop_word_list(self.stopword_path)
            textcut = filter(lambda x: x not in stopwords, textcut)  # drop stop words
else:
raise TypeError('text should be str')
return textcut
# 第1个参数是每篇文章分词的结果,第2个参数是word2vec模型对象
def getVector_v4(self, cutWords):
article_vector = np.zeros((1, 200))
for cutWord in cutWords:
if cutWord in self.vocab_list:
article_vector += np.array(self.Word2VecModel.wv[cutWord])
cutWord_vector = article_vector.mean(axis=0)
return cutWord_vector
def calculation_sim(self, text1, text2):
'''
计算相似度
:param texts_train:
:param texts_test:
:return:
'''
text1 = self.preprocess(text1)
text2 = self.preprocess(text2)
matrix_text1 = self.getVector_v4(text1)
matrix_text2 = self.getVector_v4(text2)
dis = cosine_similarity(matrix_text1.reshape(1, -1), matrix_text2.reshape(1, -1))
return dis
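# --- illustrative usage (not part of the original file) ---
# A hedged sketch only: the embedding file is large and both paths are
# assumptions taken from the test code elsewhere in this package. Note that
# calculation_sim returns the raw sklearn cosine_similarity output, i.e. a
# 1x1 numpy array rather than a plain float.
# sim = Similarity(
#     model_path='/home/zzsn/liuyan/word2vec/doc_similarity/Tencent_AILab_ChineseEmbedding_Min.txt',
#     stopword_path='/home/zzsn/liuyan/word2vec/doc_similarity/stopwords.txt'
# )
# print(float(sim.calculation_sim('今天天气很好', '今天天气不错')))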
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/model/similarity_tx.py
|
similarity_tx.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : total_sim
# @Author : LiuYan
# @Time : 2021/4/9 15:49
import os
from doc_similarity.model.cosine_similarity import CosineSimilarity
from doc_similarity.model.jaccard import JaccardSimilarity
from doc_similarity.model.levenshtein import LevenshteinSimilarity
from doc_similarity.model.min_hash import MinHashSimilarity
from doc_similarity.model.sim_hash import SimHashSimilarity, OldSimHashSimilarity
from doc_similarity.model.similarity_tx import Similarity
class TotalSimilarity(object):
def __init__(self, root_path: str):
super(TotalSimilarity, self).__init__()
stop_words_path = os.path.join(root_path, 'stop_words.txt')
self._cos_sim = CosineSimilarity(stop_words_path=stop_words_path)
self._jac_sim = JaccardSimilarity(stop_words_path=stop_words_path)
self._lev_sim = LevenshteinSimilarity(stop_words_path=stop_words_path)
self._min_hash_sim = MinHashSimilarity(stop_words_path=stop_words_path)
self._sim_hash_sim = SimHashSimilarity(stop_words_path=stop_words_path)
self._old_sim_hash_sim = OldSimHashSimilarity()
self._ctt_sim = Similarity(
model_path=os.path.join(root_path, 'Tencent_AILab_ChineseEmbedding_Min.txt'),
stopword_path=os.path.join(root_path, 'stopwords.txt')
)
pass
def calculate(self, content_x: str, content_y: str) -> dict:
result_dict = {
'result_cos_sim': self._cos_sim.calculate(content_x, content_y),
'result_jac_sim': self._jac_sim.calculate(content_x, content_y),
'result_lev_sim': self._lev_sim.calculate(content_x, content_y),
'result_min_hash_sim': self._min_hash_sim.calculate(content_x, content_y),
'result_old_sim_hash_sim': self._old_sim_hash_sim.calculate(text_1=content_x, text_2=content_y),
'result_new_sim_hash_sim': self._sim_hash_sim.calculate(content_x, content_y),
'result_ctt_sim': self._ctt_sim.calculation_sim(content_x, content_y)
}
return result_dict
pass
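# --- illustrative note (not part of the original file) ---
# A hedged sketch; root_path must contain stop_words.txt, stopwords.txt and the
# Tencent embedding file loaded above, so this is not runnable without them.
# Note the mixed scales in the returned dict: result_new_sim_hash_sim is a
# Hamming distance (smaller means more similar), while the other entries are
# similarity scores.
# total = TotalSimilarity(root_path='/home/zzsn/liuyan/word2vec/doc_similarity')
# print(total.calculate('文本一', '文本二'))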
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/model/total_sim.py
|
total_sim.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : base_similarity
# @Author : LiuYan
# @Time : 2021/4/10 15:34
from abc import ABC, abstractmethod
class BaseSimilarity(ABC):
def __init__(self):
super(BaseSimilarity, self).__init__()
@abstractmethod
def calculate(self, content_x: str, content_y: str) -> float:
pass
@abstractmethod
def transform(self, content: str) -> object:
pass
@abstractmethod
def calculate_transform(self, transform_x: object, transform_y: object) -> float:
pass
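# --- illustrative example (not part of the original file) ---
# A minimal sketch of the contract the concrete classes in this package follow:
# transform() precomputes a reusable representation, calculate_transform()
# compares two such representations, and calculate(x, y) should give the same
# result as calculate_transform(transform(x), transform(y)).
class _CharSetSimilarity(BaseSimilarity):
    def calculate(self, content_x: str, content_y: str) -> float:
        return self.calculate_transform(self.transform(content_x), self.transform(content_y))
    def transform(self, content: str) -> object:
        return set(content)
    def calculate_transform(self, transform_x: object, transform_y: object) -> float:
        union = transform_x | transform_y
        return len(transform_x & transform_y) / len(union) if union else 0.0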
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/model/base_similarity.py
|
base_similarity.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/8 19:35
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/model/__init__.py
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : cosine_similarity
# @Author : LiuYan
# @Time : 2021/4/8 19:36
from sklearn.metrics.pairwise import cosine_similarity
from doc_similarity.model.base_similarity import BaseSimilarity
from doc_similarity.utils.tool import Tool
class CosineSimilarity(BaseSimilarity):
"""
余弦相似度
"""
def __init__(self, stop_words_path):
super(CosineSimilarity, self).__init__()
self._tool = Tool(stop_words_path=stop_words_path)
@staticmethod
def one_hot(word_dict, keywords): # oneHot编码
# cut_code = [word_dict[word] for word in keywords]
cut_code = [0] * len(word_dict)
for word in keywords:
cut_code[word_dict[word]] += 1
return cut_code
def calculate(self, content_x, content_y):
keywords_1 = self._tool.extract_keyword(content_x, withWeigth=False) # 提取关键词
keywords_2 = self._tool.extract_keyword(content_y, withWeigth=False)
# 词的并集
union = set(keywords_1).union(set(keywords_2))
# 编码
word_dict = {}
i = 0
for word in union:
word_dict[word] = i
i += 1
# oneHot编码
s1_cut_code = self.one_hot(word_dict, keywords_1)
s2_cut_code = self.one_hot(word_dict, keywords_2)
# 余弦相似度计算
sample = [s1_cut_code, s2_cut_code]
# 除零处理
try:
sim = cosine_similarity(sample)
return sim[1][0]
except Exception as e:
print(e)
return 0.0
def transform(self, content: str) -> object:
keywords = self._tool.extract_keyword(content, withWeigth=False) # 提取关键词
return keywords
pass
def calculate_transform(self, transform_x: object, transform_y: object) -> float:
"""
:param transform_x: keywords_1
:param transform_y: keywords_2
:return: float
"""
# 词的并集
union = set(transform_x).union(set(transform_y))
# 编码
word_dict = {}
i = 0
for word in union:
word_dict[word] = i
i += 1
# oneHot编码
s1_cut_code = self.one_hot(word_dict, transform_x)
s2_cut_code = self.one_hot(word_dict, transform_y)
# 余弦相似度计算
sample = [s1_cut_code, s2_cut_code]
# 除零处理
try:
sim = cosine_similarity(sample)
return sim[1][0]
except Exception as e:
print(e)
return 0.0
pass
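# --- illustrative usage (not part of the original file) ---
# A minimal sketch; the stop-word file path is an assumption. The score is the
# cosine between bag-of-keywords count vectors, so it lies in [0, 1].
if __name__ == '__main__':
    cos = CosineSimilarity(stop_words_path='../data/stop_words.txt')
    print(cos.calculate('将高维的特征向量映射成一个低维的特征向量', '将高维特征向量映射成低维特征向量'))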
|
zzsn-nlp
|
/zzsn_nlp-0.0.1.tar.gz/zzsn_nlp-0.0.1/doc_similarity/model/cosine_similarity.py
|
cosine_similarity.py
|
import os
import shutil
import ftplib
from ftplib import FTP
from flask import Flask, request, url_for, send_from_directory
from werkzeug.utils import secure_filename
from julei.kmeans import Kmeans
HOST = '127.0.0.1'
DEBUG = False
PORT = 8010
ALLOWED_EXTENSIONS = set(['xls', 'xlsx'])
app = Flask(__name__)
# limit uploads to at most 100 MB
app.config['MAX_CONTENT_LENGTH'] = 100 * 1024 * 1024
html = '''
<!DOCTYPE html>
<title>文件传输</title>
<h2>文件传输</h2>
<form method='post' enctype='multipart/form-data'>
<input type='file' name='file' multiple="multiple">
<input type='submit' value='传输该文件'>
</form>
'''
htmls = '''
<!DOCTYPE html>
<title>文件传输</title>
<h2>文件传输</h2>
<form method='post' enctype='multipart/form-data'>
<input type='submit' value='开始传输'>
</form>
'''
#连接并登陆FTP
def loginFTP():
ftp = FTP()
ftp.connect('192.168.1.196', 21) # 连接的ftp sever IP和端口
ftp.login('', '') # 连接的用户名,密码如果匿名登录则用空串代替即可
return ftp,True
# 判断文件类型是否符合要求
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[-1] in ALLOWED_EXTENSIONS
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
@app.route('/download', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
ftp, status = loginFTP()
if status == True:
ftp.cwd('./beida')
files = request.files.getlist('file')
# files = request.files['file']
print(files)
for file in files:
if allowed_file(file.filename):
print(file)
filename = secure_filename(file.filename)
ftp.storbinary('STOR ' + filename, file, blocksize=1024)
else:
return html + '文件类型不匹配'
return html + str(len(files)) + '个文件已经传输成功!'
else:
return html + '连接失败'
return html
@app.route('/upload', methods=['GET', 'POST'])
# 上传整个目录下的文件
def ftpDownload():
if request.method == 'POST':
ftp, status = loginFTP()
remote_path = './'
local_path = './data'
if not os.path.exists(local_path):
os.makedirs(local_path)
ftp.cwd(remote_path)
# print(ftp.dir())
for file in ftp.nlst():
print(file)
if allowed_file(file):
local_file = os.path.join(local_path, file)
# print(file.rsplit('.', 1)[-1])
# print(allowed_file(file))
download_file(ftp=ftp, remote_file=file, local_file=local_file)
else:
print('文件类型有误')
ftp.quit()
return htmls +'传输成功'
return htmls
def download_file(ftp, remote_file, local_file):
try:
buf_size = 1024
file_handler = open(local_file, 'wb')
ftp.retrbinary('RETR ' + remote_file, file_handler.write, buf_size)
file_handler.close()
except Exception as err:
print('传输文件出错,出现异常:%s ' % err)
@app.route('/write/', methods=('GET', 'POST'))
def get_train():
try:
km = Kmeans()
km.write()
except Exception as err:
        print('出现异常:' + str(err))
return 'lose'
return '<h2>模型训练成功,相关文件已保存<h2>'
@app.route('/delete/', methods=('GET', 'POST'))
def delete_dir():
print('当前工作目录为' + os.getcwd())
for root,dir,files in os.walk('./data'):
print('data文件夹中包含' + str(files))
for file in files:
if file.rsplit('.')[-1] == 'xlsx':
os.remove('./data/' + file)
if os.path.exists('./result'):
shutil.rmtree('./result/')
return '<h2>删除文件成功</h2>'
app.run(host=HOST, port=PORT, debug=DEBUG)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/julei/app.py
|
app.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed April 17 22:06:20 2019
@author: Wu-Daqing
"""
import os
import time
import logging
import gensim
from julei.segment import Segment
class Word2vec:
def __init__(self):
pass
def make_model(self):
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
text = gensim.models.word2vec.LineSentence('result/segment/data_segment.txt')
model = gensim.models.word2vec.Word2Vec(sentences=text,size=100,window=5,min_count=5,workers=5,sg=0,iter=20)
if os.path.isdir('result/word2vec/') == False:
os.makedirs(r'result/word2vec/')
model.save('result/word2vec/wordvector_model')
return model
# word2vec = Word2vec()
# word2vec.make_model()
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/julei/word2vec_train.py
|
word2vec_train.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed April 17 22:06:20 2019
@author: Wu-Daqing
"""
import time
import os
import pickle
import numpy as np
import gensim
from julei.tfidf import Tfidf
from julei.word2vec_train import Word2vec
class Representation:
def __init__(self):
pass
def make_dir(self):
if os.path.isdir('result/representation/') == False:
os.makedirs(r'result/representation/')
def load_pkl(self):
with open('result/tfidf/tfidf.pkl','rb') as load1:
tfidf = pickle.load(load1)
with open('result/tfidf/vocabulary_tfidf.pkl','rb') as load2:
vocabulary = pickle.load(load2)
return tfidf,vocabulary
def load_embedding(self):
print(time.strftime('%Y-%m-%d %H:%M:%S'),'开始导入腾讯公开中文词向量(200维)')
file_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(file_path, 'data/Tencent_AILab_ChineseEmbedding.txt')
model_tencent = gensim.models.KeyedVectors.load_word2vec_format(path, binary=False)
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成导入腾讯公开中文词向量(200维)')
vocabulary_tencent = model_tencent.wv.vocab.keys()
print(time.strftime('%Y-%m-%d %H:%M:%S'),'开始导入当前数据训练中文词向量(100维)')
word2vector = Word2vec()
model_w2v = word2vector.make_model()
# model_w2v = gensim.models.Word2Vec.load('result/word2vec/wordvector_model')
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成导入当前数据训练中文词向量(100维)')
vocabulary_w2v = model_w2v.wv.vocab.keys()
return model_tencent,vocabulary_tencent,model_w2v,vocabulary_w2v
def count_embedding(self):
tfidf1 = Tfidf()
tfidf,vocabulary = tfidf1.count_tfidf()
# tfidf,vocabulary = self.load_pkl()
model_tencent,vocabulary_tencent,model_w2v,vocabulary_w2v = self.load_embedding()
num_data = tfidf.shape[0]
V = tfidf.shape[1]
vector_matrix = np.zeros((V,300))
count = 0
for word in vocabulary:
if word in vocabulary_tencent:
vector_tencent = model_tencent.wv.word_vec(word)
else:
vector_tencent = np.random.randn(200)
if word in vocabulary_w2v:
vector_w2v = model_w2v.wv.word_vec(word)
else:
vector_w2v = np.random.randn(100)
vector = np.concatenate((vector_tencent,vector_w2v))
vector_matrix[count] = vector
count += 1
if (count+1) % 10000 == 0:
print(time.strftime('%Y-%m-%d %H:%M:%S'),'第',count,'个词向量计算完毕')
print(time.strftime('%Y-%m-%d %H:%M:%S'),'第',count,'个词向量计算完毕')
self.make_dir()
with open('result/representation/vector_matrix.pkl', 'wb') as save1:
pickle.dump(vector_matrix, save1, protocol=4)
return num_data,vector_matrix
def text_represent(self):
num_data,vector_matrix = self.count_embedding()
print(num_data)
tfidf,vocabulary = self.load_pkl()
text_representation = np.zeros((num_data,300))
for i in range(num_data):
tmp = tfidf[i].toarray()
weighted_average_vector = np.dot(tmp,vector_matrix)
text_representation[i] = weighted_average_vector
if (i+1)%10000 == 0 or (i+1) == num_data:
print(time.strftime('%Y-%m-%d %H:%M:%S'),'第',i+1,'条文本表示计算完毕')
with open('result/representation/text_representation.pkl','wb') as save2:
pickle.dump(text_representation,save2,protocol=4)
print(num_data)
return text_representation
# rep = Representation()
# rep.text_represent()
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/julei/representation.py
|
representation.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed April 17 22:06:20 2019
@author: Wu-Daqing
"""
import time
import os
import re
import pickle
import xlrd
import collections
from pyhanlp import JClass
class Segment:
def __init__(self):
pass
def make_dir(self):
if os.path.isdir('result/segment/') == False:
os.makedirs(r'result/segment/') # 为分词结果创建文件夹
# 定义从excel中读取内容的函数 (excel格式:日期 时间 内容)
def load_data(self,excel_path):
excel = xlrd.open_workbook(excel_path)
table = excel.sheet_by_index(0)
num_rows = table.nrows-1
content = []
for idx in range(1,num_rows+1):
row = table.row_values(idx)
content.append(row[2])
return content
def data_segment(self):
file_names = sorted(os.listdir('data/')) # 把data文件夹下的所有原始excel数据的名称作为字符串组成list
original_data = collections.defaultdict(list)
for file_name in file_names:
if file_name[-5:] == '.xlsx':
excel_paths = 'data/' + file_name
content = self.load_data(excel_paths)
print(time.strftime('%Y-%m-%d %H:%M:%S'),file_name.split('_')[0],'文本读取完毕')
original_data[file_name.split('_')[0]] = content # 以日期字符串作为key的原始数据
self.make_dir()
data_segment_txt = open('result/segment/data_segment.txt','wb') # 把分词结果写进txt文件里,以方便训练word2vec
vocabulary_segment = collections.defaultdict(int)
find_chinese = re.compile(u"[\u4e00-\u9fa5]+")
symbols = "[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\!\@\#\\\&\*\%\t\n\r\f\b\000\v]"
PerceptronLexicalAnalyzer = JClass('com.hankcs.hanlp.model.perceptron.PerceptronLexicalAnalyzer')
segment = PerceptronLexicalAnalyzer()
for key in original_data.keys():
content = original_data[key]
for i in range(len(content)):
words = list(segment.analyze(content[i]).toWordArray())
for word in words:
if re.findall(find_chinese,word) == []:
continue
elif re.sub(symbols, "",re.findall(find_chinese,word)[0]) == '':
continue
elif len(re.sub(symbols, "",re.findall(find_chinese,word)[0])) == 1:
continue
else:
word_filtrated = re.sub(symbols, "",re.findall(find_chinese,word)[0])
vocabulary_segment[word_filtrated] += 1
data_segment_txt.write(word_filtrated.encode('utf-8'))
data_segment_txt.write(' '.encode('utf-8'))
data_segment_txt.write('\n'.encode('utf-8'))
if (i+1)%100 == 0 or i+1 == len(content):
print(time.strftime('%Y-%m-%d %H:%M:%S'),key,'第',i+1,'条文本分词完毕并写入')
data_segment_txt.close()
return vocabulary_segment
def dump_pkl(self):
vocabulary_segment = self.data_segment()
with open('result/segment/vocabulary_segment.pkl','wb') as save1:
pickle.dump(vocabulary_segment,save1)
print(time.strftime('%Y-%m-%d %H:%M:%S'),'词表长度:',len(vocabulary_segment))
return vocabulary_segment
def write(self):
vocabulary_segment = self.data_segment()
vocabulary_segment_sorted = sorted(vocabulary_segment.items(),key=lambda item:item[1],reverse=True) # 对字典中词的频率从大到小排序
vocabulary_segment_txt = open('result/segment/vocabulary_segment.txt','wb')
for value in vocabulary_segment_sorted:
vocabulary_segment_txt.write(value[0].encode('utf-8'))
vocabulary_segment_txt.write(' '.encode('utf-8'))
vocabulary_segment_txt.write(str(value[1]).encode('utf-8'))
vocabulary_segment_txt.write('\n'.encode('utf-8'))
vocabulary_segment_txt.close()
# se = Segment()
# se.write()
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/julei/segment.py
|
segment.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/24 19:09
# @Author : 程婷婷
# @FileName: __init__.py
# @Software: PyCharm
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/julei/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed April 17 22:06:20 2019
@author: Wu-Daqing
"""
import time
import xlrd
import os
import math
import pickle
import numpy as np
from openpyxl import Workbook
from sklearn.cluster import KMeans
from julei.representation import Representation
class Kmeans:
def __init__(self):
pass
def make_dir(self, path):
dir_path = os.path.join(os.getcwd(), path)
if os.path.isdir(dir_path) == False:
os.makedirs(dir_path)
def load_pkl(self):
print(time.strftime('%Y-%m-%d %H:%M:%S'), '开始导入数据')
representation = Representation()
data = representation.text_represent()
# with open('result/representation/text_representation.pkl','rb') as load1:
# data = pickle.load(load1)
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成导入数据')
num_data = data.shape[0]
print("====================num_data = " + str(num_data))
return data,num_data
#tmp add:
#print("---------------------num_class = "+str(num_class))
def train(self, path ='result/kmeans/'):
data,num_data = self.load_pkl()
#num_class = 20
num_class = int(math.sqrt(num_data))
# print(num_class)
print(time.strftime('%Y-%m-%d %H:%M:%S'),'开始训练模型')
kmeans = KMeans(n_clusters=num_class, init='k-means++', n_init=5, max_iter=100)
model = kmeans.fit(data)
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成训练模型')
classes = model.labels_
centroids = model.cluster_centers_
result = [[] for j in range(num_class)]
data_cluster = [[] for j in range(num_class)]
for i in range(num_data):
for j in range(num_class):
if classes[i] == j:
result[j].append(i)
data_cluster[j].append(data[i])
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成计算结果')
result_sorted = []
similarity = []
for j in range(num_class):
distances = [(np.linalg.norm(centroids[j] - data_cluster[j][i]),result[j][i]) for i in range(len(result[j]))]
distances_sorted = sorted(distances, key=lambda x: x[0])
result_sorted.append([value[1] for value in distances_sorted])
similarity.append([value[0] for value in distances_sorted])
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成排序结果')
with open(os.path.join(os.getcwd(), path)+'centroids.pkl','wb') as save1:
pickle.dump(centroids,save1)
return num_class,result_sorted,similarity
def load_data(self, excel_path):
excel = xlrd.open_workbook(excel_path)
table = excel.sheet_by_index(0)
num_rows = table.nrows-1
content = []
for idx in range(1,num_rows+1):
row = table.row_values(idx)
            content.append([row[0], row[1], row[2], row[3], row[4], row[5], row[6]])  # row now carries 3 extra columns: source, title, link
return content
def write(self, path ='result/kmeans/'):
self.make_dir(path)
file_names = sorted(os.listdir('data/'))
original_data = []
for file_name in file_names:
if file_name[-5:] == '.xlsx':
content = self.load_data(excel_path='data/'+file_name)
print(time.strftime('%Y-%m-%d %H:%M:%S'),file_name.split('_')[0],'文本读取完毕')
original_data += content
print(time.strftime('%Y-%m-%d %H:%M:%S'),'开始写入结果')
num_class,result_sorted,similarity = self.train(path)
for j in range(num_class):
print(time.strftime('%Y-%m-%d %H:%M:%S'),'第',j+1,'类有',len(result_sorted[j]),'条文本')
workbook = Workbook()
worksheet = workbook.active
worksheet.title = str(len(result_sorted[j]))
worksheet.cell(row=1,column=1).value = '日期'
worksheet.cell(row=1,column=2).value = '时间'
worksheet.cell(row=1,column=3).value = '距离中心欧氏距离'
worksheet.cell(row=1,column=4).value = '内容'
worksheet.cell(row=1, column=5).value = '来源' #新加
worksheet.cell(row=1, column=6).value = '标题' # 新加
worksheet.cell(row=1, column=7).value = '链接' # 新加
count = 1
for i in range(len(result_sorted[j])):
try:
worksheet.cell(row=count+1,column=5).value = original_data[result_sorted[j][i]][3].encode('gbk','ignore').decode('gbk','ignore')#新加
worksheet.cell(row=count + 1, column=6).value = original_data[result_sorted[j][i]][5].encode('gbk','ignore').decode( 'gbk', 'ignore') # 新加
worksheet.cell(row=count + 1, column=7).value = original_data[result_sorted[j][i]][6].encode('gbk',
'ignore').decode(
'gbk', 'ignore') # 新加
# print(original_data[result_sorted[j][i]][3])
worksheet.cell(row=count+1,column=4).value = original_data[result_sorted[j][i]][2].encode('gbk','ignore').decode('gbk', 'ignore')
worksheet.cell(row=count+1,column=1).value = original_data[result_sorted[j][i]][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet.cell(row=count+1,column=2).value = original_data[result_sorted[j][i]][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet.cell(row=count+1,column=3).value = similarity[j][i]
count += 1
except Exception as e:
print('str(e):\t\t', str(e))
continue
workbook.save(os.path.join(os.getcwd(), path)+str(count-1)+'_'+str(j+1)+'.xlsx')
print(time.strftime('%Y-%m-%d %H:%M:%S'),j+1,'类写入Excel完毕','\n')
# km = Kmeans()
# km.write()
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/julei/kmeans.py
|
kmeans.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed April 17 22:06:20 2019
@author: Wu-Daqing
"""
import time
import os
import pickle
from julei.segment import Segment
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
class Tfidf:
def make_dir(self):
if os.path.isdir('result/tfidf/') == False:
os.makedirs(r'result/tfidf/')
def load_data(self):
seg = Segment()
vocabulary_segment = seg.dump_pkl()
seg.data_segment()
# with open('result/segment/vocabulary_segment.pkl','rb') as load1:
# vocabulary_segment = pickle.load(load1)
data = []
for line in open('result/segment/data_segment.txt','rb'):
string = line.decode('utf-8-sig')
string_list = string.split(' ')
tmp = []
for word in string_list:
if vocabulary_segment[word] < 5:
continue
else:
tmp.append(word)
data.append(' '.join(tmp))
return data
def count_tfidf(self):
tf_transformer = CountVectorizer(ngram_range=(1,1))
tfidf_transformer = TfidfTransformer(norm='l2',use_idf=True,smooth_idf=True)
data = self.load_data()
tf = tf_transformer.fit_transform(data)
print(time.strftime('%Y-%m-%d %H:%M:%S'),'TF计算完毕')
tfidf = tfidf_transformer.fit_transform(tf) # 'scipy.sparse.csr.csr_matrix'
print(time.strftime('%Y-%m-%d %H:%M:%S'),' TFIDF计算完毕')
self.make_dir()
with open('result/tfidf/tfidf.pkl','wb') as save1:
pickle.dump(tfidf,save1,protocol=4)
# pickle.dump(tfidf,save1,protocol=4)
vocabulary_tfidf = tf_transformer.get_feature_names()
with open('result/tfidf/vocabulary_tfidf.pkl','wb') as save2:
pickle.dump(vocabulary_tfidf,save2)
return tfidf,vocabulary_tfidf
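# --- illustrative note (not part of the original file) ---
# A hedged sketch of how this class is meant to be driven: count_tfidf() first
# re-runs Segment over the excel files under data/ (producing
# result/segment/data_segment.txt), then fits CountVectorizer + TfidfTransformer
# on it and caches the matrix and vocabulary under result/tfidf/.
# tfidf_matrix, vocabulary = Tfidf().count_tfidf()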
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/julei/tfidf.py
|
tfidf.py
|
from urllib import request, error
import sys
import zipfile
import tarfile
import socket
socket.setdefaulttimeout(15)
def progressbar(cur):
percent = '{:.2%}'.format(cur)
sys.stdout.write('\r')
sys.stdout.write('[%-100s] %s' % ('=' * int(cur*100), percent))
sys.stdout.flush()
print(cur)
def schedule(blocknum,blocksize,totalsize):
'''
blocknum:当前已经下载的块
blocksize:每次传输的块大小
totalsize:网页文件总大小
'''
percent = 0
if totalsize == 0:
percent = 0
elif totalsize == -1 and blocknum==0:
print('响应失败,正在重新连接……')
download()
elif totalsize == -1 and blocknum != 0:
pass
else:
percent = blocknum * blocksize / totalsize
progressbar(percent)
if percent > 1.0:
percent = 1.0
progressbar(percent)
# print('\n'+'download : %.2f%%' %(percent))
def download(url = 'https://codeload.github.com/chengtingting980903/zzsnML/tar.gz/1.0.0', path = '1.0.0.tar.gz'):
try:
filename,headers = request.urlretrieve(url, path, schedule)
print(headers)
except error.HTTPError as e:
print(e)
print(url + ' download failed!' + '\r\n')
print('请手动下载:%s' %url)
except error.URLError as e:
print(url + ' download failed!' + '\r\n')
print('请手动下载:%s' %url)
print(e)
except Exception as e:
print(e)
print('请手动下载:%s' %url)
else:
print('\r\n' + url + ' download successfully!')
return filename
def unzip(path = '1.0.0.zip'):
zip_file = zipfile.ZipFile(path)
zip_list = zip_file.namelist() # 得到压缩包里所有文件
for f in zip_list:
zip_file.extract(f) # 循环解压文件到指定目录
zip_file.close() # 关闭文件,必须有,释放内存
def untar(path = '1.0.0.tar.gz'):
tar = tarfile.open(path)
tar.extractall()
tar.close()
def download_decompress(url = 'https://codeload.github.com/chengtingting980903/zzsnML/tar.gz/1.0.0', path = '1.0.0.tar.gz'):
filename = download(url, path)
try:
if str(filename).split('.')[-1] == 'zip':
print('开始解压zip文件,请等待……')
unzip()
print('解压完成,可以使用')
except Exception as e:
print(e)
print('解压失败,请手动解压')
try:
if str(filename).split('.')[-1] == 'gz':
print('开始解压tar.gz文件,请等待……')
untar()
print('解压完成,可以使用')
except Exception as e:
print(e)
print('解压失败,请手动解压')
# if __name__ == '__main__':
print('开始下载:https://codeload.github.com/chengtingting980903/zzsnML/tar.gz/1.0.0')
download_decompress()
print('开始下载:https://github.com/chengtingting980903/zzsnML/releases/download/1.0.0/data.zip')
download_decompress(url='https://github.com/chengtingting980903/zzsnML/releases/download/1.0.0/data.zip', path='data.zip')
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/download_data/download.py
|
download.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/10/13 19:58
# @Author : 程婷婷
# @FileName: b.py
# @Software: PyCharm
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/download_data/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
import sys
# sys.path.append('./SVM/')
# sys.path.append('./utils/')
from sentiment_analysis.SVM.svm import *
from sentiment_analysis.utils.utils import *
from sentiment_analysis.utils.word2vec_utils import *
from sklearn.externals import joblib
import os
# setup
file_path = os.getcwd()
stopwords_file = os.path.join(file_path, 'data/stop_words.txt')
pca_model_file = os.path.join(file_path, 'SVM/model/2017-06-01~2017-08-03.pca')
svm_model_file = os.path.join(file_path, 'SVM/model/2017-06-01~2017-08-03.svm')
stopwords = load_stopwords(stopwords_file, encoding='utf-8')
wordvec_file = os.path.join(file_path, 'data/news.ten.zh.text.vector')
def load(wordvec_file, pca_model_file, svm_model_file):
# doc2vec: average vector
print('Loading word vectors...')
model = load_wordvec(wordvec_file, binary=False)
# load pca model
pca_ = joblib.load(pca_model_file)
# load svm model
clf = joblib.load(svm_model_file)
# label map
label_map = {0: '负', 1: '非负'}
return model, pca_, clf, label_map
def predict_one(data):
model, pca_, clf, label_map = load(wordvec_file, pca_model_file, svm_model_file)
data_cut = segment([data])
X = buildVecs(data_cut, stopwords, model)
X_reduced = pca_.transform(X)
pred = clf.predict(X_reduced)
pred_label = label_map[pred[0]]
return pred_label
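# --- illustrative usage (not part of the original file) ---
# A hedged sketch; it needs the word-vector and model files configured above,
# and note that predict_one() reloads all of them on every call.
# print(predict_one('公司发布公告称业绩大幅下滑。'))  # -> '负' or '非负'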
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/svm_app.py
|
svm_app.py
|
# -*- coding: utf-8 -*-
from flask import Flask, g, render_template, flash, redirect, url_for, request, abort, session
from werkzeug.utils import secure_filename
import time
import os, sys
# sys.path.append('./app/SVM/')
from sentiment_analysis.svm_app import predict_one
from sentiment_analysis.SVM.svm import svm
import warnings
warnings.filterwarnings('ignore')
DEBUG = False
PORT = 8008
HOST = '0.0.0.0'
app = Flask(__name__)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.secret_key = 'skfasmknfdhflm-vkllsbzdfmkqo3ooishdhzo295949mfw,fk'
# APP_ROOT = os.path.abspath('.')
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
## This function is not for sentiment analysis
# @app.route('/api/', methods=('GET', 'POST'))
# def get_result():
# # title = request.args.get('title', '')
# # content = request.args.get('content', '')
# # company = request.args.get('company', '')
# # if title == '' and content == '':
# # return '-2'
# # _content = title + '。' + content
# # # print(_content)
# # relevant = test(_content, company)
# # return relevant
# file_path = request.args.get('file_path', None)
# _all = request.args.get('_all', True)
# prefix = request.args.get('prefix', './')
# if file_path is None:
# return '必须给定输入文件!'
# if type(_all) == str:
# _all = _all.lower()
# if _all == 'false':
# _all = False
# elif _all == 'true':
# _all = True
# else:
# return '_all参数错误,只能取值True或者False。'
# print(file_path, _all, prefix)
# result_file = main(file_path, _all=_all, prefix=prefix)
# return result_file
@app.route('/api2/', methods=('GET', 'POST'))
def get_single_result():
title = request.form['title']
content = request.form['content']
if title == '' and content == '':
return '-1'
_content = title + '。' + content
# print(_content)
sentiment = predict_one(_content)
return sentiment
@app.route('/train/', methods=('GET', 'POST'))
def begin_train():
    connection_string = request.form.get('connection_string', '')
    from_date = request.form.get('from_date', '')
    to_date = request.form.get('to_date', '')
    try:
        # missing form fields default to '', so a single emptiness check is enough
        if connection_string == '' and from_date == '' and to_date == '':
            print('正在使用默认参数训练模型,connection_string为cis/[email protected]:1521/orcl, from_date为2017-06-01, to_date为2017-08-03')
            svm.train()
        else:
            print('正在使用指定参数训练模型,connection_string为%s, from_date为%s, to_date为%s' % (connection_string, from_date, to_date))
            svm.train(connection_string, from_date, to_date)
except Exception as e:
return 'train fail'
else:
return 'train success'
# if __name__ == '__main__':
app.run(debug=DEBUG, host=HOST, port=PORT)
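# Example client call (hypothetical; start this app first, then run the snippet
# below from another process -- the port and form fields match the /api2/ route):
#
#   import requests
#   r = requests.post('http://127.0.0.1:8008/api2/',
#                     data={'title': '示例标题', 'content': '示例正文'})
#   print(r.text)  # '负', '非负', or '-1' when both fields are empty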
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/app.py
|
app.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/24 19:09
# @Author : 程婷婷
# @FileName: __init__.py
# @Software: PyCharm
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
from svm import *
import sys
sys.path.append('../../utils')
from augmentation_utils import *
'''
Currently, data augmentation makes the results worse; a better augmentation method is needed.
'''
connection_string = 'cis/[email protected]:1521/orcl'
from_date = '2017-06-01'
to_date = '2017-08-03'
wordvec_file = '../../data/news.ten.zh.text.vector'
stopwords_file = '../../data/stop_words.txt'
data_file = '../../data/%s~%s.pkl' % (from_date, to_date)
if os.path.exists(data_file):
data_cut, y = load_data_from_pickle(data_file)
else:
# connect database
ora_conn = cx_Oracle.connect(connection_string)
# fetch data
data, y = fetch_data_from_oracle(ora_conn, from_date, to_date, label_map={'负': 0, '非负': 1})
# cut data
data_cut = segment(data)
save_data([data_cut, y], data_file)
# doc2vec: average vector
print('Loading word vectors...')
model = load_wordvec(wordvec_file, binary=False)
stopwords = load_stopwords(stopwords_file, encoding='utf-8')
(X_cut_train, y_train), (X_cut_test, y_test) = train_test_split(data_cut, y)
X_train = buildVecs(X_cut_train, stopwords, model)
X_test = buildVecs(X_cut_test, stopwords, model)
N_train = len(y_train)
# load emotional dictionary
emotional_dict_file = '../../data/情感词典18级_1.pkl'
emotional_dict = load_emotion_dict(emotional_dict_file)
# data augmentation
X_aug, y_aug = avgvector_virtue_complementary_augmentation(X_cut_train, y_train, model, emotional_dict, \
num_aug=10000, neg_aug_ratio=0.1, ratio=[0.2, 0.6, 0.2], min_virtue_sent_len=100)
X_train_combine = np.concatenate((X_train, X_aug), axis=0)
y_train_combine = np.concatenate((y_train, y_aug))
idx = np.random.permutation(X_train_combine.shape[0])
reverse_idx = np.argsort(idx)
X_train_combine = X_train_combine[idx]
y_train_combine = y_train_combine[idx]
# perform pca
print('Performing PCA...')
n_components = 100
pca_ = pca(n_components=n_components)
pca_.fit(X_train_combine)
pca_.save('model/%s~%s_comple_aug.pca' % (from_date, to_date))
print('%s components can explain %.2f%% variance.' % (n_components, pca_.ratio_*100))
X_reduced_train = pca_.transform(X_train_combine)
X_reduced_test = pca_.transform(X_test)
# train svm
print('Training SVM...')
clf = svm(C=2, probability=True)
clf.fit(X_reduced_train, y_train_combine)
clf.save('model/%s~%s_comple_aug.svm' % (from_date, to_date))
# score
y_pred_prob_train = clf.predict_proba(X_reduced_train[reverse_idx[:N_train]])
y_pred_prob_test = clf.predict_proba(X_reduced_test)
y_pred_train = y_pred_prob_train[:,0] < y_pred_prob_train[:,1]
y_pred_test = y_pred_prob_test[:,0] < y_pred_prob_test[:,1]
y_pred_train = y_pred_train.astype(np.int32)
y_pred_test = y_pred_test.astype(np.int32)
train_score = compute_score(y_train_combine[reverse_idx[:N_train]], y_pred_train, classes=[0, 1])
test_score = compute_score(y_test, y_pred_test, classes=[0, 1])
print_score(train_score, 'Train score of SVM classifier(+aug+PCA)')
print_score(test_score, 'Test score of SVM classifier(+aug+PCA)')
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/SVM/svm_with_virtual_complementary_augmentation.py
|
svm_with_virtual_complementary_augmentation.py
|
# -*- coding: utf-8 -*-
import sys
# sys.path.append('../../utils')
from sentiment_analysis.utils.utils import *
from sentiment_analysis.utils.word2vec_utils import *
from sklearn.svm import SVC
from sklearn.externals import joblib
import os
import cx_Oracle
class svm():
def __init__(self, label_dict=None, probability=True, C=5, kernel='rbf', degree=3, gamma='auto', coef0=0.0):
self.label_dict = label_dict
self.probability = probability
self.C = C
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self._svm = SVC(C=self.C, probability=self.probability, class_weight='balanced', kernel=self.kernel, \
degree=self.degree, gamma=self.gamma, coef0=self.coef0)
def fit(self, X, y):
self._svm.fit(X, y)
def predict(self, X, return_real_label=False):
if return_real_label:
assert self.label_dict is not None
return [self.label_dict[p] for p in self._svm.predict(X)]
return self._svm.predict(X)
def predict_proba(self, X):
if self.probability:
return self._svm.predict_proba(X)
else:
raise ValueError('If you want to get the predict probability, fit svm with probability=True.')
def save(self, save_to):
joblib.dump(self._svm, save_to)
# if __name__ == '__main__':
def train(connection_string = 'cis/[email protected]:1521/orcl', from_date = '2017-06-01', to_date = '2017-08-03'):
# connection_string = 'cis/[email protected]:1521/orcl'
wordvec_file = './data/news.ten.zh.text.vector'
stopwords_file = './data/stop_words.txt'
data_file = './data/%s~%s.pkl' % (from_date, to_date)
if os.path.exists(data_file):
data_cut, y = load_data_from_pickle(data_file)
else:
# connect database
ora_conn = cx_Oracle.connect(connection_string)
# fetch data
data, y = fetch_data_from_oracle(ora_conn, from_date, to_date, label_map={'负': 0, '非负': 1})
# cut data
data_cut = segment(data)
save_data([data_cut, y], data_file)
# doc2vec: average vector
print('Loading word vectors...')
model = load_wordvec(wordvec_file, binary=False)
stopwords = load_stopwords(stopwords_file, encoding='utf-8')
X = buildVecs(data_cut, stopwords, model)
(X_train, y_train), (X_test, y_test) = train_test_split(X, y)
# perform pca
print('Performing PCA...')
dir_path = os.path.join(os.getcwd(),'./model')
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
n_components = 100
pca_ = pca(n_components=n_components)
pca_.fit(X_train)
pca_.save('model/%s~%s.pca' % (from_date, to_date))
print('%s components can explain %.2f%% variance.' % (n_components, pca_.ratio_*100))
X_reduced_train = pca_.transform(X_train)
X_reduced_test = pca_.transform(X_test)
# train svm
print('Training SVM...')
clf = svm(C=5, probability=True)
clf.fit(X_reduced_train, y_train)
clf.save('model/%s~%s.svm' % (from_date, to_date))
# score
y_pred_train = clf.predict(X_reduced_train)
y_pred_test = clf.predict(X_reduced_test)
train_score = compute_score(y_train, y_pred_train, classes=[0, 1])
test_score = compute_score(y_test, y_pred_test, classes=[0, 1])
print_score(train_score, 'Train score of SVM classifier(+PCA)')
print_score(test_score, 'Test score of SVM classifier(+PCA)')
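# Minimal usage sketch (the connection string below is a placeholder; training also
# requires the word-vector and stop-word files referenced above and a reachable
# Oracle instance):
#
#   train(connection_string='user/password@host:1521/orcl',
#         from_date='2017-06-01', to_date='2017-08-03')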
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/SVM/svm.py
|
svm.py
|
# -*- coding: utf-8 -*-
from svm import *
import sys
sys.path.append('../../utils')
from augmentation_utils import *
'''
Currently, data augmentation makes the results worse; a better augmentation method is needed.
'''
connection_string = 'cis/[email protected]:1521/orcl'
from_date = '2017-06-01'
to_date = '2017-08-03'
wordvec_file = '../../data/news.ten.zh.text.vector'
stopwords_file = '../../data/stop_words.txt'
data_file = '../../data/%s~%s.pkl' % (from_date, to_date)
if os.path.exists(data_file):
data_cut, y = load_data_from_pickle(data_file)
else:
# connect database
ora_conn = cx_Oracle.connect(connection_string)
# fetch data
data, y = fetch_data_from_oracle(ora_conn, from_date, to_date, label_map={'负': 0, '非负': 1})
# cut data
data_cut = segment(data)
save_data([data_cut, y], data_file)
# doc2vec: average vector
print('Loading word vectors...')
model = load_wordvec(wordvec_file, binary=False)
stopwords = load_stopwords(stopwords_file, encoding='utf-8')
(X_cut_train, y_train), (X_cut_test, y_test) = train_test_split(data_cut, y)
X_train = buildVecs(X_cut_train, stopwords, model)
X_test = buildVecs(X_cut_test, stopwords, model)
# load emotional dictionary
emotional_dict_file = '../../data/情感词典18级_1.pkl'
emotional_dict = load_emotion_dict(emotional_dict_file)
# data augmentation
X_aug, y_aug = avgvector_virtue_augmentation(X_cut_train, y_train, model, emotional_dict, \
num_aug=10000, neg_aug_ratio=0.1, ratio=[0.4, 0.6, 0.0], min_virtue_sent_len=100)
X_train_combine = np.concatenate((X_train, X_aug), axis=0)
y_train_combine = np.concatenate((y_train, y_aug))
idx = np.random.permutation(X_train_combine.shape[0])
reverse_idx = np.argsort(idx)
X_train_combine = X_train_combine[idx]
y_train_combine = y_train_combine[idx]
# perform pca
print('Performing PCA...')
n_components = 100
pca_ = pca(n_components=n_components)
pca_.fit(X_train_combine)
pca_.save('model/%s~%s_aug.pca' % (from_date, to_date))
print('%s components can explain %.2f%% variance.' % (n_components, pca_.ratio_*100))
X_reduced_train = pca_.transform(X_train_combine)
X_reduced_test = pca_.transform(X_test)
# train svm
print('Training SVM...')
clf = svm(C=2, probability=True)
clf.fit(X_reduced_train, y_train_combine)
clf.save('model/%s~%s_aug.svm' % (from_date, to_date))
# score
y_pred_train = clf.predict(X_reduced_train)
y_pred_test = clf.predict(X_reduced_test)
train_score = compute_score(y_train_combine, y_pred_train, classes=[0, 1])
test_score = compute_score(y_test, y_pred_test, classes=[0, 1])
print_score(train_score, 'Train score of SVM classifier(+aug+PCA)')
print_score(test_score, 'Test score of SVM classifier(+aug+PCA)')
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/SVM/svm_with_virtual_data_augmentation.py
|
svm_with_virtual_data_augmentation.py
|
# -*- coding: utf-8 -*-
import sys
sys.path.append('../../utils')
from utils import *
from word2vec_utils import *
from sklearn.svm import SVC
import os
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
connection_string = 'cis/[email protected]:1521/orcl'
from_date = '2017-06-01'
to_date = '2017-08-03'
wordvec_file = '../../data/news.ten.zh.text.vector'
stopwords_file = '../../data/stop_words.txt'
data_file = '../../data/%s~%s.pkl' % (from_date, to_date)
if os.path.exists(data_file):
data_cut, y = load_data_from_pickle(data_file)
else:
# connect database
ora_conn = cx_Oracle.connect(connection_string)
# fetch data
data, y = fetch_data_from_oracle(ora_conn, from_date, to_date, label_map={'负': 0, '非负': 1})
# cut data
data_cut = segment(data)
save_data([data_cut, y], data_file)
# doc2vec: average vector
print('Loading word vectors...')
model = load_wordvec(wordvec_file, binary=False)
stopwords = load_stopwords(stopwords_file, encoding='utf-8')
X = buildVecs(data_cut, stopwords, model)
(X_train, y_train), (X_test, y_test) = train_test_split(X, y)
# perform pca
print('Performing PCA...')
n_components = 100
pca_ = pca(n_components=n_components)
pca_.fit(X_train)
# pca_.save('%s~%s.pca' % (from_date, to_date))
print('%s components can explain %.2f%% variance.' % (n_components, pca_.ratio_*100))
X_reduced_train = pca_.transform(X_train)
X_reduced_test = pca_.transform(X_test)
# train svm
# param_grid = [
# {'C': [1, 10, 100, 1000], 'kernel': ['linear'], 'class_weight': ['balanced', None], 'probability': [True]},
# {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001, 'auto'], 'kernel': ['rbf'], 'class_weight': ['balanced', None], 'probability': [True]},
# {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001, 'auto'], 'kernel': ['sigmoid'], 'class_weight': ['balanced', None], 'probability': [True]},
# {'C': [1, 10, 100, 1000], 'degree': [2, 3, 4], 'kernel': ['poly'], 'class_weight': ['balanced', None], 'probability': [True]}
# ]
param_grid = [
{'C': [5, 10, 20, 30], 'kernel': ['linear'], 'class_weight': ['balanced', None]},
{'C': [5, 10, 20, 30], 'gamma': [0.001, 0.0001, 'auto'], 'kernel': ['rbf'], 'class_weight': ['balanced', None]},
{'C': [5, 10, 20, 30], 'gamma': [0.001, 0.0001, 'auto'], 'kernel': ['sigmoid'], 'class_weight': ['balanced', None]},
{'C': [5, 10, 20, 30], 'degree': [2, 3, 4], 'kernel': ['poly'], 'class_weight': ['balanced', None]}
]
print('Training SVM...')
scores = ['precision', 'recall', 'f1']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(), param_grid, cv=5, n_jobs=5,
scoring='%s_macro' % score)
clf.fit(X_reduced_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_reduced_test)
print(classification_report(y_true, y_pred))
print()
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/SVM/svm_parameter_selection.py
|
svm_parameter_selection.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/24 19:09
# @Author : 程婷婷
# @FileName: __init__.py
# @Software: PyCharm
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/SVM/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
from tfidf_utils import Vocabulary
import numpy as np
import time
def avgvector_virtue_augmentation(cut_data, label, model, emotion_dict, num_aug=10000, neg_aug_ratio=0.8, \
ratio=[0.3, 0.5, 0.2], min_virtue_sent_len=10, max_virtue_sent_len=500):
'''ratio: [p1, p2, p3], p1: prob of words from related emotion dict; p2: prob of words from related vocab;
p3: prob of words from opposite vocab.
'''
assert len(cut_data) == len(label)
assert min_virtue_sent_len <= max_virtue_sent_len and min_virtue_sent_len >= 1
signature = int(time.time())
neg_vocab = Vocabulary(signature=signature, name='negative')
pos_vocab = Vocabulary(signature=signature, name='positive')
vocab_dict = {0: neg_vocab, 1: pos_vocab} # label=0 stands for negative
for i, d in enumerate(cut_data):
vocab_dict[label[i]].update(d)
emotion_dict_ = {'neg': [], 'pos': []}
for word, strength in emotion_dict.items():
if strength <= 0:
emotion_dict_['neg'].append(word)
else:
emotion_dict_['pos'].append(word)
num_neg = max(int(num_aug * neg_aug_ratio), 1)
num_pos = max(num_aug - num_neg, 1)
aug_data = []
aug_label = []
neg_words = list(neg_vocab.voc.keys())
neg_words_prob = list(neg_vocab.voc.values())
neg_words_prob = np.array(neg_words_prob) / np.sum(neg_words_prob)
pos_words = list(pos_vocab.voc.keys())
pos_words_prob = list(pos_vocab.voc.values())
pos_words_prob = np.array(pos_words_prob) / np.sum(pos_words_prob)
sents_len = np.random.randint(min_virtue_sent_len, max_virtue_sent_len, size=num_neg)
for i in range(num_neg):
d_ = []
n_neg_related_vocab_words = int(ratio[1]*sents_len[i])
n_neg_opposite_vocab_words = int(ratio[2]*sents_len[i])
n_neg_emotion_words = sents_len[i] - n_neg_opposite_vocab_words - n_neg_related_vocab_words
# words from emotion dict
d_.extend(np.random.choice(emotion_dict_['neg'], replace=True, size=n_neg_emotion_words))
# words from related vocab
d_.extend(np.random.choice(neg_words, replace=True, p=neg_words_prob, size=n_neg_related_vocab_words))
# words from opposite vocab
d_.extend(np.random.choice(pos_words, replace=True, p=pos_words_prob, size=n_neg_opposite_vocab_words))
vec_ = 0
actual_len = 0.
for word in d_:
try:
vec_ = vec_ + model[word]
actual_len += 1
except KeyError:
continue
if actual_len > 0:
aug_data.append(vec_/actual_len)
aug_label.append(0)
sents_len = np.random.randint(min_virtue_sent_len, max_virtue_sent_len, size=num_pos)
for i in range(num_pos):
d_ = []
n_pos_related_vocab_words = int(ratio[1]*sents_len[i])
n_pos_opposite_vocab_words = int(ratio[2]*sents_len[i])
n_pos_emotion_words = sents_len[i] - n_pos_opposite_vocab_words - n_pos_related_vocab_words
# words from emotion dict
d_.extend(np.random.choice(emotion_dict_['pos'], replace=True, size=n_pos_emotion_words))
# words from related vocab
d_.extend(np.random.choice(pos_words, replace=True, p=pos_words_prob, size=n_pos_related_vocab_words))
# words from opposite vocab
d_.extend(np.random.choice(neg_words, replace=True, p=neg_words_prob, size=n_pos_opposite_vocab_words))
vec_ = 0
actual_len = 0.
for word in d_:
try:
vec_ = vec_ + model[word]
actual_len += 1
except KeyError:
continue
if actual_len > 0:
aug_data.append(vec_/actual_len)
            aug_label.append(1)  # positive samples get label 1 (0 is the negative label)
return aug_data, aug_label
def avgvector_virtue_complementary_augmentation(cut_data, label, model, emotion_dict, num_aug=10000, neg_aug_ratio=0.8, \
ratio=[0.3, 0.5, 0.2], min_virtue_sent_len=10, max_virtue_sent_len=500):
'''ratio: [p1, p2, p3], p1: prob of words from opposite emotion dict; p2: prob of words from opposite vocab;
p3: prob of words from related vocab.
'''
assert len(cut_data) == len(label)
assert min_virtue_sent_len <= max_virtue_sent_len and min_virtue_sent_len >= 1
signature = int(time.time())
neg_vocab = Vocabulary(signature=signature, name='negative')
pos_vocab = Vocabulary(signature=signature, name='positive')
vocab_dict = {0: neg_vocab, 1: pos_vocab} # label=0 stands for negative
for i, d in enumerate(cut_data):
vocab_dict[label[i]].update(d)
emotion_dict_ = {'neg': [], 'pos': []}
for word, strength in emotion_dict.items():
if strength <= 0:
emotion_dict_['neg'].append(word)
else:
emotion_dict_['pos'].append(word)
num_neg = max(int(num_aug * neg_aug_ratio), 1)
num_pos = max(num_aug - num_neg, 1)
aug_data = []
aug_label = []
neg_words = list(neg_vocab.voc.keys())
neg_words_prob = list(neg_vocab.voc.values())
neg_words_prob = np.array(neg_words_prob) / np.sum(neg_words_prob)
pos_words = list(pos_vocab.voc.keys())
pos_words_prob = list(pos_vocab.voc.values())
pos_words_prob = np.array(pos_words_prob) / np.sum(pos_words_prob)
sents_len = np.random.randint(min_virtue_sent_len, max_virtue_sent_len, size=num_neg)
for i in range(num_neg):
d_ = []
n_neg_opposite_vocab_words = int(ratio[1]*sents_len[i])
n_neg_related_vocab_words = int(ratio[2]*sents_len[i])
n_pos_emotion_words = sents_len[i] - n_neg_opposite_vocab_words - n_neg_related_vocab_words
# words from emotion dict
d_.extend(np.random.choice(emotion_dict_['pos'], replace=True, size=n_pos_emotion_words))
# words from related vocab
d_.extend(np.random.choice(neg_words, replace=True, p=neg_words_prob, size=n_neg_related_vocab_words))
# words from opposite vocab
d_.extend(np.random.choice(pos_words, replace=True, p=pos_words_prob, size=n_neg_opposite_vocab_words))
vec_ = 0
actual_len = 0.
for word in d_:
try:
vec_ = vec_ + model[word]
actual_len += 1
except KeyError:
continue
if actual_len > 0:
aug_data.append(vec_/actual_len)
aug_label.append(2) # new label: fake
sents_len = np.random.randint(min_virtue_sent_len, max_virtue_sent_len, size=num_pos)
for i in range(num_pos):
d_ = []
n_pos_opposite_vocab_words = int(ratio[1]*sents_len[i])
n_pos_related_vocab_words = int(ratio[2]*sents_len[i])
n_neg_emotion_words = sents_len[i] - n_pos_opposite_vocab_words - n_pos_related_vocab_words
# words from emotion dict
d_.extend(np.random.choice(emotion_dict_['neg'], replace=True, size=n_neg_emotion_words))
# words from related vocab
d_.extend(np.random.choice(pos_words, replace=True, p=pos_words_prob, size=n_pos_related_vocab_words))
# words from opposite vocab
d_.extend(np.random.choice(neg_words, replace=True, p=neg_words_prob, size=n_pos_opposite_vocab_words))
vec_ = 0
actual_len = 0.
for word in d_:
try:
vec_ = vec_ + model[word]
actual_len += 1
except KeyError:
continue
if actual_len > 0:
aug_data.append(vec_/actual_len)
aug_label.append(2) # new label: fake
return aug_data, aug_label
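# Usage sketch (all names are placeholders): `cut_data` is a list of token lists,
# `labels` uses 0 for negative and 1 for non-negative, `model` is a gensim
# KeyedVectors instance and `emotion_dict` maps words to signed strengths.
#
#   X_aug, y_aug = avgvector_virtue_augmentation(
#       cut_data, labels, model, emotion_dict,
#       num_aug=1000, neg_aug_ratio=0.8, ratio=[0.3, 0.5, 0.2])
#   X_fake, y_fake = avgvector_virtue_complementary_augmentation(
#       cut_data, labels, model, emotion_dict, num_aug=1000)  # fake samples labelled 2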
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/utils/augmentation_utils.py
|
augmentation_utils.py
|
# -*- coding: utf-8 -*-
from gensim.models import word2vec, KeyedVectors
import numpy as np
def sent2word(segResult, stopwords):
"""
Segment a sentence to words
Delete stopwords
"""
newSent = []
for word in segResult:
if word in stopwords:
continue
else:
newSent.append(word)
return newSent
def getWordVecs(wordList, model):
vecs = []
for word in wordList:
word = word.replace('\n', '')
try:
vecs.append(model[word])
except KeyError:
continue
# vecs = np.concatenate(vecs)
return np.array(vecs, dtype = 'float')
def buildVecs(data, stopwords, model):
posInput = []
for line in data:
line = sent2word(line, stopwords)
resultList = getWordVecs(line, model)
# for each sentence, the mean vector of all its vectors is used to represent this sentence
if len(resultList) != 0:
resultArray = sum(np.array(resultList))/len(resultList)
posInput.append(resultArray)
return posInput
def load_wordvec(wordvec_file, binary=False):
# model = word2vec.Word2Vec.load_word2vec_format(wordvec_file, binary=binary)
model = KeyedVectors.load_word2vec_format(wordvec_file, binary=binary)
return model
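# Minimal usage sketch (the vector file path is a placeholder and must point to a
# word2vec text-format file; tokens unknown to the model are skipped by getWordVecs):
#
#   model = load_wordvec('data/news.ten.zh.text.vector', binary=False)
#   stopwords = ['的', '了']
#   docs = [['股市', '大跌', '的'], ['公司', '发布', '年报']]
#   vectors = buildVecs(docs, stopwords, model)  # one mean vector per document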
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/utils/word2vec_utils.py
|
word2vec_utils.py
|
# -*- coding: utf-8 -*-
import pickle, os
from gensim.models import word2vec, KeyedVectors
import numpy as np
from sklearn.decomposition import PCA
from sklearn.externals import joblib
import jieba
import cx_Oracle
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
_backend = 'jieba'
try:
from jpype import *
startJVM(getDefaultJVMPath(), "-Djava.class.path=/home/hongjp/hanlp/hanlp-portable-1.3.4.jar:/home/hongjp/hanlp", "-Xms1g", "-Xmx1g") # 启动JVM,Linux需替换分号;为冒号:
HanLP = JClass('com.hankcs.hanlp.HanLP')
_backend = 'hanlp'
print('Using HanLP as Chinese sentence segmentation backend.')
except Exception as e:
print('Fail to load `HanLP`. Using `jieba` as default Chinese sentence segmentation backend.')
def load_data_from_excel(excel_file, config):
pass
def load_data_from_pickle(pickle_file):
with open(pickle_file, 'rb') as f:
data = pickle.load(f)
return data
def load_stopwords(sw_file, encoding='utf-8'):
with open(sw_file, 'r', encoding=encoding) as f:
stopwords = f.read().strip().split('\n')
return stopwords
def load_emotion_dict(emotion_dict_file):
return load_data_from_pickle(emotion_dict_file)
def segment(data):
def hanlp_cut(d):
cut = HanLP.segment(d)
t = [cut[i].word for i in range(len(cut))]
return t
if _backend == 'jieba':
return [jieba.lcut(d) for d in data]
else: # _backend = 'hanlp'
return [hanlp_cut(d) for d in data]
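# For example, segment(['今天股市大跌。']) returns one token list per input string,
# e.g. [['今天', '股市', '大跌', '。']]; the exact tokens depend on the active backend.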
def train_test_split(X, y, ratio=0.8):
X = np.array(X, copy=False)
y = np.array(y, copy=False)
assert X.shape[0] == len(y)
N = X.shape[0]
N_train = int(N * ratio)
idx = np.arange(N)
np.random.shuffle(idx)
X_train = X[idx[:N_train]]
X_test = X[idx[N_train:]]
y_train = y[idx[:N_train]]
y_test = y[idx[N_train:]]
return (X_train, y_train), (X_test, y_test)
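# Toy example (synthetic data, for illustration only):
#
#   X = np.arange(20).reshape(10, 2)
#   y = np.arange(10)
#   (X_tr, y_tr), (X_te, y_te) = train_test_split(X, y, ratio=0.8)  # 8/2 row split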
def compute_score(Y, predicted_Y, classes=[0, 1]):
recall = {}
precision = {}
F1 = {}
Y = np.array(Y, copy=False)
predicted_Y = np.array(predicted_Y, copy=False)
for key in classes:
N_key = np.sum(Y == key)
if N_key == 0:
recall[key] = 0.0
else:
recall[key] = np.sum((Y == key)*(predicted_Y == key))/(N_key+0.0)
N_predicted_pos = np.sum(predicted_Y == key)
if N_predicted_pos == 0:
precision[key] = 0.0
else:
precision[key] = np.sum((Y == key)*(predicted_Y == key))/(N_predicted_pos+0.0)
        if recall[key] + precision[key] == 0:
            F1[key] = 0.0
        else:
            F1[key] = 2*recall[key]*precision[key]/(recall[key]+precision[key])
return {'recall': recall, 'precision': precision, 'F1': F1}
def print_score(score, title=None):
if title:
print('='*53)
# print('|' + ' '*51 + '|')
print('|{:^51s}|'.format(title[:51]))
# print('|' + ' '*51 + '|')
print('='*53)
print('|{:^12s}|{:^12s}|{:^12s}|{:^12s}|'.format('class', 'recall', 'precision', 'F1'))
for label in score['recall']:
print('|{:^12s}|{:^12s}|{:^12s}|{:^12s}|'.format(
str(label), '%.2f'%score['recall'][label], '%.2f'%score['precision'][label], '%.2f'%score['F1'][label]))
print('='*53)
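# Toy example of the two helpers above (synthetic labels, for illustration only):
#
#   score = compute_score([0, 1, 1, 0], [0, 1, 0, 0], classes=[0, 1])
#   print_score(score, 'Toy example')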
def print_multi_scores(score_list, title=None):
N = len(score_list)
assert N > 1 # when N=1, use print_score instead, otherwise, alignment might be troublesome, for len('precision') > 6.
length = 12 + 5 + 6*N*3 + 2*3 # class_col + `|`*5 + score_col*N*3 + `/`*2*3
if title:
print('='*length)
print(('|{:^%ds}|'%(length-2)).format(title[:length]))
print('='*length)
format_ = '|{:^12s}|' + ('{:^6s}/'*(N-1) + '{:^6s}|') * 3
col_title_format = '|{:^12s}|' + '{:^%ds}|'%(N*6+2) * 3
print(col_title_format.format('class', 'recall', 'precision', 'F1'))
for label in score_list[0]['recall']:
data = [str(label)]
for obj in ['recall', 'precision', 'F1']:
for score in score_list:
data.append('%.2f'%score[obj][label])
print(format_.format(*data))
print('='*length)
class pca():
def __init__(self, n_components=100):
self.n_components = n_components
self._pca = PCA(n_components=self.n_components)
self._is_fitted = False
def fit(self, X):
self._pca.fit(X)
self._is_fitted = True
self.explained_variance_ratio_ = self._pca.explained_variance_ratio_
self.ratio_ = np.sum(self.explained_variance_ratio_[:self.n_components])
def transform(self, X):
if self._is_fitted:
return self._pca.transform(X)
else:
            print('PCA has not yet been fitted. It will be fitted on this data. ' + \
                'If this is not what you want, check your code, and fit the model first.')
return self._pca.fit_transform(X)
def save(self, save_to):
joblib.dump(self._pca, save_to)
# @staticmethod
# def load(model_file):
# _pca = joblib.load(model_file)
# pca_ = pca(n_components=_pca.n_components_)
# pca_._pca = _pca
# pca_._is_fitted = True
# pca_.explained_variance_ratio_ = _pca.explained_variance_ratio_
# pca_.ratio_ = np.sum(pca_.explained_variance_ratio_[:pca_.n_components])
# return pca_
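# Usage sketch for the pca wrapper above (X is any (n_samples, n_features) array
# with at least 100 features; the save path is a placeholder):
#
#   p = pca(n_components=100)
#   p.fit(X)
#   X_reduced = p.transform(X)
#   p.save('model/example.pca')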
def fetch_data_from_oracle(connection, from_date, to_date, label_map=None):
print('Fetching data from remote oracle, this might take some time...')
query = '''select b.title,b.content_no_tag,b.orientation as relevance from cis_ans_basedata b inner join cis_ans_basedata_type t on (b.id=t.bid and t.delflag = 0 and (t.repeat=0 or t.repeat is null))
where B.Publish_Date > '%s' and B.Publish_Date < '%s' ''' % (from_date, to_date)
cursor = connection.cursor()
cursor.execute(query)
data = []
label = []
def convert(col):
if isinstance(col, cx_Oracle.LOB):
return col.read().decode('utf-8')
else:
return col
for i, record in enumerate(cursor):
if i % 1000 == 0:
print('.', end='', flush=True)
title = convert(record[0])
article = convert(record[1])
emotion = convert(record[2])
if article is None:
continue
else:
if emotion is None:
emotion = 1
if title is not None:
title = title.strip()
else:
title = ''
article = article.strip()
emotion = '负' if emotion == '2' else '非负'
if label_map:
emotion = label_map[emotion] # for example, convert '负', '非负' to 0, 1 respectively
data.append(title+'。'+article)
label.append(emotion)
connection.close()
print('.')
return data, label
def save_data(data, save_to_file):
with open(save_to_file, 'wb') as f:
pickle.dump(data, f)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/utils/utils.py
|
utils.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/24 19:09
# @Author : 程婷婷
# @FileName: __init__.py
# @Software: PyCharm
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/utils/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
import pickle
import numpy as np
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
class Vocabulary(object):
def __init__(self, signature, min_word_len=2, name='voc'):
self.signature = signature
self.min_word_len = min_word_len
self.name = name
self.voc = dict()
self.freq = dict()
self.doc_freq = dict()
self.oov = None
self.size = 0
self._fixed_voc = False
def set_state(self, fixed=False):
assert fixed in [True, False, 0, 1]
self._fixed_voc = fixed
def get_state(self):
state = 'Fixed' if self._fixed_voc else 'Not fixed'
return state
def shuffle(self):
self.check_state()
idx = np.random.permutation(self.size)
shuffled_voc = dict()
shuffled_freq = dict()
shuffled_doc_freq = dict()
for key, id in self.voc.items():
shuffled_voc[key] = idx[id]
shuffled_freq[idx[id]] = self.freq[id]
shuffled_doc_freq[idx[id]] = self.doc_freq[id]
del self.voc, self.freq, self.doc_freq
self.voc, self.freq, self.doc_freq = shuffled_voc, shuffled_freq, shuffled_doc_freq
def _is_useless(self, x):
if len(x) < self.min_word_len:
return True
if x.strip('''#&$_%^*-+=<>`~!@(())??/\\[]{}—"';::;,。,.‘’“”|…\n abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890''') == '':
return True
return False
def update(self, words):
if self._fixed_voc:
raise Exception('Fixed vocabulary does not support update.')
for word in words:
if not self._is_useless(word):
id = self.voc.get(word, None)
if id is None: # new word
self.voc[word] = self.size
self.freq[self.size] = 1
self.doc_freq[self.size] = 0 # create doc_freq item
self.size += 1
else:
self.freq[id] += 1
for word in set(words):
if not self._is_useless(word):
id = self.voc.get(word, None)
if id is not None:
self.doc_freq[id] += 1 # update doc_freq
def get(self, word):
return self.voc.get(word, self.oov)
def __getitem__(self, word):
return self.voc.get(word, self.oov)
# def __setitem__(self, word, val):
# self.voc.__setitem__(word, val)
def __contains__(self, word):
return self.voc.__contains__(word)
def __iter__(self):
return iter(self.voc)
def __sizeof__(self):
return self.voc.__sizeof__() + self.freq.__sizeof__() + self.signature.__sizeof__() + self.size.__sizeof__() + \
self.name.__sizeof__() + self._fixed_voc.__sizeof__() + self.oov.__sizeof__() + self.doc_freq.__sizeof__()
def __delitem__(self, word): # delete would destory the inner representation
if self._fixed_voc:
raise Exception('Fixed vocabulary does not support deletion.')
else:
raise NotImplementedError
def get_size(self):
return self.size
def clear(self):
del self.voc, self.freq, self.doc_freq
self.voc = dict()
self.freq = dict()
self.doc_freq = dict()
self.size = 0
self._fixed_voc = False
def check_state(self):
return len(self.voc) == self.size and len(self.freq) == self.size and len(self.doc_freq) == self.size
def to_dict(self):
return self.voc
def set_signature(self, new_signature):
self.signature = new_signature
def remove(self, words_list):
size = 0
new_voc = {}
new_freq = {}
new_doc_freq = {}
for word in self.voc:
id = self.voc[word]
if word in words_list:
continue
else:
new_voc[word] = size
new_freq[size] = self.freq[id]
new_doc_freq[size] = self.doc_freq[id]
size += 1
self.size = size
self.voc = new_voc
self.freq = new_freq
self.doc_freq = new_doc_freq
def save(self, file_name=None):
save_to = (file_name if file_name else self.name)+'-%s.voc'%self.signature
with open(save_to, 'wb') as f:
pickle.dump([self.voc, self.freq, self.doc_freq, self.size, self.min_word_len, \
self.oov, self._fixed_voc, self.name, self.signature], f)
@classmethod
def load(cls, file_name):
with open(file_name, 'rb') as f:
[voc, freq, doc_freq, size, min_word_len, oov, _fixed, name, signature] = pickle.load(f)
        voc_from_file = cls(signature, min_word_len=min_word_len, name=name)
voc_from_file.voc = voc
voc_from_file.freq = freq
voc_from_file.doc_freq = doc_freq
voc_from_file.size = size
voc_from_file.min_word_len = min_word_len
voc_from_file.oov = oov
voc_from_file._fixed_voc = _fixed
voc_from_file.signature = signature
return voc_from_file
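# Usage sketch (toy token lists; update() ignores words shorter than min_word_len
# characters as well as pure punctuation/ASCII tokens):
#
#   voc = Vocabulary(signature=0, name='demo')
#   voc.update(['股市', '大跌', '股市'])
#   voc.update(['公司', '年报'])
#   print(voc.get_size())   # 4 distinct words
#   print(voc['股市'])       # integer id, or None for unseen words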
class TfidfTransf():
def __init__(self, signature, vocab=None, transformer_type='tfidf', transformer_norm='l2', vocab_name='vocab', min_word_len=2):
self._type = transformer_type.lower()
self._norm = transformer_norm.lower()
self.signature = signature
self.vocab_name = vocab_name
self.min_word_len = min_word_len
self.cv = None
self.transformer = None
if vocab:
if isinstance(vocab, Vocabulary):
self.vocab = vocab
else:
raise TypeError('Vocab needs input of type `Vocabulary`, but got %s.' % (type(vocab)))
else:
self.vocab = Vocabulary(signature, name=self.vocab_name, min_word_len=self.min_word_len)
def update_vocab(self, data):
for doc in data:
self.vocab.update(doc)
def set_state(self, fixed=False):
self.vocab.set_state(fixed=fixed)
def remove_from_vocab(self, words_or_vocab):
self.vocab.remove(words_or_vocab)
def fit(self, data):
if self.vocab.get_size() == 0:
            print('Warning: Vocabulary is not yet built. It will be built on this data.' + \
                ' If this is not what you want, update the vocab first.')
self.update_vocab(data)
self.vocab.set_state(fixed=True)
self.cv = CountVectorizer(decode_error='replace', vocabulary=self.vocab.to_dict())
if self._type == 'tf':
self.transformer = TfidfTransformer(norm=self._norm, use_idf=False)
else:
self.transformer = TfidfTransformer(norm=self._norm, use_idf=True)
return self.transformer.fit(self.cv.transform(data))
def transform(self, data):
if self.transformer and self.cv:
return self.transformer.transform(self.cv.transform(data))
else:
            print('Warning: The transformer has not yet been fitted. It will be fitted on this data.' + \
                ' If this is not what you want, fit it first.')
self.fit(data)
return self.transform(data)
def save(self, save_to):
joblib.dump([self.vocab, self.cv, self.transformer], save_to)
    @classmethod
    def load(cls, model_file):
        vocab, cv, transformer = joblib.load(model_file)
        model = cls(signature=vocab.signature, vocab=vocab)
        model.cv = cv
        model.transformer = transformer
        return model
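# Usage sketch for TfidfTransf (note: update_vocab expects token lists, while fit and
# transform expect whitespace-joined strings, because CountVectorizer re-tokenises the
# raw text against the fixed vocabulary):
#
#   docs_tokens = [['股市', '大跌'], ['公司', '发布', '年报']]
#   docs = [' '.join(d) for d in docs_tokens]
#   tt = TfidfTransf(signature=0, transformer_type='tfidf')
#   tt.update_vocab(docs_tokens)
#   tt.fit(docs)
#   X_tfidf = tt.transform(docs)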
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/utils/tfidf_utils.py
|
tfidf_utils.py
|