code | package | path | filename
---|---|---|---
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/7/23 9:18
# @Author : 程婷婷
# @FileName: app.py
# @Software: PyCharm
# -*- coding: utf-8 -*-
from flask import Flask, g, render_template, flash, redirect, url_for, request, abort, session
from ner.extract import Extract
import pandas as pd
import traceback
import json
import os
from pyhanlp import HanLP
DEBUG = False
PORT = 8018
HOST = '0.0.0.0'
# HOST = '127.0.0.1'
app = Flask(__name__)
file_path = os.path.dirname(os.path.realpath(__file__))
txt_path = r'./data/feature_dict.txt'
province_path = r'./data/province.txt'
country_path = 'data/国家名称.xlsx'
country_df = pd.read_excel(os.path.join(file_path,country_path), header=None)[0].tolist()
country_df.remove('中国')
extract = Extract(country=country_df)
province = extract.read_txt(os.path.join(file_path, province_path))
money_feature = extract.read_txt(os.path.join(file_path,txt_path))
address_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/address_filter.txt'))
capacity_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/capacity_filter.txt'))
entity_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/entity_filter.txt'))
money_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/money_filter.txt'))
project_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/project_filter.txt'))
state_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/state_filter.txt'))
time_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/time_filter.txt'))
address_pattern = extract.read_txt(os.path.join(file_path,'./data/pattern/address_pattern.txt'))
capacity_pattern = extract.read_txt(os.path.join(file_path,'./data/pattern/capacity_pattern.txt'))
money_pattern = extract.read_txt(os.path.join(file_path,r'./data/pattern/money_pattern.txt'))
state_pattern = extract.read_txt(os.path.join(file_path,r'./data/pattern/state_pattern.txt'))
time_pattern = extract.read_txt(os.path.join(file_path,r'./data/pattern/time_pattern.txt'))
state_no_words = extract.read_txt(os.path.join(file_path,r'./data/filter/state_no_words.txt'))
model = HanLP.newSegment('crf').enableOrganizationRecognize(True)
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
def get_return_info(money_results=None, address_results=None, capacity_results=None,jia=None, yi=None, project_results=None, country_results=None, state_results=None, time_results=None):
return json.dumps({'项目金额': money_results, '项目地址': address_results, '设计产能': capacity_results, '执行机构':jia ,'企业':yi,'项目名称': project_results,
'涉及国家':country_results, '项目状态':state_results, '项目周期':time_results}, ensure_ascii=False)
@app.route('/extract/', methods=('GET', 'POST'))
def get_prediction():
title = request.form.get('title')
text = request.form.get('text')
print(title)
if (title is None) or (len(str(title).strip()) == 0):
print('文章标题为空')
return get_return_info('文章标题为空')
if (text is None) or (len(str(text).strip()) == 0):
print('文章内容为空')
return get_return_info('文章内容为空')
try:
global_sentences = extract.segment_para(para=text)
money_results = extract.money_pattern(global_sentences=global_sentences, filter_list=money_filter, money_pattern=money_pattern, money_feature=money_feature)
address_results = extract.address_pattern(global_sentences=global_sentences, filter_list=address_filter, address_pattern=address_pattern)
capacity_results = extract.capacity_pattern(global_sentences=global_sentences, filter_list=capacity_filter, capacity_pattern=capacity_pattern)
jia, yi = extract.org_patterns(global_sentences=global_sentences, filter_list=entity_filter, province=province, model=model)
# org_results.append(jia)
# org_results1.append(yi)
project_results = extract.pro(global_sentences=global_sentences, title=title, objects=project_filter, filter_list=project_filter)
country_results = extract.country_pattern(title=title, para=text)
state_results = extract.state(global_sentences=global_sentences, title=title, filter_list=state_filter, state_pattern=state_pattern, state_no_words= state_no_words)
time_results = extract.time1(global_sentences=global_sentences, filter_list=time_filter, time_pattern=time_pattern)
except Exception:
return get_return_info(traceback.format_exc())
return get_return_info(money_results, address_results, capacity_results, jia, yi, project_results, country_results, state_results, time_results)
# if __name__ == '__main__':
app.run(debug=DEBUG, host=HOST, port=PORT)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/ner/app.py
|
app.py
|
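The `/extract/` service above accepts two form fields, `title` and `text`, and returns a JSON object keyed by the Chinese field names built in `get_return_info`. A minimal client sketch, assuming the service runs locally on the port configured above (8018); the sample title and text are made up:

```python
import requests

resp = requests.post(
    'http://127.0.0.1:8018/extract/',
    data={
        'title': '某公司中标海外电站项目',
        'text': '该项目总投资约12亿元人民币,位于东南亚,预计2022年建成投产。',
    },
)
print(resp.json())  # keys: 项目金额, 项目地址, 设计产能, 执行机构, 企业, 项目名称, 涉及国家, 项目状态, 项目周期
```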
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/7/17 17:01
# @Author : 程婷婷
# @FileName: feature_word.py
# @Software: PyCharm
import jieba.posseg as pseg # part-of-speech tagging
import pandas as pd
import os
class FeatureWord():
def read_file(self, filename, column):
df = pd.read_excel(filename)
df1 = df.dropna(subset=[column])
flags = []
features= []
print(len(df1[column]))
for sent in df1[column]:
sent = str(sent).replace(' ', '')
psegs = []
feature = []
words = pseg.cut(str(sent))
for word, flag in words:
psegs.append(flag)
feature.append(word)
flags.append(psegs)
features.append(feature[-1])
return features
def write_txt(self, filename, features):
if not os.path.exists(os.path.dirname(filename)):
os.mkdir(os.path.dirname(filename))
f = open(filename, 'w', encoding='utf-8')
for i in features:
f.write(i + '\n')
f.close()
# feature_word = FeatureWord()
# features = set(feature_word.read_file(filename=r'C:\Users\lenovo\Desktop\work\2020-07-13\一带一路_0706.xls', column='合同金额'))
# feature_word.write_txt('./data/feature_dict.txt', features)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/ner/feature_word.py
|
feature_word.py
|
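FeatureWord is the offline helper that produced `data/feature_dict.txt`, which `ner/app.py` later loads as `money_feature`: for every non-empty cell of the chosen Excel column it keeps only the last token of the jieba POS segmentation (typically the unit word of an amount). A minimal sketch of that step, using a made-up cell value:

```python
import jieba.posseg as pseg

# Hypothetical cell value; the real input is an Excel column such as '合同金额'
cell = '合同金额约5000万美元'
tokens = [word for word, flag in pseg.cut(cell)]
print(tokens[-1])  # the last token is what read_file() keeps as a candidate feature word
```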
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/7/22 15:03
# @Author : 程婷婷
# @FileName: extract.py
# @Software: PyCharm
import pandas as pd
import re
import jieba.posseg as pseg
import emoji
import time as time_time
import os
import difflib
from pyhanlp import *
class Extract:
def __init__(self, country):
self.country = country
def read_txt(self, filenames):
# r'./feature_dict.txt'
lines = []
f = open(filenames,'r', encoding='utf-8')
for line in f.readlines():
lines.append(line.strip('\n'))
return lines
def segment_para(self, para):
split_pattern = re.compile(r'\n|。|?|!|\?|\!')
global_sentences = split_pattern.split(emoji.demojize(str(para)))
global_sentences = [str(i)+'。' for i in global_sentences]
return global_sentences
def filter_para(self, filter_list, sentence):
phrase = ''
for i in filter_list:
if sentence.count(i) != 0:
phrase = sentence
break
return phrase
def money_pattern(self, global_sentences, filter_list, money_pattern, money_feature):
# split_pattern = re.compile(r'\,\;')
sentences, money = [], []
sentences = [self.filter_para(filter_list=filter_list, sentence=index) for index in global_sentences]
sentences = [i for i in sentences if len(i) != 0]
for i in sentences:
index = 0
psg = ''
words = []
for term in HanLP.segment(i):
if term.word in money_feature:
psg += 'E'
else:
psg += str(term.nature)
words.append(term.word)
psg += str(index)
index += 1
for pattern_str in money_pattern:
pattern = re.compile(r'' + pattern_str)
rules = re.finditer(pattern, psg)
for j in rules:
num = re.sub(r'\D', ' ', (j.group()))
num = num.strip()
start = int(num.split(' ')[0]) + 1
end = int(num.split(' ')[-1]) + 1
money.append(''.join(words[start:end]))
money = [index for index in money if len(index) != 0]
money = list(set(money))
print(money)
return ','.join(money)
def write_excel(self, filename, df):
if not os.path.exists(os.path.dirname(filename)):
os.mkdir(os.path.dirname(filename))
# df[column] = data
xlsx_content = pd.ExcelWriter(filename, engine='xlsxwriter')
df.to_excel(xlsx_content, sheet_name='Sheet1')
xlsx_content.close()
def project_pattern(self, title, para):
pattern = re.compile(r'([\。\!\!\:\:\ \丨]|)[a-zA-Z0-9\u4e00-\u9fa5]+((\u9879\u76ee)|(\u5de5\u7a0b)|(\u96a7\u9053))')
charter = ['。','!','!',':',':',',',' ','【','】',',']
# for i in range(len(data)):
# sent = data[i]
sent = title
try:
f1 = re.finditer(pattern, sent)
f1.__next__()
except StopIteration:
pattern = re.compile(r'([\。\!\!\:\:\ \丨]|)[a-zA-Z0-9\u4e00-\u9fa5]+((\u9879\u76ee)|(\u5de5\u7a0b))')
sent = para
f1 = re.finditer(pattern, sent[:800])
project = []
for index in f1:
start = index.span()[0]
end = index.span()[1]
before = sent[start:end].strip(' ')
after = re.sub('一带一路项目', '', before)
if len(after) != 0:
if after[0] in charter:
after = after.replace(after[0], '')
if len(after) > 2:
if (len(before) < 4) or ('个项目' in before):
pass
else:
project.append(after)
projects = list(set(project))
projects.sort(key=project.index)
return ','.join(projects[:2])
def address_pattern(self, global_sentences, filter_list, address_pattern):
# pattern = re.compile(r'')
# pattern1 = re.compile(r'')
address = []
sentences = [self.filter_para(filter_list=filter_list, sentence=index) for index in global_sentences]
sentences = [index for index in sentences if len(index) != 0]
for i in sentences:
for pattern_str in address_pattern:
pattern = re.compile(r'' + pattern_str)
f= re.finditer(pattern, i)
for index in f:
start = index.span()[0]
end = index.span()[1]
before = i[start:end - 1].strip()
after = re.sub('\d','',before)
if len(before)-len(after) < 6:
address.append(''.join(before))
address = list(set(address))
print(address)
# address.sort(key=addr.index)
return ','.join(address)
def capacity_pattern(self, global_sentences, filter_list, capacity_pattern):
# pattern = re.compile(r'')
# pattern1 = re.compile(r'')
# pa = [pattern, pattern1]
capacity = []
for index in global_sentences:
index = self.filter_para(filter_list=filter_list, sentence=index)
if len(index) != 0:
for pattern_str in capacity_pattern:
pattern = re.compile(r'' + pattern_str)
f1 = re.finditer(pattern, index)
for i in f1:
start = i.span()[0]
end = i.span()[1]
if bool(re.search('\d+', i.group())):
capacity.append(''.join(index[start + 1:end - 1]))
capacity = list(set(capacity))
print(capacity)
return ','.join(capacity)
def country_pattern(self, title, para):
country_dict = {}
country_result = ''
paragraph = title + para
for i in self.country:
num = paragraph[:5000].count(i)
country_dict[i] = num
cou = max(country_dict.values())
if cou != 0:
country_result = ','.join([k for k, v in country_dict.items() if v == cou])
else:
for i in self.country:
num = paragraph[5000:].count(i)
country_dict[i] = num
cou = max(country_dict.values())
if cou != 0:
country_result = ','.join([k for k, v in country_dict.items() if v == cou])
return country_result
def org_pattern(self, global_sentences, province):
# split_pattern = re.compile(r'\n|。|?|!|\?|\!')
# sentences = split_pattern.split(self.para)
all_list = []
first_list, second_list = [], []
j = 0
def extactCompany(tree):
words, word1, word2= '', '', ''
list1, list2 = [], []
id = 0
for word in tree.iterator():
if word.HEAD.POSTAG == 'nt' and word.DEPREL == '定中关系' and ((word.HEAD.DEPREL == '定中关系') or (word.HEAD.DEPREL == '主谓关系')):
id = word.ID
word3 = word.LEMMA + word.HEAD.LEMMA
list1.append(word3)
if word.POSTAG == 'nt'and ((word.DEPREL == '定中关系') or (word.DEPREL == '主谓关系')):
if word.ID - id != 1:
words = word.LEMMA
word1 = word.HEAD.LEMMA
list1.append(words)
if word.HEAD.LEMMA == words and word.DEPREL == '并列关系' and word.POSTAG == 'nt':
list2.append(word.LEMMA)
else:
if word.HEAD.LEMMA == word1 and word.DEPREL == '状中结构':
word2 = word.LEMMA
if word.HEAD.LEMMA == word2 and word.DEPREL == '介宾关系' and word.POSTAG == 'nt':
list2.append(word.LEMMA)
return list1, list2
for index in global_sentences:
if ('签' in index) or ('中标' in index) or ('项目' in index):
index += '。'
model = HanLP.newSegment('crf').enableOrganizationRecognize(True)
org_list = model.seg(str(index))
for item in org_list:
if (str(item.nature) == 'nt'):
tree = HanLP.parseDependency(index)
list1, list2 = extactCompany(tree)
all_list.extend(list1)
all_list.extend(list2)
for i in all_list:
if (i[:2] in province) or (i[:3] in province):
second_list.append(i)
else:
first_list.append(i)
return ','.join(list(set(first_list))), ','.join(list(set(second_list)))
def org_patterns(self, global_sentences, filter_list, province, model):
sentences = [self.filter_para(filter_list=filter_list, sentence=index) for index in global_sentences]
sentence = ''
for index in sentences:
if len(index) != 0:
sentence += index
org, first_list, second_list = [], [], []
org_list = model.seg(str(sentence))
org_list = list(org_list)
for item in org_list:
if ((str(item.nature) == 'nt') or (str(item.nature) == 'ntc')) and ('银行' not in str(item.word)):
num = org_list.index(item)
word = item.word
if str(org_list[num-1].nature) == 'ns':
word = org_list[num-1].word + item.word
org.append(word)
org = list(set(org))
for i in org:
i = i.strip()
if (i[:1] in province) or (i[:2] in province) or (i[:3] in province):
second_list.append(i)
else:
first_list.append(i)
print(first_list)
print(second_list)
return ','.join(list(set(first_list))), ','.join(list(set(second_list)))
def pro(self, global_sentences, title, objects, filter_list):
# split_pattern = re.compile(r'\n|。|?|!|\?|\!|\s|;')
# sentences = split_pattern.split(self.para)
list_re = []
title_two = ''
def string_similar(str1, str2):
return difflib.SequenceMatcher(None, str1, str2).quick_ratio()
# sentences = hanlp.utils.rules.split_sentence(para)
def value(id, id1, filter, tree):
words = ''
if (id != 0) and (id1 != id):
for word in tree.iterator():
if word.ID >= id and word.ID <= id1:
if word.CPOSTAG in filter:
words = ''
break
else:
words += word.LEMMA
if len(words) != 0:
return words.split()[-1]
else:
return words
# Rule 1: based on the trigger verb and the coarse-grained POS tag of the first word(s) of the extracted entity
def one_pattern(tree, objects):
verb = ['签署','签订', '完成', '支援', '援助', '中标', '建设']
word_cpostag = ['ns', 'vg', 'nz', 'nh', 'ni']
list1= []
id, id0, id1, id_end = 0, 0, 0, 0
for word in tree.iterator():
# print(tree)
if id == 0:
if word.LEMMA in verb:
id = word.ID + 1
id0 = word.ID + 2
if id != 0:
if word.ID == id:
if word.CPOSTAG in word_cpostag:
id_end = id
elif word.ID == id0:
if word.CPOSTAG in word_cpostag:
id_end = id0
if (id_end != 0) and (word.LEMMA in objects):
id1 = word.ID
break
words = value(id=id_end, id1=id1, filter = ['v', 'u'], tree=tree)
if len(words) != 0:
list1.append(words)
return list1
# Rule 2:
def two_pattern(tree, objects):
id, id1 = 0, 0
list2 = []
word_cpostag = ['ns', 'Vg', 'nz', 'nh', 'ni']
for word in tree.iterator():
if word.DEPREL == '定中关系' and word.CPOSTAG in word_cpostag :
id = word.ID
# print(id)
if (word.LEMMA in objects) and (id != 0):
id1 = word.ID
# print(id1)
break
words = value(id=id, id1=id1, filter = [ 'wp', 'u'], tree=tree)
if len(words) != 0:
list2.append(words)
return list2
# Rule 3
def three_pattern(tree, objects):
id, id1 = 0, 0
list3 = []
word_cpostag = ['ns', 'Vg', 'nh', 'ni']
for word in tree.iterator():
if word.CPOSTAG in word_cpostag :
id = word.ID
# print(id)
if (word.LEMMA in objects) and (id != 0) and (word.HEAD.CPOSTAG == 'v'):
id1 = word.ID
# print(id1)
break
words = value(id=id, id1=id1, filter = ['v', 'wp', 'u'], tree=tree)
if len(words) != 0:
list3.append(words)
return list3
tree = HanLP.parseDependency(title)
title_two = two_pattern(tree, objects)
list_re.extend(title_two)
sentences = [self.filter_para(filter_list=filter_list, sentence=i)for i in global_sentences]
sentences = [i for i in sentences if len(i) != 0 ]
# print(sentences)
for index in sentences:
# if len(index) != 0:
# index += '。'
# print(index)
tree = HanLP.parseDependency(index)
para_three = three_pattern(tree ,objects)
list_re.extend(para_three)
# if len(title_two) == 0:
para_one = one_pattern(tree, objects)
list_re.extend(para_one)
list_re = list(set(list_re))
for i in range(len(list_re)-1, 0, -1):
sum = string_similar(list_re[0], list_re[i])
if sum >= 0.8:
list_re.pop(i)
return ','.join(list_re)
def time1(self, global_sentences, filter_list, time_pattern):
# split_pattern = re.compile(r'\n|。|?|!|\?|\!')
num = []
sentences = [self.filter_para(filter_list=filter_list, sentence=para) for para in global_sentences]
for phrase in sentences:
if len(phrase) != 0:
run = False
for pattern_str in time_pattern:
pattern = re.compile(r'' + pattern_str)
num1 = re.finditer(pattern, phrase)
for index in num1:
start = re.finditer('\d+', index.group())
for i in start:
start = i.span()[0]
num.append(index.group()[start:])
run = True
break
if run:
break
time = list(set(num))
print(time)
return ','.join(time)
def state(self, global_sentences, title, filter_list, state_pattern, state_no_words):
state = []
states = False
year = time_time.asctime(time_time.localtime(time_time.time()))[-4:]
def isyears(string):
years = re.finditer('(\d){4}', string)
return [index.group() for index in years]
start1 = state_pattern.index('建设阶段')
start2 = state_pattern.index('运营阶段')
start3 = state_pattern.index('完成阶段')
sentences = [self.filter_para(filter_list=filter_list, sentence=i) for i in global_sentences]
if len(state) == 0:
for para in sentences:
if len(para) != 0:
for term in HanLP.segment(para):
if str(term.nature) == 'ns' or str(term.nature) == 'nt' or str(term.nature) == 'ntc' or str(term.nature) == 'nsf' or str(term.nature) == 'm':
states = True
if states:
for pattern_str in state_pattern[start1+1:start2]:
pattern = re.compile(r'' + pattern_str)
result = re.finditer(pattern, para)
for item in result:
word = item.group()
if len(word) != 0:
if len(self.filter_para(filter_list=state_no_words, sentence=word)) == 0:
years = isyears(word)
if len(years) == 0:
state.append('建设阶段')
# print(word)
# print(para)
states = False
else:
if years[-1] == year:
state.append('建设阶段')
states = False
break
if states:
for pattern_str2 in state_pattern[start2+1:start3]:
pattern_two = re.compile(pattern_str2)
two_result = re.finditer(pattern_two, para)
for item in two_result:
word = item.group()
if len(word) != 0:
if len(self.filter_para(filter_list=state_no_words, sentence=word)) == 0:
years = isyears(word)
if len(years) == 0:
state.append('运营阶段')
states = False
else:
if years[-1] > year:
state.append('建设阶段')
states = False
if years[-1] == year:
state.append('运营阶段')
states = False
break
if states:
for pattern_str3 in state_pattern[start3+1:]:
pattern_three = re.compile(pattern_str3)
three_result = re.finditer(pattern_three, para)
for item in three_result:
word = item.group()
if len(word) != 0:
if len(self.filter_para(filter_list=state_no_words, sentence=word)) == 0:
years = isyears(word)
if len(years) == 0:
state.append('完成阶段')
else:
if years[-1] <= year:
state.append('完成阶段')
break
state = list(set(state))
if len(state) == 0:
state.append('建设阶段')
print(state)
return ','.join(state)
# if __name__ == '__main__':
# txt_path = r'./data/feature_dict.txt'
# province_path = r'./province.txt'
# country_path = r'./data/国家名称.xls'
# excel_path = r'C:\Users\lenovo\Desktop\一带一路_0706.xls'
# country_df = pd.read_excel(country_path, header= None)[0].tolist()
# country_df.remove('中国')
# # nlp = StanfordCoreNLP(r'E:\迅雷下载\stanford-corenlp-latest\stanford-corenlp-4.1.0', lang='zh')
# df = pd.read_excel(excel_path)[:200]
# money_results = []
# project_results = []
# project_results1 = []
# address_results =[]
# capacity_results =[]
# country_results = []
# org_results = []
# org_results1 = []
# time_results = []
# state_results = []
# for i in range(len(df['内容'])):
# print('============================================================'+ str(i))
# extract = Extract(country=country_df)
# province = extract.read_txt(province_path)
# money_feature = extract.read_txt(txt_path)
# address_filter = extract.read_txt(r'./data/filter/address_filter.txt')
# capacity_filter = extract.read_txt(r'./data/filter/capacity_filter.txt')
# entity_filter = extract.read_txt(r'./data/filter/entity_filter.txt')
# money_filter = extract.read_txt(r'./data/filter/money_filter.txt')
# project_filter = extract.read_txt(r'./data/filter/project_filter.txt')
# state_filter = extract.read_txt(r'./data/filter/state_filter.txt')
# state_no_words = extract.read_txt(r'./data/filter/state_no_words.txt')
# time_filter = extract.read_txt(r'./data/filter/time_filter.txt')
# address_pattern = extract.read_txt('./data/pattern/address_pattern.txt')
# capacity_pattern = extract.read_txt('./data/pattern/capacity_pattern.txt')
# money_pattern = extract.read_txt(r'./data/pattern/money_pattern.txt')
# state_pattern = extract.read_txt(r'./data/pattern/state_pattern.txt')
# time_pattern = extract.read_txt(r'./data/pattern/time_pattern.txt')
# global_sentences = extract.segment_para(para=df['内容'][i])
# money_results.append(extract.money_pattern(global_sentences=global_sentences, filter_list = money_filter, money_pattern = money_pattern, money_feature=money_feature))
# # project_results.append(extract.project_pattern())
# address_results.append(extract.address_pattern(global_sentences=global_sentences, filter_list = address_filter, address_pattern = address_pattern))
# capacity_results.append(extract.capacity_pattern(global_sentences=global_sentences, filter_list = capacity_filter, capacity_pattern = capacity_pattern))
# # print(df['内容'][475])
# jia, yi = extract.org_patterns(global_sentences = global_sentences, filter_list = entity_filter, province=province)
# org_results.append(jia)
# org_results1.append(yi)
# project_results1.append(extract.pro(global_sentences=global_sentences, title=df['标题'][i], objects = project_filter, filter_list=project_filter))
# country_results.append(extract.country_pattern(title=df['标题'][i], para=df['内容'][i]))
# state_results.append(extract.state(global_sentences=global_sentences, title=df['标题'][i], filter_list = state_filter, state_pattern = state_pattern, state_no_words= state_no_words))
# time_results.append(extract.time1(global_sentences=global_sentences, filter_list = time_filter, time_pattern = time_pattern))
# df['合同金额'] = money_results
# # df['项目名称1'] = project_results
# df['项目名称'] = project_results1
# df['项目位置'] = address_results
# df['项目产能'] = capacity_results
# df['国家'] = country_results
# df['企业识别甲方'] = org_results
# df['企业识别乙方'] = org_results1
# df['项目周期'] = time_results
# df['项目状态'] = state_results
# # extract.write_excel('./result/合同信息抽取.xlsx', df)
# df.to_excel('./result/合同信息抽取.xlsx', columns=['标题', '内容', '原文链接', '合同金额', '项目名称', '项目位置', '项目产能','国家', '企业识别甲方', '企业识别乙方', '项目周期', '项目状态'])
# # jpype._jclass.ArrayIndexOutOfBoundsException: java.lang.ArrayIndexOutOfBoundsException: 5777
# # nlp.close()
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/ner/extract.py
|
extract.py
|
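All of the per-field extractors in `Extract` share the same two-step shape: `segment_para` splits the raw text into sentence-like units, and `filter_para` keeps only the sentences that mention at least one trigger word from the corresponding `data/filter/*.txt` list before any regex or dependency-parsing work happens. A standalone sketch of that funnel, using a hypothetical trigger list (the production lists are read from the filter files):

```python
import re
import emoji

def segment_para(para):
    # Split on newlines and Chinese/ASCII sentence terminators, mirroring Extract.segment_para
    split_pattern = re.compile(r'\n|。|?|!|\?|\!')
    sentences = split_pattern.split(emoji.demojize(str(para)))
    return [s + '。' for s in sentences]

def filter_para(filter_list, sentence):
    # Keep the sentence only if it contains at least one trigger word, mirroring Extract.filter_para
    return sentence if any(word in sentence for word in filter_list) else ''

money_filter = ['总投资', '合同金额', '中标金额']  # assumed trigger words
text = '该项目总投资约12亿元人民币。施工周期为三年。'
candidates = [s for s in (filter_para(money_filter, s) for s in segment_para(text)) if s]
print(candidates)  # only these sentences are handed to the money-pattern matcher
```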
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/24 19:09
# @Author : 程婷婷
# @FileName: __init__.py
# @Software: PyCharm
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/ner/__init__.py
|
__init__.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/7/23 9:18
# @Author : 程婷婷
# @FileName: app.py
# @Software: PyCharm
# -*- coding: utf-8 -*-
import pandas as pd
from flask import Flask, g, render_template, flash, redirect, url_for, request, abort, session
from de_duplication.simhash_duplication import calculate_simhash_result
from de_duplication import process
import traceback
import json
DEBUG = False
PORT = 8018
HOST = '0.0.0.0'
# HOST = '127.0.0.1'
app = Flask(__name__)
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
@app.route('/simhash/', methods=('GET', 'POST'))
def get_prediction():
data = request.form.get('data')
if (data is None):
print('id或文章为空')
return ('id或文章为空')
try:
df = pd.DataFrame()
id, context = [], []
data = process.text_process(data)
print(data)
for index in data:
id.append(index[1])
context.append(index[0])
df['context'] = context
df['id'] = id
duplication_json = calculate_simhash_result(df)
except Exception:
return traceback.format_exc()
return duplication_json
# if __name__ == '__main__':
app.run(debug=DEBUG, host=HOST, port=PORT)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/de_duplication/app.py
|
app.py
|
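Both deduplication services take a single form field, `data`, holding raw (possibly HTML) text; `process.text_process` splits it into (sentence, paragraph-id) pairs before hashing, and the response is a JSON list of near-duplicate pairs. A hedged client sketch for the simhash endpoint above, assuming it runs locally on port 8018 as configured (the minhash variant in app1.py listens on 8019):

```python
import requests

html = '<p>青岛双星控股股东双星集团启动混改。</p><p>青岛双星控股股东双星集团正式启动混改。</p>'
resp = requests.post('http://127.0.0.1:8018/simhash/', data={'data': html})
print(resp.text)  # e.g. [{"ids": [...], "context1": "...", "context2": "..."}]
```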
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/14 15:37
# @Author : 程婷婷
# @FileName: text_sim.py
# @Software: PyCharm
from datasketch import MinHash
from zhon.hanzi import punctuation
import jieba
import pandas as pd
import json
Amazon_split = []
Google_split = []
b_split = []
def split_line(line):
# Tokenize a line: strip Chinese punctuation, then cut with jieba
table = str.maketrans('', '', punctuation)
wipe_line = line.translate(table)
split_line = jieba.lcut(wipe_line)
return split_line
# def read_txt(path='a.txt'):
# # 读入amazon数据集并分词,以列表保存原数据行和分词结果
# with open(path, encoding='utf-8') as Amazon:
# for line in Amazon.readlines():
# line = line.rstrip('\n')
# Amazon_split.append([line, split_line(line)])
# Estimate the Jaccard similarity of two token lists with MinHash
def calculate_jaccard(text1, text2):
minihash1, minihash2 = MinHash(), MinHash()
for word in text1:
minihash1.update(word.encode('utf-8'))
for word in text2:
minihash2.update(word.encode('utf-8'))
return minihash1.jaccard(minihash2)
def calculate_minhash_result(df):
duplication_list =[]
a_split = []
ids = []
for line in df['context']:
line = line.strip()
a_split.append([line, jieba.lcut(line)])
df['a_split'] = a_split
# Compare every pair of rows
for i in range(len(df['a_split'])):
start = i + 1
for j in range(start, len(df['a_split']), 1):
present_jaccard = calculate_jaccard(a_split[i][1], a_split[j][1])
if present_jaccard >= 0.7:
if ([int(df['id'][i]), int(df['id'][j])]) not in ids:
duplication_list.append({'ids': [int(df['id'][i]), int(df['id'][j])], 'context1': a_split[i][0], 'context2': a_split[j][0]})
ids.append([int(df['id'][i]), int(df['id'][j])])
return json.dumps(duplication_list, ensure_ascii=False)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/de_duplication/minhash_duplication.py
|
minhash_duplication.py
|
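`calculate_minhash_result` can also be used without the Flask wrapper: it expects a DataFrame with `context` and `id` columns and reports every pair whose MinHash estimate of Jaccard similarity is at least 0.7. A small, hedged offline sketch with made-up rows:

```python
import pandas as pd
from de_duplication.minhash_duplication import calculate_minhash_result

df = pd.DataFrame({
    'context': [
        '青岛双星控股股东双星集团启动混改',
        '青岛双星控股股东双星集团正式启动混改',
        '今天天气很好',
    ],
    'id': [1, 2, 3],
})
print(calculate_minhash_result(df))  # JSON list of the pairs above the 0.7 threshold
```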
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/7/23 9:18
# @Author : 程婷婷
# @FileName: app1.py
# @Software: PyCharm
# -*- coding: utf-8 -*-
import pandas as pd
from flask import Flask, g, render_template, flash, redirect, url_for, request, abort, session
from de_duplication.minhash_duplication import calculate_minhash_result
from de_duplication import process
import traceback
DEBUG = False
PORT = 8019
HOST = '0.0.0.0'
# HOST = '127.0.0.1'
app = Flask(__name__)
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
@app.route('/minhash/', methods=('GET', 'POST'))
def get_prediction():
data = request.form.get('data')
if (data is None):
print('id或文章为空')
return ('id或文章为空')
try:
df = pd.DataFrame()
id, context = [], []
data = process.text_process(data)
print(data)
for index in data:
id.append(index[1])
context.append(index[0])
df['context'] = context
df['id'] = id
duplication_json = calculate_minhash_result(df)
except Exception:
return traceback.format_exc()
return duplication_json
# if __name__ == '__main__':
app.run(debug=DEBUG, host=HOST, port=PORT)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/de_duplication/app1.py
|
app1.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/10/9 14:39
# @Author : 程婷婷
# @FileName: process.py
# @Software: PyCharm
import re
import pandas as pd
import json
import emoji
a = r'<h1>青岛双星控股股东双星集团</h1><p>响了青岛市属国有企业混改第一枪……10月9日,青岛双星<span style="font-size: 24px;">股价应声涨停,显示了市场对于这一举动的期待。</span></p><p><span style="font-size: 24px;">作为国资大省,山东省国有企业三年混改计划和青岛市国有企业改革正<span style="font-family: 隶书, SimLi; font-size: 24px;">步入深水区,双星集</span></span><span style="font-family: 隶书, SimLi;">团一级企业层面混改的启动,或掀起新一轮山东国企改革浪潮。值得注意的是,与此前的混改更多在二级、三级子公司层面相比,此次混改进一步深化,企业集团层面的混改成为国企改革攻坚重点合法权益得不到充分保护 ●由于国有企业和民营企业文化理念不同,双方混合后在管理方式、具体操作等方面存在矛盾,向现代企业制度转轨比较艰难 融合之路 ●省属企业新投资项目,原则上投资主体必须是现有混合所有制企业或新引进非国有资本合作企业 ●研究建立以资本收益为主的考核指标体系,支持混改企业按市场化原则进合法权益得不到充分保护 ●由于国有企业和民营企业文化理念不同,双方混合后在管理方式、具体操作等方面存在矛盾,向现代企业制度转轨比较艰难 融合之路 ●省属企业新投资项目,原则上投资主体必须是现有混合所有制企业或新引进非国有资本合作企业 ●研究建立以资本收益为主的考核指标体系,支持混改企业按市场化原则进</span>。打响了青岛市属国有企业混改第一枪。10月9日,青岛双星<span style="font-size: 24px;">股价应声涨停,显示了市场对于这一举动的期待。</p><h1>双星集团的混改实验</h1><p>省属企业新投资项目,原则上投资主体必须是现有混合所有制企业或新引进非国有资本合作企业</p>'
def filter_emoji(context):
# Filter emoji: demojize first, then replace other pictographic symbols with '。'
chars = ''
text = emoji.demojize(context)
for i in range(9636, 11217):
chars += chr(i)
chars = '[' + chars
chars = chars + ']'
rules = re.compile(chars)
text = rules.sub('。', text)
return text
def clean_tag(context):
rule = re.compile('</h[0-9]+>', re.S)
context = rule.sub('\n', context)
rule1 = re.compile('</p>', re.S)
context = rule1.sub('\n', context)
rules = re.compile('<[^>]+>', re.S)
text = rules.sub('', context)
text = filter_emoji(text)
text = text.split('\n')
data = []
for idx, line in enumerate(text):
data.append((line, idx))
return data
def split_sentence(tup):
index1 = tup[1]
context = tup[0]
context = re.sub('([。!?\?])([^”’])', r"\1\n\2", context) # single-character sentence terminators
context = re.sub('(\.{6})([^”’])', r"\1\n\2", context) # English ellipsis (......)
context = re.sub('(\…{2})([^”’])', r"\1\n\2", context) # Chinese ellipsis (……)
context = re.sub('([。!?\?][”’])([^,。!?\?])', r'\1\n\2', context)
# If a terminator is followed by a closing quote, the quote ends the sentence, so the break goes after the quote; the rules above keep the quotes intact.
para = context.rstrip() # drop any extra \n at the end of the paragraph
# Semicolons, dashes, English quotes and similar marks are deliberately ignored here; add analogous rules if needed.
sentences = para.split('\n')
data = []
for i in sentences:
data.append((i,index1))
return data
def text_process(context):
data = clean_tag(context)
text = []
for index in data:
text.extend(split_sentence(index))
# context = map(lambda x: split_sentence(x), df['text'])
return text
if __name__ == '__main__':
print(text_process(a))
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/de_duplication/process.py
|
process.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/14 18:13
# @Author : 程婷婷
# @FileName: text_sim2.py
# @Software: PyCharm
from simhash import Simhash
import pandas as pd
import json
from zhon.hanzi import punctuation
def simhash_similarity(text1, text2):
a_simhash = Simhash(text1)
b_simhash = Simhash(text2)
# Normalise by the fingerprint width (64 bits by default) rather than by len(bin(...)), which includes the '0b' prefix
max_hashbit = max(a_simhash.f, b_simhash.f)
# Hamming distance between the two fingerprints
distance = a_simhash.distance(b_simhash)
similar = 1 - distance/max_hashbit
return similar
# Strip Chinese punctuation from a line (the cleaned string is hashed directly by Simhash)
def split_line(line):
table = str.maketrans('','',punctuation)
wipe_line = line.translate(table)
return wipe_line
def calculate_simhash_result(df):
duplication_list =[]
ids = []
for i in range(len(df['context'])):
start = i+1
for j in range(start,len(df['context']),1):
con = split_line(df['context'][i])
con2 = split_line(df['context'][j])
similar = simhash_similarity(con, con2)
if similar >= 0.7:
if ([int(df['id'][i]), int(df['id'][j])]) not in ids:
duplication_list.append({'ids': [int(df['id'][i]), int(df['id'][j])], 'context1':df['context'][i], 'context2':df['context'][j]})
ids.append([int(df['id'][i]), int(df['id'][j])])
return json.dumps(duplication_list, ensure_ascii=False)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/de_duplication/simhash_duplication.py
|
simhash_duplication.py
|
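`simhash_similarity` turns the Hamming distance between two 64-bit Simhash fingerprints into a rough similarity score (1 - distance / fingerprint width), and `calculate_simhash_result` applies the same 0.7 threshold pairwise as the MinHash variant. A hedged direct-use sketch:

```python
from de_duplication.simhash_duplication import simhash_similarity

s1 = '青岛双星控股股东双星集团启动混改'
s2 = '青岛双星控股股东双星集团正式启动混改'
print(simhash_similarity(s1, s2))  # closer to 1.0 the fewer fingerprint bits differ
```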
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/24 19:09
# @Author : 程婷婷
# @FileName: __init__.py
# @Software: PyCharm
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/de_duplication/__init__.py
|
__init__.py
|
This is a simple package. You can use it to write your content.
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/zzsnML-1.0.1.dist-info/DESCRIPTION.rst
|
DESCRIPTION.rst
|
# -*- coding: utf-8 -*-
import jieba
import jieba.posseg as pseg
from relativeness_analysis.vocabulary import Vocabulary
from relativeness_analysis.classifier2 import xgboost
import xlrd, xlwt
import os, sys
import argparse
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
# import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
####################################### Parameters ########################################################
# 1. model path
file_path = os.getcwd()
model_path = os.path.join(file_path, 'classifier') # directory holding the trained models; avoid placing other files here to prevent mis-detection
vocab_path = os.path.join(file_path, 'vocab') # directory holding the vocabularies matching the models; avoid placing other files here to prevent mis-detection
####################################### Code ########################################################
def find_vocab(vocab_folder):
files = os.listdir(vocab_folder)
candidate_vocab = {}
for file in files:
tmp = '.'.join(file.split('.')[:-1]).split('-')
if len(tmp) == 3: # vocab-all-1491195238.voc
v, company, signature = tmp
if v == 'vocab':
if candidate_vocab.get(company, None) is None:
candidate_vocab[company] = dict()
candidate_vocab[company][signature] = file
return candidate_vocab
def find_clf(clf_folder):
files = os.listdir(clf_folder)
candidate_clf = {}
for file in files:
tmp = '.'.join(file.split('.')[:-1]).split('-') # # xgboost-all-tf-l1-l2-0.4-1491195238.clf
if len(tmp) == 7: # xgboost-all-tf-l1-l2-0.4-1491195238
c, company, transformer, penalty, norm, thres, signature = tmp
if c == 'xgboost':
if candidate_clf.get(company, None) is None:
candidate_clf[company] = dict()
candidate_clf[company][signature] = (file, transformer, penalty, norm, thres)
return candidate_clf
def match_and_load(candidate_model_file, candidate_vocab_file, model_folder, vocab_folder):
model = dict()
for company in candidate_model_file: # Based on model's key instead of vocab's
tmp_model = candidate_model_file[company]
tmp_vocab = candidate_vocab_file.get(company, None)
if tmp_vocab is not None:
for signature in set(tmp_model.keys()).intersection(tmp_vocab.keys()):
tmp = model.get(company, None)
if tmp is not None:
if int(signature) > int(model[company][0]): # a model created more recently
model[company] = (signature, tmp_model[signature][0], tmp_vocab[signature]) + tmp_model[signature][1:]
else:
model[company] = (signature, tmp_model[signature][0], tmp_vocab[signature]) + tmp_model[signature][1:]
loaded_model = dict()
for company in model:
signature, model_file_name, vocab_file_name, transformer, penalty, norm, thres = model[company]
clf = xgboost.load(os.path.join(model_folder, model_file_name))
clf.thres = float(thres)
vocab = Vocabulary.load(os.path.join(vocab_folder, vocab_file_name))
loaded_model[company] = (signature, clf, vocab, transformer, penalty, norm, thres)
return loaded_model
# # countvectorizer and tfidftransformer
# def create_transformer(model):
# transformer = dict()
# for company in model:
# cv = CountVectorizer(decode_error='replace', vocabulary=model[company][2].to_dict())
# use_idf = True if model[company][3].lower() == 'tfidf' else False
# tfidf = TfidfTransformer(norm=model[company][-2], use_idf=use_idf)
# transformer[company] = lambda data: tfidf.transform(cv.transform(data))
# return transformer
# Locate the trained model and vocabulary files
candidate_model_file = find_clf(model_path)
candidate_vocab_file = find_vocab(vocab_path)
if len(candidate_vocab_file) == 0 or len(candidate_model_file) == 0:
raise Exception(u'没有找到训练好的模型和词典文件!')
print(candidate_model_file, candidate_vocab_file)
model = match_and_load(candidate_model_file, candidate_vocab_file, model_path, vocab_path)
# transformer = create_transformer(model)
def read_file_for_eval(path, idx_dict):
xlrd.book.unpack_SST_table.__globals__["unicode"] = lambda s, e: unicode(s, e, errors="replace")
book = xlrd.open_workbook(path, encoding_override="utf-8")
sheet = book.sheet_by_index(0)
content_begin_with = idx_dict['content_begin_with']
article_col = idx_dict['article_col']
title_col = idx_dict['title_col']
topic_col = idx_dict['topic_col']
articles = sheet.col_values(article_col, start_rowx=content_begin_with)
titles = sheet.col_values(title_col, start_rowx=content_begin_with)
topics = sheet.col_values(topic_col, start_rowx=content_begin_with)
data = {}
for i, article in enumerate(articles):
if sys.version_info.major == 2:
topic = topics[i].encode('utf-8').strip()
data[i] = [titles[i].encode('utf-8').strip() + '。' + article.encode('utf-8').strip(), topic]
else:
topic = topics[i].strip()
data[i] = [titles[i].strip() + '。' + article.strip(), topic]
return data
def test(text, company):
# global count_vect, tf_transformer
if company not in model:
return '不支持的企业'
if text == '。':
return '删除'
# if choose_tag[company]:
# processed_text = ' '.join([w for w, flag in pseg.cut(text) if flag in \
# ['n', 'ns', 'nt', 'nz', 'nl', 'ng', 'v', 'vd', 'vn', 'vf', 'vx', \
# 'vi', 'vl', 'vg', 'a', 'ad', 'an', 'ag', 'al', 'd']])
# else:
processed_text = ' '.join([w for w in jieba.lcut(text)])
cv = CountVectorizer(decode_error='replace', vocabulary=model[company][2].to_dict())
tfidf_trans = TfidfTransformer(norm=model[company][-2], use_idf=False)
counts = cv.transform([processed_text])
tfidf = tfidf_trans.transform(counts)
if tfidf.size == 0:
return '删除'
thres = float(model[company][-1])
clf = model[company][1]
predicted_label = clf.predict(tfidf, return_real_label=True)[0]
return predicted_label
def main(file_path, _all=False, prefix='./'):
result_file = '.'.join(os.path.basename(file_path).split('.')[:-1]) + '.xls'
result_file = os.path.join(prefix, result_file)
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('result')
sheet.write(0, 0, "标题+内容")
sheet.write(0, 1, "企业")
sheet.write(0, 2, "相关性")
if _all:
sheet.write(0, 3, "备注:不区分企业")
else:
sheet.write(0, 3, "备注:区分企业")
idx_dict = {}
idx_dict['content_begin_with'] = 1 # first data row; row 0 holds the column headers
idx_dict['article_col'] = 1 # column index of the article body in the Excel file (0-based)
idx_dict['title_col'] = 0 # column index of the title (0-based)
idx_dict['topic_col'] = 4 # column index of the company (0-based)
data = read_file_for_eval(file_path, idx_dict)
for i in data:
text = data[i][0]
company = data[i][1]
if _all:
relevant = test(text, 'all')
else:
relevant = test(text, company)
sheet.write(i+1, 0, text)
sheet.write(i+1, 1, company)
sheet.write(i+1, 2, relevant)
workbook.save(result_file)
return result_file
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='判断语料与企业的相关性')
parser.add_argument('-file', type=str, required=True,
help='待判断excel文件路径')
parser.add_argument('-all', type=int, default=1,
help='是否区分企业,默认为不区分企业')
parser.add_argument('-prefix', type=str, default='./',
help='判断结果输出到哪个目录下,默认为当前目录')
args = parser.parse_args()
print(args.file, args.all, args.prefix)
main(args.file, _all=args.all, prefix=args.prefix)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/relevant_analysis.py
|
relevant_analysis.py
|
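Besides the batch entry point `main()`, the module-level `test(text, company)` can be called directly once matching `xgboost-<company>-...-<signature>.clf` and `vocab-<company>-<signature>.voc` files exist under `./classifier` and `./vocab` (importing the module raises otherwise). A hedged sketch; the company key `'all'` and the sample text are assumptions:

```python
from relativeness_analysis.relevant_analysis import test

# Returns a label from the model's label_dict, or '不支持的企业' for an unknown company,
# or '删除' when no vocabulary feature fires on the text.
print(test('青岛双星控股股东双星集团启动混改,打响了青岛市属国有企业混改第一枪。', 'all'))
```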
# -*- coding: utf-8 -*-
import pickle
import numpy as np
class Vocabulary(object):
def __init__(self, signature, min_word_len=2, name='voc'):
self.signature = signature
self.min_word_len = min_word_len
self.name = name
self.voc = dict()
self.freq = dict()
self.doc_freq = dict()
self.oov = None
self.size = 0
self._fixed_voc = False
def set_state(self, fixed=False):
assert fixed in [True, False, 0, 1]
self._fixed_voc = fixed
def get_state(self):
state = 'Fixed' if self._fixed_voc else 'Not fixed'
return state
def shuffle(self):
self.check_state()
idx = np.random.permutation(self.size)
shuffled_voc = dict()
shuffled_freq = dict()
shuffled_doc_freq = dict()
for key, id in self.voc.items():
shuffled_voc[key] = idx[id]
shuffled_freq[idx[id]] = self.freq[id]
shuffled_doc_freq[idx[id]] = self.doc_freq[id]
del self.voc, self.freq, self.doc_freq
self.voc, self.freq, self.doc_freq = shuffled_voc, shuffled_freq, shuffled_doc_freq
def _is_useless(self, x):
if len(x) < self.min_word_len:
return True
if x.strip('''#&$_%^*-+=<>`~!@(())??/\\[]{}—"';::;,。,.‘’“”|…\n abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890''') == '':
return True
return False
def update(self, words):
if self._fixed_voc:
raise Exception('Fixed vocabulary does not support update.')
for word in words:
if not self._is_useless(word):
id = self.voc.get(word, None)
if id is None: # new word
self.voc[word] = self.size
self.freq[self.size] = 1
self.doc_freq[self.size] = 0 # create doc_freq item
self.size += 1
else:
self.freq[id] += 1
for word in set(words):
if not self._is_useless(word):
id = self.voc.get(word, None)
if id is not None:
self.doc_freq[id] += 1 # update doc_freq
def get(self, word):
return self.voc.get(word, self.oov)
def __getitem__(self, word):
return self.voc.get(word, self.oov)
# def __setitem__(self, word, val):
# self.voc.__setitem__(word, val)
def __contains__(self, word):
return self.voc.__contains__(word)
def __iter__(self):
return iter(self.voc)
def __sizeof__(self):
return self.voc.__sizeof__() + self.freq.__sizeof__() + self.signature.__sizeof__() + self.size.__sizeof__() + \
self.name.__sizeof__() + self._fixed_voc.__sizeof__() + self.oov.__sizeof__() + self.doc_freq.__sizeof__()
def __delitem__(self, word): # delete would destory the inner representation
if self._fixed_voc:
raise Exception('Fixed vocabulary does not support deletion.')
else:
raise NotImplementedError
def get_size(self):
return self.size
def clear(self):
del self.voc, self.freq, self.doc_freq
self.voc = dict()
self.freq = dict()
self.doc_freq = dict()
self.size = 0
self._fixed_voc = False
def check_state(self):
return len(self.voc) == self.size and len(self.freq) == self.size and len(self.doc_freq) == self.size
def to_dict(self):
return self.voc
def set_signature(self, new_signature):
self.signature = new_signature
def save(self, file_name=None):
save_to = (file_name if file_name else self.name)+'-%s.voc'%self.signature
with open(save_to, 'wb') as f:
pickle.dump([self.voc, self.freq, self.doc_freq, self.size, self.min_word_len, \
self.oov, self._fixed_voc, self.name, self.signature], f)
@classmethod
def load(cls, file_name):
with open(file_name, 'rb') as f:
[voc, freq, doc_freq, size, min_word_len, oov, _fixed, name, signature] = pickle.load(f)
voc_from_file = cls(signature, min_word_len=min_word_len, name=name)
voc_from_file.voc = voc
voc_from_file.freq = freq
voc_from_file.doc_freq = doc_freq
voc_from_file.size = size
voc_from_file.min_word_len = min_word_len
voc_from_file.oov = oov
voc_from_file._fixed_voc = _fixed
voc_from_file.signature = signature
return voc_from_file
def test():
x = ['哈哈', '测试', '嘿', '嗨', '早上好', '哈哈', '嘿', '下午好', '测试', '你好', 'test', 'c', 'm']
voc = Vocabulary(signature=123, name='test', min_word_len=1)
voc.update(x)
print(voc.__class__.__name__)
print(voc.get('哈哈'), voc.get('测试'))
print(voc['早上好'], voc['c'])
print(voc.__sizeof__())
print('Voc size: %s' % voc.size)
print('`c` in voc: %s, `哈哈` in voc: %s' % ('c' in voc, '哈哈' in voc))
try:
del voc['a']
del voc['哈哈']
except Exception as e:
print(e)
voc.clear()
voc.update(x)
voc.update(x)
print(voc.voc)
print(voc.freq)
voc.save('voc_test.voc')
voc = Vocabulary.load('voc_test.voc')
print('Voc size: %s' % voc.size)
print(voc.voc)
print(voc.freq)
print(voc.doc_freq)
voc.shuffle()
print(voc.voc)
print(voc.freq)
print(voc.doc_freq)
if __name__ == '__main__':
test()
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/vocabulary.py
|
vocabulary.py
|
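`Vocabulary` is consumed through `to_dict()`: both `utils.data_processor.transform` and `relevant_analysis.test` pass the word-to-id mapping straight to scikit-learn's `CountVectorizer`. A minimal hedged sketch of that hand-off on a tiny made-up corpus:

```python
import jieba
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from relativeness_analysis.vocabulary import Vocabulary

corpus = ['青岛双星启动混改', '双星集团股价涨停']
voc = Vocabulary(signature=0, min_word_len=1, name='demo')
for doc in corpus:
    voc.update(jieba.lcut(doc))

cv = CountVectorizer(decode_error='replace', vocabulary=voc.to_dict())
counts = cv.transform([' '.join(jieba.lcut(doc)) for doc in corpus])
tfidf = TfidfTransformer(norm='l2', use_idf=False).fit_transform(counts)
print(tfidf.shape)  # (2, vocabulary size)
```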
# -*- coding: utf-8 -*-
from flask import Flask, g, render_template, flash, redirect, url_for, request, abort, session
import os
from relativeness_analysis.relevant_analysis import main, test
from relativeness_analysis.manager import test as train_test
import warnings
warnings.filterwarnings('ignore')
DEBUG = False
PORT = 8006
HOST = '0.0.0.0'
app = Flask(__name__)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.secret_key = 'skfasmknfdhflm-vkllsbzdfmkqo3ooishdhzo295949mfw,fk'
# APP_ROOT = os.path.abspath('.')
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
@app.route('/api/', methods=('GET', 'POST'))
def get_result():
# title = request.args.get('title', '')
# content = request.args.get('content', '')
# company = request.args.get('company', '')
# if title == '' and content == '':
# return '-2'
# _content = title + '。' + content
# # print(_content)
# relevant = test(_content, company)
# return relevant
file_path = request.form.get('file_path', None)
_all = request.form.get('_all', True)
prefix = request.form.get('prefix', './')
if file_path is None:
return '必须给定输入文件!'
if type(_all) == str:
_all = _all.lower()
if _all == 'false':
_all = False
elif _all == 'true':
_all = True
else:
return '_all参数错误,只能取值True或者False。'
print(file_path, _all, prefix)
result_file = main(file_path, _all=_all, prefix=prefix)
return result_file
@app.route('/api2/', methods=('GET', 'POST'))
def get_single_result():
title = request.form['title']
print(title)
content = request.form['content']
company = request.form['company']
if title == '' and content == '':
return '-2'
_content = title + '。' + content
# print(_content)
relevant = test(_content, company)
return relevant
@app.route('/train/', methods=('GET', 'POST'))
def train():
connection_string = request.form['connection_string']
begin_date = request.form['begin_date']
end_date = request.form['end_date']
try:
if (connection_string is None) and (begin_date is None) and (end_date is None):
print(r'正在使用默认参数训练模型,connection_string为cis/[email protected]:1521/orcl, begin_date为2017-03-01, end_date为2017-07-13')
train_test()
elif (connection_string == '') and (begin_date == '') and (end_date == ''):
print(r'正在使用默认参数训练模型,connection_string为cis/[email protected]:1521/orcl, begin_date为2017-03-01, end_date为2017-07-13')
train_test()
else:
print(r'正在使用指定参数训练模型,connection_string为%s, begin_date为%s, end_date为%s' % (connection_string, begin_date, end_date))
train_test(connection_string, begin_date, end_date)
except Exception as e:
return 'train fail'
else:
return 'train success'
app.run(debug=DEBUG, host=HOST, port=PORT)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/app.py
|
app.py
|
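The `/api2/` route above classifies a single article from three form fields and returns the predicted label as plain text; `/api/` does the same for a whole Excel file given its path, and `/train/` retrains the models. A hedged client sketch for the single-article route, assuming the service listens on port 8006 as configured and that a model for the company key `'all'` exists:

```python
import requests

resp = requests.post(
    'http://127.0.0.1:8006/api2/',
    data={
        'title': '青岛双星控股股东双星集团启动混改',
        'content': '打响了青岛市属国有企业混改第一枪,股价应声涨停。',
        'company': 'all',
    },
)
print(resp.text)  # e.g. 保留 / 删除, or an error string such as 不支持的企业
```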
# -*- encoding: utf-8 -*-
import numpy as np
import jieba
import xlrd
import sys, time
import pickle
from relativeness_analysis.vocabulary import Vocabulary
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_selection import SelectFpr, mutual_info_classif, SelectPercentile
import scipy.linalg
import scipy.sparse
from sklearn.base import BaseEstimator, TransformerMixin
class data_processor(object):
def __init__(self, data, transformer='tf', transformer_norm='l2'):
self.data = data
transformer = transformer.lower()
assert transformer in ['tf', 'tfidf']
self.transformer_type = transformer
self.transformer_norm = transformer_norm
self.transformer = None
# if not self.for_test:
# if vocab is not None:
# if type(vocab) == Vocabulary:
# self.vocab = vocab
# self.vocab.set_state(fixed=False)
# else:
# raise Exception('`vocab` should be of type `Vocabulary`.')
# else:
# self.vocab = Vocabulary(signature=int(time.time()), name='vocab')
def reset(self):
self.transformer = None
self.cv = None
def preprocess(self, _all=False, _emotion=False):
processed_data = {}
processed_label = {}
processed_label_dict = {}
label_set = ['保留', '删除']
label_dict = {0: '保留', 1: '删除'}
reverse_label_dict = {'保留': 0, '删除': 1}
# only_have_one_label_key = []
if _all:
if not _emotion: # _all=True, _emotion=False
processed_data['all'] = []
processed_label['all'] = []
processed_label_dict['all'] = label_dict
for key in self.data:
# processed_data['all'] += [' '.join(jieba.lcut(record[0])) for record in data[key]]
if len(processed_data.get('all')) == 0:
processed_data['all'] = np.array([' '.join(jieba.lcut(record[0])) for record in self.data[key]])
else:
processed_data['all'] = np.hstack((processed_data['all'], [' '.join(jieba.lcut(record[0])) for record in self.data[key]]))
label = [record[1] for record in self.data[key]]
if len(processed_label.get('all')) == 0:
processed_label['all'] = np.array([reverse_label_dict[l] for l in label])
else:
processed_label['all'] = np.hstack((processed_label['all'], [reverse_label_dict[l] for l in label]))
# processed_label['all'] += [reverse_label_dict[l] for l in label]
else: # _all=True, _emotion=True
processed_data['all-非负'] = []
processed_data['all-负'] = []
processed_label['all-非负'] = []
processed_label['all-负'] = []
processed_label_dict['all-非负'] = processed_label_dict['all-负'] = label_dict
for key in self.data:
if len(processed_data.get('all-非负')) == 0:
processed_data['all-非负'] = np.array([' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='非负'])
processed_label['all-非负'] = np.array([reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='非负'])
else:
processed_data['all-非负'] = np.hstack((processed_data['all-非负'], \
[' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='非负']))
processed_label['all-非负'] = np.hstack((processed_label['all-非负'], \
[reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='非负']))
if len(processed_data.get('all-负')) == 0:
processed_data['all-负'] = np.array([' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='负'])
processed_label['all-负'] = np.array([reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='负'])
else:
processed_data['all-负'] = np.hstack((processed_data['all-负'], \
[' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='负']))
processed_label['all-负'] = np.hstack((processed_label['all-负'], \
[reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='负']))
else:
for key in self.data:
if not _emotion: # _all=False, _emotion=False
processed_data[key] = [' '.join(jieba.lcut(record[0])) for record in self.data[key]]
label = [record[1] for record in self.data[key]]
# if len(set(label_set) - set(label)) != 0:
# print('%s: Only have one label(%s)' % (key, label[0]))
# only_have_one_label_key.append(key)
# assert len(set(label_set) - set(label)) == 0, 'It should have exactly two classes.'
# label_dict = {}
# reverse_label_dict = {}
# for i, k in enumerate(label_set):
# label_dict[i] = k
# reverse_label_dict[k] = i
# processed_label[key] = [reverse_label_dict[l] for l in label]
processed_label[key] = np.array([reverse_label_dict[l] for l in label])
processed_label_dict[key] = label_dict
processed_data[key] = np.array(processed_data[key])
else: # _all=False, _emotion=True
processed_data[key+'-非负'] = np.array([' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='非负'])
processed_data[key+'-负'] = np.array([' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='负'])
processed_label[key+'-非负'] = np.array([reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='非负'])
processed_label[key+'-负'] = np.array([reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='负'])
processed_label_dict[key+'-非负'] = label_dict
processed_label_dict[key+'-负'] = label_dict
# processed_data[key] = processed_data[key]
return processed_data, processed_label, processed_label_dict
def update_vocab(self, vocab, processed_data):
if type(processed_data) == dict:
for key in processed_data:
for record in processed_data[key]:
vocab.update(record.split(' '))
else:
for record in processed_data:
vocab.update(record.split(' '))
assert vocab.check_state(), 'Something wrong with vocabulary.'
def transform(self, vocab, data, label, with_feature_selection=False, feature_selection_method='FDA', binary=False):
vocab.set_state(fixed=True)
assert feature_selection_method in ['FDA', 'SelectPercentile']
if not self.transformer:
self.cv = CountVectorizer(decode_error='replace', vocabulary=vocab.to_dict(), binary=binary)
if self.transformer_type == 'tf':
self.transformer = TfidfTransformer(norm=self.transformer_norm, use_idf=False)
else:
self.transformer = TfidfTransformer(norm=self.transformer_norm, use_idf=True)
if type(data) == dict:
transformed_data = {}
for key in data:
if with_feature_selection:
if feature_selection_method == 'FDA':
transformed_data[key] = FDA().fit_transform(
self.transformer.transform(self.cv.transform(data[key])), label[key]
)
else:
transformed_data[key] = SelectPercentile(mutual_info_classif, 20).fit_transform(
self.transformer.transform(self.cv.transform(data[key])), label[key]
)
else:
transformed_data[key] = self.transformer.transform(self.cv.transform(data[key]))
else:
if with_feature_selection:
if feature_selection_method == 'FDA':
transformed_data = FDA().fit_transform(
self.transformer.transform(self.cv.transform(data)), label
)
else:
transformed_data = SelectPercentile(mutual_info_classif, 20).fit_transform(
self.transformer.transform(self.cv.transform(data)), label
)
else:
transformed_data = self.transformer.transform(self.cv.transform(data))
return transformed_data
class FDA(BaseEstimator, TransformerMixin):
def __init__(self, alpha=1e-4):
'''Fisher discriminant analysis
Arguments:
----------
alpha : float
Regularization parameter
'''
self.alpha = alpha
def fit(self, X, Y):
'''Fit the LDA model
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data
Y : array-like, shape [n_samples]
Training labels
Returns
-------
self : object
'''
n, d_orig = X.shape
classes = np.unique(Y)
assert(len(Y) == n)
if isinstance(X, scipy.sparse.csr.csr_matrix):
mean_global = X.mean(axis=0)
else:
mean_global = np.mean(X, axis=0, keepdims=True)
scatter_within = self.alpha * np.eye(d_orig)
scatter_between = np.zeros_like(scatter_within)
for c in classes:
n_c = np.sum(Y==c)
if n_c < 2:
continue
if isinstance(X, scipy.sparse.csr.csr_matrix):
mu_diff = X[Y==c].mean(axis=0) - mean_global
else:
mu_diff = np.mean(X[Y==c], axis=0, keepdims=True) - mean_global
scatter_between = scatter_between + n_c * np.dot(mu_diff.T, mu_diff)
if isinstance(X, scipy.sparse.csr.csr_matrix):
scatter_within = scatter_within + n_c * np.cov(X[Y==c].todense(), rowvar=0)
else:
scatter_within = scatter_within + n_c * np.cov(X[Y==c], rowvar=0)
e_vals, e_vecs = scipy.linalg.eig(scatter_between, scatter_within)
self.e_vals_ = e_vals
self.e_vecs_ = e_vecs
self.components_ = e_vecs.T
return self
def transform(self, X):
'''Transform data by FDA
Parameters
----------
X : array-like, shape [n_samples, n_features]
Data to be transformed
Returns
-------
X_new : array, shape (n_samples, n_atoms)
'''
return X.dot(self.components_.T)
def fit_transform(self, X, Y):
self.fit(X, Y)
return self.transform(X)
def test():
# file_path = 'D:\\学习\\研究生\\文本挖掘项目\\舆情正负面判别\\舆情标引信息-20170104.xlsx'
file_path = 'test.xlsx'
idx_dict = {}
idx_dict['content_begin_with'] = 1
idx_dict['article_col'] = 1 # column index of the article body in the Excel file (0-based)
idx_dict['title_col'] = 0 # column index of the title (0-based)
idx_dict['relativeness_col'] = 5 # column index of the relevance label (0-based)
idx_dict['topic_col'] = 4 # column index of the company (0-based)
vocab = Vocabulary(signature=123, name='test', min_word_len=2)
# NOTE: data_processor now expects the raw {key: [(text, label, ...), ...]} data dict rather than a file path; this legacy call is kept as-is.
dp = data_processor(file_path, config=idx_dict, for_test=False)
processed_data, processed_label, processed_label_dict = dp.preprocess(_all=True)
dp.update_vocab(vocab, processed_data)
print(vocab.get_size())
for i, word in enumerate(vocab):
if i < 20:
id = vocab[word]
print('[%s] id: %s, freq: %s, doc_freq: %s' % (word, id, vocab.freq[id], vocab.doc_freq[id]))
else:
break
vocab.save('vocab')
transformed_data = dp.transform(vocab, processed_data['all'], processed_label['all'])
print(transformed_data.shape)
if __name__ == '__main__':
test()
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/utils.py
|
utils.py
|
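For reference, `FDA.fit` in utils.py builds a regularized within-class scatter and a between-class scatter and hands them to `scipy.linalg.eig` as a generalized eigenproblem; in the notation of the code, with class counts n_c, class means mu_c and global mean mu:

```latex
S_W = \alpha I + \sum_{c} n_c\,\operatorname{Cov}(X_c), \qquad
S_B = \sum_{c} n_c\,(\mu_c - \mu)(\mu_c - \mu)^{\top}, \qquad
S_B v = \lambda S_W v
```

`transform` then projects the data onto the generalized eigenvectors, returning X V, where the columns of V are the eigenvectors (stored transposed in `components_`).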
# -*- coding: utf-8 -*-
from __future__ import print_function
import xlrd
import numpy as np
import scipy.sparse.csr
import scipy.sparse.csc
import pickle
# from gensim import models
import sys, os
from relativeness_analysis.utils import *
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from xgboost import XGBClassifier
class LogisticRegression(object):
def __init__(self, label_dict, signature, learning_rate='optimal', penalty='l1', alpha=1e-3, eta0=0.0, class_weight='balanced', thres=0.5):
self.label_dict = label_dict
self.signature = signature
self.lr = learning_rate
self.penalty = penalty
self.alpha = alpha
self.eta0 = eta0
self.class_weight = class_weight
self.thres = thres
self.loss = 'log'
self.clf = None
def set_signature(self, new_signature):
self.signature = new_signature
@staticmethod
def train_test_split(X, Y, train_ratio=0.8):
if not (isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray)):
X = np.array(X, copy=False)
N = X.shape[0]
N_train = int(N*train_ratio)
N_test = N - N_train
assert N_train > 0 and N_test > 0, '训练集或测试集必须至少有一个样本'
idx = np.random.permutation(N)
return (X[idx[:N_train]], Y[idx[:N_train]]), (X[idx[N_train:]], Y[idx[N_train:]])
def train(self, X, Y, save_to=None, initial_coef=None, initial_intercept=None, verbose=False):
assert len(self.label_dict) == 2, 'It should have exactly two classes.'
if isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray):
data = X
else:
data = np.array(X, copy=False)
if isinstance(Y, scipy.sparse.csr.csr_matrix):
label = Y.todense()
else:
label = np.array(Y, copy=False)
if len(np.unique(label)) == 1:
print('Only contains one label, training stopped.')
return
# print('Training...')
sgd = SGDClassifier(loss=self.loss, penalty=self.penalty, alpha=self.alpha, class_weight=self.class_weight, \
learning_rate=self.lr, eta0=self.eta0, verbose=verbose)
if initial_coef is not None and initial_intercept is not None:
self.clf = sgd.fit(data, label, coef_init=initial_coef, intercept_init=initial_intercept)
else:
self.clf = sgd.fit(data, label)
# print('Finished.')
if save_to:
# print('Saving model...')
self.save(save_to)
def save(self, save_to):
file_name = save_to + ('-%s.lr' % self.signature)
with open(file_name, 'wb') as f:
pickle.dump((self.clf, self.label_dict, self.signature), f)
@staticmethod
def load(file_path):
with open(file_path, 'rb') as f:
clf, label_dict, signature = pickle.load(f)
lr = LogisticRegression(label_dict, signature)
lr.clf = clf
return lr
def predict(self, X, return_real_label=False):
if not (isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray)):
X = np.array(X, copy=False)
if self.clf and X.shape[0] > 0:
if len(X.shape) == 1:
X = [X]
prob = self.clf.predict_proba(X)
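            # prob[:, 0] is the predicted probability of class 0; rows at or above self.thres get label 0, the rest label 1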
label = np.ones((prob.shape[0],))
label[prob[:,0] >= self.thres] = 0
if return_real_label:
return [self.label_dict[l] for l in label]
else:
return label
else:
if not self.clf:
print('模型还没训练,请先训练模型')
else:
print('数据不能为空')
def predict_proba(self, X):
if not (isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray)):
X = np.array(X, copy=False)
if self.clf and X.shape[0] > 0:
if len(X.shape) == 1:
X = [X]
prob = self.clf.predict_proba(X)
return prob
else:
if not self.clf:
print('模型还没训练,请先训练模型')
else:
print('数据不能为空')
def report(self, X, Y, verbose=True):
if not(isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray)):
X = np.array(X, copy=False)
if isinstance(Y, scipy.sparse.csr.csr_matrix):
Y = Y.todense()
else:
Y = np.array(Y, copy=False)
N = X.shape[0]
assert len(Y) == N
if not self.clf:
print('模型还没训练,请先训练模型')
return
else:
predicted_Y = self.predict(X)
score = self.compute_score(Y, predicted_Y)
recall = score['recall']
precision = score['precision']
F1 = score['F1']
if verbose:
for i in range(N):
print('\tData id@%d, real label: %s, predicted label: %s' % \
(i, self.label_dict[Y[i]], self.label_dict[predicted_Y[i]]))
print('Correct rate: %s' % (np.mean(predicted_Y == Y)))
for key in self.label_dict:
print('Article num of label %s on training dataset: %s, recall: %.3f, precision: %.3f, F1: %.3f' % \
(self.label_dict[key], np.sum(Y == key), recall[key], precision[key], F1[key]))
def compute_score(self, Y, predicted_Y):
recall = {}
precision = {}
F1 = {}
if isinstance(Y, scipy.sparse.csr.csr_matrix):
Y = Y.todense()
else:
Y = np.array(Y, copy=False)
if isinstance(predicted_Y, scipy.sparse.csr.csr_matrix):
predicted_Y = predicted_Y.todense()
else:
predicted_Y = np.array(predicted_Y, copy=False)
for key in self.label_dict:
N_key = np.sum(Y == key)
if N_key == 0:
recall[key] = 1.0
else:
recall[key] = np.sum((Y == key)*(predicted_Y == key))/(N_key+0.0)
N_predicted_pos = np.sum(predicted_Y == key)
if N_predicted_pos == 0:
precision[key] = 1.0
else:
precision[key] = np.sum((Y == key)*(predicted_Y == key))/(N_predicted_pos+0.0)
            if recall[key] + precision[key] > 0:
                F1[key] = 2*recall[key]*precision[key]/(recall[key]+precision[key])
            else:
                F1[key] = 0.0
return {'recall': recall, 'precision': precision, 'F1': F1}
class xgboost(object):
def __init__(self, label_dict, signature, lr=0.1, reg_alpha=0, reg_lambda=1, objective='binary:logitraw', \
with_sample_weight=True, subsample=1, min_child_weight=1, scale_pos_weight=1, thres=0.5):
self.lr = lr
self.label_dict = label_dict
self.signature = signature
self.reg_alpha = reg_alpha
self.reg_lambda = reg_lambda
self.objective = objective
self.with_sample_weight = with_sample_weight
self.min_child_weight = min_child_weight
self.scale_pos_weight = scale_pos_weight
self.thres = thres
self.clf = None
def set_signature(self, new_signature):
self.signature = new_signature
def train(self, X, Y, save_to=None):
assert len(self.label_dict) == 2, 'It should have exactly two classes.'
if isinstance(X, scipy.sparse.csr.csr_matrix):
data = X.tocsc()
elif isinstance(X, np.ndarray):
data = X
else:
data = np.array(X, copy=False)
if isinstance(Y, scipy.sparse.csr.csr_matrix):
label = Y.todense()
else:
label = np.array(Y, copy=False)
if len(np.unique(label)) == 1:
print('Only contains one label, training stopped.')
return
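        # Balanced class weights: each class contributes the same total weight regardless of how many samples it has.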
N_0 = np.sum(label == 0)
N_1 = np.sum(label == 1)
w_0 = (N_0 + N_1) / (2. * N_0)
w_1 = (N_0 + N_1) / (2. * N_1)
# w_0 = w_0 * 1.3
# w_1 = w_1 / 1.1
# print(w_0, w_1)
# print('Training...')
self.clf = XGBClassifier(reg_alpha=self.reg_alpha, reg_lambda=self.reg_lambda, objective=self.objective, \
min_child_weight=self.min_child_weight, scale_pos_weight=self.scale_pos_weight, learning_rate=self.lr)
if self.with_sample_weight:
self.clf.fit(data, label, sample_weight=[w_0 if l == 0 else w_1 for l in label])
else:
self.clf.fit(data, label)
# print('Finished.')
if save_to:
# print('Saving model...')
self.save(save_to)
def save(self, save_to):
file_name = save_to + ('-%s.xgb' % self.signature)
with open(file_name, 'wb') as f:
pickle.dump((self.clf, self.label_dict, self.signature), f)
@staticmethod
def load(file_path):
with open(file_path, 'rb') as f:
clf, label_dict, signature = pickle.load(f)
xgb = xgboost(label_dict, signature)
xgb.clf = clf
return xgb
def predict(self, X, return_real_label=False):
prob = self.predict_proba(X)
label = np.ones((prob.shape[0],))
label[prob[:,0] >= self.thres] = 0
if return_real_label:
return [self.label_dict[l] for l in label]
else:
return label
def predict_proba(self, X):
if not (isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray) or isinstance(X, scipy.sparse.csc.csc_matrix)):
X = np.array(X, copy=False)
if isinstance(X, scipy.sparse.csr.csr_matrix):
X = X.tocsc()
if self.clf and X.shape[0] > 0:
if len(X.shape) == 1:
X = [X]
prob = self.clf.predict_proba(X)
return prob
else:
if not self.clf:
print('模型还没训练,请先训练模型')
else:
print('数据不能为空')
def report(self, X, Y, verbose=True):
if isinstance(Y, scipy.sparse.csr.csr_matrix):
Y = Y.todense()
else:
Y = np.array(Y, copy=False)
N = X.shape[0]
assert len(Y) == N
if not self.clf:
print('模型还没训练,请先训练模型')
return
else:
predicted_Y = self.predict(X)
score = self.compute_score(Y, predicted_Y)
recall = score['recall']
precision = score['precision']
F1 = score['F1']
if verbose:
for i in range(N):
print('\tData id@%d, real label: %s, predicted label: %s' % \
(i, self.label_dict[Y[i]], self.label_dict[predicted_Y[i]]))
print('Correct rate: %s' % (np.mean(predicted_Y == Y)))
for key in self.label_dict:
print('Article num of label %s on training dataset: %s, recall: %.3f, precision: %.3f, F1: %.3f' % \
(self.label_dict[key], np.sum(Y == key), recall[key], precision[key], F1[key]))
@staticmethod
def train_test_split(X, Y, train_ratio=0.8):
if not (isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray) or isinstance(X, scipy.sparse.csc.csc_matrix)):
X = np.array(X, copy=False)
N = X.shape[0]
N_train = int(N*train_ratio)
N_test = N - N_train
assert N_train > 0 and N_test > 0, '训练集或测试集必须至少有一个样本'
idx = np.random.permutation(N)
return (X[idx[:N_train]], Y[idx[:N_train]]), (X[idx[N_train:]], Y[idx[N_train:]])
def compute_score(self, Y, predicted_Y):
recall = {}
precision = {}
F1 = {}
if isinstance(Y, scipy.sparse.csr.csr_matrix):
Y = Y.todense()
else:
Y = np.array(Y, copy=False)
if isinstance(predicted_Y, scipy.sparse.csr.csr_matrix):
predicted_Y = predicted_Y.todense()
else:
predicted_Y = np.array(predicted_Y, copy=False)
for key in self.label_dict:
N_key = np.sum(Y == key)
if N_key == 0:
recall[key] = 1.0
else:
recall[key] = np.sum((Y == key)*(predicted_Y == key))/(N_key+0.0)
N_predicted_pos = np.sum(predicted_Y == key)
if N_predicted_pos == 0:
precision[key] = 1.0
else:
precision[key] = np.sum((Y == key)*(predicted_Y == key))/(N_predicted_pos+0.0)
            if recall[key] + precision[key] > 0:
                F1[key] = 2*recall[key]*precision[key]/(recall[key]+precision[key])
            else:
                F1[key] = 0.0
return {'recall': recall, 'precision': precision, 'F1': F1}
def test():
# file_path = 'D:\\学习\\研究生\\文本挖掘项目\\舆情正负面判别\\舆情标引信息-20170104.xlsx'
file_path = 'test.xlsx'
idx_dict = {}
idx_dict['content_begin_with'] = 1
    idx_dict['article_col'] = 1 # which column of the Excel file holds the article body (0-based)
    idx_dict['title_col'] = 0 # which column holds the title (0-based)
    idx_dict['relativeness_col'] = 5 # which column holds the relevance label (0-based)
    idx_dict['topic_col'] = 4 # which column holds the company (0-based)
vocab = Vocabulary(signature=123, name='vocab', min_word_len=2)
dp = data_processor(file_path, config=idx_dict, for_test=False, transformer_norm='l2')
processed_data, processed_label, processed_label_dict = dp.preprocess(_all=True)
dp.update_vocab(vocab, processed_data)
# # shuffle the vocabulary, this does not affect the results that much
# dp.vocab.shuffle()
transformed_data = dp.transform(vocab, processed_data)
vocab.save('vocab')
LR = LogisticRegression(label_dict=processed_label_dict['all'], signature=vocab.signature, thres=0.4)
(X_train, Y_train), (X_test, Y_test) = LR.train_test_split(transformed_data['all'], processed_label['all'], train_ratio=0.8)
LR.train(X_train, Y_train, save_to='test_clf')
print('On training dataset:')
LR.report(X_train, Y_train)
print('On test dataset:')
LR.report(X_test, Y_test)
Y_test_predicted = LR.predict(X_test)
print(LR.compute_score(Y_test, Y_test_predicted))
print(LR.clf.coef_.shape, LR.clf.intercept_.shape)
if __name__ == '__main__':
test()
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/classifier2.py
|
classifier2.py
|
# -*- coding: utf-8 -*-
from relativeness_analysis.vocabulary import Vocabulary
from relativeness_analysis.classifier2 import xgboost
from relativeness_analysis.utils import data_processor
import time, os
import numpy as np
# import pandas as pd
import cx_Oracle
import pickle
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
class TrainManager(object):
def __init__(self):
self.signature = int(time.time())
def read_sql(self, sql, con):
# print('Fetching data from remote sql...')
# raw_data = pd.read_sql_query(sql, con)
# with open('data.pd', 'wb') as f:
# pickle.dump(raw_data, f)
# raw_data.to_excel('raw_data.xlsx')
# data = {}
# for record in raw_data.iterrows:
# company = record['tid'].strip()
# article = record['content_no_tag'].strip()
# title = record['title'].strip()
# relevant = record['relevance'].strip()
# emotion = '非负' # record['emotion'].strip()
# data[company] = data.get(company, []) + [(title+'。'+article, relevant, emotion)]
# conn.close()
# return data
cursor = con.cursor()
cursor.execute(sql)
data = {}
def convert(col):
if isinstance(col, cx_Oracle.LOB):
return col.read().decode('utf-8')
else:
return col
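        # Each cursor row is expected as (title, content_no_tag, tid, relevance); LOB columns are decoded to UTF-8 strings.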
for record in cursor:
company = convert(record[2])
title = convert(record[0])
article = convert(record[1])
relevant = convert(record[3])
if article is None:
continue
else:
if relevant is None:
relevant = 1
if title is not None:
title = title.strip()
else:
title = ''
article = article.strip()
relevant = '保留' if relevant == 0 else '删除'
emotion = '非负'
data[company] = data.get(company, []) + [(title+'。'+article, relevant, emotion)]
con.close()
return data
def make_dirs(self, path):
dir_path = os.path.join(os.getcwd(), path)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
def train(self, sql, con, _all=False, _emotion=False, config=None, transformer='tf', transformer_norm='l2', save_to_folder=None, \
lr=0.1, reg_alpha=0, reg_lambda=1, objective='binary:logitraw', with_sample_weight=True, subsample=1, \
min_child_weight=1, scale_pos_weight=1, thres=0.5, train_ratio=0.8):
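        # Per-company pipeline: fetch labelled articles from the database, build a company-specific vocabulary,
        # vectorize the texts, then train and evaluate one xgboost classifier per company.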
print('Fetching data from remote SQL...')
data = self.read_sql(sql, con)
print('Done!')
dp = data_processor(data, transformer=transformer, transformer_norm=transformer_norm)
processed_data, processed_label, processed_label_dict = dp.preprocess(_all=_all, _emotion=_emotion)
# print(processed_label)
for company in processed_data:
if len(processed_label[company]) == 0:
print('%s 没有数据!跳过该类!' % company)
continue
try:
dp.reset()
vocab = Vocabulary(signature=self.signature, name='vocab-%s'%company, min_word_len=2)
dp.update_vocab(vocab, processed_data[company])
print('%s, after updating, %s' % (company, vocab.get_size()))
transformed_data = dp.transform(vocab, processed_data[company], processed_label[company])
self.make_dirs(save_to_folder)
vocab_save_to = os.path.join(save_to_folder, 'vocab-%s' % company)
vocab.save(vocab_save_to) # vocab-all-1491195238.voc
xgb = xgboost(processed_label_dict[company], self.signature, lr=lr, reg_alpha=reg_alpha, reg_lambda=reg_lambda, \
objective=objective, with_sample_weight=with_sample_weight, subsample=subsample, thres=thres,\
min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight)
(X_train, Y_train), (X_test, Y_test) = xgb.train_test_split(transformed_data, processed_label[company], train_ratio=train_ratio)
print('Training on %s' % company)
if reg_alpha > 0 and reg_lambda > 0:
penalty = 'l1+l2'
elif reg_alpha > 0:
penalty = 'l1'
elif reg_lambda > 0:
penalty = 'l2'
else:
penalty = 'None'
# xgboost-all-tf-l1+l2-l2-0.5-1496718804.xgb
clf_save_to = os.path.join(save_to_folder, 'xgboost-%s-%s-%s-%s-%s' % (company, transformer, penalty, transformer_norm, thres))
xgb.train(X_train, Y_train, save_to=clf_save_to)
print('On training dataset:')
xgb.report(X_train, Y_train, verbose=False)
print('On test dataset:')
xgb.report(X_test, Y_test, verbose=False)
except Exception as e:
print(e)
# raise e
def test(connection_string = 'cis/[email protected]:1521/orcl', begin_date = '2017-03-01', end_date = '2017-07-13'):
'''
    begin_date: start date
    end_date: end date
'''
# company list
# company_list = ['3745', '3089', '3748', '2783', '3440']
company_list = ['3745,3089,3748,2783,3440', '3741,3420,3319']
    # model parameters
    save_to_folder = './tmp' # directory for the training outputs (classifiers and vocabularies)
    _all = False # whether to train without distinguishing companies; True means do not distinguish
    _emotion = False # whether to train separately for positive/negative sentiment; True means distinguish
thres = 0.5
lr = 0.1
reg_alpha = 0
reg_lambda = 1
objective = 'binary:logitraw'
with_sample_weight = True
subsample = 1
min_child_weight = 1
scale_pos_weight = 1
for company in company_list:
ora_conn = cx_Oracle.connect(connection_string)
sql_query = '''select b.title,b.content_no_tag,'P'||t.tid as tid,t.delflag as relevance from cis_ans_basedata b inner join cis_ans_basedata_type t on (b.id=t.bid and t.delflag is not null)
where (b.orientation !=2 or b.orientation is null)
and t.tid in (%s)
and B.Publish_Date > '%s' and B.Publish_Date < '%s' ''' % (company, begin_date, end_date)
# print(sql_query)
tm = TrainManager()
tm.train(sql_query, ora_conn, _all=_all, _emotion=_emotion, save_to_folder=save_to_folder, lr=lr, reg_alpha=reg_alpha, \
reg_lambda=reg_lambda, objective=objective, with_sample_weight=with_sample_weight, subsample=subsample, \
thres=thres, min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight)
if __name__ == '__main__':
test()
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/manager.py
|
manager.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/24 19:09
# @Author : 程婷婷
# @FileName: __init__.py
# @Software: PyCharm
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/__init__.py
|
__init__.py
|
import pickle
import pandas as pd
import os
with open('../data/id/id_file.pkl','rb') as f:
line = pickle.load(f)
# print(line)
if os.path.isdir('../data/result') == False:
os.makedirs('../data/result')
df = pd.read_excel('../data/result.xlsx')
def get_key(d, value):
    return [k for k, v in d.items() if v == value]
id_entity1, id_entity2 = [], []
for value in df['实体1']:
values = get_key(line, value)
if len(values) == 0:
id_entity1.append('')
else:
id_entity1.append(values[0])
for value in df['实体2']:
values = get_key(line, value)
if len(values) == 0:
id_entity2.append('')
else:
id_entity2.append(values[0])
df['id_entity1'] = id_entity1
df['id_entity2'] = id_entity2
df.to_excel('../data/result/id_result.xlsx',index=False)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/id_output.py
|
id_output.py
|
with open('../data/process_data/data.tsv','r',encoding='utf-8') as f:
data = f.readlines()
with open('../data/test.tsv','w',encoding='utf-8') as f:
k = 0
for i in data:
        # skip the first 200 lines; lines 200-299 are written to test.tsv below
        if k < 200:
            k += 1
            print(k)
elif (k >= 200) & (k < 300):
f.write(i)
k += 1
else:
break
# with open('../data/test.tsv','w',encoding='utf-8') as f:
# for i in data:
# if k < 300:
# f.write(i)
# k += 1
with open('../data/process_data/kb.tsv','r',encoding='utf-8') as f:
data = f.readlines()
with open('../data/kb.tsv','w',encoding='utf-8') as f:
k = 0
for i in data:
if k < 300:
f.write(i)
k += 1
else:
break
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/division.py
|
division.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 7 21:31:42 2020
@author: zhangzib
"""
#please refer to https://hub.tensorflow.google.cn/google/bert_chinese_L-12_H-768_A-12/1
import sys
sys.path.insert(0, 'D:/peking_code/code_python/Bert201912/bert-master')
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
# open question: how to reduce the dimensionality of the output
#############################################################################################
#how the input preprocessing should be done to retrieve the input ids, masks, and segment ids:
def create_tokenizer_from_hub_module(bert_model_hub):
#with tf.Graph().as_default():
bert_module = hub.Module(bert_model_hub)
tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
with tf.compat.v1.Session() as sess:
vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
tokenization_info["do_lower_case"]])
return tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
def convert_text_to_features(model,text_): #created by zzb 20200615
tokenizer = create_tokenizer_from_hub_module(model)
example_ = run_classifier.InputExample(guid='',text_a=text_, label='A')
MAX_SEQ_LENGTH=128
input_feature = run_classifier.convert_single_example(0, example_, ['A','B'], MAX_SEQ_LENGTH, tokenizer)
features1 = []
features2 = []
features3 = []
features1.append(input_feature.input_ids)
features2.append(input_feature.input_mask)
features3.append(input_feature.segment_ids)
bert_inputs = dict(
input_ids=tf.convert_to_tensor(np.array(features1)),
input_mask=tf.convert_to_tensor(np.array(features2)),
segment_ids=tf.convert_to_tensor(np.array(features3)))
return bert_inputs
#############################################################################################
def text2vec(text_):
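    # Convert a piece of text into a fixed-size sentence vector: the pooled_output of the BERT hub module.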
model_ = "../embeding"
#model_ = "https://hub.tensorflow.google.cn/google/bert_chinese_L-12_H-768_A-12/1"
bert_inputs = convert_text_to_features(model_,text_)
hub_layer = hub.Module(model_, trainable=True)
_output = hub_layer(bert_inputs, signature="tokens", as_dict=True)
with tf.compat.v1.Session() as sess:
tf.compat.v1.global_variables_initializer().run()
pooled_output = sess.run(_output["pooled_output"]) #The pooled_output is a [batch_size, hidden_size] Tensor
#print(type(pooled_output[0]))
return pooled_output[0].tolist() #size: hidden_size
if __name__ == '__main__':
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
t = "要坚持以习近平新时代中国特色社会主义思想为指导,深入学习贯彻党的十九届四中全会精神"
print(text2vec(t))
# =============================================================================
#model = "https://storage.googleapis.com/tfhub-modules/google/bert_chinese_L-12_H-768_A-12/1.tar.gz"
#model = "https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/1"
#=============================================================================
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/hub_TextEmbedding.py
|
hub_TextEmbedding.py
|
# -*- coding: utf-8 -*-
from flask import Flask, g, render_template, flash, redirect, url_for, request, abort, session
from werkzeug.utils import secure_filename
import time
import os
#import sys
#sys.path.append("./src")
#os.chdir(os.path.join(os.getcwd(),'src'))
#import rel_prediction
import traceback
from relation_extraction.preprocessing_xls import paragraph_sectioning,preprocessing_xls_4train,preprocessing_xls_4pred
from relation_extraction.rel_train import train_
from relation_extraction.rel_prediction import prediction_,left_bag_of_words_featurizer,simple_bag_of_words_featurizer,right_bag_of_words_featurizer,get_high_prob_excel
DEBUG = False
PORT = 8017
HOST = '0.0.0.0'
app = Flask(__name__)
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
# def index():
# query_form = QueryForm()
# if request.method == 'POST':
# if query_form.validate_on_submit():
# query_file = request.files.get('query', None)
# query_file_name = secure_filename(str(int(time.time()*100)) + '_' + query_file.filename)
# path = os.path.join(app.config['UPLOAD_FOLDER'], query_file_name)
# query_file.save(path)
# session['query_file'] = query_file_name
# return redirect(url_for('show_result'))
# return render_template('index.html', query_form=query_form)
@app.route('/proces_train_xls/', methods=('GET', 'POST'))
def preprocessing_4train():
xls_name = str(request.form.get('xls_name'))
print(xls_name)
if not os.path.isfile(xls_name):
return 'xls文件不存在!'
try:
preprocessing_xls_4train(xls_name)
except Exception as e:
print(e)
return '0'
return '1'
@app.route('/train/', methods=('GET', 'POST'))
def get_train():
try:
train_()
except Exception as e:
print(e)
return '0'
return '1'
from flask import jsonify
import json
def get_return_info(_message, _prob_sorted=None, _text=None):
return json.dumps({'message': _message, 'prob': _prob_sorted, 'min_text': _text},ensure_ascii=False)
@app.route('/predict/', methods=('GET', 'POST'))
def get_prediction():
e1 = request.form.get('e1')
e2 = request.form.get('e2')
test_text = str(request.form.get('test_text'))
# list = {'a1':0.9,'a2':0.8}
# content = {'e1': list, 'e2': e2, 'text': test_text}
# return jsonify(content)
if (e1 is None) and (e2 is not None):
return get_return_info('输入格式应是:e1=XX&e2=YY&test_text=ZZ, 且句中应有左右实体名')
if (e2 is None) and (e1 is not None):
return get_return_info('输入格式应是:e1=XX&e2=YY&test_text=ZZ, 且句中应有左右实体名')
if (e1 is None):
print('左实体名 is None\n')
if (e2 is None):
print('右实体名 is None\n')
#test_text = str(request.args.get('test_text'))
#print(rel_prediction.OLD_URL)
# print(type(test_text),test_text)
#test_text = str('郑新聪 国资国企改革发展 要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,牢牢把握国有企业改革的正确方向。李南轩摄学习宣传贯彻党的十九大精神是全党全国当前和今后一个时期的首要政治任务。如何学习贯彻好党的十九大精神,习近平总书记在十九届中央政治局第一次集体学习时,提出要在学懂弄通做实上下功夫,号召“全党来一个大学习”。日前,福建全省各个领域、各条战线、各行各业兴起习近平新时代中国特色社会主义思想“大学习”热潮。福建省副省长郑新聪前些时候深入三钢集团福建罗源闽光钢铁有限责任公司一线,开展习近平新时代中国特色社会主义思想宣讲。宣讲会前,郑新聪一行深入到罗源闽光公司炼钢厂,沿着参观通道边走边看边听汇报,详细了解罗源闽光公司在绿色发展、技术指标、科技创新、经济效益等方面情况。在随后的宣讲会上,郑新聪以“深入学习习近平新时代中国特色社会主义思想深化和推动国有企业改革发展”为党课主题,分别从习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述、新时代国资国企改革发展肩负新的历史使命、坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展三个方面作了深刻阐释。就下一步如何推进新时代国资国企改革发展,郑新聪要求,要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,深刻认识深化国有企业改革的重大意义,牢牢把握国有企业改革的正确方向。以新发展理念推动国企发展宣讲中,郑新聪与参会人员共同学习回顾了习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述:目前,我国经济已由高速增长阶段转向高质量发展阶段。推动高质量发展是保持经济持续健康发展的必然要求;推动高质量发展是适应我国社会主要矛盾变化的必然要求;推动高质量发展是遵循经济规律发展的必然要求。此外,实现高质量发展必须坚持和践行新发展理念。发展是解决我国一切问题的基础和关键,发展必须是科学发展,必须坚定不移贯彻创新、协调、绿色、开放、共享的发展理念。新发展理念是习近平新时代中国特色社会主义经济思想的主要内容,在推进我国经济高质量发展过程中,必须坚定不移贯彻。为推动我国经济高质量发展,我们要坚持适应把握引领经济发展新常态,要把推进供给侧结构性改革作为经济工作的主线,要建设现代化经济体系。针对以上论述,郑新聪强调,全体成员要把握领会习近平新时代中国特色社会主义思想精神,特别是关于深化和推动国有企业改革发展方面,以此推动国企高质量发展。新时代国资国企肩负新使命郑新聪指出,党的十九大提出“要完善各类国有资产管理体制,改革国有资本授权经营体制,加快国有经济布局优化、结构调整、战略性重组,促进国有资产保值增值,推动国有资本做强做优做大,有效防止国有资产流失;深化国有企业改革,发展混合所有制经济,培育具有全球竞争力的世界一流企业。”这“九句话、109字”为国资国企改革发展指明了前进的方向,是我们推进下一步工作的重要行动指南。郑新聪表示,首先要深刻认识深化国有企业改革的重大意义。国有企业是推进国家现代化、保障人民共同利益的重要力量,是党和国家事业发展的重要物质基础和政治基础。深化国有企业改革是坚持和发展中国特色社会主义的必然要求,深化国有企业改革是实现“两个一百年”奋斗目标的重大任务,深化国有企业改革是推动我国经济持续健康发展的客观要求。在明确国企深化改革的重要性后,郑新聪强调,下一步要牢牢把握国有企业改革的正确方向。首先,要坚持和完善基本经济制度。必须毫不动摇巩固和发展公有制经济,毫不动摇鼓励、支持、引导非公有制经济发展。坚持公有制主体地位,发挥国有经济主导作用,做强做优做大国有企业。其次,要坚持社会主义市场经济改革方向。遵循市场经济规律和企业发展规律,坚持政企分开、政资分开、所有权与经营权分离,坚持权利、义务、责任相统一,促使国有企业真正成为独立市场主体。再者,坚持以解放和发展生产力为标准。始终把握有利于国有资产保值增值、有利于提高国有经济竞争力、有利于放大国有资本功能的要求,着力破除束缚国有企业发展的体制机制障碍,发挥国有企业各类人才积极性、主动性、创造性。同时,坚持增强活力与强化监管相结合。增强活力是搞好国有企业的本质要求,强化监管是搞好国有企业的重要保障,必须处理好两者关系,切实做到有机统一。此外,要更加坚持党对国有企业的领导。坚持党对国有企业的领导是重大政治原则,必须一以贯之。2016年10月,习近平在全国国有企业党的建设工作会议上指出:中国特色现代国有企业制度,“特”就特在把党的领导融入公司治理各环节。党建写入章程真正融入国企中心工作,章程明确了党组织在公司法人治理结构中的法定地位,特别是党组织在决策、执行、监督各环节的权责和工作方式。值得一提的是,郑新聪充分肯定三钢集团公司党委探索出的党支部密切联系群众的“五小工作法”,通过为群众讲清小道理、解决小问题、办好小事情、选树小典型、开展小活动,实现党建工作与生产经营、职工生活有机融合。随后,郑新聪指出,省属企业要扎实做好新时期深化国有企业改革的重点任务。“省属企业要完善各类国有资产管理体制。建立健全各类国有资产监督法律法规体系。以管资本为主深化国有资产监管要加快国有经济布局优化、结构调整、战略性重组。”郑新聪指出,省属企业要围绕服务国家战略,推动国有经济向关系国家安全、国民经济命脉和国计民生的重要行业和关键领域、重点基础设施集中。加快处置低效无效资产,淘汰落后产能,剥离办社会职能,解决历史遗留问题,提高国有资本配置效率。日前,国务院国资委下发了《关于加强国有企业资产负债约束的指导意见》是落实党的十九大精神,推动国有企业降杠杆、防范化解国有企业债务风险的重要举措,促使高负债国有企业资产负债率尽快回归合理水平。郑新聪指出,近年来,福建省省属企业也呈现一批改革发展典型。三钢集团通过兼并重组整合区域资源,集团钢产量成功突破1100万吨,真正步入大型钢铁企业行列。特别是2014年重组三金钢铁有限公司,形成了现在的罗源闽光钢铁公司,通过优化机制,改善工艺,2016年扭亏为盈,2018年18月份盈利10.74亿元,资产负债率从90降至目前的40,让一个濒临倒闭的企业成为一个福州区域明星企业,成为钢铁行业兼并重组成功典范。星网锐捷旗下凯米网络科技有限公司积极探索商业模式创新,向KTV提供“管理、流量、内容、广告”四大核心价值,构建互联网聚会娱乐新生态,用户超7500万,成为行业独角兽。发展混合所有制经济亦是新时期深化国有企业改革的重点任务。积极推进主业处于充分竞争行业和领域的商业类国有企业混合所有制改革,有效探索重点领域混合所有制改革,在引导子公司层面改革的同时探索在集团公司层面推进混合所有制改革。大力推动国有企业改制上市。稳妥有序开展国有控股混合所有制企业员工持股。此外,形成有效制衡的公司法人治理结构和灵活高效的市场化经营机制,加强监管有效防止国有资产流失。以国有资产保值增值、防止流失为目标,加强对企业关键业务、改革重点领域、国有资本运营重要环节的监督。建立健全国有企业重大决策失误和失职、渎职责任追究倒查机制。加强审计监督、纪检监督、巡查监督,形成监督合力。郑新聪表示,培育具有全球竞争力的世界一流企业也是目前省属企业的重点任务之一。支持国有企业深入开展国际化经营,在“一带一路”建设中推动优势产业走出去。')
#test_text = str('郝鹏 太钢精带公司 国务院国资委党委书记、主任郝鹏到太钢精带公司调研')
try:
test_text7,min_text,original_text = paragraph_sectioning(test_text,e1,e2)
if len(test_text7) < 20:
return get_return_info(test_text7)
message_,prob_text,prob_dict_sorted = prediction_(test_line = test_text7)
except:
        return get_return_info(traceback.format_exc())
#content_ = {'message': message_, 'prob': prob_dict_sorted, 'min_text': min_text}
return get_return_info(message_, prob_dict_sorted, min_text)
# if (len(min_text) + 10) > len(original_text):
# return result
# return result + '<br> ' + min_text
@app.route('/predict_mass/', methods=('GET', 'POST'))
def get_predict_mass():
xls_name = str(request.form.get('xls_name'))
try:
tsv_fullname = preprocessing_xls_4pred(xls_name)
if not os.path.isfile(tsv_fullname):
return '处理预测文件tsv不存在!'
result = prediction_(filename_ = tsv_fullname)
except:
        return traceback.format_exc()
return result
@app.route('/predict_high_prob/', methods=('GET', 'POST'))
def get_high_predict_mass():
xls_name = str(request.form.get('xls_name'))
prob_threshold_ = float(str(request.form.get('prob_threshold')))
if not os.path.isfile(xls_name):
return '被检索的xls文件不存在!'
try:
xls_fullname = get_high_prob_excel(predicted_result_file = xls_name,prob_threshold = prob_threshold_)
except:
        return traceback.format_exc()
return xls_fullname
app.run(debug=DEBUG, host=HOST, port=PORT)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/app.py
|
app.py
|
import jieba
import re
import os
import xlwt
# load the stop-word list
filepath = os.path.dirname(os.path.realpath(__file__))
stop = open(os.path.join(filepath, './user_data/stop.txt'), 'r+', encoding='utf-8')
stopword = stop.read().split("\n")
# maximum sentence length (in Chinese characters)
word_len = 600
# count the number of Chinese characters in a string
def han_number(char):
number = 0
for item in char:
if 0x4E00 <= ord(item) <= 0x9FA5:
number += 1
return number
# split the text into sentence chunks
def cut_j(text_):
text = re.sub('([。!?\?])([^”’])', r"\1\n\2", text_)
text = re.sub('([。!?\?][”’])([^,。!?\?])', r'\1\n\2', text)
text = text.rstrip().split("\n")
#k = math.floor(han_number(text_)/600)
j = 0
t = ['']
for i in text:
if han_number(t[j])<word_len:
t[j] = t[j]+i
else:
t.append('')
j = j+1
return t
# find the minimum distance between mentions of two entities in a string
def lenc(x,y,z):
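    # Returns (min_distance, first_entity, second_entity, first_offset, second_offset), ordering the two
    # entities by the occurrence offsets in z that achieved the minimum distance.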
xl = han_number(x)
yl = han_number(y)
xx = [10000]
yy = [20000]
min_ = 1000
for i in range(han_number(z)-max(xl,yl)):
if z[i:i+xl] == x:
xx.append(i)
xx.append(i+xl)
if z[i:i+yl] == y:
yy.append(i)
yy.append(i+yl)
# print(xx,yy)
a = 0
b = 0
for i in xx:
for j in yy:
if min_>abs(i-j):
a = i
b = j
min_ = abs(i-j)
if a>b:
return min_,y,x,b,a
else:
return min_,x,y,a,b
def keyword(entity_1,entity_2,text_,ii=0,jj=0):
key = {'left':[],'mention_1':[entity_1],'middle':[],'mention_2':[entity_2],'right':[]}
key['left'] = list(jieba.cut(text_[:ii-len(entity_1)]))
key['middle'] = list(jieba.cut(text_[ii:jj]))
key['right'] = list(jieba.cut(text_[jj+len(entity_2):]))
print('关键信息提取--------------------------')
print(key)
return key
###########################################3
def k(text,x='',y=''):
min_txt = ['0',1000]
if x == '':
p = 0
k = list(jieba.cut(text))
d = {}
for i in k:
if i in stopword:
continue
if i in d:
d[i] += 1
else:
d[i] = 1
m1 = ['1',1]
m2 = ['2',0]
for i in d:
if int(d[i])>=m1[1]:
m2[0] = m1[0]
m2[1] = m1[1]
m1[1] = d[i]
m1[0] = i
elif d[i]>m2[1]:
m2[0] = i
m2[1] = d[i]
else:
m1 = [x,0]
m2 = [y,0]
jl = cut_j(text)
keyword_ = []
for str_ in jl:
p,xx,yy,ii,jj = lenc(m1[0],m2[0],str_)
if min_txt[1]>p:
min_txt[0] = str_
min_txt[1] = p
keyword_ = [xx,yy,ii,jj]
print('关键词---------------------------------------')
print(keyword_[0],keyword_[1])
print('这句话两个词相距最近-------------------------')
print(min_txt)
keyword(keyword_[0],keyword_[1],min_txt[0],keyword_[2],keyword_[3])
return min_txt,m1,m2
# =============================================================================
# def position_(text,x='',y=''):
# min_txt = ['0',1000]
# if x == '':
# p = 0
# k = list(jieba.cut(text))
# d = {}
# for i in k:
# if i in stopword:
# continue
# if i in d:
# d[i] += 1
# else:
# d[i] = 1
# m1 = ['1',1]
# m2 = ['2',0]
# for i in d:
# if int(d[i])>=m1[1]:
# m2[0] = m1[0]
# m2[1] = m1[1]
# m1[1] = d[i]
# m1[0] = i
# elif d[i]>m2[1]:
# m2[0] = i
# m2[1] = d[i]
# else:
# m1 = [x,0]
# m2 = [y,0]
# jl = cut_j(text)
# keyword_ = []
# for str_ in jl:
# p,xx,yy,ii,jj = lenc(m1[0],m2[0],str_)
# if min_txt[1]>p:
# min_txt[0] = str_
# min_txt[1] = p
# keyword_ = [xx,yy,ii,jj]
# print('关键词: ',xx,yy,'出现在下面这段话,且距离最近:\n')
#
# print(min_txt)
#
# return ii,jj
#
# =============================================================================
def position_mintxt(text,x='',y=''):
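    # Find the sentence chunk (at most word_len Chinese characters) in which the two key words x and y
    # (or, when x is empty, the two most frequent non-stop words) occur closest together;
    # returns that chunk and two mention offsets.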
min_txt = ['0',1000]
if x == '':
p = 0
k = list(jieba.cut(text))
d = {}
for i in k:
if i in stopword:
continue
if i in d:
d[i] += 1
else:
d[i] = 1
m1 = ['1',1]
m2 = ['2',0]
for i in d:
if int(d[i])>=m1[1]:
m2[0] = m1[0]
m2[1] = m1[1]
m1[1] = d[i]
m1[0] = i
elif d[i]>m2[1]:
m2[0] = i
m2[1] = d[i]
else:
m1 = [x,0]
m2 = [y,0]
keyword_ = []
if han_number(text)<word_len:
print(m1[0],m2[0])
p,xx,yy,ii,jj = lenc(m1[0],m2[0],text)
keyword_ = [xx,yy,ii,jj]
print(xx,yy)
min_txt = [text,p]
else:
jl = cut_j(text)
for str_ in jl:
print(m1[0],m2[0])
p,xx,yy,ii,jj = lenc(m1[0],m2[0],str_)
if min_txt[1]>p:
min_txt[0] = str_
min_txt[1] = p
keyword_ = [xx,yy,ii,jj]
#print(keyword_)
if min_txt[1]>word_len:
print('未找到适合的句子')
else:
print('关键词: ',xx,yy,'出现在下面这段话,且距离最近:')
print(min_txt)
return min_txt[0],ii,jj
import pandas as pd
#Example = namedtuple('Example', 'entity_1, entity_2, left, mention_1, middle, mention_2, right, ' )
def position__last_occering(entity_,text_):
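    # Return the start index of the last occurrence of entity_ in text_, or -1 if entity_ does not occur.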
    #jieba.load_userdict("../user_data/userdict.txt")  # load a custom user dictionary
jieba.load_userdict(os.path.join(filepath, './user_data/company.txt'))
jieba.load_userdict(os.path.join(filepath, './user_data/expert.txt'))
jieba.load_userdict(os.path.join(filepath, './user_data/leader.txt'))
jieba.load_userdict(os.path.join(filepath, './user_data/region.txt'))
jieba.load_userdict(os.path.join(filepath, './user_data/researcharea.txt'))
index = -1
while True:
end_index = index
index = str(text_).find(entity_,index + 1)
#if (len(text_) < (index + len(entity_) + 5)) & (end_index != -1):
#break
if index == -1:
break
print(end_index)
return end_index
def paragraph_sectioning_to7(entity_1,entity_2,text_):
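    # Convert (entity_1, entity_2, text) into the 7-field tab-separated line expected by the corpus reader:
    # entity_1, entity_2, left, mention_1, middle, mention_2, right (contexts segmented by jieba).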
p1 = str(text_).find(entity_1)
p2 = position__last_occering(entity_2,text_)
if (p1 < 0) or (p2 < 0):
print('出错:句中无实体名!',p1,p2,entity_1,entity_2,text_)
return "出错:句中无实体名!"
#print('entity_1 position: ',p1,'\n')
l1 = p1 + len(entity_1)
l = " ".join(jieba.cut(text_[:p1]))
#p2 = str(text_).find(entity_2)
m = " ".join(jieba.cut(text_[l1:p2]))
l2 = p2 + len(entity_2)
r = " ".join(jieba.cut(text_[l2:]))
tuple_7 = str(entity_1) + '\t' + str(entity_2) + '\t' + l.replace('\t',' ') + '\t' + str(entity_1) + '\t' + m.replace('\t',' ') + '\t' + str(entity_2) + '\t' + r.replace('\t',' ') + '\n'
#print('\n',tuple_7,'\n')
return tuple_7
#Example = namedtuple('Example', 'entity_1, entity_2, text_ ' )
# =============================================================================
# def paragraph_sectioning(text_): #3to7
#
# fields = text_[:].split('\t')
# print('(fields): ',len(fields),fields)
# if len(fields) != 3:
# return '0','0','0'
# #print(type(fields))
# entity_1 = fields[0]
# entity_2 = fields[1]
# min_text,i,j = position_mintxt(fields[2],x = entity_1, y = entity_2)
# #print('===============:',len(min_text), len(fields[2]))
# return paragraph_sectioning_to7(entity_1,entity_2,min_text),min_text,fields[2]
#
# =============================================================================
def paragraph_sectioning(text_,e1=None,e2=None): #3to7
if e2 is None:
if e1 is not None:
return "参数格式错" ,'0','0'
if e1 is None:
if e2 is not None:
return "参数格式错",'0','0'
fields = text_[:].split('\t')
#print('(fields): ',len(fields),fields)
if len(fields) != 3:
return '0','0','0'
#print(type(fields))
entity_1 = fields[0]
entity_2 = fields[1]
min_text,i,j = position_mintxt(fields[2],x = entity_1, y = entity_2)
#print('========1111111=======:',entity_1, entity_2, fields[2])
return paragraph_sectioning_to7(entity_1,entity_2,min_text),min_text,fields[2]
min_text,i,j = position_mintxt(text_,x = str(e1), y = str(e2))
#print('========2222222=======:',e1,e2, text_)
return paragraph_sectioning_to7(e1,e2,min_text),min_text,text_
Text_Minlen = 30
def preprocessing_xls_4train(src_filename):
data1 = pd.read_excel(src_filename,keep_default_na=False)
kb_ = {}
with open('./data/corpus.tsv','w', encoding='UTF-8') as f_corpus:
for indexs in data1.index:
line_ = list(data1.loc[indexs].values[:])
if len(line_[0])<2:
continue
if len(line_[2])<2:
continue
if len(line_[3]) < Text_Minlen :
continue
#min_txt,position_1,position_2 = position_mintxt(line_[3],x=line_[0],y=line_[2])
tuple_7 = paragraph_sectioning_to7(line_[0],line_[2],line_[3])
if len(tuple_7)<30:
continue
f_corpus.writelines(tuple_7)
            if len(line_[1]) < 2:  # an empty relation marks a negative sample
continue
if str(line_[1]) not in kb_.keys():
kb_[str(line_[1])] = []
kb_triple_str = str(line_[1]) + '\t' + str(line_[0]) + '\t' + str(line_[2])
#kb_[str(line_[1])].append(str(line_[1]) + '\t' + str(line_[0]) + '\t' + str(line_[2]))
#print('--len(unrelated_pairs)-----------------------------',str(line_[1]),len(kb_[str(line_[1])]))
#f_kb.writelines(str(line_[1]) + '\t' + str(line_[0]) + '\t' + str(line_[2]) + '\n')
if kb_triple_str not in kb_[str(line_[1])] :
kb_[str(line_[1])].append(kb_triple_str)
with open('./data/kb.tsv','w', encoding='UTF-8') as f_kb:
for rel_ in kb_.keys():
            if len(kb_[rel_]) < 2: # skip relations with fewer than 2 KBTriple(rel, sbj, obj) entries; a single triple rarely yields many examples (in practice no more than about 20)
continue
for truple_ in kb_[rel_]:
f_kb.writelines(str(truple_) + '\n')
return '1'
def clean_xls_4train(src_filename):
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('关系样本')
sheet.write(0, 0, "左实体1")
sheet.write(0, 1, "关系类型")
sheet.write(0, 2, "右实体")
sheet.write(0, 3, "语料")
i = 0
data1 = pd.read_excel(src_filename)
for indexs in data1.index:
line_ = list(data1.loc[indexs].values[:])
# if len(line_[1])<2: #为空时 是负样本
# continue
if len(line_[0])<2:
continue
if len(line_[2])<2:
continue
if len(line_[3]) < Text_Minlen :
continue
#min_txt,position_1,position_2 = position_mintxt(line_[3],x=line_[0],y=line_[2])
tuple_7 = paragraph_sectioning_to7(line_[0],line_[2],line_[3])
if len(tuple_7)<30:
continue
i = i+1
sheet.write(i, 0, line_[0])
sheet.write(i, 1, line_[1])
sheet.write(i, 2, line_[2])
sheet.write(i, 3, line_[3])
workbook.save(os.path.join(os.path.dirname(os.path.abspath(src_filename)),'cleaned_teain_corpus.xlsx'))
def preprocessing_xls_4pred(src_filename):
if not os.path.isfile(src_filename):
src_filename = os.path.join('../data',src_filename)
if not os.path.isfile(src_filename):
return 'xls文件不存在!'
data1 = pd.read_excel(src_filename)
dir_ = os.path.dirname(os.path.abspath(src_filename))
tsv_file = os.path.join(dir_,'test.tsv')
tsv_4section = os.path.join(dir_,'test_4section.tsv')
with open(tsv_file,'w', encoding='UTF-8') as f_corpus,open(tsv_4section,'w', encoding='UTF-8') as f2_corpus:
for indexs in data1.index:
line_ = list(data1.loc[indexs].values[:])
print('----------', len(line_))
if len(line_[0])<2:
continue
if len(line_[2])<2:
continue
if len(line_[3]) < Text_Minlen :
continue
#print('------&&&&--line_[3]--', line_[3])
min_text,i,j = position_mintxt(line_[3],x = line_[0], y = line_[2])
tuple_7 = paragraph_sectioning_to7(line_[0],line_[2],min_text)
if len(tuple_7)<30:
continue
tuple_4 = str(line_[0]) + '\t' + str(line_[2]) + '\t' + str(min_text).replace('\t',' ') + '\t' + str(line_[3]).replace('\t',' ') + '\n'
f2_corpus.writelines(tuple_4)
f_corpus.writelines(tuple_7)
return tsv_file #返回全路径
#print(dirpath)
if __name__=="__main__":
#preprocessing_xls_4pred('../user_data/pre.xls')
#clean_xls_4train('../user_data/所有关系0603.xls')
preprocessing_xls_4train('./user_data/t.xls')
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/preprocessing_xls.py
|
preprocessing_xls.py
|
from collections import Counter
import os
from relation_extraction import rel_ext
import pandas as pd
def simple_bag_of_words_featurizer(kbt, corpus, feature_counter):
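    # Bag-of-words features over the middle context between the two entities; mentions in the
    # (sbj, obj) direction are weighted 5x, mentions in the reverse direction 1x.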
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
#print(ex.middle)
for word in ex.middle.split(' '):
feature_counter[word] += 5
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.middle.split(' '):
feature_counter[word] += 1
return feature_counter
def left_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
#print(ex.left)
for word in ex.left.split(' '):
feature_counter[word] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.left.split(' '):
feature_counter[word] += 1
return feature_counter
def right_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
#print(ex.left)
for word in ex.right.split(' '):
feature_counter[word] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.right.split(' '):
feature_counter[word] += 1
return feature_counter
def train_(rex_ext_data_home='./data'):
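    # Build a Dataset from corpus.tsv (entity-pair mentions) and kb.tsv (relation triples), then train
    # relation-extraction models with the three bag-of-words featurizers.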
#rex_ext_data_home = os.path.join('..','data')
# rex_ext_data_home_corpus = r'../data/rel_ext_data/corpus.tsv.gz'
# rex_ext_data_home_kb = r'../data/rel_ext_data/kb.tsv.gz'
# corpus = rel_ext.Corpus(rex_ext_data_home_corpus)
# kb = rel_ext.KB(rex_ext_data_home_kb)
corpus = rel_ext.Corpus(os.path.join(rex_ext_data_home,'corpus.tsv'))
kb = rel_ext.KB(os.path.join(rex_ext_data_home, 'kb.tsv'))
dataset = rel_ext.Dataset(corpus, kb)
dataset.count_examples()
dataset.count_relation_combinations()
#print(dataset)
# splits = dataset.build_splits()
# kbts_by_rel, labels_by_rel = dataset.build_dataset()
# all_relations = set(kbts_by_rel.keys())
train_result = rel_ext.train_models(
#all_relations,
featurizers=[left_bag_of_words_featurizer,simple_bag_of_words_featurizer,right_bag_of_words_featurizer],
data=dataset
)
print(train_result)
# rel_ext.examine_model_weights(train_result)
if __name__ == '__main__':
train_()
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/rel_train.py
|
rel_train.py
|
import pandas as pd
with open('../data/rel_ext_data/corpus.tsv','r',encoding='utf-8') as f:
data = f.readline()
datas = data.split('\t')
for i in range(len(datas)):
print(datas[i])
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/test.py
|
test.py
|
from relation_extraction import rel_ext
import os
import pandas as pd
import xlrd, xlwt
from sklearn.metrics import precision_recall_fscore_support
import collections
from collections import namedtuple
def simple_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
for word in ex.middle.split(' '):
feature_counter[word] += 5
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.middle.split(' '):
feature_counter[word] += 1
return feature_counter
def left_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
#print(ex.left)
for word in ex.left.split(' '):
feature_counter[word] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.left.split(' '):
feature_counter[word] += 1
return feature_counter
def right_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
#print(ex.left)
for word in ex.right.split(' '):
feature_counter[word] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.right.split(' '):
feature_counter[word] += 1
return feature_counter
#d: defaultdict(<class 'dict'>, {('entity1','entity2'): {'relation1': 0.625, 'relation2': 0.0, ...}, ('entityX','entityY'): {'relation1': 0.625, 'relation2': 0.0, ...}})
def prob2excel(d,ismass = False,dir_ = '../data'):
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('概率')
sheet.write(0, 0, "实体1")
sheet.write(0, 1, "实体2")
sheet.write(0, 2, "关系类型")
sheet.write(0, 3, "概率")
i = 0
prob2text = ''
for pair_,value in d.items():
new_value = {}
prob2text = prob2text + str(pair_[0]) + ' \t' + str(pair_[1]) + ' : '
for rel_type in sorted(value,key=value.__getitem__,reverse=True):
i = i+1
sheet.write(i, 0, pair_[0])
sheet.write(i, 1, pair_[1])
sheet.write(i, 2, rel_type)
sheet.write(i, 3, value[rel_type])
new_value[rel_type] = value[rel_type]
if not ismass:
prob2text = prob2text + str(rel_type) + ' \t' + str(value[rel_type])
prob2text = prob2text + '<br> ' + '<br> '
d[pair_] = new_value
# =============================================================================
# for pair_,value in d.items():
# for rel_type, p in value.items():
# print('===============:',str(pair_[0]) , str(pair_[1]),rel_type,p)
#
# =============================================================================
if ismass :
if i>0:
workbook.save(os.path.join(dir_,'predicted_result.xlsx'))
return '预测结果保存到了 ' + dir_ + '\\predicted_result.xlsx'
else:
return 'do nothing'
return prob2text
def prob2excel_2(d,ismass = False,dir_ = '../data'):
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('概率')
sheet.write(0, 0, "实体1")
sheet.write(0, 1, "实体2")
sheet.write(0, 2, "概率")
sheet.write(0, 3, "语料")
sheet.write(0, 4, "原语料")
i = 0
prob2text = ''
if ismass :
with open(os.path.join(dir_, 'test_4section.tsv'),'r', encoding='UTF-8') as f:
test_4section_data = f.readlines()
prob_dict_sorted = collections.defaultdict(dict)
for pair_,value in d.items():
prob2text = prob2text + str(pair_[0]) + ' \t' + str(pair_[1]) + ' : '
i = i+1
sheet.write(i, 0, pair_[0])
sheet.write(i, 1, pair_[1])
rel_value_str = ''
for rel_type in sorted(value,key=value.__getitem__,reverse=True):
rel_value_str = rel_value_str + str(rel_type) +':'+ str(value[rel_type])+'; '
prob_dict_sorted[str(pair_[0]) + ',' + str(pair_[1])][rel_type] = value[rel_type]
if not ismass:
prob2text = prob2text + str(rel_type) + ' \t' + str(value[rel_type])
prob2text = prob2text + '<br> ' + '<br> '
sheet.write(i, 2, rel_value_str)
if ismass :
for line in test_4section_data:
fields = line[:-1].split('\t')
#print('========fields=======:',len(fields))
if (fields[0] == pair_[0]) and (fields[1] == pair_[1]) :
sheet.write(i, 3, fields[2])
sheet.write(i, 4, fields[3])
break
if ismass :
if i>0:
workbook.save(os.path.join(dir_,'predicted_result.xlsx'))
return dir_ + '\\predicted_result.xlsx',None,None
else:
return 'do nothing',None,None
return 'ok',prob2text,prob_dict_sorted
def get_high_prob_excel(predicted_result_file = './data/predicted_result.xlsx', prob_threshold = 0.2):
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('概率')
sheet.write(0, 0, "实体1")
sheet.write(0, 1, "实体2")
sheet.write(0, 2, "概率")
sheet.write(0, 3, "语料")
sheet.write(0, 4, "原语料")
i = 0
data1 = pd.read_excel(predicted_result_file)
for indexs in data1.index:
line_ = list(data1.loc[indexs].values[:])
fields = line_[2].split('; ')
high_prob = fields[0].split(':')
if float(high_prob[1]) < prob_threshold:
continue
i = i+1
sheet.write(i, 0, line_[0])
sheet.write(i, 1, line_[1])
sheet.write(i, 2, line_[2])
sheet.write(i, 3, line_[3])
sheet.write(i, 4, line_[4])
if i < 1:
return 'do nothing'
file_name = os.path.join(os.path.dirname(os.path.abspath(predicted_result_file)),'high_prob.xlsx')
workbook.save(file_name)
if not os.path.isfile(file_name):
return 'do nothing'
#print('precision', file_name)
return file_name
Example = namedtuple('Example',
'entity_1, entity_2, left, mention_1, middle, mention_2, right, '
)
def prediction_(rex_ext_data_home='./data',test_line = '',filename_ = ''):
#rex_ext_data_home = os.path.join('..','data')
if '.tsv' in filename_ :
if not os.path.isfile(filename_):
filename_ = os.path.join(rex_ext_data_home,filename_)
if not os.path.isfile(filename_):
#prob_dict = collections.defaultdict(dict)
return "失败:处理预测文件tsv出错!",None,None
corpus = rel_ext.Corpus(filename_)
abspath_ = os.path.dirname(os.path.abspath(filename_))
#print(dirpath)
is_mass = True
else:
is_mass = False
data_list = []
test_line = test_line[:].split('\t')
data_list.append(Example(*test_line))
#print(type(test_line),test_line)
corpus = rel_ext.Corpus(data_list)
kb = rel_ext.KB(os.path.join(rex_ext_data_home, 'kb.tsv'))
dataset = rel_ext.Dataset(corpus, kb)
    #defaultdict(<class 'dict'>, {('entity1','entity2'): {'relation1': 0.625, 'relation2': 0.0, ...}, ('entityX','entityY'): {'relation1': 0.625, 'relation2': 0.0, ...}})
#rel_prob_dict = collections.defaultdict(dict)
# data = pd.read_csv('../data/dev.tsv')
# splits = dataset.build_splits()
rel_prob_dict = rel_ext.find_new_relation_instances_new(
featurizers=[left_bag_of_words_featurizer,simple_bag_of_words_featurizer,right_bag_of_words_featurizer],
test_split = dataset)
#if isinstance(rel_prob_dict,int):
if len(rel_prob_dict) < 1 :
return "失败:可能概率太低或已有该实体对及其关系",None,None
if is_mass :
return prob2excel_2(rel_prob_dict,ismass = is_mass,dir_ = abspath_)
return prob2excel_2(rel_prob_dict)
#import tensorflow as tf
#from transformers import BertTokenizer, TFAutoModelForSequenceClassification,TFPreTrainedModel
from relation_extraction.preprocessing_xls import paragraph_sectioning
if __name__ == '__main__':
# model = TFAutoModelForSequenceClassification.from_pretrained('D:/peking_code/code_python/relation_extraction/src/chinese_L-12_H-768_A-12/bert_config.json')
#model = TFBertForSequenceClassification.from_pretrained("chinese_L-12_H-768_A-12/bert_config.json")
# nlp_bert_lg = pipeline('feature-extraction',model=model,from_tf=True)
# print(len(nlp_bert_lg('Hugging Face is a French company based in New York.')))
#test_text7,min_text,original_text = paragraph_sectioning('郑新聪 国资国企改革发展 要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,牢牢把握国有企业改革的正确方向。李南轩摄学习宣传贯彻党的十九大精神是全党全国当前和今后一个时期的首要政治任务。如何学习贯彻好党的十九大精神,习近平总书记在十九届中央政治局第一次集体学习时,提出要在学懂弄通做实上下功夫,号召“全党来一个大学习”。日前,福建全省各个领域、各条战线、各行各业兴起习近平新时代中国特色社会主义思想“大学习”热潮。福建省副省长郑新聪前些时候深入三钢集团福建罗源闽光钢铁有限责任公司一线,开展习近平新时代中国特色社会主义思想宣讲。宣讲会前,郑新聪一行深入到罗源闽光公司炼钢厂,沿着参观通道边走边看边听汇报,详细了解罗源闽光公司在绿色发展、技术指标、科技创新、经济效益等方面情况。在随后的宣讲会上,郑新聪以“深入学习习近平新时代中国特色社会主义思想深化和推动国有企业改革发展”为党课主题,分别从习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述、新时代国资国企改革发展肩负新的历史使命、坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展三个方面作了深刻阐释。就下一步如何推进新时代国资国企改革发展,郑新聪要求,要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,深刻认识深化国有企业改革的重大意义,牢牢把握国有企业改革的正确方向。以新发展理念推动国企发展宣讲中,郑新聪与参会人员共同学习回顾了习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述:目前,我国经济已由高速增长阶段转向高质量发展阶段。推动高质量发展是保持经济持续健康发展的必然要求;推动高质量发展是适应我国社会主要矛盾变化的必然要求;推动高质量发展是遵循经济规律发展的必然要求。此外,实现高质量发展必须坚持和践行新发展理念。发展是解决我国一切问题的基础和关键,发展必须是科学发展,必须坚定不移贯彻创新、协调、绿色、开放、共享的发展理念。新发展理念是习近平新时代中国特色社会主义经济思想的主要内容,在推进我国经济高质量发展过程中,必须坚定不移贯彻。为推动我国经济高质量发展,我们要坚持适应把握引领经济发展新常态,要把推进供给侧结构性改革作为经济工作的主线,要建设现代化经济体系。针对以上论述,郑新聪强调,全体成员要把握领会习近平新时代中国特色社会主义思想精神,特别是关于深化和推动国有企业改革发展方面,以此推动国企高质量发展。新时代国资国企肩负新使命郑新聪指出,党的十九大提出“要完善各类国有资产管理体制,改革国有资本授权经营体制,加快国有经济布局优化、结构调整、战略性重组,促进国有资产保值增值,推动国有资本做强做优做大,有效防止国有资产流失;深化国有企业改革,发展混合所有制经济,培育具有全球竞争力的世界一流企业。”这“九句话、109字”为国资国企改革发展指明了前进的方向,是我们推进下一步工作的重要行动指南。郑新聪表示,首先要深刻认识深化国有企业改革的重大意义。国有企业是推进国家现代化、保障人民共同利益的重要力量,是党和国家事业发展的重要物质基础和政治基础。深化国有企业改革是坚持和发展中国特色社会主义的必然要求,深化国有企业改革是实现“两个一百年”奋斗目标的重大任务,深化国有企业改革是推动我国经济持续健康发展的客观要求。在明确国企深化改革的重要性后,郑新聪强调,下一步要牢牢把握国有企业改革的正确方向。首先,要坚持和完善基本经济制度。必须毫不动摇巩固和发展公有制经济,毫不动摇鼓励、支持、引导非公有制经济发展。坚持公有制主体地位,发挥国有经济主导作用,做强做优做大国有企业。其次,要坚持社会主义市场经济改革方向。遵循市场经济规律和企业发展规律,坚持政企分开、政资分开、所有权与经营权分离,坚持权利、义务、责任相统一,促使国有企业真正成为独立市场主体。再者,坚持以解放和发展生产力为标准。始终把握有利于国有资产保值增值、有利于提高国有经济竞争力、有利于放大国有资本功能的要求,着力破除束缚国有企业发展的体制机制障碍,发挥国有企业各类人才积极性、主动性、创造性。同时,坚持增强活力与强化监管相结合。增强活力是搞好国有企业的本质要求,强化监管是搞好国有企业的重要保障,必须处理好两者关系,切实做到有机统一。此外,要更加坚持党对国有企业的领导。坚持党对国有企业的领导是重大政治原则,必须一以贯之。2016年10月,习近平在全国国有企业党的建设工作会议上指出:中国特色现代国有企业制度,“特”就特在把党的领导融入公司治理各环节。党建写入章程真正融入国企中心工作,章程明确了党组织在公司法人治理结构中的法定地位,特别是党组织在决策、执行、监督各环节的权责和工作方式。值得一提的是,郑新聪充分肯定三钢集团公司党委探索出的党支部密切联系群众的“五小工作法”,通过为群众讲清小道理、解决小问题、办好小事情、选树小典型、开展小活动,实现党建工作与生产经营、职工生活有机融合。随后,郑新聪指出,省属企业要扎实做好新时期深化国有企业改革的重点任务。“省属企业要完善各类国有资产管理体制。建立健全各类国有资产监督法律法规体系。以管资本为主深化国有资产监管要加快国有经济布局优化、结构调整、战略性重组。”郑新聪指出,省属企业要围绕服务国家战略,推动国有经济向关系国家安全、国民经济命脉和国计民生的重要行业和关键领域、重点基础设施集中。加快处置低效无效资产,淘汰落后产能,剥离办社会职能,解决历史遗留问题,提高国有资本配置效率。日前,国务院国资委下发了《关于加强国有企业资产负债约束的指导意见》是落实党的十九大精神,推动国有企业降杠杆、防范化解国有企业债务风险的重要举措,促使高负债国有企业资产负债率尽快回归合理水平。郑新聪指出,近年来,福建省省属企业也呈现一批改革发展典型。三钢集团通过兼并重组整合区域资源,集团钢产量成功突破1100万吨,真正步入大型钢铁企业行列。特别是2014年重组三金钢铁有限公司,形成了现在的罗源闽光钢铁公司,通过优化机制,改善工艺,2016年扭亏为盈,2018年18月份盈利10.74亿元,资产负债率从90降至目前的40,让一个濒临倒闭的企业成为一个福州区域明星企业,成为钢铁行业兼并重组成功典范。星网锐捷旗下凯米网络科技有限公司积极探索商业模式创新,向KTV提供“管理、流量、内容、广告”四大核心价值,构建互联网聚会娱乐新生态,用户超7500万,成为行业独角兽。发展混合所有制经济亦是新时期深化国有企业改革的重点任务。积极推进主业处于充分竞争行业和领域的商业类国有企业混合所有制改革,有效探索重点领域混合所有制改革,在引导子公司层面改革的同时探索在集团公司层面推进混合所有制改革。大力推动国有企业改制上市。稳妥有序开展国有控股混合所有制企业员工持股。此外,形成有效制衡的公司法人治理结构和灵活高效的市场化经营机制,加强监管有效防止国有资产流失。以国有资产保值增值、防止流失为目标,加强对企业关键业务、改革重点领域、国有资本运营重要环节的监督。建立健全国有企业重大决策失误和失职、渎职责任追究倒查机制。加强审计监督、纪检监督、巡查监督,形成监督合力。郑新聪表示,培育具有全球竞争力的世界一流企业也是目前省属企业的重点任务之一。支持国有企业深入开展国际化经营,在“一带一路”建设中推动优势产业走出去。')
#test_text7 = paragraph_sectioning(str('郑新聪 国资国企改革发展 要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,牢牢把握国有企业改革的正确方向。李南轩摄学习宣传贯彻党的十九大精神是全党全国当前和今后一个时期的首要政治任务。如何学习贯彻好党的十九大精神,习近平总书记在十九届中央政治局第一次集体学习时,提出要在学懂弄通做实上下功夫,号召“全党来一个大学习”。日前,福建全省各个领域、各条战线、各行各业兴起习近平新时代中国特色社会主义思想“大学习”热潮。福建省副省长郑新聪前些时候深入三钢集团福建罗源闽光钢铁有限责任公司一线,开展习近平新时代中国特色社会主义思想宣讲。宣讲会前,郑新聪一行深入到罗源闽光公司炼钢厂,沿着参观通道边走边看边听汇报,详细了解罗源闽光公司在绿色发展、技术指标、科技创新、经济效益等方面情况。在随后的宣讲会上,郑新聪以“深入学习习近平新时代中国特色社会主义思想深化和推动国有企业改革发展”为党课主题,分别从习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述、新时代国资国企改革发展肩负新的历史使命、坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展三个方面作了深刻阐释。就下一步如何推进新时代国资国企改革发展,郑新聪要求,要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,深刻认识深化国有企业改革的重大意义,牢牢把握国有企业改革的正确方向。以新发展理念推动国企发展宣讲中,郑新聪与参会人员共同学习回顾了习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述:目前,我国经济已由高速增长阶段转向高质量发展阶段。推动高质量发展是保持经济持续健康发展的必然要求;推动高质量发展是适应我国社会主要矛盾变化的必然要求;推动高质量发展是遵循经济规律发展的必然要求。此外,实现高质量发展必须坚持和践行新发展理念。发展是解决我国一切问题的基础和关键,发展必须是科学发展,必须坚定不移贯彻创新、协调、绿色、开放、共享的发展理念。新发展理念是习近平新时代中国特色社会主义经济思想的主要内容,在推进我国经济高质量发展过程中,必须坚定不移贯彻。为推动我国经济高质量发展,我们要坚持适应把握引领经济发展新常态,要把推进供给侧结构性改革作为经济工作的主线,要建设现代化经济体系。针对以上论述,郑新聪强调,全体成员要把握领会习近平新时代中国特色社会主义思想精神,特别是关于深化和推动国有企业改革发展方面,以此推动国企高质量发展。新时代国资国企肩负新使命郑新聪指出,党的十九大提出“要完善各类国有资产管理体制,改革国有资本授权经营体制,加快国有经济布局优化、结构调整、战略性重组,促进国有资产保值增值,推动国有资本做强做优做大,有效防止国有资产流失;深化国有企业改革,发展混合所有制经济,培育具有全球竞争力的世界一流企业。”这“九句话、109字”为国资国企改革发展指明了前进的方向,是我们推进下一步工作的重要行动指南。郑新聪表示,首先要深刻认识深化国有企业改革的重大意义。'))
#prediction_(test_line = test_text7)
prediction_(filename_ = 'test.tsv')
#get_high_prob_excel(predicted_result_file = '../user_data/predicted_result0602.xlsx', prob_threshold = 0.8)
# =============================================================================
# predictions, assess_o = rel_ext.predict_new(
# featurizers=[left_bag_of_words_featurizer,simple_bag_of_words_featurizer],
# assess_dataset = dataset)
# df = pd.DataFrame(columns=['实体1','实体2','实体关系'])
# sbjs, objs, pre = [],[],[]
# for item in assess_o.items():
# for i in item[1]:
# sbjs.append(i.sbj)
# objs.append(i.obj)
# for i in predictions.items():
# for j in i[1]:
# if j == True:
# pre.append(i[0])
# else:
# pre.append('not ' + i[0])
# df['实体1'] = sbjs
# df['实体2'] = objs
# df['实体关系'] = pre
# df.to_excel('../data/result.xlsx',index=False)
# =============================================================================
# df = pd.read_excel('../data/result.xlsx')
# predictions = df['实体关系']
# true_labels = df['label']
# predictions=[True if i == '调研' else False for i in predictions]
# true_labels = [True if i == '调研' else False for i in true_labels]
# # rel_ext.evaluate_predictions(predictions, true_labels)
# stats = precision_recall_fscore_support(true_labels, predictions, labels=[True, False])
# print('precision', 'recall', 'f-score', 'support')
# statss = [round(stat[0], 3)for stat in stats]
# stats = [round(stat[1], 3) for stat in stats]
# print(statss)
# print(stats)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/rel_prediction.py
|
rel_prediction.py
|
import pickle
import os
with open('../data/process_data/dev.tsv','r',encoding='utf-8') as f:
data = f.readlines()
line1, line2, line3 = {}, {},{}
for i in data:
key1 = i.split('\t')[0]
value1 = i.split('\t')[1]
line1[key1] = value1
key2 = i.split('\t')[2]
value2 = i.split('\t')[3]
line2[key2] = value2
line3 = {**line1, **line2}
# set(line3.keys())
print(line3)
if os.path.isdir('../data/id') == False:
os.makedirs('../data/id')
with open('../data/id/id_file.pkl','wb') as f:
pickle.dump(line3,f)
import pandas as pd
df = pd.read_csv('../data/process_data/dev.tsv',sep='\t',header=None)
df.drop([0,2],axis=1, inplace=True)
df.to_csv('../data/process_data/data.tsv', sep='\t', header=False,index=False)
# print(df)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/id_input.py
|
id_input.py
|
from collections import Counter, defaultdict, namedtuple
import gzip
import numpy as np
import os
import random
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_recall_fscore_support
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import joblib
import pickle
import pandas as pd
__author__ = "Bill MacCartney"
__version__ = "CS224u, Stanford, Spring 2019"
Example = namedtuple('Example',
'entity_1, entity_2, left, mention_1, middle, mention_2, right, '
)
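# Corpus: holds Example mention tuples and indexes them by (entity_1, entity_2) so featurizers can
# look up every mention of an entity pair.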
class Corpus(object):
def __init__(self, src_filename_or_examples):
if isinstance(src_filename_or_examples, str):
self.examples = self.read_examples(src_filename_or_examples)
else:
self.examples = src_filename_or_examples
self.examples_by_entities = {}
self._index_examples_by_entities()
@staticmethod
    # read the corpus examples
# def read_examples(src_filename):
# examples = []
# with gzip.open(src_filename, mode='rt', encoding='utf8') as f:
# for line in f:
# fields = line[:-1].split('\t')
# examples.append(Example(*fields))
# return examples
def read_examples(src_filename):
examples = []
if '.gz' in src_filename:
with gzip.open(src_filename, mode='rt', encoding='utf8') as f:
for line in f:
fields = line[:-1].split('\t')
examples.append(Example(*fields))
else:
if '.xls' in src_filename:
data1 = pd.read_excel(src_filename)
fields = []
with open('../data/kb.tsv','w', encoding='UTF-8') as f:
for indexs in data1.index:
if len(data1.loc[indexs].values[3]) < 30 :
continue
line_ = list(data1.loc[indexs].values[:])
fields.append(line_[0])
fields.append(line_[2])
#fields.append(paragraph_ sectioning(line_[3]))
#examples.append(Example(*fields))
f.writelines(str(line_[1]) + '\t' + str(line_[0]) + '\t' + str(line_[2]) + '\n')
else:
with open(src_filename,'r', encoding='UTF-8') as f:
data = f.readlines()
for line in data:
#print(type(line))
fields = line[:-1].split('\t')
#print(type(fields))
fields = fields[:7] #202005 add
examples.append(Example(*fields))
return examples
def input_examples(self,data):
examples = []
for line in data:
fields = line[:-1].split('\t')
examples.append(Example(*fields))
print(Example(*fields))
self.examples = examples
return examples
def _index_examples_by_entities(self):
for ex in self.examples:
if ex.entity_1 not in self.examples_by_entities:
self.examples_by_entities[ex.entity_1] = {}
if ex.entity_2 not in self.examples_by_entities[ex.entity_1]:
self.examples_by_entities[ex.entity_1][ex.entity_2] = []
self.examples_by_entities[ex.entity_1][ex.entity_2].append(ex)
def get_examples_for_entities(self, e1, e2):
try:
return self.examples_by_entities[e1][e2]
except KeyError:
return []
    # show the first example for an entity pair
def show_examples_for_pair(self, e1, e2):
exs = self.get_examples_for_entities(e1, e2)
if exs:
print('The first of {0:,} examples for {1:} and {2:} is:'.format(
len(exs), e1, e2))
print(exs[0])
else:
print('No examples for {0:} and {1:}'.format(e1, e2))
def __str__(self):
return 'Corpus with {0:,} examples'.format(len(self.examples))
def __repr__(self):
return str(self)
def __len__(self):
return len(self.examples)
KBTriple = namedtuple('KBTriple', 'rel, sbj, obj')
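# KB: holds KBTriple(rel, sbj, obj) triples, indexed both by relation and by (sbj, obj) entity pair.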
class KB(object):
def __init__(self, src_filename_or_triples):
if isinstance(src_filename_or_triples, str):
self.kb_triples = self.read_kb_triples(src_filename_or_triples)
else:
self.kb_triples = src_filename_or_triples
self.all_relations = []
self.all_entity_pairs = []
self.kb_triples_by_relation = {}
self.kb_triples_by_entities = {}
self._collect_all_entity_pairs()
self._index_kb_triples_by_relation()
self._index_kb_triples_by_entities()
@staticmethod
    # read the KB file and collect all kb_triples
def read_kb_triples(src_filename):
kb_triples = []
if '.gz' in src_filename:
with gzip.open(src_filename, mode='rt', encoding='utf8') as f:
for line in f:
rel, sbj, obj = line[:-1].split('\t')
kb_triples.append(KBTriple(rel, sbj, obj))
else:
with open(src_filename,'r', encoding='UTF-8') as f:
data = f.readlines()
for line in data:
rel, sbj, obj = line[:-1].split('\t')
kb_triples.append(KBTriple(rel, sbj, obj))
return kb_triples
    # collect all (sbj, obj) entity pairs in the KB
def _collect_all_entity_pairs(self):
pairs = set()
for kbt in self.kb_triples:
pairs.add((kbt.sbj, kbt.obj))
self.all_entity_pairs = sorted(list(pairs))
    # index KB triples by relation and collect all_relations
def _index_kb_triples_by_relation(self):
for kbt in self.kb_triples:
if kbt.rel not in self.kb_triples_by_relation:
self.kb_triples_by_relation[kbt.rel] = []
self.kb_triples_by_relation[kbt.rel].append(kbt)
self.all_relations = sorted(list(self.kb_triples_by_relation))
    # Index triples by their (sbj, obj) entity pair
def _index_kb_triples_by_entities(self):
for kbt in self.kb_triples:
if kbt.sbj not in self.kb_triples_by_entities:
self.kb_triples_by_entities[kbt.sbj] = {}
if kbt.obj not in self.kb_triples_by_entities[kbt.sbj]:
self.kb_triples_by_entities[kbt.sbj][kbt.obj] = []
self.kb_triples_by_entities[kbt.sbj][kbt.obj].append(kbt)
# print(self.kb_triples_by_entities[kbt.sbj][kbt.obj])
    # Get all triples for a given relation
def get_triples_for_relation(self, rel):
try:
return self.kb_triples_by_relation[rel]
except KeyError:
return []
def get_triples_for_entities(self, e1, e2):
try:
return self.kb_triples_by_entities[e1][e2]
except KeyError:
return []
def __str__(self):
return 'KB with {0:,} triples'.format(len(self.kb_triples))
def __repr__(self):
return str(self)
def __len__(self):
return len(self.kb_triples)
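# Hedged usage sketch for KB (illustrative only; the path is hypothetical):
#     kb = KB('../data/kb.tsv')
#     print(kb.all_relations)                          # sorted relation names
#     print(kb.get_triples_for_entities('实体1', '实体2'))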
class Dataset(object):
def __init__(self, corpus, kb):
self.corpus = corpus
self.kb = kb
    # Collect (entity_1, entity_2) pairs from the corpus that have no triple in the KB
def find_unrelated_pairs(self, to_tsv=None):
unrelated_pairs = set()
if to_tsv is None:
for ex in self.corpus.examples:
if self.kb.get_triples_for_entities(ex.entity_1, ex.entity_2):
continue
                #if self.kb.get_triples_for_entities(ex.entity_2, ex.entity_1): #20200527 omitted
#continue
unrelated_pairs.add((ex.entity_1, ex.entity_2))
print(unrelated_pairs)
                #unrelated_pairs.add((ex.entity_2, ex.entity_1))#20200527 omitted
return unrelated_pairs
with open('../data/corpus_unrelated.tsv','w',encoding='utf-8') as f:
for ex in self.corpus.examples:
if self.kb.get_triples_for_entities(ex.entity_1, ex.entity_2):
continue
                #if self.kb.get_triples_for_entities(ex.entity_2, ex.entity_1):#20200527 omitted
#continue
unrelated_pairs.add((ex.entity_1, ex.entity_2))
                #unrelated_pairs.add((ex.entity_2, ex.entity_1))#20200527 omitted
f.write(ex.entity_1 + '\t' + ex.entity_2)
f.write('\n')
#print(unrelated_pairs)
return unrelated_pairs
    # Featurization: turn candidate triples into sparse feature matrices
def featurize(self, kbts_by_rel, featurizers, vectorizer=None):
# Create feature counters for all instances (kbts).
feat_counters_by_rel = defaultdict(list)
for rel, kbts in kbts_by_rel.items():
for kbt in kbts:
#print(kbt)
feature_counter = Counter()
for featurizer in featurizers:
feature_counter = featurizer(kbt, self.corpus, feature_counter)
feat_counters_by_rel[rel].append(feature_counter)
feat_matrices_by_rel = defaultdict(list)
# If we haven't been given a Vectorizer, create one and fit
# it to all the feature counters.
if vectorizer is None:
vectorizer = DictVectorizer(sparse=True)
def traverse_dicts():
for dict_list in feat_counters_by_rel.values():
for d in dict_list:
yield d
vectorizer.fit(traverse_dicts())
# Now use the Vectorizer to transform feature dictionaries
# into feature matrices.
for rel, feat_counters in feat_counters_by_rel.items():
#print(feat_counters)
#print('\n\r')
feat_matrices_by_rel[rel] = vectorizer.transform(feat_counters)
#print('\n feat_matrices_by_rel[rel]...................',type(feat_matrices_by_rel[rel]))
return feat_matrices_by_rel, vectorizer
    # Build the input dataset: collect entity pairs that never appear in the KB and add them as negative samples (subsampled by sampling_rate); those negatives, like the triples of other relations, are labelled False
def build_dataset(self,
include_positive=True,
sampling_rate=1,
seed=1):
unrelated_pairs = self.find_unrelated_pairs()
random.seed(seed)
print('--len(unrelated_pairs)-----------------------------',len(unrelated_pairs))
unrelated_pairs = random.sample(
unrelated_pairs, int(sampling_rate * len(unrelated_pairs)))
kbts_by_rel = defaultdict(list)
labels_by_rel = defaultdict(list)
for index, rel in enumerate(self.kb.all_relations):
ii = 0
if include_positive:
for kbt in self.kb.get_triples_for_relation(rel):
kbts_by_rel[rel].append(kbt)
labels_by_rel[rel].append(True)
            for index2, rel2 in enumerate(self.kb.all_relations): # use triples of the other relation types as negative samples  # 20200531 add
if index2 == index :
continue
for kbt_ in self.kb.get_triples_for_relation(rel2):
kbts_by_rel[rel].append(kbt_)
labels_by_rel[rel].append(False)
ii = ii + 1
for sbj, obj in unrelated_pairs:
kbts_by_rel[rel].append(KBTriple(rel, sbj, obj))
#print(KBTriple(rel, sbj, obj))
labels_by_rel[rel].append(False)
ii = ii + 1
#print('--index, rel----total--unrelated--',index, rel,len(self.kb.get_triples_for_relation(rel) ),ii)
return kbts_by_rel, labels_by_rel
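    # Hedged note on the return value of build_dataset (illustrative):
    #   kbts_by_rel   : {relation: [KBTriple, ...]}  candidate triples per relation
    #   labels_by_rel : {relation: [bool, ...]}      parallel labels; the relation's own
    #                   triples are True, other relations' triples and the sampled
    #                   unrelated pairs are False.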
# ============================================================================================
def count_examples(self):
counter = Counter()
for rel in self.kb.all_relations:
for kbt in self.kb.get_triples_for_relation(rel):
# count examples in both forward and reverse directions
counter[rel] += len(self.corpus.get_examples_for_entities(kbt.sbj, kbt.obj))
counter[rel] += len(self.corpus.get_examples_for_entities(kbt.obj, kbt.sbj))
# report results
print('{:20s} {:>10s} {:>10s} {:>10s}'.format(
'', '', '', 'examples'))
print('{:20s} {:>10s} {:>10s} {:>10s}'.format(
'relation', 'examples', 'triples', '/triple'))
print('{:20s} {:>10s} {:>10s} {:>10s}'.format(
'--------', '--------', '-------', '-------'))
for rel in self.kb.all_relations:
nx = counter[rel]
nt = len(self.kb.get_triples_for_relation(rel))
print('{:20s} {:10d} {:10d} {:10.2f}'.format(
rel, nx, nt, 1.0 * nx / nt))
def count_relation_combinations(self):
counter = Counter()
for sbj, obj in self.kb.all_entity_pairs:
rels = tuple(sorted({kbt.rel for kbt in self.kb.get_triples_for_entities(sbj, obj)}))
if len(rels) > 1:
counter[rels] += 1
counts = sorted([(count, key) for key, count in counter.items()], reverse=True)
print('The most common relation combinations are:')
for count, key in counts:
print('{:10d} {}'.format(count, key))
def __str__(self):
return "{}; {}".format(self.corpus, self.kb)
def __repr__(self):
return str(self)
def print_statistics_header():
print('{:20s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
'relation', 'precision', 'recall', 'f-score', 'support', 'size'))
print('{:20s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
'-' * 18, '-' * 9, '-' * 9, '-' * 9, '-' * 9, '-' * 9))
def make_dirs(path):
dir_path = os.path.join(os.getcwd(),path)
    if not os.path.isdir(dir_path): # create the directory if it does not already exist
os.makedirs(dir_path)
# def print_statistics_row(rel, result):
# print('{:20s} {:10.3f} {:10.3f} {:10.3f} {:10d} {:10d}'.format(rel, *result))
def print_statistics_row(rel, result):
print('{:20s} {:10.3f} {:10.3f} {:10.3f} {:.0f} {:10d}'.format(rel, *result))
# def print_statistics_footer(avg_result):
# print('{:20s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
# '-' * 18, '-' * 9, '-' * 9, '-' * 9, '-' * 9, '-' * 9))
# print('{:20s} {:10.3f} {:10.3f} {:10.3f} {:10d} {:10d}'.format('macro-average', *avg_result))
def print_statistics_footer(avg_result):
print('{:20s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
'-' * 18, '-' * 9, '-' * 9, '-' * 9, '-' * 9, '-' * 9))
print('{:20s} {:10.3f} {:10.3f} {:10.3f} {:.0f} {:10d}'.format('macro-average', *avg_result))
def macro_average_results(results):
avg_result = [np.average([r[i] for r in results.values()]) for i in range(3)]
avg_result.append(np.sum([r[3] for r in results.values()]))
avg_result.append(np.sum([r[4] for r in results.values()]))
return avg_result
def evaluate(splits, classifier, test_split='dev', verbose=True):
test_kbts_by_rel, true_labels_by_rel = splits[test_split].build_dataset()
results = {}
if verbose:
print_statistics_header()
for rel in splits['all'].kb.all_relations:
pred_labels = classifier(test_kbts_by_rel[rel])
stats = precision_recall_fscore_support(true_labels_by_rel[rel], pred_labels, beta=0.5)
stats = [stat[1] for stat in stats] # stats[1] is the stat for label True
stats.append(len(pred_labels)) # number of examples
results[rel] = stats
if verbose:
print_statistics_row(rel, results[rel])
avg_result = macro_average_results(results)
if verbose:
print_statistics_footer(avg_result)
return avg_result[2] # return f_0.5 score as summary statistic
def evaluate_new(classifier, all_relations,data,verbose=True):
test_kbts_by_rel, true_labels_by_rel = data.build_dataset()
results = {}
if verbose:
print_statistics_header()
for rel in all_relations:
pred_labels = classifier(test_kbts_by_rel[rel])
stats = precision_recall_fscore_support(true_labels_by_rel[rel], pred_labels, beta=0.5)
stats = [stat[1] for stat in stats] # stats[1] is the stat for label True
stats.append(len(pred_labels)) # number of examples
results[rel] = stats
if verbose:
print_statistics_row(rel, results[rel])
avg_result = macro_average_results(results)
if verbose:
print_statistics_footer(avg_result)
return avg_result[2] # return f_0.5 score as summary statistic
def train_models(
# splits,
#all_relations,
featurizers,
data,
# split_name='train',
model_factory=lambda: LogisticRegression(fit_intercept=True, solver='liblinear'),
verbose=True):
train_dataset = data
# print(train_dataset)
train_o, train_y = train_dataset.build_dataset()
all_relations = set(train_o.keys())
# print(train_o,train_y)
train_X, vectorizer = train_dataset.featurize(train_o, featurizers)
models = {}
make_dirs('./data/saved_model')
with open('./data/saved_model/data.pkl', 'wb') as save1:
tuple_objects = (featurizers, vectorizer, all_relations)
pickle.dump(tuple_objects, save1)
for rel in all_relations:
models[rel] = model_factory()
models[rel].fit(train_X[rel], train_y[rel])
#print('\n models[rel].fit...................',rel,train_X[rel].shape[0])
joblib.dump( models[rel], './data/saved_model/' + rel + '_model.pkl')
return {
'featurizers': featurizers,
'vectorizer': vectorizer,
'models': models,
'all_relations': all_relations}
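# Hedged usage sketch for train_models (illustrative only; assumes `dataset` is a
# Dataset instance and `featurizers` a list of featurizer callables):
#     train_result = train_models(featurizers, dataset)
#     examine_model_weights(train_result, k=3)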
def predict(splits, train_result, split_name='dev'):
assess_dataset = splits[split_name]
assess_o, assess_y = assess_dataset.build_dataset()
test_X, _ = assess_dataset.featurize(
assess_o,
featurizers=train_result['featurizers'],
vectorizer=train_result['vectorizer'])
# print(test_X)
predictions = {}
for rel in train_result['all_relations']:
predictions[rel] = train_result['models'][rel].predict(test_X[rel])
return predictions, assess_y
# ==================================================================================================================
def predict_new(assess_dataset,featurizers):
# assess_dataset = splits[split_name]
assess_o, assess_y = assess_dataset.build_dataset(
include_positive=False,
sampling_rate=1)
# print(assess_o)
fp = open('../data/saved_model/data.pkl', 'rb') #202005 add
featurizer, vectorizer, all_relations = pickle.load(fp)
test_X, _ = assess_dataset.featurize(
assess_o,
featurizers=featurizers,
vectorizer=vectorizer)
predictions = {}
for rel in all_relations:
if test_X[rel].shape[0] < 1:
continue
model = joblib.load('../data/saved_model/' + rel + '_model.pkl')
predictions[rel] = model.predict(test_X[rel])
print(rel,predictions[rel])
fp.close()
return predictions,assess_o
def evaluate_predictions(predictions, test_y, verbose=True):
results = {} # one result row for each relation
if verbose:
print_statistics_header()
for rel, preds in predictions.items():
print()
stats = precision_recall_fscore_support(test_y[rel], preds, beta=0.5)
stats = [stat[1] for stat in stats] # stats[1] is the stat for label True
stats.append(len(test_y[rel]))
results[rel] = stats
if verbose:
print_statistics_row(rel, results[rel])
avg_result = macro_average_results(results)
if verbose:
print_statistics_footer(avg_result)
return avg_result[2] # return f_0.5 score as summary statistic
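# NOTE (hedged): `experiment` below still follows the original splits-based API
# (it passes `splits` positionally and a `split_name` keyword to train_models),
# which no longer matches the modified train_models(featurizers, data, ...)
# signature above; calling it as-is would raise a TypeError.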
def experiment(
splits,
featurizers,
train_split='train',
test_split='dev',
model_factory=lambda: LogisticRegression(fit_intercept=True, solver='liblinear'),
verbose=True):
train_result = train_models(
splits,
featurizers=featurizers,
split_name=train_split,
model_factory=model_factory,
verbose=verbose)
predictions, test_y = predict(
splits,
train_result,
split_name=test_split)
evaluate_predictions(
predictions,
test_y,
verbose)
return train_result
def examine_model_weights(train_result, k=3, verbose=True):
feature_names = train_result['vectorizer'].get_feature_names()
for rel, model in train_result['models'].items():
print('Highest and lowest feature weights for relation {}:\n'.format(rel))
try:
coefs = model.coef_.toarray()
except AttributeError:
coefs = model.coef_
sorted_weights = sorted([(wgt, idx) for idx, wgt in enumerate(coefs[0])], reverse=True)
for wgt, idx in sorted_weights[:k]:
print('{:10.3f} {}'.format(wgt, feature_names[idx]))
print('{:>10s} {}'.format('.....', '.....'))
for wgt, idx in sorted_weights[-k:]:
print('{:10.3f} {}'.format(wgt, feature_names[idx]))
print('\n')
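# NOTE (hedged): `find_new_relation_instances` below likewise relies on
# dataset.build_splits() and the splits-based train_models API, neither of which
# exists in this modified module; `find_new_relation_instances_new` further down
# is the variant wired to the models saved under ./data/saved_model/.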
def find_new_relation_instances(
dataset,
featurizers,
train_split='train',
test_split='dev',
model_factory=lambda: LogisticRegression(fit_intercept=True, solver='liblinear'),
k=10,
verbose=True):
splits = dataset.build_splits()
# train models
train_result = train_models(
splits,
split_name=train_split,
featurizers=featurizers,
model_factory=model_factory,
verbose=True)
test_split = splits[test_split]
neg_o, neg_y = test_split.build_dataset(
include_positive=False,
sampling_rate=1.0)
neg_X, _ = test_split.featurize(
neg_o,
featurizers=featurizers,
vectorizer=train_result['vectorizer'])
# Report highest confidence predictions:
for rel, model in train_result['models'].items():
print(train_result['models'].items())
print('Highest probability examples for relation {}:\n'.format(rel))
probs = model.predict_proba(neg_X[rel])
probs = [prob[1] for prob in probs] # probability for class True
sorted_probs = sorted([(p, idx) for idx, p in enumerate(probs)], reverse=True)
for p, idx in sorted_probs[:k]:
print('{:10.3f} {}'.format(p, neg_o[rel][idx]))
print()
def find_new_relation_instances_new(
# dataset,
featurizers,
# train_split='train',
# test_split='dev',
# file,
test_split,
# model_factory=lambda: LogisticRegression(fit_intercept=True, solver='liblinear'),
k=10,
# verbose=True
):
# train models
# train_result = train_models(
# splits,
# split_name=train_split,
# featurizers=featurizers,
# model_factory=model_factory,
# verbose=True)
# test_split = splits[test_split]
fp = open('./data/saved_model/data.pkl', 'rb') #202005 add
featurizers1, vectorizer, all_relations = pickle.load(fp)
neg_o, neg_y = test_split.build_dataset(
include_positive=False,
sampling_rate=1.0)
# print(len(neg_y))
neg_X, _ = test_split.featurize(
neg_o,
featurizers=featurizers,
vectorizer=vectorizer)
# Report highest confidence predictions:
fp.close()
import collections
    # Result shape: defaultdict(dict) mapping ('entity1', 'entity2') -> {'relation1': 0.625, 'relation2': 0.0, ...}
rel_prob_dict = collections.defaultdict(dict)
if len(neg_X) < 1 :
return rel_prob_dict
for rel in all_relations:
if neg_X[rel].shape[0] < 1: #202004 add
continue
model = joblib.load('./data/saved_model/' + rel + '_model.pkl')
        #print('\n Highest probability examples for relation {}:'.format(rel)) #omitted 20200527
#print(neg_X[rel])
probs = model.predict_proba(neg_X[rel])
probs = [prob[1] for prob in probs] # probability for class True
sorted_probs = sorted([(p, idx) for idx, p in enumerate(probs)], reverse=True)
for p, idx in sorted_probs:
if p >0.01:
rel_prob_dict[(neg_o[rel][idx].sbj,neg_o[rel][idx].obj)][rel] = round(p,3) #add at 2020
                #print ('{:10.3f} {}'.format(p, neg_o[rel][idx]))#omitted 20200527
return rel_prob_dict
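# Hedged note: callers can flatten the returned rel_prob_dict like this (illustrative):
#     for (sbj, obj), rel_probs in rel_prob_dict.items():
#         best_rel = max(rel_probs, key=rel_probs.get)
#         print(sbj, obj, best_rel, rel_probs[best_rel])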
def bake_off_experiment(train_result, rel_ext_data_home, verbose=True):
test_corpus_filename = os.path.join(rel_ext_data_home, "corpus-test.tsv.gz")
test_kb_filename = os.path.join(rel_ext_data_home, "kb-test.tsv.gz")
corpus = Corpus(test_corpus_filename)
kb = KB(test_kb_filename)
test_dataset = Dataset(corpus, kb)
test_o, test_y = test_dataset.build_dataset()
test_X, _ = test_dataset.featurize(
test_o,
featurizers=train_result['featurizers'],
vectorizer=train_result['vectorizer'])
predictions = {}
for rel in train_result['all_relations']:
predictions[rel] = train_result['models'][rel].predict(test_X[rel])
evaluate_predictions(
predictions,
test_y,
verbose=verbose)
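# ---------------------------------------------------------------------------
# Hedged end-to-end sketch (not part of the original module). The file paths and
# the toy featurizer are assumptions for illustration; running it writes models
# under ./data/saved_model/ exactly as train_models always does.
if __name__ == '__main__':
    def _toy_featurizer(kbt, corpus, feature_counter):
        # Deliberately naive featurizer: a bias feature plus a count of how many
        # corpus examples mention the (sbj, obj) pair. Real featurizers would
        # inspect the textual context stored on each Example.
        feature_counter['bias'] += 1
        feature_counter['n_examples'] += len(
            corpus.get_examples_for_entities(kbt.sbj, kbt.obj))
        return feature_counter
    _corpus = Corpus('../data/corpus.tsv')   # hypothetical path
    _kb = KB('../data/kb.tsv')               # hypothetical path
    _dataset = Dataset(_corpus, _kb)
    _train_result = train_models([_toy_featurizer], _dataset)
    print('Trained relations:', sorted(_train_result['all_relations']))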
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/rel_ext.py
|
rel_ext.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/24 19:09
# @Author : 程婷婷
# @FileName: __init__.py
# @Software: PyCharm
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/__init__.py
|
__init__.py
|
with open('../data/notCoutent.txt','r',encoding='utf-8') as f:
data = f.readlines()
df = []
for i in data:
df.append(i)
df = set(df)
with open('../data/notCoutent_chong.txt','w',encoding='utf-8') as f:
for i in df:
f.write(i)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/relation_extraction/repeat.py
|
repeat.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 2018
@author: WuDaqing
"""
import os
import pickle
from catl.utilities import preprocess_train
from catl.model import ensemble
from openpyxl import Workbook
name = input('Please input the name of company: ')
current_path = os.getcwd()
if os.path.isdir('data/'+name+'/preprocess') == False:
os.makedirs(r'data/'+name+'/preprocess')
if os.path.isdir('results/'+name+'/train/model/') == False:
os.makedirs(r'results/'+name+'/train/model/')
if os.path.isdir('results/'+name+'/train/results/') == False:
os.makedirs(r'results/'+name+'/train/results/')
# print(os.getcwd())
preprocess = preprocess_train(name=name,path=r'data/'+name+'/'+name)
preprocess.read_excel()
Original_Data,Original_Data_Useless,Labels = preprocess.excel2sentences()
Vocabulary_Title = preprocess.get_vocabulary_title(title_weight=5,feature_ratio=0.1) # feature_ratio is tunable; it caps the vocabulary length so the run does not get too slow or exhaust memory.
TFIDF_Title,IDF_Title = preprocess.get_tfidf_title(title_weight=5) # title_weight is tunable; the title is repeated this many times to boost its influence.
with open('data/'+name+'/preprocess/'+name+'_vocabulary_title.pkl','wb') as save1:
pickle.dump(Vocabulary_Title,save1)
with open('data/'+name+'/preprocess/'+name+'_idf_title.pkl','wb') as save2:
pickle.dump(IDF_Title,save2)
Model = ensemble(name=name,r=0.95,data=TFIDF_Title,labels=Labels,model_save_path='results/'+name+'/train/model/',results_save_path='results/'+name+'/train/results/') # r is tunable; title-stage filtering stops and hands over to the next stage once recall drops below r.
Threshold,Index_Retain_Predict_Title,Index_Delete_Title = Model.train_title()
Vocabulary_Content = preprocess.get_vocabulary_content(feature_ratio=0.2,index=Index_Retain_Predict_Title) # feature_ratio is tunable; it caps the vocabulary length so the run does not get too slow or exhaust memory.
TFIDF_Content,IDF_Content = preprocess.get_tfidf_content(index=Index_Retain_Predict_Title)
with open('data/'+name+'/preprocess/'+name+'_vocabulary_content.pkl','wb') as save3:
pickle.dump(Vocabulary_Content,save3)
with open('data/'+name+'/preprocess/'+name+'_idf_content.pkl','wb') as save4:
pickle.dump(IDF_Content,save4)
threshold,Index_Retain_Predict_Content,Index_Delete_Content = Model.train_content(data=TFIDF_Content,r=0.9) # r is tunable; training finally terminates once recall drops below r.
with open('results/'+name+'/train/model/'+'title_threshold.pkl','wb') as save5:
pickle.dump(Threshold,save5)
with open('results/'+name+'/train/model/'+'content_threshold.pkl','wb') as save6:
pickle.dump(threshold,save6)
workbook = Workbook()
worksheet1 = workbook.active
worksheet1.title = 'finally'
worksheet1.cell(row=1,column=1).value = 'title'
worksheet1.cell(row=1,column=2).value = 'content'
worksheet1.cell(row=1,column=3).value = 'label'
for i in range(len(Index_Retain_Predict_Content)):
worksheet1.cell(row=i+2,column=1).value = Original_Data[Index_Retain_Predict_Content[i]][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=i+2,column=2).value = Original_Data[Index_Retain_Predict_Content[i]][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=i+2,column=3).value = Original_Data[Index_Retain_Predict_Content[i]][2].encode('gbk','ignore').decode('gbk','ignore')
worksheet2 = workbook.create_sheet('delete through key words')
worksheet2.cell(row=1,column=1).value = 'title'
worksheet2.cell(row=1,column=2).value = 'content'
worksheet2.cell(row=1,column=3).value = 'label'
for i in range(len(Original_Data_Useless)):
worksheet2.cell(row=i+2,column=1).value = Original_Data_Useless[i][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=i+2,column=2).value = Original_Data_Useless[i][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=i+2,column=3).value = Original_Data_Useless[i][2].encode('gbk','ignore').decode('gbk','ignore')
worksheet3 = workbook.create_sheet('delete through content')
worksheet3.cell(row=1,column=1).value = 'title'
worksheet3.cell(row=1,column=2).value = 'content'
worksheet3.cell(row=1,column=3).value = 'label'
for i in range(len(Index_Delete_Content)):
worksheet3.cell(row=i+2,column=1).value = Original_Data[Index_Delete_Content[i]][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet3.cell(row=i+2,column=2).value = Original_Data[Index_Delete_Content[i]][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet3.cell(row=i+2,column=3).value = Original_Data[Index_Delete_Content[i]][2].encode('gbk','ignore').decode('gbk','ignore')
for ite in range(len(Index_Delete_Title)):
worksheet = workbook.create_sheet('delete through title '+str(ite+1))
worksheet.cell(row=1,column=1).value = 'title'
worksheet.cell(row=1,column=2).value = 'content'
worksheet.cell(row=1,column=3).value = 'label'
for i in range(len(Index_Delete_Title[ite+1])):
worksheet.cell(row=i+2,column=1).value = Original_Data[Index_Delete_Title[ite+1][i]][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet.cell(row=i+2,column=2).value = Original_Data[Index_Delete_Title[ite+1][i]][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet.cell(row=i+2,column=3).value = Original_Data[Index_Delete_Title[ite+1][i]][2].encode('gbk','ignore').decode('gbk','ignore')
workbook.save('results/'+name+'/train/results/train_results.xlsx')
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/catl/train.py
|
train.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 2018
@author: WuDaqing
"""
from catl.utilities import single_predict
name = input('Please input the name of company: ')
title = input('Please input the title: ')
content = input('Please input the content: ')
Predict = single_predict(name=name,title=title,content=content)
prediction = Predict.predict() # prediction is either '保留' (retain) or '删除' (delete)
print('Text is '+prediction)
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/catl/predict.py
|
predict.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 2018
@author: WuDaqing
"""
import os
import pickle
import xlrd
import re
import jieba
from openpyxl import Workbook
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import normalize
from sklearn import metrics
from sklearn.externals import joblib
def document2sentences(document,key_words):
symbols = frozenset(u",。!?\n:;“”|)\u3000")
sentences= []
tmp = []
for character in document:
        if character not in symbols:
tmp.append(character)
elif character in ",。!?\n:;“”|)":
tmp.append("。")
for i in range(len(key_words)):
if key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
tmp = []
elif character == "\u3000":
continue
for i in range(len(key_words)):
if key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
return ''.join(sentences)
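# Hedged example of what document2sentences does (illustrative values only):
#     document2sentences('公司发布公告。今天天气不错。', ['公告'])  ->  '公司发布公告。'
# i.e. the text is split on Chinese punctuation and only sentences containing at
# least one keyword are kept and re-joined.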
def filtrate_words(words,chinese_stopwords):
find_chinese = re.compile(u"[\u4e00-\u9fa5]+")
symbols = "[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\!\@\#\\\&\*\%]"
filtrated_words = []
for j in range(len(words)):
if re.findall(find_chinese,words[j]) == []:
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) == '':
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) in chinese_stopwords:
continue
else:
filtrated_words.append(re.sub(symbols, "",re.findall(find_chinese,words[j])[0]))
return ' '.join(filtrated_words)
name = input('Please input the name of company: ')
current_path = os.getcwd()
if os.path.isdir('results/'+name+'/predict/results/') == False:
os.makedirs(r'results/'+name+'/predict/results/')
path = 'data/'+name+'/'+name
model_load_path = 'results/'+name+'/train/model/'
chinese_stopwords = []
for line in open('data/stopwords.txt','rb'):
chinese_stopwords.append(line.decode('utf-8-sig').split()[0])
key_words = []
for line in open(path+'_original_key_words.txt','rb'):
key_words.append(line.decode('utf-8-sig').split()[0])
jieba.load_userdict(path+'_original_key_words.txt')
with open('data/'+name+'/preprocess/'+name+'_vocabulary_title.pkl','rb') as load1:
vocabulary_title = pickle.load(load1)
with open('data/'+name+'/preprocess/'+name+'_idf_title.pkl','rb') as load2:
idf_title = pickle.load(load2)
with open('results/'+name+'/train/model/'+'title_threshold.pkl','rb') as load3:
Threshold = pickle.load(load3)
with open('results/'+name+'/train/model/'+'content_threshold.pkl','rb') as load4:
threshold = pickle.load(load4)
with open('data/'+name+'/preprocess/'+name+'_vocabulary_content.pkl','rb') as load5:
vocabulary_content = pickle.load(load5)
with open('data/'+name+'/preprocess/'+name+'_idf_content.pkl','rb') as load6:
idf_content = pickle.load(load6)
workbook = Workbook()
worksheet1 = workbook.active
worksheet1.title = 'retain'
worksheet1.cell(row=1,column=1).value = 'title'
worksheet1.cell(row=1,column=2).value = 'content'
worksheet1.cell(row=1,column=3).value = 'label'
worksheet2 = workbook.create_sheet('delete')
worksheet2.cell(row=1,column=1).value = 'title'
worksheet2.cell(row=1,column=2).value = 'content'
worksheet2.cell(row=1,column=3).value = 'label'
count_retain = 2
count_delete = 2
excel = xlrd.open_workbook(path+'_test.xls')
table = excel.sheet_by_index(0)
num_rows = table.nrows-1
Labels = []
Predictions = []
for idx in range(1,num_rows): # note: num_rows = table.nrows - 1, so the last data row is skipped; use table.nrows if that row should be included
original_data = table.row_values(idx)
label = int(original_data[2]=='保留')
Labels.append(label)
content = original_data[1]
content_sentences = document2sentences(content,key_words)
if content_sentences == '':
prediction = 0
Predictions.append(prediction)
worksheet2.cell(row=count_delete,column=1).value = original_data[0].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=2).value = original_data[1].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=3).value = original_data[2].encode('gbk','ignore').decode('gbk','ignore')
count_delete += 1
print(name+' | Predict | Index | '+str(idx)+' | Delete')
else:
title = original_data[0]
title_tokenized = jieba.lcut(title)
content_sentences_tokenized = jieba.lcut(content_sentences)
title_tokenized_filtered = filtrate_words(title_tokenized,chinese_stopwords)
content_sentences_tokenized_filtered = filtrate_words(content_sentences_tokenized,chinese_stopwords)
data_title = [5*(title_tokenized_filtered+' ')+content_sentences_tokenized_filtered]
tf_transformer_title = CountVectorizer(ngram_range=(1,3),vocabulary=vocabulary_title)
tf_title = tf_transformer_title.fit_transform(data_title)
tf_weight_title = tf_title.toarray().tolist()
tfidf_weight_title = normalize([[x*y for x,y in zip(tf_weight_title[0],idf_title)]], norm='l2').tolist()
for ite in range(1,len(Threshold)+1):
clf_title = joblib.load(model_load_path+name+'_iteration_'+str(ite)+'_train_title_classifier.m')
tmp = clf_title.predict_proba(tfidf_weight_title).tolist()
if tmp[0][1] < Threshold[ite]:
prediction = 0
Predictions.append(prediction)
worksheet2.cell(row=count_delete,column=1).value = original_data[0].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=2).value = original_data[1].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=3).value = original_data[2].encode('gbk','ignore').decode('gbk','ignore')
count_delete += 1
print(name+' | Predict | Index | '+str(idx)+' | Delete')
ite -= 1
break
else:
continue
if ite == len(Threshold):
data_content = [content_sentences_tokenized_filtered]
tf_transformer_content = CountVectorizer(ngram_range=(1,3),vocabulary=vocabulary_content)
tf_content = tf_transformer_content.fit_transform(data_content)
tf_weight_content = tf_content.toarray().tolist()
tfidf_weight_content = normalize([[x*y for x,y in zip(tf_weight_content[0],idf_content)]], norm='l2').tolist()
clf_content = joblib.load(model_load_path+name+'_train_content_classifier.m')
tmp = clf_content.predict_proba(tfidf_weight_content).tolist()
if tmp[0][1] < threshold:
prediction = 0
Predictions.append(prediction)
worksheet2.cell(row=count_delete,column=1).value = original_data[0].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=2).value = original_data[1].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=3).value = original_data[2].encode('gbk','ignore').decode('gbk','ignore')
count_delete += 1
print(name+' | Predict | Index | '+str(idx)+' | Delete')
else:
prediction = 1
Predictions.append(prediction)
worksheet1.cell(row=count_retain,column=1).value = original_data[0].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=count_retain,column=2).value = original_data[1].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=count_retain,column=3).value = original_data[2].encode('gbk','ignore').decode('gbk','ignore')
count_retain += 1
print(name+' | Predict | Index | '+str(idx)+' | Retain')
workbook.save('results/'+name+'/predict/results/'+name+'_predict_results.xlsx')
print(name+' | Predict | Number of Data | '+str(len(Labels)))
num_positive = Labels.count(1)
num_negative = Labels.count(0)
print(name+' | Predict | Number of Positive | '+str(num_positive))
print(name+' | Predict | Number of Negative | '+str(num_negative)+'\n')
recall = metrics.recall_score(Labels,Predictions,pos_label=1)
precision = metrics.precision_score(Labels,Predictions,pos_label=1)
f1 = metrics.f1_score(Labels,Predictions,pos_label=1)
print(name+' | Predict | Positive Recall | ' + '%.4f'%recall)
print(name+' | Predict | Positive Precision | ' + '%.4f'%precision)
print(name+' | Predict | Positive F1 | ' + '%.4f'%f1+'\n')
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/catl/draft.py
|
draft.py
|
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
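# Hedged usage sketch for HashingVectorizer (illustrative only):
#     hv = HashingVectorizer(n_features=2 ** 10)
#     X = hv.transform(['the quick brown fox', 'jumped over the lazy dog'])
#     X.shape   # -> (2, 1024), a sparse matrix of hashed (and l2-normalized) token counts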
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp select tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
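# Illustrative sketch (comments only, not part of the original module): a worked
# example of the smoothed idf computed in TfidfTransformer.fit above, assuming a
# toy corpus of n_samples = 4 documents in which a term appears in df = 2 of them.
# With smooth_idf=True both counts are incremented by 1:
#     idf = ln((4 + 1) / (2 + 1)) + 1 = ln(5 / 3) + 1 ≈ 1.511
# A term that appears in every document gets idf = ln(5 / 5) + 1 = 1, so it is
# down-weighted but not zeroed out, which is the "tf * (idf + 1)" behaviour
# described in the class docstring.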
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents, count the occurrences of each token and
        return them as a sparse matrix.
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
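# Illustrative usage sketch (comments only, not part of the original module); the
# corpus below is hypothetical and only shows how the classes above fit together.
#
#     >>> corpus = ['the cat sat', 'the dog sat', 'the dog barked']
#     >>> vectorizer = TfidfVectorizer(ngram_range=(1, 1), norm='l2', use_idf=True)
#     >>> X = vectorizer.fit_transform(corpus)              # sparse [3 x n_features] tf-idf matrix
#     >>> vectorizer.get_feature_names()
#     ['barked', 'cat', 'dog', 'sat', 'the']
#     >>> X_new = vectorizer.transform(['the cat barked'])  # reuses the fitted vocabulary and idf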
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/catl/text.py
|
text.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 2018
@author: WuDaqing
"""
import os
import pickle
import xlrd
import re
import jieba
from openpyxl import Workbook
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import normalize
from sklearn import metrics
from openpyxl import Workbook
from sklearn.externals import joblib
class preprocess_train(object):
def __init__(self,name,path):
self.name = name
self.path = path
self.chinese_stopwords = []
home_path = os.path.dirname(os.path.realpath(__file__))
stopwords_path = os.path.join(home_path,'data/stopwords.txt')
for line in open(stopwords_path,'rb'):
self.chinese_stopwords.append(line.decode('utf-8-sig').split()[0])
self.key_words = []
for line in open(self.path+'_original_key_words.txt','rb'):
self.key_words.append(line.decode('utf-8-sig').split()[0])
jieba.load_userdict(self.path+'_original_key_words.txt')
def read_excel(self):
excel = xlrd.open_workbook(self.path+'_train.xls')
table = excel.sheet_by_index(0)
num_rows = table.nrows-1
self.original_data = []
for idx in range(1,num_rows+1):
row = table.row_values(idx)
self.original_data.append(row)
self.data = list(map(list,zip(*self.original_data)))
self.labels = [int(self.data[2][i]=='保留') for i in range(num_rows)]
def document2sentences(self,document):
symbols = frozenset(u",。!?\n:;“”|)\u3000")
sentences= []
tmp = []
for character in document:
if not symbols.__contains__(character):
tmp.append(character)
elif character in ",。!?\n:;“”|)":
tmp.append("。")
for i in range(len(self.key_words)):
if self.key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
tmp = []
elif character == "\u3000":
continue
for i in range(len(self.key_words)):
if self.key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
return ''.join(sentences)
def filtrate_words(self,words):
find_chinese = re.compile(u"[\u4e00-\u9fa5]+")
symbols = "[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\!\@\#\\\&\*\%]"
filtrated_words = []
for j in range(len(words)):
if re.findall(find_chinese,words[j]) == []:
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) == '':
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) in self.chinese_stopwords:
continue
else:
filtrated_words.append(re.sub(symbols, "",re.findall(find_chinese,words[j])[0]))
return ' '.join(filtrated_words)
def excel2sentences(self):
contents = self.data[1]
print(self.name+' | Train | Content | Document 2 Sentences ......')
contents_sentences = [self.document2sentences(document) for document in contents]
original_data_useless = [self.original_data[i] for i in range(len(contents_sentences)) if contents_sentences[i] == '']
self.original_data = [self.original_data[i] for i in range(len(contents_sentences)) if contents_sentences[i] != '']
self.labels = [self.labels[i] for i in range(len(contents_sentences)) if contents_sentences[i] != '']
titles = [self.data[0][i] for i in range(len(contents_sentences)) if contents_sentences[i] != '']
contents_sentences = [contents_sentences[i] for i in range(len(contents_sentences)) if contents_sentences[i] != '']
workbook = Workbook()
worksheet1 = workbook.active
worksheet1.title = 'use'
worksheet1.cell(row=1,column=1).value = 'title'
worksheet1.cell(row=1,column=2).value = 'content'
worksheet1.cell(row=1,column=3).value = 'label'
for i in range(len(self.original_data)):
print(i)
worksheet1.cell(row=i+2,column=1).value = self.original_data[i][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=i+2,column=2).value = contents_sentences[i].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=i+2,column=3).value = self.original_data[i][2].encode('gbk','ignore').decode('gbk','ignore')
worksheet2 = workbook.create_sheet('useless')
worksheet2.cell(row=1,column=1).value = 'title'
worksheet2.cell(row=1,column=2).value = 'content'
worksheet2.cell(row=1,column=3).value = 'label'
for i in range(len(original_data_useless)):
worksheet2.cell(row=i+2,column=1).value = original_data_useless[i][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=i+2,column=2).value = original_data_useless[i][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=i+2,column=3).value = original_data_useless[i][2].encode('gbk','ignore').decode('gbk','ignore')
workbook.save(self.path+'_train_sentences.xlsx')
print(self.name+' | Train | Title | Tokenized ......')
titles_tokenized = [jieba.lcut(sentences) for sentences in titles]
print(self.name+' | Train | Content | Tokenized ......')
contents_sentences_tokenized = [jieba.lcut(sentences) for sentences in contents_sentences]
print(self.name+' | Train | Title | Filtered ......')
self.titles_tokenized_filtered = [self.filtrate_words(words) for words in titles_tokenized]
print(self.name+' | Train | Content | Filtered ......')
self.contents_sentences_tokenized_filtered = [self.filtrate_words(words) for words in contents_sentences_tokenized]
return self.original_data,original_data_useless,self.labels
def get_chi(self,data,labels):
num = len(data)
length = len(data[0])
data_p = [data[i] for i in range(num) if labels[i]==1]
data_n = [data[i] for i in range(num) if labels[i]==0]
num_p = len(data_p)
num_n = len(data_n)
data_p_t = list(map(list,zip(*data_p)))
data_n_t = list(map(list,zip(*data_n)))
chi_square = []
for i in range(length):
b = data_p_t[i].count(0)
d = data_n_t[i].count(0)
a = num_p-b
c = num_n-d
if num_p*num_n*(a+c)*(b+d) == 0:
chi_square.append(0)
else:
chi_square.append((num*pow(a*d-b*c,2))/(num_p*num_n*(a+c)*(b+d)))
return chi_square
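    # Worked example (comments only) of the statistic computed by get_chi above,
    # using hypothetical counts: with num_p = a + b positive and num_n = c + d
    # negative documents, where a/c count the documents that contain the term and
    # b/d the documents that do not, the method evaluates
    #     chi2 = num * (a*d - b*c)**2 / (num_p * num_n * (a+c) * (b+d))
    # For example, num_p = num_n = 50, a = 30, b = 20, c = 10, d = 40 gives
    #     chi2 = 100 * (30*40 - 20*10)**2 / (50 * 50 * 40 * 60) ≈ 16.67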
def get_vocabulary_title(self,title_weight,feature_ratio):
data = [title_weight*(self.titles_tokenized_filtered[i]+' ')+self.contents_sentences_tokenized_filtered[i] for i in range(len(self.labels))]
labels = self.labels
tf_transformer = CountVectorizer(ngram_range=(1,3))
tf = tf_transformer.fit_transform(data)
vocabulary_list = tf_transformer.get_feature_names()
print(self.name+' | Train | Title | Vocabulary | Original Length | ' + str(len(vocabulary_list)))
num_key_words = int(len(vocabulary_list)*feature_ratio)
print(self.name+' | Train | Title | Vocabulary | Length | ' + str(num_key_words))
tf_weights = tf.toarray().tolist()
chi_square = self.get_chi(tf_weights,labels)
print(self.name+' | Train | Title | Vocabulary | Complete by CHI ......')
original_vocabulary_chi_square = [(vocabulary_list[i],chi_square[i]) for i in range(len(vocabulary_list))]
sorted_original_vocabulary_chi_square = sorted(original_vocabulary_chi_square,key=lambda x:x[1],reverse=True)
vocabulary_list = [sorted_original_vocabulary_chi_square[i][0] for i in range(num_key_words)]
self.vocabulary_title = {}
k = 0
for word in vocabulary_list:
self.vocabulary_title[word] = k
k += 1
return self.vocabulary_title
def get_tfidf_title(self,title_weight):
data = [title_weight*(self.titles_tokenized_filtered[i]+' ')+self.contents_sentences_tokenized_filtered[i] for i in range(len(self.labels))]
tf_transformer = CountVectorizer(ngram_range=(1,3),vocabulary=self.vocabulary_title)
train_tf = tf_transformer.fit_transform(data)
print(self.name+' | Train | Title | TF | Completed ......')
tfidf_transformer = TfidfTransformer(norm='l2',use_idf=True,smooth_idf=True)
train_tfidf = tfidf_transformer.fit_transform(train_tf)
train_tfidf_weights = train_tfidf.toarray().tolist()
print(self.name+' | Train | Title | TFIDF | Completed ......')
idf = tfidf_transformer.idf_.tolist()
return train_tfidf_weights,idf
def get_vocabulary_content(self,feature_ratio,index):
data = [self.contents_sentences_tokenized_filtered[idx] for idx in index]
labels = [self.labels[idx] for idx in index]
tf_transformer = CountVectorizer(ngram_range=(1,3))
tf = tf_transformer.fit_transform(data)
vocabulary_list = tf_transformer.get_feature_names()
print(self.name+' | Train | Content | Vocabulary | Original Length | ' + str(len(vocabulary_list)))
num_key_words = int(len(vocabulary_list)*feature_ratio)
print(self.name+' | Train | Content | Vocabulary | Length | ' + str(num_key_words))
tf_weights = tf.toarray().tolist()
chi_square = self.get_chi(tf_weights,labels)
print(self.name+' | Train | Content | Vocabulary | Complete by CHI ......')
original_vocabulary_chi_square = [(vocabulary_list[i],chi_square[i]) for i in range(len(vocabulary_list))]
sorted_original_vocabulary_chi_square = sorted(original_vocabulary_chi_square,key=lambda x:x[1],reverse=True)
vocabulary_list = [sorted_original_vocabulary_chi_square[i][0] for i in range(num_key_words)]
self.vocabulary_content = {}
k = 0
for word in vocabulary_list:
self.vocabulary_content[word] = k
k += 1
return self.vocabulary_content
def get_tfidf_content(self,index):
data = [self.contents_sentences_tokenized_filtered[idx] for idx in index]
tf_transformer = CountVectorizer(ngram_range=(1,3),vocabulary=self.vocabulary_content)
train_tf = tf_transformer.fit_transform(data)
print(self.name+' | Train | Content | TF | Completed ......')
tfidf_transformer = TfidfTransformer(norm='l2',use_idf=True,smooth_idf=True)
train_tfidf = tfidf_transformer.fit_transform(train_tf)
train_tfidf_weights = train_tfidf.toarray().tolist()
print(self.name+' | Train | Content | TFIDF | Completed ......')
idf = tfidf_transformer.idf_.tolist()
return train_tfidf_weights,idf
class single_predict(object):
def __init__(self,name,title,content):
self.name = name
self.title = title
self.content = content
current_path = os.getcwd()
        if not os.path.isdir('results/'+self.name+'/predict/results/'):
os.makedirs(r'results/'+self.name+'/predict/results/')
self.path = 'data/'+self.name+'/'+self.name
self.model_load_path = 'results/'+self.name+'/train/model/'
self.chinese_stopwords = []
file_path = os.path.dirname(os.path.realpath(__file__))
for line in open(os.path.join(file_path, 'data/stopwords.txt'),'rb'):
self.chinese_stopwords.append(line.decode('utf-8-sig').split()[0])
self.key_words = []
for line in open(self.path+'_original_key_words.txt','rb'):
self.key_words.append(line.decode('utf-8-sig').split()[0])
jieba.load_userdict(self.path+'_original_key_words.txt')
with open('data/'+self.name+'/preprocess/'+self.name+'_vocabulary_title.pkl','rb') as load1:
self.vocabulary_title = pickle.load(load1)
with open('data/'+self.name+'/preprocess/'+self.name+'_idf_title.pkl','rb') as load2:
self.idf_title = pickle.load(load2)
with open('results/'+self.name+'/train/model/'+'title_threshold.pkl','rb') as load3:
self.Threshold = pickle.load(load3)
with open('results/'+self.name+'/train/model/'+'content_threshold.pkl','rb') as load4:
self.threshold = pickle.load(load4)
with open('data/'+self.name+'/preprocess/'+self.name+'_vocabulary_content.pkl','rb') as load5:
self.vocabulary_content = pickle.load(load5)
with open('data/'+self.name+'/preprocess/'+self.name+'_idf_content.pkl','rb') as load6:
self.idf_content = pickle.load(load6)
def document2sentences(self,document):
symbols = frozenset(u",。!?\n:;“”|)\u3000")
sentences= []
tmp = []
for character in document:
if not symbols.__contains__(character):
tmp.append(character)
elif character in ",。!?\n:;“”|)":
tmp.append("。")
for i in range(len(self.key_words)):
if self.key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
tmp = []
elif character == "\u3000":
continue
for i in range(len(self.key_words)):
if self.key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
return ''.join(sentences)
def filtrate_words(self,words):
find_chinese = re.compile(u"[\u4e00-\u9fa5]+")
symbols = "[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\!\@\#\\\&\*\%]"
filtrated_words = []
for j in range(len(words)):
if re.findall(find_chinese,words[j]) == []:
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) == '':
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) in self.chinese_stopwords:
continue
else:
filtrated_words.append(re.sub(symbols, "",re.findall(find_chinese,words[j])[0]))
return ' '.join(filtrated_words)
def predict(self):
content_sentences = self.document2sentences(self.content)
if content_sentences == '':
prediction = '删除'
else:
title_tokenized = jieba.lcut(self.title)
content_sentences_tokenized = jieba.lcut(content_sentences)
title_tokenized_filtered = self.filtrate_words(title_tokenized)
content_sentences_tokenized_filtered = self.filtrate_words(content_sentences_tokenized)
data_title = [5*(title_tokenized_filtered+' ')+content_sentences_tokenized_filtered]
tf_transformer_title = CountVectorizer(ngram_range=(1,3),vocabulary=self.vocabulary_title)
tf_title = tf_transformer_title.fit_transform(data_title)
tf_weight_title = tf_title.toarray().tolist()
tfidf_weight_title = normalize([[x*y for x,y in zip(tf_weight_title[0],self.idf_title)]], norm='l2').tolist()
for ite in range(1,len(self.Threshold)+1):
clf_title = joblib.load(self.model_load_path+self.name+'_iteration_'+str(ite)+'_train_title_classifier.m')
tmp = clf_title.predict_proba(tfidf_weight_title).tolist()
if tmp[0][1] < self.Threshold[ite]:
prediction = '删除'
ite -= 1
break
else:
continue
if ite == len(self.Threshold):
data_content = [content_sentences_tokenized_filtered]
tf_transformer_content = CountVectorizer(ngram_range=(1,3),vocabulary=self.vocabulary_content)
tf_content = tf_transformer_content.fit_transform(data_content)
tf_weight_content = tf_content.toarray().tolist()
tfidf_weight_content = normalize([[x*y for x,y in zip(tf_weight_content[0],self.idf_content)]], norm='l2').tolist()
clf_content = joblib.load(self.model_load_path+self.name+'_train_content_classifier.m')
tmp = clf_content.predict_proba(tfidf_weight_content).tolist()
if tmp[0][1] < self.threshold:
prediction = '删除'
else:
prediction = '保留'
return prediction
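# Illustrative usage sketch (comments only): the project name 'demo' and the
# title/content strings are placeholders; single_predict expects the vocabulary
# and idf pickles under data/<name>/preprocess/ and the trained classifiers under
# results/<name>/train/model/ to exist already.
#
#     >>> sp = single_predict(name='demo', title='...', content='...')
#     >>> sp.predict()
#     '保留'    # or '删除' when the title/content classifier cascade rejects the article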
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/catl/utilities.py
|
utilities.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 16 2018
@author: WuDaqing
"""
import numpy as np
import heapq
from sklearn import linear_model
from sklearn.externals import joblib
import matplotlib.pyplot as plt
from sklearn import metrics
class ensemble(object):
def __init__(self,name,r,data,labels,model_save_path,results_save_path):
self.Name = name
self.Data = data
self.Labels = labels
self.model_save_path = model_save_path
self.results_save_path = results_save_path
self.Num = len(labels)
self.Index = [i for i in range(self.Num)]
print(self.Name+' | Train | Title | Number of Data | '+str(self.Num))
self.Num_Positive = self.Labels.count(1)
self.Num_Negative = self.Labels.count(0)
print(self.Name+' | Train | Title | Number of Positive | '+str(self.Num_Positive))
print(self.Name+' | Train | Title | Number of Negative | '+str(self.Num_Negative))
print(self.Name+' | Train | Title | Data Loaded'+'\n')
self.Ite = 1
self.Index_Retain_Train = [i for i in range(self.Num)]
self.Index_Retain_Predict = [i for i in range(self.Num)]
self.Index_Delete = {}
self.Recall = []
self.Precision = []
self.F1 = []
self.Threshold = {}
self.recall = r
self.config = True
def classifier(self,data,labels):
clf = linear_model.SGDClassifier(loss='log',penalty='l1',alpha=1e-3,class_weight='balanced',learning_rate='optimal',eta0=0.0)
clf.fit(data,labels)
probabilities = []
probabilities_positive = []
probabilities_negative = []
tmp = clf.predict_proba(data)
for i in range(len(data)):
if labels[i] == 1:
probabilities.append(tmp[i][1])
probabilities_positive.append(tmp[i][1])
else:
probabilities.append(tmp[i][1])
probabilities_negative.append(tmp[i][1])
return clf,probabilities,probabilities_positive,probabilities_negative
def unit(self):
data_train = [self.Data[idx] for idx in self.Index_Retain_Train]
labels_train = [self.Labels[idx] for idx in self.Index_Retain_Train]
num_positive = labels_train.count(1)
num_negative = labels_train.count(0)
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Logistic Regression ... ...')
clf_lr,probabilities_train,probabilities_positive_train,probabilities_negative_train = self.classifier(data=data_train,labels=labels_train)
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Adjust Threshold ... ...')
        # keep at least one probability so the [-1] indexing cannot fail on very small datasets
        threshold = heapq.nsmallest(max(1, int(0.01*self.Num_Positive)), probabilities_positive_train)[-1]
Index_Retain_Train = []
for i in range(num_positive+num_negative):
if labels_train[i] == 1:
Index_Retain_Train.append(self.Index_Retain_Train[i])
elif probabilities_train[i] > threshold:
Index_Retain_Train.append(self.Index_Retain_Train[i])
self.Index_Retain_Train = Index_Retain_Train
data_predict = [self.Data[idx] for idx in self.Index_Retain_Predict]
tmp = clf_lr.predict_proba(data_predict).tolist()
probabilities_predict = list(map(list,zip(*tmp)))[1]
Predictions = [0 for i in range(self.Num)]
Index_Retain_Predict = []
self.Index_Delete[self.Ite] = []
for i in range(len(data_predict)):
if probabilities_predict[i] >= threshold:
Index_Retain_Predict.append(self.Index_Retain_Predict[i])
Predictions[self.Index_Retain_Predict[i]] = 1
else:
self.Index_Delete[self.Ite].append(self.Index_Retain_Predict[i])
self.Index_Retain_Predict = Index_Retain_Predict
recall = metrics.recall_score(self.Labels,Predictions,pos_label=1)
precision = metrics.precision_score(self.Labels,Predictions,pos_label=1)
f1 = metrics.f1_score(self.Labels,Predictions,pos_label=1)
if recall >= self.recall:
self.f1 = f1
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Positive Recall | ' + '%.4f'%recall)
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Positive Precision | ' + '%.4f'%precision)
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Positive F1 | ' + '%.4f'%f1+'\n')
self.Recall.append(recall)
self.Precision.append(precision)
self.F1.append(f1)
joblib.dump(clf_lr,self.model_save_path+self.Name+'_iteration_'+str(self.Ite)+'_train_title_classifier.m')
self.Threshold[self.Ite] = threshold
self.Ite += 1
else:
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Positive Recall Less Than Given Recall'+'\n')
self.Index_Retain_Predict += self.Index_Delete[self.Ite]
del self.Index_Delete[self.Ite]
self.config = False
def train_title(self):
        while self.config:
self.unit()
plt.figure(figsize=(8,8),dpi=100)
plt.xlim(0,self.Ite+1)
plt.scatter(range(1,self.Ite),self.Recall,s=100,marker='+',color='r')
plt.plot(range(1,self.Ite),self.Recall,linestyle='-',color='r',linewidth=1.5,label='recall')
plt.scatter(range(1,self.Ite),self.Precision,s=100,marker='+',color='g')
plt.plot(range(1,self.Ite),self.Precision,linestyle='-',color='g',linewidth=1.5,label='precision')
plt.scatter(range(1,self.Ite),self.F1,s=100,marker='+',color='b')
plt.plot(range(1,self.Ite),self.F1,linestyle='-',color='b',linewidth=1.5,label='f1')
plt.legend(loc='lower right',fontsize=10)
plt.savefig(self.results_save_path+self.Name+'_train_title_results.png')
return self.Threshold,self.Index_Retain_Predict,self.Index_Delete
def train_content(self,data,r):
data_train = data
labels_train = [self.Labels[idx] for idx in self.Index_Retain_Predict]
print(self.Name+' | Train | Content | Number of Data | '+str(len(labels_train)))
num_positive = labels_train.count(1)
num_negative = labels_train.count(0)
print(self.Name+' | Train | Content | Number of Positive | '+str(num_positive))
print(self.Name+' | Train | Content | Number of Negative | '+str(num_negative)+'\n')
clf_xg = linear_model.SGDClassifier(loss='log',penalty='l1',alpha=1e-3,class_weight='balanced',learning_rate='optimal',eta0=0.0)
clf_xg.fit(data_train,labels_train)
joblib.dump(clf_xg,self.model_save_path+self.Name+'_train_content_classifier.m')
tmp = clf_xg.predict_proba(np.array(data_train)).tolist()
probabilities_predict = list(map(list,zip(*tmp)))[1]
Recall = []
Precision = []
F1 = []
Threshold = []
for t in [x/1000 for x in range(1001)]:
Predictions = [0 for i in range(self.Num)]
for i in range(len(data_train)):
if probabilities_predict[i] >= t:
Predictions[self.Index_Retain_Predict[i]] = 1
recall = metrics.recall_score(self.Labels,Predictions,pos_label=1)
precision = metrics.precision_score(self.Labels,Predictions,pos_label=1)
f1 = metrics.f1_score(self.Labels,Predictions,pos_label=1)
Recall.append(recall)
Precision.append(precision)
F1.append(f1)
Threshold.append(t)
if recall < r:
break
print(self.Name+' | Train | Content | Finally | Threshold | ' + '%.4f'%Threshold[-1]+'\n')
print(self.Name+' | Train | Content | Finally | Positive Recall | ' + '%.4f'%Recall[-1])
print(self.Name+' | Train | Content | Finally | Positive Precision | ' + '%.4f'%Precision[-1])
print(self.Name+' | Train | Content | Finally | Positive F1 | ' + '%.4f'%F1[-1]+'\n')
plt.figure(figsize=(8,8),dpi=100)
plt.plot(Threshold,Recall,linestyle='-',color='r',linewidth=1.5,label='recall')
plt.plot(Threshold,Precision,linestyle='-',color='g',linewidth=1.5,label='precision')
plt.plot(Threshold,F1,linestyle='-',color='b',linewidth=1.5,label='f1')
plt.legend(loc='lower center',fontsize=10)
plt.savefig(self.results_save_path+self.Name+'_train_content_results.png')
Index_Retain_Predict = []
Index_Delete = []
for i in range(len(data_train)):
if probabilities_predict[i] >= Threshold[-1]:
Index_Retain_Predict.append(self.Index_Retain_Predict[i])
else:
Index_Delete.append(self.Index_Retain_Predict[i])
return Threshold[-1],Index_Retain_Predict,Index_Delete
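# Illustrative sketch (comments only) of how the ensemble class above is driven;
# the variable names are hypothetical and the tf-idf matrices are assumed to come
# from the preprocessing module of this package.
#
#     >>> ens = ensemble(name='demo', r=0.95, data=title_tfidf, labels=labels,
#     ...                model_save_path='results/demo/train/model/',
#     ...                results_save_path='results/demo/train/results/')
#     >>> Threshold, kept_idx, dropped = ens.train_title()          # cascade of title classifiers
#     >>> t, kept_idx2, dropped2 = ens.train_content(content_tfidf_for_kept_idx, r=0.95)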
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/catl/model.py
|
model.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/24 19:09
# @Author : 程婷婷
# @FileName: __init__.py
# @Software: PyCharm
|
zzsnML
|
/zzsnML-1.0.1-py3-none-any.whl/catl/__init__.py
|
__init__.py
|
from zzstocklib_pkg import zzlogger
from urllib import request,parse
import time,datetime
import json
import re
import pandas as pd
import numpy as np
logger = zzlogger.logger
def get_sinacodelist(stock_list):
"""根据股票代码转译为sina所需要的代码,香港hk,沪sh,深sz""" #https://www.cnblogs.com/xuliangxing/p/8492705.html
new_codelist = []
for code in stock_list:
        if len(code) == 5:  # Hong Kong exchange
            code = "hk" + code
        elif len(code) == 6:  # Shanghai / Shenzhen exchanges
if code.startswith('600') or code.startswith('601') or code.startswith('603') or code.startswith('688') or code.startswith('501') or code.startswith('516') or code.startswith('113'):
code = "sh" + code
elif code.startswith('000') or code.startswith('001') or code.startswith('002') or code.startswith('300') or code.startswith('128') or code.startswith('127'):
code = "sz" + code
else:
logger.error("Error: code " + code + " not found in stock market!")
continue
else:
logger.error("Error: code " + code + " not found in stock market!")
continue
new_codelist.append(code)
#print(new_codelist)
return new_codelist
def get_stocklistprice(stock_list):
    """Fetch the current quotes for a list of stock codes from the Sina quote API."""
    stocks = ','.join(get_sinacodelist(stock_list))
stock_price = pd.DataFrame(columns=('name','open_price','lastday_price','current_price','highest_price','lowest_price'))
try:
page = request.urlopen("http://hq.sinajs.cn/?list=" + stocks)
result = page.read().decode('gb2312')
except request.URLError as e:
logger.error(e)
else:
price_list = result.split('\n')
for stock in price_list:
if len(stock.strip()) <= 0:
continue
data = re.findall(r'"(.+?)"', stock)
code = re.findall(r'str_[hkszsh]{2}(.+?)=', stock)
#print(data)
data = data[0].split(',')
if "str_hk" in stock: #如果是港股,则将英文名去掉和沪深的格式看齐,且将当前价格位置位移到第3位
del data[0]
data.insert(3,data[5])
stock = data
df = pd.DataFrame([stock[0:6]], index=code, columns=('name','open_price','lastday_price','current_price','highest_price','lowest_price'))
stock_price = stock_price.append(df)
stock_price['open_price']=stock_price['open_price'].apply(float)
stock_price['lastday_price']=stock_price['lastday_price'].apply(float)
stock_price['current_price']=stock_price['current_price'].apply(float)
stock_price['highest_price']=stock_price['highest_price'].apply(float)
stock_price['lowest_price']=stock_price['lowest_price'].apply(float)
return stock_price
def get_sinacode(stock_code):
"""根据股票代码转译为sina所需要的代码,香港hk,沪sh,深sz""" #https://www.cnblogs.com/xuliangxing/p/8492705.html
code = stock_code
if len(code) == 5: # 香港交易所
code = "hk" + code
elif len(code) == 6: # 沪深交易所
if code.startswith('600') or code.startswith('601') or code.startswith('603') or code.startswith('688') or code.startswith('501') or code.startswith('516') or code.startswith('113'):
code = "sh" + code
elif code.startswith('000') or code.startswith('001') or code.startswith('002') or code.startswith('300') or code.startswith('128') or code.startswith('127'):
code = "sz" + code
else:
logger.error("Error: code " + code +" not found in stock market!")
else:
logger.error("Error: code " + code + " not found in stock market!")
return code
def get_lastday_stockprice(stock_code):
"""获取当前股票价格"""
try:
page = request.urlopen("http://hq.sinajs.cn/?list=" + get_sinacode(stock_code))
result = page.read().decode('gb2312')
#print(result)
except request.URLError as e:
logger.error(e)
else:
content_data = re.findall(r'"(.+?)"', result)
#print(content_data)
data = content_data[0].split(',')
if "str_hk" in result: #如果是港股,则将英文名去掉和沪深的格式看齐,且将当前价格位置位移到第3位
del data[0]
data.insert(3,data[5])
stock_lastday_price = data[2]
#df = pd.DataFrame([stock[0:6]], index=code, columns=('name','open_price','lastday_price','current_price','highest_price','lowest_price'))
return float(stock_lastday_price)
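# Illustrative usage sketch (comments only); the codes are examples and the calls
# depend on the public Sina quote endpoint being reachable.
#
#     >>> get_sinacode('600000')                       # Shanghai listing
#     'sh600000'
#     >>> get_lastday_stockprice('600000')             # previous close as a float
#     >>> get_stocklistprice(['600000', '00700'])      # DataFrame of quotes indexed by code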
|
zzstocklib-pkg-pubbyzz
|
/zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/sinaFinanceUtility.py
|
sinaFinanceUtility.py
|
import pika
from propertiesUtility import properties
prop = properties
print(prop.get('rabbitMQ_Host'))
connection = pika.BlockingConnection(pika.ConnectionParameters(
host=prop.get('rabbitMQ_Host')))
channel = connection.channel()
channel.exchange_declare(exchange='directexchangenotifcation',
exchange_type='direct')
result = channel.queue_declare(queue=prop.get('notification_queue_name'), exclusive=True)
queue_name = result.method.queue
print(queue_name)
channel.queue_bind(exchange='directexchangenotifcation',
queue=queue_name,
routing_key = "msg")
print(' [*] Waiting for info. To exit press CTRL+C')
def callback(ch, method, properties, body):
print(" [x] %r" % body)
channel.basic_consume(queue=queue_name,
on_message_callback=callback,
auto_ack=True)
channel.start_consuming()
|
zzstocklib-pkg-pubbyzz
|
/zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/dealwith_notification.py
|
dealwith_notification.py
|
# -*- coding:utf-8 -*-
import re
import os
import tempfile
class Properties:
def __init__(self, file_name):
self.file_name = file_name
self.properties = {}
if os.path.exists(file_name):
with open(file_name) as f:
for line in f:
tline = line.strip()
if tline.startswith('#'):
continue
else:
                        kv_list = tline.split('=', 1)  # split on the first '=' only, so values may themselves contain '='
if not kv_list or len(kv_list) != 2:
continue
else:
value_list = kv_list[1].strip().split(',')
if not value_list:
continue
else:
if len(value_list) == 1:
self.properties[kv_list[0].strip()] = value_list[0].strip()
else:
temp = []
for v in value_list:
temp.append(v.strip())
self.properties[kv_list[0].strip()] = temp
else:
raise Exception("file %s not found" % file_name)
def get(self, key):
if key in self.properties:
return self.properties[key]
return ''
def get_list(self, key):
if key in self.properties:
temp = self.properties[key]
if isinstance(temp, list):
return temp
else:
return [temp]
return []
def get_num(self, key):
if key in self.properties:
return float(self.properties[key])
return 0
path = os.path.split(os.path.realpath(__file__))[0]
config_file_path = os.path.join(path, 'config/global.properties')  # path of the global properties file
properties = Properties(config_file_path)
#print(properties.get('notification_queue_name'))
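# Illustrative usage sketch (comments only), assuming config/global.properties
# contains lines such as "rabbitMQ_Host=localhost" and "watch_codes=600000, 000001"
# (the key names here are examples, not a documented schema):
#
#     >>> properties.get('rabbitMQ_Host')
#     'localhost'
#     >>> properties.get_list('watch_codes')
#     ['600000', '000001']
#     >>> properties.get('missing_key')
#     ''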
|
zzstocklib-pkg-pubbyzz
|
/zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/propertiesUtility.py
|
propertiesUtility.py
|
import sinaFinanceUtility as sina
import pandas as pd
excel_url = r'https://api.onedrive.com/v1.0/shares/u!aHR0cHM6Ly8xZHJ2Lm1zL3gvcyFBb2NpM3BwNVE0LXRwenR1ZmlWRld0S0NJR2NKP2U9U2RJT2NR/root/content'
df = pd.read_excel(excel_url, dtype={'股票代码': str})
# replace NaN (missing values) with 0
df = df.fillna(0)
df1 = df.groupby(['股票代码', '股票名称'])['数量'].sum().reset_index(name='持仓数量')
stocks = df1.sort_values(by='持仓数量',ascending=False)
stocks['成本价'] = 0.0
stocks['最近交易价格'] = 0.0
stocks['最近交易数量'] = 0
stocks['最近目标价格'] = 0.0
stocks['收盘价'] = 0.0
stocks['最近交易价格+3%'] = 0.0
stocks['收盘价+3%'] = 0.0
stocks['止损价-10%'] = 0.0
for index, row in stocks.iterrows():  # iterate over the dataframe rows
df3 = df[df['股票代码'] == row['股票代码']].sort_values(by='交易日期', ascending=False)
df3['成本'] = df3['数量']*df3['交易价格']
row['最近交易价格'] = df3.iloc[0, 4]
row['最近交易数量'] = df3.iloc[0, 3]
row['最近目标价格'] = df3['目标价格'].max()
row['收盘价'] = sina.get_lastday_stockprice(row['股票代码'])
row['收盘价+3%'] = row['收盘价']*1.03
    if row['持仓数量'] > 0:  # only evaluate stocks that are currently held
row['成本价'] = df3['成本'].sum()/row['持仓数量']
row['最近交易价格+3%'] = row['最近交易价格']*1.03
row['止损价-10%'] = row['成本价']*0.9
    stocks.loc[index] = row  # use .loc: after sort_values the positional order no longer matches the index labels
stocks = stocks.sort_values(by='持仓数量',ascending=False)
stocks.to_excel(r'C:\workspace\stock\stockdata\dict\stock_updated.xlsx')  # raw string so the backslashes are not treated as escapes
|
zzstocklib-pkg-pubbyzz
|
/zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/genStockreport.py
|
genStockreport.py
|
from urllib import request,parse
import time,datetime
import json
import zzlogger
logger = zzlogger.logger
def send_wxnotification(message):
"""发送公众号提醒""" #文档字符串用三引号括起,Python使用它们来生成有关程序中函数的文档。
miao_code="tLmPyT4"
text = message
page = request.urlopen("http://miaotixing.com/trigger?" + parse.urlencode({"id":miao_code, "text":text, "type":"json"}))
result = page.read()
jsonObj = json.loads(result)
if(jsonObj["code"] == 0):
logger.debug("send " + message + " to WeChat success!")
else:
logger.error("failed, err code:" + str(jsonObj["code"]) + ", desc:" + jsonObj["msg"])
|
zzstocklib-pkg-pubbyzz
|
/zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/wx_notification.py
|
wx_notification.py
|
import pandas as pd
import requests
from lxml import etree
import time
import json
from pandas.io.json import json_normalize
import os
import re
def get_KZZInfo_from_JSL():
now_time = time.time()
url = 'https://www.jisilu.cn/data/cbnew/cb_list/?___jsl=LST___t=' + str(now_time)
    # send the request and parse the JSON response
response = requests.get(url)
jsonObj = response.json()
df = pd.DataFrame.from_dict(json_normalize(jsonObj['rows']), orient='columns')
#print(df)
#bond_code, bond_price, bond_increase_rate,bond_stock_price,bond_stock_increase_rate
jslbond_df = df[['cell.bond_id','cell.price','cell.increase_rt','cell.sprice','cell.sincrease_rt']]
jslbond_df.columns = ['bond_code','bond_price','bond_increase_rate','bond_stock_price','bond_stock_increase_rate']
return jslbond_df
#Assume the kzz_df is a dataframe
def merge_KZZlist_withJSLprice(kzz_df):
#kzz_df['债券现价'] = 0.0
kzz_df['债券振幅'] = 0.0
#kzz_df['股票现价'] = 0.0
kzz_df['正股振幅'] = 0.0
current_bondprice_df = get_KZZInfo_from_JSL()
    for index, row in current_bondprice_df.iterrows():
        mask = kzz_df['债券代码'] == row['bond_code']
        if not mask.any():
            # a boolean selection never raises KeyError, so report missing codes explicitly
            print('code {} is not in the KZZ list. It needs to be updated.'.format(row['bond_code']))
            continue
        kzz_df.loc[mask, '当前价'] = row['bond_price']
        kzz_df.loc[mask, '债券振幅'] = row['bond_increase_rate']
        kzz_df.loc[mask, '正股股价'] = row['bond_stock_price']
        kzz_df.loc[mask, '正股振幅'] = row['bond_stock_increase_rate']
    return kzz_df
def gen_KZZDetaillist_with_RPAData(rpa_data_file_path):
#path = "/Users/zhangzhi/temp/zz/" #文件夹目录
path = rpa_data_file_path
files= os.listdir(path) #得到文件夹下的所有文件名称
data = pd.DataFrame(columns=('债券代码','债券名称','正股代码','正股名称','正股股价','市净率','每股净资产','转股价','信用级别'
,'转股开始日','转股结束日','回售触发价','回售执行日','强赎触发价','赎回登记日','上市日','到期日'
,'发行规模','利率1','利率2','利率3','利率4','利率5','利率6','赎回利率','回售条款','赎回条款'))
for file in files:
if file.find("bak") == -1:
print(file)
df = pd.read_csv(path + file,header=None)
df2 = pd.read_csv(path + df[2][0] + 'bak.csv',header=None)
code = re.findall(r'年(.+?%)', df[2][27])
lastcode = re.findall(r'([0-9]+%)', df2[2][1])
if len(code) == 6:
row={'债券代码':df[2][0],'债券名称':df[4][0],'正股代码':df[2][4],'正股名称':df[4][4],'正股股价':df[2][10],'市净率':df[4][10],
'每股净资产':0,'转股价':df[4][11],'信用级别':df[2][23]
,'转股开始日':df[2][14],'转股结束日':df[4][14],'回售触发价':df[2][13],'回售执行日':df[2][18],
'强赎触发价':df[4][13],'赎回登记日':df[2][16],'上市日':df[2][24],'到期日':df[2][26]
,'发行规模':df[4][21],'利率1':code[0],'利率2':code[1],'利率3':code[2],'利率4':code[3],
'利率5':code[4],'利率6':code[5],'赎回利率':lastcode[0],'回售条款':df2[2][0],'赎回条款':df2[2][1]}
else:
row={'债券代码':df[2][0],'债券名称':df[4][0],'正股代码':df[2][4],'正股名称':df[4][4],'正股股价':df[2][10],'市净率':df[4][10],
'每股净资产':0,'转股价':df[4][11],'信用级别':df[2][23]
,'转股开始日':df[2][14],'转股结束日':df[4][14],'回售触发价':df[2][13],'回售执行日':df[2][18],
'强赎触发价':df[4][13],'赎回登记日':df[2][16],'上市日':df[2][24],'到期日':df[2][26]
,'发行规模':df[4][21],'利率1':code[0],'利率2':code[1],'利率3':code[2],'利率4':code[3],
'利率5':code[4],'赎回利率':lastcode[0],'回售条款':df2[2][0],'赎回条款':df2[2][1]}
data = data.append(row,ignore_index=True)
return data
def check_KZZ_with_Rules(kzz_df):
notification_dict = {}
for index, row in kzz_df.iterrows():
        # rule 1: the convertible bond price has fallen below its historical low
if row['当前价'] <= row['历史最低价']:
key = '债券:{} code:{} 当前价格低于历史最低价{}'.format(row['债券名称'],row['债券代码'],row['历史最低价'])
if (key not in notification_dict.keys()):
notification_dict[key] = 'waitting'
        # rule 2: the price is within about 5% of the historical low and the annualized yield is above 5%
if row['当前价'] <= row['历史最低价']*1.05 and row['当前价'] <= row['年化%5收益率']:
key = '债券:{} code:{} 当前价格历史最低价{}+5%及年化收益5%空间'.format(row['债券名称'],row['债券代码'],row['历史最低价'])
if (key not in notification_dict.keys()):
notification_dict[key] = 'waitting'
return notification_dict
kzz_detail_df = gen_KZZDetaillist_with_RPAData("C:\\zz\\")  # keep a distinct name so the pandas alias 'pd' is not shadowed
kzz_detail_df.to_excel(r"C:\ss.xlsx")
|
zzstocklib-pkg-pubbyzz
|
/zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/KZZUtility.py
|
KZZUtility.py
|
import pandas as pd
import struct
import datetime
import os
#only deal with tongxinda shanghai&shenzhen stock lday data
def stock_csv(code):
file_object_path = 'C:/workspace/stock/stockdata/lday/' + code +'.csv'
    filepath = 'C:/new_jyplug/vipdoc/sz/lday/sz' + code + '.day'
    if not os.path.exists(filepath):
        filepath = 'C:/new_jyplug/vipdoc/sh/lday/sh' + code + '.day'
    if not os.path.exists(filepath):
        # neither the Shanghai nor the Shenzhen market data contains this code,
        # so create an empty local csv and return
        file_object = open(file_object_path, 'w+')
        file_object.close()
        return
data = []
with open(filepath, 'rb') as f:
file_object = open(file_object_path, 'w+')
while True:
stock_date = f.read(4)
stock_open = f.read(4)
stock_high = f.read(4)
stock_low= f.read(4)
stock_close = f.read(4)
stock_amount = f.read(4)
stock_vol = f.read(4)
stock_reservation = f.read(4)
# date,open,high,low,close,amount,vol,reservation
if not stock_date:
break
            stock_date = struct.unpack("<l", stock_date)  # 4 bytes, e.g. 20091229 ("<l" is always 4 bytes, unlike native "l")
            stock_open = struct.unpack("<l", stock_open)  # open price * 1000
            stock_high = struct.unpack("<l", stock_high)  # high price * 1000
            stock_low = struct.unpack("<l", stock_low)  # low price * 1000
            stock_close = struct.unpack("<l", stock_close)  # close price * 1000
            stock_amount = struct.unpack("<f", stock_amount)  # turnover
            stock_vol = struct.unpack("<l", stock_vol)  # volume
            stock_reservation = struct.unpack("<l", stock_reservation)  # reserved field
            date_format = datetime.datetime.strptime(str(stock_date[0]), '%Y%m%d')  # %m is month ('%M' would be minutes)
            line = date_format.strftime('%Y-%m-%d')+","+str(stock_open[0]/1000)+","+str(stock_high[0]/1000.0)+","+str(stock_low[0]/1000.0)+","+str(stock_close[0]/1000.0)+","+str(stock_amount[0])+","+str(stock_vol[0])+"\r\n"
            file_object.write(line)  # 'line' instead of the builtin name 'list'; write() takes a single string
file_object.close()
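# Layout of one 32-byte record in a TDX .day file, as read by stock_csv above
# (a descriptive note, not executable code):
#     bytes  0-3    trade date as int32, e.g. 20091229
#     bytes  4-19   open / high / low / close as int32, each price * 1000
#     bytes 20-23   turnover as float32
#     bytes 24-27   volume as int32
#     bytes 28-31   reserved int32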
def load_stock(code):
file_url = 'C:/workspace/stock/stockdata/lday/' + code +'.csv'
#if not os.path.exists(file_url):
# stock_csv(code)
    # regenerate the csv on every call so the latest data is loaded
stock_csv(code)
df = pd.read_csv(file_url, names=['date','open','high','low','close','amount','vol'])
return df
kzz_df = pd.read_excel('C:\\workspace\\stock\\stockdata\\dict\\KZZ.xlsx',dtype={'债券代码':str})
# for each row, access the fields by column name
kzz_df['历史最低价'] = 0.0
kzz_df['历史最高价'] = 0.0
kzz_df['当前价'] = 0.0
kzz_df['剩余年限'] = 0.0
kzz_df['到期价值'] = 0.0
kzz_df['到期收益率'] = 0.0
kzz_df['到期年化收益率'] = 0.0
kzz_df['年化%5收益率'] = 0.0
for index, row in kzz_df.iterrows():
temp_df = load_stock(row['债券代码'])
if len(temp_df) > 0:
row['历史最低价'] = temp_df['close'].min()
row['历史最高价'] = temp_df['close'].max()
row['当前价'] = temp_df.iloc[-1]['close']
#print(row['债券代码'] + ' ' + str(row['历史最低价']))
kzz_df.iloc[index] = row
kzz_df.to_excel(r'C:\workspace\stock\stockdata\dict\kzz_updated.xlsx', index=False)  # raw string so the backslashes are not treated as escapes
|
zzstocklib-pkg-pubbyzz
|
/zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/genKZZreport.py
|
genKZZreport.py
|
name = "zzstocklib_pkg"
|
zzstocklib-pkg-pubbyzz
|
/zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/__init__.py
|
__init__.py
|
import os
import logging
from logging.handlers import TimedRotatingFileHandler
path = os.path.split(os.path.realpath(__file__))[0]
log_path = os.path.join(path, 'logs')  # directory that holds the log files
class Logger(object):
def __init__(self, logger_name='logs…'):
self.logger = logging.getLogger(logger_name)
logging.root.setLevel(logging.NOTSET)
        self.log_file_name = 'zzstock.log'  # name of the log file
        self.backup_count = 5  # maximum number of rotated log files to keep
        # log output levels
        self.console_output_level = 'WARNING'
        self.file_output_level = 'INFO'
        # log output format
self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def get_logger(self):
"""在logger中添加日志句柄并返回,如果logger已有句柄,则直接返回"""
if not self.logger.handlers: # 避免重复日志
console_handler = logging.StreamHandler()
console_handler.setFormatter(self.formatter)
console_handler.setLevel(self.console_output_level)
self.logger.addHandler(console_handler)
            # roll over to a new log file every day, keeping at most backup_count old files
file_handler = TimedRotatingFileHandler(filename=os.path.join(log_path, self.log_file_name), when='D',
interval=1, backupCount=self.backup_count, delay=True, encoding='utf-8')
file_handler.setFormatter(self.formatter)
file_handler.setLevel(self.file_output_level)
self.logger.addHandler(file_handler)
return self.logger
logger = Logger().get_logger()
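# Illustrative usage sketch (comments only):
#
#     >>> from zzstocklib_pkg import zzlogger
#     >>> zzlogger.logger.warning('price feed is stale')   # console shows WARNING+, zzstock.log records INFO+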
|
zzstocklib-pkg-pubbyzz
|
/zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/zzlogger.py
|
zzlogger.py
|
# ZZSUKI_TEST
## Readme
nothing happened
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/README.md
|
README.md
|
from setuptools import setup, find_packages
with open("README.md", "r", encoding='utf8') as fh:
long_description = fh.read()
setup(
name='zzsukitest',
version='1.0.6',
author='zzsuki',
author_email='[email protected]',
url='https://gitee.com/he_weidong/zzsuki_tests',
description='test runner for automation',
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=["Jinja2", "PyYAML", "requests"],
packages=find_packages(),
package_data={
"": ["*.html", '*.md'],
},
python_requires='>=3.6',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/setup.py
|
setup.py
|
from .core.test_runner import TestRunner, Load
from .core.data_driver import ddt, list_data, json_data, yaml_data
from .core.re_run import re_run
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/__init__.py
|
__init__.py
|
import re
import traceback
import unittest
import sys
import time
from io import StringIO
origin_stdout = sys.stdout
def output2console(s):
"""Output stdout content to console"""
tmp_stdout = sys.stdout
sys.stdout = origin_stdout
print(s, end='')
sys.stdout = tmp_stdout
class OutputRedirector(object):
""" Wrapper to redirect stdout or stderr """
def __init__(self, fp):
self.fp = fp
def write(self, s):
self.fp.write(s)
origin_stdout.write(str(s))
def writelines(self, lines):
self.fp.writelines(lines)
def flush(self):
self.fp.flush()
stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)
class TestResult(unittest.TestResult):
def __init__(self):
super().__init__()
self.fields = {
"success": 0,
"all": 0,
"fail": 0,
"skip": 0,
"error": 0,
"begin_time": "",
"results": [],
"testClass": []
}
self.sys_stdout = None
self.sys_stderr = None
self.outputBuffer = None
self.start_time = None
def startTest(self, test):
super().startTest(test)
self.start_time = time.time()
self.outputBuffer = StringIO()
stdout_redirector.fp = self.outputBuffer
stderr_redirector.fp = self.outputBuffer
self.sys_stdout = sys.stdout
self.sys_stderr = sys.stderr
sys.stdout = stdout_redirector
sys.stderr = stderr_redirector
def complete_output(self):
if self.sys_stdout:
sys.stdout = self.sys_stdout
sys.stderr = self.sys_stderr
self.sys_stdout = None
self.sys_stderr = None
return self.outputBuffer.getvalue()
def stopTest(self, test):
        test.run_time = '{:.3f}s'.format(time.time() - self.start_time)  # three decimal places ('{:.3}' would give significant digits)
test.class_name = test.__class__.__qualname__
test.method_name = test.__dict__['_testMethodName']
test.method_doc = test.shortDescription()
self.fields['results'].append(test)
tmp_set = set(self.fields["testClass"])
tmp_set.add(test.class_name)
self.fields["testClass"] = list(tmp_set)
self.complete_output()
def stopTestRun(self, title=None):
self.fields['fail'] = len(self.failures)
self.fields['error'] = len(self.errors)
self.fields['skip'] = len(self.skipped)
self.fields['all'] = sum(
[self.fields['fail'], self.fields['error'], self.fields['skip'], self.fields['success']])
self.fields['testClass'] = list(self.fields['testClass'])
def addSuccess(self, test):
self.fields["success"] += 1
test.state = '成功'
sys.stdout.write("{}执行——>【通过】\n".format(test))
logs = []
output = self.complete_output()
logs.append(output)
test.run_info = logs
def addFailure(self, test, err):
super().addFailure(test, err)
logs = []
test.state = '失败'
sys.stderr.write("{}执行——>【失败】\n".format(test))
output = self.complete_output()
logs.append(output)
logs.extend(traceback.format_exception(*err))
test.run_info = logs
def addSkip(self, test, reason):
super().addSkip(test, reason)
test.state = '跳过'
sys.stdout.write("{}执行--【跳过Skip】\n".format(test))
logs = [reason]
test.run_info = logs
def addError(self, test, err):
super().addError(test, err)
test.state = '错误'
sys.stderr.write("{}执行——>【错误Error】\n".format(test))
logs = []
logs.extend(traceback.format_exception(*err))
test.run_info = logs
if test.__class__.__qualname__ == '_ErrorHolder':
test.run_time = 0
res = re.search(r'(.*)\(.*\.(.*)\)', getattr(test, 'description'))
test.class_name = res.group(2)
test.method_name = res.group(1)
test.method_doc = test.shortDescription()
self.fields['results'].append(test)
tmp_set = set(self.fields["testClass"])
tmp_set.add(test.class_name)
self.fields["testClass"] = list(tmp_set)
# self.fields["testClass"].add(test.class_name)
else:
output = self.complete_output()
logs.append(output)
class ReRunResult(TestResult):
def __init__(self, count, interval):
super().__init__()
self.count = count
self.interval = interval
self.run_cases = []
def startTest(self, test):
if not hasattr(test, "count"):
super().startTest(test)
def stopTest(self, test):
if test not in self.run_cases:
self.run_cases.append(test)
super().stopTest(test)
def addFailure(self, test, err):
if not hasattr(test, 'count'):
test.count = 0
if test.count < self.count:
test.count += 1
sys.stderr.write("{}执行——>【失败Failure】\n".format(test))
for string in traceback.format_exception(*err):
sys.stderr.write(string)
sys.stderr.write("================{}重运行第{}次================\n".format(test, test.count))
time.sleep(self.interval)
test.run(self)
else:
super().addFailure(test, err)
if test.count != 0:
sys.stderr.write("================重运行{}次完毕================\n".format(test.count))
def addError(self, test, err):
if not hasattr(test, 'count'):
test.count = 0
if test.count < self.count:
test.count += 1
sys.stderr.write("{}执行——>【错误Error】\n".format(test))
for string in traceback.format_exception(*err):
sys.stderr.write(string)
sys.stderr.write("================{}重运行第{}次================\n".format(test, test.count))
time.sleep(self.interval)
test.run(self)
else:
super().addError(test, err)
if test.count != 0:
sys.stderr.write("================重运行{}次完毕================\n".format(test.count))
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/core/test_result.py
|
test_result.py
|
import os
import smtplib
import time
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
class EmailConf:
EmailQQ = {"host": "smtp.qq.com", "port": 465}
Email163 = {"host": "smtp.163.com", "port": 465}
class SendEmail:
"""发送邮件"""
def __init__(self, host, user, password, port=465):
"""
初始化设置
:param host: smtp服务器地址(qq邮箱:smtp.qq.com,163邮箱:smtp.163.com")
:param port: smtp服务器端口:465
:param user: 邮箱账号
:param password: 邮箱的smtp服务授权码
"""
self.smtp = smtplib.SMTP_SSL(host=host, port=port)
self.smtp.login(user=user, password=password)
self.user = user
def send_email(self, subject="测试报告", content=None, filename=None, to_addrs=None):
"""
发送邮件
:param subject: 邮件主题
:param content: 邮件内容
:param filename: 报告文件的完整路径
:param to_addrs: 收件人地址
:type to_addrs: str or list
:return:
"""
print("--------准备发送测试报告---------")
msg = MIMEMultipart()
msg["Subject"] = subject
msg["From"] = self.user
if isinstance(to_addrs, str):
msg["To"] = to_addrs
elif to_addrs and isinstance(to_addrs, list):
msg["To"] = to_addrs[0]
if not content:
content = time.strftime("%Y-%m-%d-%H_%M_%S") + ":测试报告"
# 构建邮件的文本内容
text = MIMEText(content, _subtype="html", _charset="utf8")
msg.attach(text)
# 判断是否要发送附件
if filename and os.path.isfile(filename):
with open(filename, "rb") as f:
content = f.read()
            report = MIMEApplication(content)  # default octet-stream subtype; _subtype=None would raise TypeError
name = os.path.split(filename)[1]
report.add_header('content-disposition', 'attachment', filename=name)
msg.attach(report)
# 发送邮件
try:
self.smtp.send_message(msg, from_addr=self.user, to_addrs=to_addrs)
except Exception as e:
print("--------测试报告发送失败------")
raise e
else:
print("--------测试报告发送完毕------")
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/core/send_email.py
|
send_email.py
|
import re
import sys
import inspect
import warnings
from functools import wraps
from types import MethodType as MethodType
from collections import namedtuple
try:
from collections import OrderedDict as MaybeOrderedDict
except ImportError:
MaybeOrderedDict = dict
from unittest import TestCase
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception):
pass
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
if PY3:
class InstanceType():
pass
lzip = lambda *a: list(zip(*a))
text_type = str
string_types = str,
bytes_type = bytes
def make_method(func, instance, type):
if instance is None:
return func
return MethodType(func, instance)
CompatArgSpec = namedtuple("CompatArgSpec", "args varargs keywords defaults")
def getargspec(func):
if PY2:
return CompatArgSpec(*inspect.getargspec(func))
args = inspect.getfullargspec(func)
if args.kwonlyargs:
raise TypeError((
"parameterized does not (yet) support functions with keyword "
"only arguments, but %r has keyword only arguments. "
"Please open an issue with your usecase if this affects you: "
"https://github.com/wolever/parameterized/issues/new"
) % (func,))
return CompatArgSpec(*args[:4])
def skip_on_empty_helper(*a, **kw):
raise SkipTest("parameterized input is empty")
def reapply_patches_if_need(func):
def dummy_wrapper(orgfunc):
@wraps(orgfunc)
def dummy_func(*args, **kwargs):
return orgfunc(*args, **kwargs)
return dummy_func
if hasattr(func, 'patchings'):
func = dummy_wrapper(func)
tmp_patchings = func.patchings
delattr(func, 'patchings')
for patch_obj in tmp_patchings:
func = patch_obj.decorate_callable(func)
return func
def delete_patches_if_need(func):
if hasattr(func, 'patchings'):
func.patchings[:] = []
_param = namedtuple("param", "args kwargs")
class param(_param):
""" Represents a single parameter to a test case.
For example::
>>> p = param("foo", bar=16)
>>> p
param("foo", bar=16)
>>> p.args
('foo', )
>>> p.kwargs
{'bar': 16}
Intended to be used as an argument to ``@parameterized``::
@parameterized([
param("foo", bar=16),
])
def test_stuff(foo, bar=16):
pass
"""
def __new__(cls, *args, **kwargs):
return _param.__new__(cls, args, kwargs)
@classmethod
def explicit(cls, args=None, kwargs=None):
""" Creates a ``param`` by explicitly specifying ``args`` and
``kwargs``::
>>> param.explicit([1,2,3])
param(*(1, 2, 3))
>>> param.explicit(kwargs={"foo": 42})
param(*(), **{"foo": "42"})
"""
args = args or ()
kwargs = kwargs or {}
return cls(*args, **kwargs)
@classmethod
def from_decorator(cls, args):
""" Returns an instance of ``param()`` for ``@parameterized`` argument
``args``::
>>> param.from_decorator((42, ))
param(args=(42, ), kwargs={})
>>> param.from_decorator("foo")
param(args=("foo", ), kwargs={})
"""
if isinstance(args, param):
return args
elif isinstance(args, string_types):
args = (args,)
try:
return cls(*args)
except TypeError as e:
if "after * must be" not in str(e):
raise
raise TypeError(
"Parameters must be tuples, but %r is not (hint: use '(%r, )')"
% (args, args),
)
def __repr__(self):
return "param(*%r, **%r)" % self
class QuietOrderedDict(MaybeOrderedDict):
""" When OrderedDict is available, use it to make sure that the kwargs in
doc strings are consistently ordered. """
__str__ = dict.__str__
__repr__ = dict.__repr__
def parameterized_argument_value_pairs(func, p):
"""Return tuples of parameterized arguments and their values.
This is useful if you are writing your own doc_func
function and need to know the values for each parameter name::
>>> def func(a, foo=None, bar=42, **kwargs): pass
>>> p = param(1, foo=7, extra=99)
>>> parameterized_argument_value_pairs(func, p)
[("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]
If the function's first argument is named ``self`` then it will be
ignored::
>>> def func(self, a): pass
>>> p = param(1)
>>> parameterized_argument_value_pairs(func, p)
[("a", 1)]
Additionally, empty ``*args`` or ``**kwargs`` will be ignored::
>>> def func(foo, *args): pass
>>> p = param(1)
>>> parameterized_argument_value_pairs(func, p)
[("foo", 1)]
>>> p = param(1, 16)
>>> parameterized_argument_value_pairs(func, p)
[("foo", 1), ("*args", (16, ))]
"""
argspec = getargspec(func)
arg_offset = 1 if argspec.args[:1] == ["self"] else 0
named_args = argspec.args[arg_offset:]
result = lzip(named_args, p.args)
named_args = argspec.args[len(result) + arg_offset:]
varargs = p.args[len(result):]
result.extend([
(name, p.kwargs.get(name, default))
for (name, default)
in zip(named_args, argspec.defaults or [])
])
seen_arg_names = set([n for (n, _) in result])
keywords = QuietOrderedDict(sorted([
(name, p.kwargs[name])
for name in p.kwargs
if name not in seen_arg_names
]))
if varargs:
result.append(("*%s" % (argspec.varargs,), tuple(varargs)))
if keywords:
result.append(("**%s" % (argspec.keywords,), keywords))
return result
def short_repr(x, n=64):
""" A shortened repr of ``x`` which is guaranteed to be ``unicode``::
>>> short_repr("foo")
u"foo"
>>> short_repr("123456789", n=4)
u"12...89"
"""
x_repr = repr(x)
if isinstance(x_repr, bytes_type):
try:
x_repr = text_type(x_repr, "utf-8")
except UnicodeDecodeError:
x_repr = text_type(x_repr, "latin1")
if len(x_repr) > n:
x_repr = x_repr[:n // 2] + "..." + x_repr[len(x_repr) - n // 2:]
return x_repr
def default_doc_func(func, num, p):
if func.__doc__ is None:
return None
all_args_with_values = parameterized_argument_value_pairs(func, p)
# Assumes that the function passed is a bound method.
descs = ["%s=%s" % (n, short_repr(v)) for n, v in all_args_with_values]
# The documentation might be a multiline string, so split it
# and just work with the first string, ignoring the period
# at the end if there is one.
first, nl, rest = func.__doc__.lstrip().partition("\n")
suffix = ""
if first.endswith("."):
suffix = "."
first = first[:-1]
args = "%s[with %s]" % (len(first) and " " or "", ", ".join(descs))
return "".join([first.rstrip(), args, suffix, nl, rest])
def default_name_func(func, num, p):
base_name = func.__name__
name_suffix = "_%s" % (num,)
if len(p.args) > 0 and isinstance(p.args[0], string_types):
name_suffix += "_" + parameterized.to_safe_name(p.args[0])
return base_name + name_suffix
_test_runner_override = None
_test_runner_guess = False
_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"])
_test_runner_aliases = {
"_pytest": "pytest",
}
def set_test_runner(name):
global _test_runner_override
if name not in _test_runners:
raise TypeError(
"Invalid test runner: %r (must be one of: %s)"
% (name, ", ".join(_test_runners)),
)
_test_runner_override = name
def detect_runner():
""" Guess which test runner we're using by traversing the stack and looking
for the first matching module. This *should* be reasonably safe, as
        it's done during test discovery where the test runner should be the
stack frame immediately outside. """
if _test_runner_override is not None:
return _test_runner_override
global _test_runner_guess
if _test_runner_guess is False:
stack = inspect.stack()
for record in reversed(stack):
frame = record[0]
module = frame.f_globals.get("__name__").partition(".")[0]
if module in _test_runner_aliases:
module = _test_runner_aliases[module]
if module in _test_runners:
_test_runner_guess = module
break
if record[1].endswith("python2.6/unittest.py"):
_test_runner_guess = "unittest"
break
else:
_test_runner_guess = None
return _test_runner_guess
class parameterized(object):
""" Parameterize a test case::
class TestInt(object):
@parameterized([
("A", 10),
("F", 15),
param("10", 42, base=42)
])
def test_int(self, input, expected, base=16):
actual = int(input, base=base)
assert_equal(actual, expected)
@parameterized([
(2, 3, 5)
(3, 5, 8),
])
def test_add(a, b, expected):
assert_equal(a + b, expected)
"""
def __init__(self, input, doc_func=None, skip_on_empty=False):
self.get_input = self.input_as_callable(input)
self.doc_func = doc_func or default_doc_func
self.skip_on_empty = skip_on_empty
def __call__(self, test_func):
self.assert_not_in_testcase_subclass()
@wraps(test_func)
def wrapper(test_self=None):
test_cls = test_self and type(test_self)
if test_self is not None:
if issubclass(test_cls, InstanceType):
raise TypeError((
"@parameterized can't be used with old-style classes, but "
"%r has an old-style class. Consider using a new-style "
"class, or '@parameterized.expand' "
"(see http://stackoverflow.com/q/54867/71522 for more "
"information on old-style classes)."
) % (test_self,))
original_doc = wrapper.__doc__
for num, args in enumerate(wrapper.parameterized_input):
p = param.from_decorator(args)
unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
try:
wrapper.__doc__ = nose_tuple[0].__doc__
# Nose uses `getattr(instance, test_func.__name__)` to get
# a method bound to the test instance (as opposed to a
# method bound to the instance of the class created when
# tests were being enumerated). Set a value here to make
# sure nose can get the correct test method.
if test_self is not None:
setattr(test_cls, test_func.__name__, unbound_func)
yield nose_tuple
finally:
if test_self is not None:
delattr(test_cls, test_func.__name__)
wrapper.__doc__ = original_doc
input = self.get_input()
if not input:
if not self.skip_on_empty:
raise ValueError(
"Parameters iterable is empty (hint: use "
"`parameterized([], skip_on_empty=True)` to skip "
"this test when the input is empty)"
)
wrapper = wraps(test_func)(skip_on_empty_helper)
wrapper.parameterized_input = input
wrapper.parameterized_func = test_func
test_func.__name__ = "_parameterized_original_%s" % (test_func.__name__,)
return wrapper
def param_as_nose_tuple(self, test_self, func, num, p):
nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
nose_func.__doc__ = self.doc_func(func, num, p)
# Track the unbound function because we need to setattr the unbound
# function onto the class for nose to work (see comments above), and
# Python 3 doesn't let us pull the function out of a bound method.
unbound_func = nose_func
if test_self is not None:
# Under nose on Py2 we need to return an unbound method to make
# sure that the `self` in the method is properly shared with the
# `self` used in `setUp` and `tearDown`. But only there. Everyone
# else needs a bound method.
func_self = (
None if PY2 and detect_runner() == "nose" else
test_self
)
nose_func = make_method(nose_func, func_self, type(test_self))
return unbound_func, (nose_func,) + p.args + (p.kwargs or {},)
def assert_not_in_testcase_subclass(self):
parent_classes = self._terrible_magic_get_defining_classes()
if any(issubclass(cls, TestCase) for cls in parent_classes):
raise Exception("Warning: '@parameterized' tests won't work "
"inside subclasses of 'TestCase' - use "
"'@parameterized.expand' instead.")
def _terrible_magic_get_defining_classes(self):
""" Returns the set of parent classes of the class currently being defined.
Will likely only work if called from the ``parameterized`` decorator.
This function is entirely @brandon_rhodes's fault, as he suggested
the implementation: http://stackoverflow.com/a/8793684/71522
"""
stack = inspect.stack()
if len(stack) <= 4:
return []
frame = stack[4]
code_context = frame[4] and frame[4][0].strip()
if not (code_context and code_context.startswith("class ")):
return []
_, _, parents = code_context.partition("(")
parents, _, _ = parents.partition(")")
return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
@classmethod
def input_as_callable(cls, input):
if callable(input):
return lambda: cls.check_input_values(input())
input_values = cls.check_input_values(input)
return lambda: input_values
@classmethod
def check_input_values(cls, input_values):
        # Explicitly convert non-list inputs to a list so that:
# 1. A helpful exception will be raised if they aren't iterable, and
# 2. Generators are unwrapped exactly once (otherwise `nosetests
# --processes=n` has issues; see:
# https://github.com/wolever/nose-parameterized/pull/31)
if not isinstance(input_values, list):
input_values = list(input_values)
return [param.from_decorator(p) for p in input_values]
@classmethod
def expand(cls, input, name_func=None, doc_func=None, skip_on_empty=False,
**legacy):
""" A "brute force" method of parameterizing test cases. Creates new
test cases and injects them into the namespace that the wrapped
function is being defined in. Useful for parameterizing tests in
subclasses of 'UnitTest', where Nose test generators don't work.
"""
if "testcase_func_name" in legacy:
warnings.warn("testcase_func_name= is deprecated; use name_func=",
DeprecationWarning, stacklevel=2)
if not name_func:
name_func = legacy["testcase_func_name"]
if "testcase_func_doc" in legacy:
warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
DeprecationWarning, stacklevel=2)
if not doc_func:
doc_func = legacy["testcase_func_doc"]
doc_func = doc_func or default_doc_func
name_func = name_func or default_name_func
def parameterized_expand_wrapper(f, instance=None):
stack = inspect.stack()
frame = stack[1]
frame_locals = frame[0].f_locals
parameters = cls.input_as_callable(input)()
if not parameters:
if not skip_on_empty:
raise ValueError(
"Parameters iterable is empty (hint: use "
"`parameterized.expand([], skip_on_empty=True)` to skip "
"this test when the input is empty)"
)
return wraps(f)(lambda: skip_on_empty_helper())
digits = len(str(len(parameters) - 1))
for num, p in enumerate(parameters):
name = name_func(f, "{num:0>{digits}}".format(digits=digits, num=num), p)
# If the original function has patches applied by 'mock.patch',
# re-construct all patches on the just former decoration layer
# of param_as_standalone_func so as not to share
# patch objects between new functions
nf = reapply_patches_if_need(f)
frame_locals[name] = cls.param_as_standalone_func(p, nf, name)
frame_locals[name].__doc__ = doc_func(f, num, p)
# Delete original patches to prevent new function from evaluating
# original patching object as well as re-constructed patches.
delete_patches_if_need(f)
f.__test__ = False
return parameterized_expand_wrapper
@classmethod
def param_as_standalone_func(cls, p, func, name):
@wraps(func)
def standalone_func(*a):
return func(*(a + p.args), **p.kwargs)
standalone_func.__name__ = name
# place_as is used by py.test to determine what source file should be
# used for this test.
standalone_func.place_as = func
# Remove __wrapped__ because py.test will try to look at __wrapped__
# to determine which parameters should be used with this test case,
# and obviously we don't need it to do any parameterization.
try:
del standalone_func.__wrapped__
except AttributeError:
pass
return standalone_func
@classmethod
def to_safe_name(cls, s):
return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
def parameterized_class(attrs, input_values=None, class_name_func=None, classname_func=None):
""" Parameterizes a test class by setting attributes on the class.
Can be used in two ways:
1) With a list of dictionaries containing attributes to override::
@parameterized_class([
{ "username": "foo" },
{ "username": "bar", "access_level": 2 },
])
class TestUserAccessLevel(TestCase):
...
2) With a tuple of attributes, then a list of tuples of values:
@parameterized_class(("username", "access_level"), [
("foo", 1),
("bar", 2)
])
class TestUserAccessLevel(TestCase):
...
"""
if isinstance(attrs, string_types):
attrs = [attrs]
input_dicts = (
attrs if input_values is None else
[dict(zip(attrs, vals)) for vals in input_values]
)
class_name_func = class_name_func or default_class_name_func
if classname_func:
warnings.warn(
"classname_func= is deprecated; use class_name_func= instead. "
"See: https://github.com/wolever/parameterized/pull/74#issuecomment-613577057",
DeprecationWarning,
stacklevel=2,
)
class_name_func = lambda cls, idx, input: classname_func(cls, idx, input_dicts)
def decorator(base_class):
test_class_module = sys.modules[base_class.__module__].__dict__
for idx, input_dict in enumerate(input_dicts):
test_class_dict = dict(base_class.__dict__)
test_class_dict.update(input_dict)
name = class_name_func(base_class, idx, input_dict)
test_class_module[name] = type(name, (base_class,), test_class_dict)
# We need to leave the base class in place (see issue #73), but if we
# leave the test_ methods in place, the test runner will try to pick
# them up and run them... which doesn't make sense, since no parameters
# will have been applied.
# Address this by iterating over the base class and remove all test
# methods.
for method_name in list(base_class.__dict__):
if method_name.startswith("test_"):
delattr(base_class, method_name)
return base_class
return decorator
def get_class_name_suffix(params_dict):
if "name" in params_dict:
return parameterized.to_safe_name(params_dict["name"])
params_vals = (
params_dict.values() if PY3 else
(v for (_, v) in sorted(params_dict.items()))
)
return parameterized.to_safe_name(next((
v for v in params_vals
if isinstance(v, string_types)
), ""))
def default_class_name_func(cls, num, params_dict):
suffix = get_class_name_suffix(params_dict)
return "%s_%s%s" % (
cls.__name__,
num,
suffix and "_" + suffix,
)
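# --- Usage sketch (not part of the original module) --------------------------
# Hedged illustration of the two public entry points defined above inside
# unittest.TestCase subclasses; class names and data below are illustrative only.
import unittest

class DemoAddCase(unittest.TestCase):
    @parameterized.expand([
        ('ints', 1, 2, 3),
        ('floats', 1.5, 2.5, 4.0),
    ])
    def test_add(self, _name, a, b, expected):
        self.assertEqual(a + b, expected)

@parameterized_class(('base', 'digits', 'expected'), [
    (2, '101', 5),
    (16, 'ff', 255),
])
class DemoIntParsingCase(unittest.TestCase):
    def test_parse(self):
        self.assertEqual(int(self.digits, self.base), self.expected)

if __name__ == '__main__':
    unittest.main()  # runs test_add_0_ints, test_add_1_floats and the two generated classes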
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/core/parameterized.py
|
parameterized.py
|
import hmac
import hashlib
import base64
import urllib.parse
import requests
import os
import smtplib
import time
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
class SendEmail:
"""Send mail"""
def __init__(self, host, user, password, port=465):
"""
:param host: smtp server address
:param port: smtp server report
:param user: Email account number
:param password: SMTP service authorization code of mailbox
"""
if port == 465 or port == 587:
self.smtp = smtplib.SMTP_SSL(host=host, port=port)
else:
self.smtp = smtplib.SMTP(host=host, port=port)
self.smtp.login(user=user, password=password)
self.user = user
def send_email(self, subject="test report", content=None, filename=None, to_addrs=None):
"""
:param subject:Email subject
:param content: Email content
:param filename: Attachment document
:param to_addrs: Addressee's address
:type to_addrs: str or list
:return:
"""
msg = MIMEMultipart()
msg["Subject"] = subject
msg["From"] = self.user
if isinstance(to_addrs, str):
msg["To"] = to_addrs
elif to_addrs and isinstance(to_addrs, list):
msg["To"] = to_addrs[0]
if not content:
content = time.strftime("%Y-%m-%d-%H_%M_%S") + ":测试报告"
text = MIMEText(content, _subtype="html", _charset="utf8")
msg.attach(text)
if filename and os.path.isfile(filename):
with open(filename, "rb") as f:
content = f.read()
try:
report = MIMEApplication(content, _subtype=None)
except Exception:
report = MIMEApplication(content)
name = os.path.split(filename)[1]
report.add_header('content-disposition', 'attachment', filename=name)
msg.attach(report)
try:
self.smtp.send_message(msg, from_addr=self.user, to_addrs=to_addrs)
except Exception as e:
print("Failed to send test report")
raise e
else:
print("The test report has been sent")
class DingTalk:
"""Nail group notification occurred"""
def __init__(self, url, data, secret=None):
"""
:param url: Dingtalk robot webhook address
:param data:Message sent (refer to the official message type)
:param secret: (not required) if the robot has set the signature security, it needs to pass in the signature key
"""
self.url = url
self.data = data
self.secret = secret
def get_stamp(self):
"""Countersign"""
timestamp = str(round(time.time() * 1000))
secret_enc = self.secret.encode('utf-8')
string_to_sign = '{}\n{}'.format(timestamp, self.secret)
string_to_sign_enc = string_to_sign.encode('utf-8')
hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
return {"sign": sign, "timestamp": timestamp}
def send_info(self):
"""send info"""
if self.secret:
params = self.get_stamp()
else:
params = None
response = requests.post(url=self.url, json=self.data, params=params)
return response
class WeiXin:
"""
Enterprise wechat group notice
"""
base_url = "https://qyapi.weixin.qq.com/cgi-bin/appchat/send?access_token="
def __init__(self, access_token=None, corp_id=None, corp_secret=None):
"""
:param corp_id: wechat corp_id
:param corp_secret:Applied credential key
"""
self.corp_id = corp_id
self.corp_secret = corp_secret
if access_token:
self.access_token = access_token
elif corp_id and corp_secret:
self.access_token = self.get_access_token()
else:
raise ValueError("access_token and [corpid, corpsecret] cannot both be empty. "
"At least one of them must be passed in")
def get_access_token(self):
"""get access_token"""
url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
params = {
"corpid": self.corp_id,
"corpsecret": self.corp_secret
}
result = requests.get(url=url, params=params).json()
        if result['errcode'] != 0:
raise ValueError(result["errmsg"])
return result["access_token"]
def send_info(self, data):
"""send info"""
url = self.base_url + self.access_token
response = requests.post(url=url, data=data)
return response
if __name__ == '__main__':
# url = "https://oapi.dingtalk.com/robot/send?access_token=690900b5ce6d5d10bb1218b8e64a4e2b55f96a6d116aaf50"
# data = {
# "msgtype": "markdown",
# "markdown": {
# "title": "自动化测试报告",
# "text": open('python31.md', 'r', encoding='utf-8').read()
# },
# "at": {
# "atMobiles": [],
# "isAtAll": False
# }
# }
# ding = DingTalk(url=url, data=data)
# res = ding.send_info()
# print(res)
pass
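    # --- Usage sketch (not part of the original module) -----------------------
    # Hedged example of a signed DingTalk push; the webhook token and secret
    # below are placeholders, not working credentials.
    demo_url = "https://oapi.dingtalk.com/robot/send?access_token=xxxxxxxx"  # placeholder token
    demo_data = {
        "msgtype": "markdown",
        "markdown": {"title": "自动化测试报告", "text": "### 测试完成\n- 通过率: 98.00%"},
        "at": {"atMobiles": [], "isAtAll": False},
    }
    demo_ding = DingTalk(url=demo_url, data=demo_data, secret="SECxxxxxxxx")  # placeholder secret
    # demo_ding.send_info()  # uncomment with real credentials to actually push the message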
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/core/result_push.py
|
result_push.py
|
import json
import os
import unittest
import time
from concurrent.futures.thread import ThreadPoolExecutor
from ..core.test_result import TestResult, ReRunResult
from ..core.result_push import DingTalk, WeiXin, SendEmail
from jinja2 import Environment, FileSystemLoader
import copy
Load = unittest.defaultTestLoader
class TestRunner:
def __init__(self, suite: unittest.TestSuite,
filename="report.html",
report_dir="./reports",
title='测试报告',
tester='测试员',
desc="XX项目测试生成的报告",
templates=1
):
"""
:param suite [object]: 测试套件,由多个case组成,unittest的TestSuite对象, required
:param filename [string]: 报告文件的名称, defaults = report.html
:param report_dir [string]: 生成的报告地址, defaults = ./reports
:param title [string]: 套件名称(报告中使用的标题), defaults = 测试报告
:param templates [integer]: 报告模板, defaults = 1
        :param tester [string]: 负责人, defaults = 测试员
        :param desc [string]: 报告描述, defaults = XX项目测试生成的报告
        """
if not isinstance(suite, unittest.TestSuite):
raise TypeError("Parameter suite is not a test suite")
if not isinstance(filename, str):
raise TypeError("filename is not str")
if not filename.endswith(".html"):
filename = filename + ".html"
self.suite = suite
self.filename = filename
self.title = title
self.tester = tester
self.desc = desc
self.templates = templates
self.report_dir = report_dir
self.result = []
self.start_time = time.time()
def __classification_suite(self):
suites_list = []
def wrapper(suite):
for item in suite:
if isinstance(item, unittest.TestCase):
suites_list.append(suite)
break
else:
wrapper(item)
wrapper(copy.deepcopy(self.suite))
return suites_list
def __get_reports(self):
print("所有用例执行完毕,正在生成测试报告中......")
test_result = {
"success": 0,
"all": 0,
"fail": 0,
"skip": 0,
"error": 0,
"results": [],
"testClass": [],
}
for res in self.result:
for item in test_result:
test_result[item] += res.fields[item]
test_result['runtime'] = '{:.2f} S'.format(time.time() - self.start_time)
test_result["begin_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time))
test_result["title"] = self.title
test_result["tester"] = self.tester
test_result['desc'] = self.desc
if test_result['all'] != 0:
test_result['pass_rate'] = '{:.2f}'.format(test_result['success'] / test_result['all'] * 100)
else:
test_result['pass_rate'] = 0
        # create the report directory if it does not already exist
        if not os.path.isdir(self.report_dir):
            os.mkdir(self.report_dir)
# 获取历史执行数据
test_result['history'] = self.__handle_history_data(test_result)
template_path = os.path.join(os.path.dirname(__file__), '../templates')
env = Environment(loader=FileSystemLoader(template_path))
if self.templates == 2:
template = env.get_template('templates2.html')
elif self.templates == 3:
template = env.get_template('templates3.html')
else:
template = env.get_template('templates.html')
file_path = os.path.join(self.report_dir, self.filename)
res = template.render(test_result)
with open(file_path, 'wb') as f:
f.write(res.encode('utf8'))
print("测试报告已经生成,报告路径为:{}".format(file_path))
self.email_conent = {"file": os.path.abspath(file_path),
"content": env.get_template('templates03.html').render(test_result)
}
self.test_result = test_result
return test_result
def __handle_history_data(self, test_result):
"""
处理历史数据
:return:
"""
try:
with open(os.path.join(self.report_dir, 'history.json'), 'r', encoding='utf-8') as f:
history = json.load(f)
except FileNotFoundError as e:
history = []
history.append({'success': test_result['success'],
'all': test_result['all'],
'fail': test_result['fail'],
'skip': test_result['skip'],
'error': test_result['error'],
'runtime': test_result['runtime'],
'begin_time': test_result['begin_time'],
'pass_rate': test_result['pass_rate'],
})
with open(os.path.join(self.report_dir, 'history.json'), 'w', encoding='utf-8') as f:
json.dump(history, f, ensure_ascii=True)
return history
def __get_notice_content(self):
"""获取通知的内容"""
template_path = os.path.join(os.path.dirname(__file__), '../templates')
env = Environment(loader=FileSystemLoader(template_path))
res_text = env.get_template('dingtalk.md').render(self.test_result)
return res_text
def run(self, thread_count=1, count=0, interval=2):
"""
The entrance to running tests
Note: if multiple test classes share a global variable, errors may occur due to resource competition
:param thread_count:Number of threads. default 1
:param count: Rerun times, default 0
:param interval: Rerun interval, default 2
:return: Test run results
"""
suites = self.__classification_suite()
with ThreadPoolExecutor(max_workers=thread_count) as ts:
for i in suites:
# res = TestResult()
res = ReRunResult(count=count, interval=interval)
self.result.append(res)
ts.submit(i.run, result=res).add_done_callback(res.stopTestRun)
ts.shutdown(wait=True)
result = self.__get_reports()
return result
def rerun_run(self, count=0, interval=2):
"""
失败/异常用例重运行
:param count: 重运行次数, defaults to 0
:param interval: 重运行间隔, defaults to 2
:return: 执行结果对象
"""
res = ReRunResult(count=count, interval=interval)
self.result.append(res)
suites = self.__classification_suite()
for case_ in suites:
case_.run(res)
res.stopTestRun()
res = self.__get_reports()
return res
def send_email(self, host: str, port: int, user: str, password: str, to_addrs, is_file=True):
"""
将运行结果发送邮件,运行中的用例会自动绑定到邮件
:param host: SMTP 服务器地址
:param port: SMTP 服务器端口
:param user: Email 地址
:param password: SMTP 服务端需要的token
:param to_addrs: 邮件发送目标,可以是单独或列表
:param is_file: 是否是文件对象
:return:
"""
sm = SendEmail(host=host, port=port, user=user, password=password)
if is_file:
filename = self.email_conent["file"]
else:
filename = None
content = self.email_conent["content"]
sm.send_email(subject=self.title, content=content, filename=filename, to_addrs=to_addrs)
def get_except_info(self):
"""获取报错用例或失败用例的错误信息"""
except_info = []
num = 0
for i in self.result:
for texts in i.failures:
t, content = texts
num += 1
except_info.append("*{}、用例【{}】执行失败*,\n失败信息如下:".format(num, t._testMethodDoc))
except_info.append(content)
for texts in i.errors:
num += 1
t, content = texts
except_info.append("*{}、用例【{}】执行错误*,\n错误信息如下:".format(num, t._testMethodDoc))
except_info.append(content)
except_str = "\n".join(except_info)
return except_str
def dingtalk_notice(self, url, key=None, secret=None, at_mobiles=None, is_all=False, except_info=False):
"""
:param url: 钉钉机器人的Webhook地址
:param key: (非必传:str类型)如果钉钉机器人安全设置了关键字,则需要传入对应的关键字
:param secret:(非必传:str类型)如果钉钉机器人安全设置了签名,则需要传入对应的密钥
:param at_mobiles: (非必传,list类型)发送通知钉钉中要@人的手机号列表,如:[137xxx,188xxx]
:param is_all: 是否@所有人,默认为False,设为True则会@所有人
:param except_info:是否发送未通过用例的详细信息,默认为False,设为True则会发送失败用例的详细信息
:return: 发送成功返回 {"errcode":0,"errmsg":"ok"} 发送失败返回 {"errcode":错误码,"errmsg":"失败原因"}
"""
res_text = self.__get_notice_content()
if except_info:
res_text += '\n ### 未通过用例详情:\n'
res_text += self.get_except_info()
data = {
"msgtype": "markdown",
"markdown": {
"title": '{}({})'.format(self.title, key),
"text": res_text
},
"at": {
"atMobiles": at_mobiles,
"isAtAll": is_all
}
}
ding = DingTalk(url=url, data=data, secret=secret)
response = ding.send_info()
return response.json()
def weixin_notice(self, chat_id, access_token=None, corp_id=None, corp_secret=None):
"""
测试结果推送到企业微信群,【access_token】和【corp_id,corp_secret】至少要传一种
可以传入access_token ,也可以传入(corp_id,corp_secret)来代替access_token
:param chat_id: 企业微信群ID
:param access_token: 调用企业微信API接口的凭证
:param corp_id: 企业ID
:param corp_secret:应用的凭证密钥
:return:
"""
# 获取通知结果
res_text = self.__get_notice_content()
data = {
"chatid": chat_id,
"msgtype": "markdown",
"markdown": {
"content": res_text
}
}
wx = WeiXin(access_token=access_token, corp_id=corp_id, corp_secret=corp_secret)
response = wx.send_info(data=data)
return response
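# --- Usage sketch (not part of the original module) --------------------------
# Hedged end-to-end example: discover cases, run them on two threads with one
# rerun for failures, then (optionally) push the report. The test directory,
# credentials and webhook values are placeholders.
if __name__ == '__main__':
    demo_suite = unittest.defaultTestLoader.discover('./testcases')  # hypothetical test package
    runner = TestRunner(demo_suite,
                        filename='report.html',
                        report_dir='./reports',
                        title='接口自动化测试报告',
                        tester='tester',
                        templates=2)
    runner.run(thread_count=2, count=1, interval=2)
    # runner.send_email(host='smtp.qq.com', port=465, user='[email protected]',
    #                   password='smtp-auth-code', to_addrs='[email protected]')
    # runner.dingtalk_notice(url='https://oapi.dingtalk.com/robot/send?access_token=xxx',
    #                        key='测试报告', secret='SECxxx', except_info=True)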
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/core/test_runner.py
|
test_runner.py
|
import traceback
import time
def run_count(count, interval, func, *args, **kwargs):
"""运行计数"""
for i in range(count):
try:
func(*args, **kwargs)
except Exception as e:
print("====用例执行失败===")
traceback.print_exc()
if i + 1 == count:
raise e
else:
print("==============开始第{}次重运行=============".format(i))
time.sleep(interval)
else:
break
def re_run(count, interval=2):
"""
单个测试用例重运行的装饰器,注意点,如果使用了ddt,那么该方法要在用在ddt之前
:param count: 失败重运行次数
:param interval: 每次重运行间隔时间,默认三秒钟
:return:
"""
def wrapper(func):
def decorator(*args, **kwargs):
run_count(count, interval, func, *args, **kwargs)
return decorator
return wrapper
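# --- Usage sketch (not part of the original module) --------------------------
# Hedged example: rerun a flaky case up to 3 times with a 1 second pause.
# When combined with the data decorators in data_driver.py, @re_run must be
# applied before them (i.e. written below them), as the docstring notes.
import unittest

class DemoFlakyCase(unittest.TestCase):
    @re_run(count=3, interval=1)
    def test_sometimes_fails(self):
        """illustrative flaky assertion"""
        self.assertTrue(True)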
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/core/re_run.py
|
re_run.py
|
from functools import wraps
import json
import yaml
def _create_test_name(index, name):
if index + 1 < 10:
test_name = name + "_00" + str(index + 1)
elif index + 1 < 100:
test_name = name + "_0" + str(index + 1)
else:
test_name = name + "_" + str(index + 1)
return test_name
def _update_func(new_func_name, params, test_desc, func, *args, **kwargs):
@wraps(func)
def wrapper(self):
return func(self, params, *args, **kwargs)
wrapper.__wrapped__ = func
wrapper.__name__ = new_func_name
wrapper.__doc__ = test_desc
return wrapper
def ddt(cls):
"""
:param cls: 测试类
:return:
"""
for name, func in list(cls.__dict__.items()):
if hasattr(func, "PARAMS"):
for index, case_data in enumerate(getattr(func, "PARAMS")):
new_test_name = _create_test_name(index, name)
if isinstance(case_data, dict) and case_data.get("title"):
test_desc = str(case_data.get("title"))
elif isinstance(case_data, dict) and case_data.get("desc"):
test_desc = str(case_data.get("desc"))
elif (not isinstance(case_data, str)) and hasattr(case_data, 'title'):
test_desc = str(case_data.title)
else:
test_desc = func.__doc__
func2 = _update_func(new_test_name, case_data, test_desc, func)
setattr(cls, new_test_name, func2)
else:
delattr(cls, name)
return cls
def list_data(datas):
"""
:param datas: 测试数据
:return:
"""
def wrapper(func):
setattr(func, "PARAMS", datas)
return func
return wrapper
def yaml_data(file_path):
"""
:param file_path: yaml文件路径
:return:
"""
def wrapper(func):
try:
with open(file_path, "r", encoding="utf-8") as f:
datas = yaml.load(f, Loader=yaml.FullLoader)
except UnicodeDecodeError:
with open(file_path, "r", encoding="gbk") as f:
datas = yaml.load(f, Loader=yaml.FullLoader)
setattr(func, "PARAMS", datas)
return func
return wrapper
def json_data(file_path):
"""
:param file_path: json文件路径
:return:
"""
def wrapper(func):
try:
with open(file_path, "r", encoding="utf-8") as f:
datas = json.load(f)
except UnicodeDecodeError:
with open(file_path, "r", encoding="gbk") as f:
datas = json.load(f)
setattr(func, "PARAMS", datas)
return func
return wrapper
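# --- Usage sketch (not part of the original module) --------------------------
# Hedged example of the class-level ddt decorator with in-memory list data;
# yaml_data/json_data work the same way but load the cases from a file path.
import unittest

@ddt
class DemoLoginCase(unittest.TestCase):
    @list_data([
        {"title": "正确账号登录", "user": "tom", "ok": True},
        {"title": "错误账号登录", "user": "bad", "ok": False},
    ])
    def test_login(self, case):
        """data-driven login case"""
        self.assertIsInstance(case["user"], str)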
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/core/data_driver.py
|
data_driver.py
|
### 【{{title}}】测试结果
##### 测试人员: {{tester}}
##### 开始时间: {{begin_time}}
##### 执行时间: {{runtime}}
##### 用例总数: {{all}}
##### 成功用例: {{success}}
##### 失败用例: {{fail}}
##### 错误用例: {{error}}
##### 跳过用例: {{skip}}
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/templates/dingtalk.md
|
dingtalk.md
|
"""
============================
Author:柠檬班-木森
Time:2020/7/16 17:47
E-mail:[email protected]
Company:湖南零檬信息技术有限公司
============================
"""
|
zzsukitest
|
/zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/templates/__init__.py
|
__init__.py
|
# coding: utf-8
def get_info():
print('【message.get_info()__name__=%s】'% __name__)
return '测试模块代码'
|
zzt-message
|
/zzt_message-0.1-py3-none-any.whl/com/zzt/info/message.py
|
message.py
|
# coding:utf-8
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
"""
name = input('please input your name: ')
print('hello,', name)
name
"""
"""
import com.zzt.info.message as msg
print(msg.get_info())
print('【main】__name__=%s'%__name__)
"""
from com.zzt.info import *
print(message.get_info())
print(information.get_tgfo())
|
zzt-message
|
/zzt_message-0.1-py3-none-any.whl/com/zzt/info/main.py
|
main.py
|
__all__=['message','information']
|
zzt-message
|
/zzt_message-0.1-py3-none-any.whl/com/zzt/info/__init__.py
|
__init__.py
|
# coding:utf-8
def get_tgfo():
print('【information】中的数据信息')
|
zzt-message
|
/zzt_message-0.1-py3-none-any.whl/com/zzt/info/information.py
|
information.py
|
# print('1024 * 768 =', 1024*768)
# coding:UTF-8
# keyword是一个模块的名称,这个名称需要符合标识符定义的要求
import keyword
# 实现了所以关键字的列出
"""
num = 10
# print(num, id(num))
# num = 30
del num
print(num, id(num))
"""
"""result = input('请输入bool型的参数: ')
print('输入的参数: ', result, type(0))
if result:
print('你好,沐言科技')
"""
"""score = 10.0
if 90<=score<=100:
print("优等生")
elif 60<=score<90:
print("良等生")
else:
print("差等生")
"""
'''num_a = 0
num_b = 1
while num_b<=1000:
print(num_b, end='、')
num_a, num_b = num_b, num_a + num_b
'''
# 元组和list互相转化
'''number = ('你好', '哈哈', '休息')
infos = [1, 2, 3, 4]
test = tuple(infos)
print('[数据类型]列表: %s ' % list(number))
print('[元组的数据类型]: %s' % type(test))
'''
'''
def get_info():
print('hello python')
return '你好呀'
data = get_info()
print(data)
'''
'''
def echo(title, url):
return '【带有参数的函数】,标题: {} ,地址: {}'.format(title, url)
print(echo(url='www.baidu.com', title='python'))
'''
'''
num = 100
def change_num():
global num
num = 30
change_num()
print('【全局变量】num=%s' % num)
'''
"""
def print_doc():
'''
测试__doc__全局变量的调用,无任何的方法体
:return:
'''
pass
print(print_doc.__doc__)
"""
"""
def print_data(count):
def out(data):
nonlocal count
count += 1
return "【第{}次输出数据】: {}".format(count, data)
return out
oa = print_data(0)
print(oa('哈哈哈哈'))
print()
print(eval('\n"-"*50\n'))
print()
import this
"""
"""
import sys
print('【执行平台信息】:%s'%(sys.platform))
print('【执行平台信息】:%s'%(sys.path))
"""
"""
import sys
print('【参数信息】:%s'%(sys.argv))
if len(sys.argv)==1:
print('没有输入参数,无法正确执行,程序退出!!!')
sys.exit(0)
else:
print('正确输入参数,程序结束', end="")
for item in sys.argv:
print(item, end='、')
"""
from random import *
numbers = [item for item in range(1, 10)]
print('【原始数据】:%s' % numbers)
print('-' * 50)
filter_result = list(filter(lambda item: item % 2 == 0, numbers))
print('【filter过滤数据】: %s' % filter_result)
print('-' * 50)
map_result = list(map(lambda item: item * 2, filter_result))
print('【map处理数据】: %s' % map_result)
print('-' * 50)
from functools import reduce
reduce_result = reduce(lambda x, y: x + y, map_result)
print('【reduce处理数据】: %s' % reduce_result)
|
zzt-message
|
/zzt_message-0.1-py3-none-any.whl/com/zzt/info/demo02.py
|
demo02.py
|
# zztoolbox
|
zztoolbox
|
/zztoolbox-0.0.3.tar.gz/zztoolbox-0.0.3/README.md
|
README.md
|
import torch
import numpy as np
import seaborn as sns
from tensor import concat, unsqueeze, plot_feature
def test_concat():
tensor = torch.ones(100,100)
assert concat([tensor,tensor]).shape == (200,100)
array = np.ones((100,100))
assert concat([array,array]).shape == (200,100)
def test_unsqueeze():
tensor = torch.ones(100,100)
assert unsqueeze(tensor).shape == (1,100,100)
array = np.ones((100,100))
assert unsqueeze(array).shape == (1,100,100)
def test_plot():
x = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
label = np.expand_dims(np.array([0,1,0,1]),1)
plot_feature('test.png', x, label,embbed_type='PCA')
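# --- Sketch of the module under test (assumption, not the actual source) -----
# The tensor module itself is not included in this archive. The helpers below
# are a hedged guess at behaviour consistent with the assertions above
# (dispatch on torch.Tensor vs numpy.ndarray); plot_feature is left out.
def _concat_sketch(items):
    # join along the first axis, staying in the input's framework
    if isinstance(items[0], torch.Tensor):
        return torch.cat(items, dim=0)
    return np.concatenate(items, axis=0)

def _unsqueeze_sketch(x):
    # add a leading batch dimension
    if isinstance(x, torch.Tensor):
        return x.unsqueeze(0)
    return np.expand_dims(x, 0)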
|
zztoolbox
|
/zztoolbox-0.0.3.tar.gz/zztoolbox-0.0.3/tests/test_tensor.py
|
test_tensor.py
|
import requests
import time
def get_create_time(cls):
creat_time = int(round(time.time() * 1000))
return creat_time
class CrawlAPI(object):
def add(self,a,b):
return a+b
|
zzu-low
|
/zzu_low-0.4-py3-none-any.whl/zzu_low/crawlzz.py
|
crawlzz.py
|
import requests
import time
def get_create_time(cls):
creat_time = int(round(time.time() * 1000))
return creat_time
class CrawlAPI(object):
def add(self, a, b):
return a + b
@classmethod
def sub(cls, a, b):
return a - b
|
zzu-low2
|
/zzu_low2-0.4-py3-none-any.whl/zzu_low2/crawlzz.py
|
crawlzz.py
|
__all__=['messag','information']
|
zzw.py
|
/zzw.py-0.1-py3-none-any.whl/handsom/boy/__init__.py
|
__init__.py
|
def get_message():
return ("zzw.handsome.com")
|
zzw.py
|
/zzw.py-0.1-py3-none-any.whl/handsom/boy/messag.py
|
messag.py
|
def get_information():
return ("zzw.handsome.com")
|
zzw.py
|
/zzw.py-0.1-py3-none-any.whl/handsom/boy/information.py
|
information.py
|
import pandas as pd
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers,activations
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
#SE模块
#如需使用,得加到下采样层里
class Squeeze_excitation_layer(tf.keras.Model):
def __init__(self, filter_sq):
# filter_sq 是 Excitation 中第一个卷积过程中卷积核的个数
super().__init__()
self.avepool = tf.keras.layers.GlobalAveragePooling1D()
self.dense = tf.keras.layers.Dense(filter_sq)
self.relu = tf.keras.layers.Activation('relu')
self.sigmoid = tf.keras.layers.Activation('sigmoid')
def call(self, inputs):
squeeze = self.avepool(inputs)
excitation = self.dense(squeeze)
excitation = self.relu(excitation)
excitation = tf.keras.layers.Dense(inputs.shape[-1])(excitation)
excitation = self.sigmoid(excitation)
excitation = tf.keras.layers.Reshape((1, inputs.shape[-1]))(excitation)
scale = inputs * excitation
return scale
#下采样层
class DownSample(tf.keras.layers.Layer):
#units,使用多少个filter
#k_size:确定第一个卷积层的kernel_size
def __init__(self,units,is_pool=True,use_se = False,k_size=3):
super(DownSample,self).__init__()
#注意,原始unet是valid填充,此处简化为same填充
self.conv1 = tf.keras.layers.Conv1D(units,kernel_size=k_size,
padding = 'same')
self.conv2 = tf.keras.layers.Conv1D(units,kernel_size=3,
padding = 'same')
if is_pool:
self.pool = tf.keras.layers.MaxPooling1D(pool_size=2)
else:
self.pool=False
if use_se:
self.se = Squeeze_excitation_layer(units)
else:
self.se = False
def call(self,x):
if self.pool:
x = self.pool(x)
x = self.conv1(x)
x = tf.nn.relu(x)
x = self.conv2(x)
x = tf.nn.relu(x)
if self.se:
x = self.se(x)
return x
#上采样层
class UpSample(tf.keras.layers.Layer):
#units,使用多少个filter
def __init__(self,units):
super(UpSample,self).__init__()
#注意,原始unet是valid填充,此处简化为same填充
self.conv1 = tf.keras.layers.Conv1D(units,kernel_size=3,
padding = 'same')
self.conv2 = tf.keras.layers.Conv1D(units,kernel_size=3,
padding = 'same')
#反卷积上采样,注意上采样的stride是放大的关键
#注意上采样中
self.deconv = tf.keras.layers.Conv1DTranspose(units//2,kernel_size=2,
padding = 'same',strides=2)
    # forward pass: two Conv1D+ReLU blocks followed by transposed-convolution upsampling
def call(self,x):
x = self.conv1(x)
x = tf.nn.relu(x)
x = self.conv2(x)
x = tf.nn.relu(x)
x = self.deconv(x)
x = tf.nn.relu(x)
return x
#现在其实是SE-uNET
class SE_Unet_1d(tf.keras.Model):
def __init__(self):
super(SE_Unet_1d,self).__init__()
#第一层64个卷积核
#注意is_pool在call时候传入,必须注意
#第一层给11,会不会好一些
self.down1 = DownSample(64,is_pool=False,k_size=11)
#都不加use_se=True,就是原始的
self.down2 = DownSample(128,use_se=True)
self.down3 = DownSample(256,use_se=True)
self.down4 = DownSample(512,use_se=True)
self.down5 = DownSample(1024)
#单独定义一个上采样
self.up = tf.keras.layers.Conv1DTranspose(512,kernel_size=2,
strides=2,padding='same'
)
self.up1 = UpSample(512)
self.up2 = UpSample(256)
self.up3 = UpSample(128)
#注意此处借用下采样,is_pool设false
self.last_conv = DownSample(64,is_pool=False)
#对每个像素点进行回归(只需要1个filter,n分类需要n个filter)
self.out_conv = tf.keras.layers.Conv1D(1,kernel_size=1,
padding = 'same')
def call(self,inputs):
x1 = self.down1(inputs)
x2 = self.down2(x1)
x3 = self.down3(x2)
x4 = self.down4(x3)
x5 = self.down5(x4)
x6 = self.up(x5)
#合并
x7 = tf.concat([x4,x6],2)
x8 = self.up1(x7)
x9 = tf.concat([x3,x8],2)
x10 = self.up2(x9)
x11 = tf.concat([x2,x10],2)
x12 = self.up3(x11)
x13 = tf.concat([x1,x12],2)
x14 = self.last_conv(x13)
out = self.out_conv(x14)
return out
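# --- Usage sketch (not part of the original module) --------------------------
# Hedged smoke test: the sequence length must be divisible by 16 because four
# MaxPooling1D(pool_size=2) layers halve it before the decoder restores it.
# Note that Squeeze_excitation_layer creates Dense/Reshape layers inside call(),
# so the model behaves as intended only in eager execution.
if __name__ == '__main__':
    demo_model = SE_Unet_1d()
    demo_x = tf.random.normal((2, 1024, 4))  # (batch, length, channels), e.g. one-hot DNA
    demo_y = demo_model(demo_x)              # -> (2, 1024, 1), per-position regression
    demo_model.compile(optimizer='adam', loss='mse')
    print(demo_y.shape)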
|
zzx-deep-genome
|
/zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/tf_model.py
|
tf_model.py
|
import time #内置模块
import pysam
import pyBigWig
import numpy as np
import pandas as pd
from pysam import FastaFile
from scipy.ndimage import gaussian_filter1d
#辅助函数,用于one-hot编码
#用此函数对100万条1000长度的序列编码需要约700秒(GPU02节点)
def one_hot_dna(dna):
dna_dict={'A':[1.0,0.,0.,0.],'C':[0.,1.0,0.,0.],'G':[0.,0.,1.0,0.],'T':[0.,0.,0.,1.0],'N':[0.,0.,0.,0.],
'a':[1.0,0.,0.,0.],'c':[0.,1.0,0.,0.],'g':[0.,0.,1.0,0.],'t':[0.,0.,0.,1.0],'n':[0.,0.,0.,0.]}
return np.array([dna_dict[k] for k in dna])
#辅助函数,控制获得固定长度开放区序列
def get_new_bed_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
#到此处为止,未破坏bed文件原始结构
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
def get_new_bed4_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
#到此处为止,未破坏bed文件原始结构
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list,'classes':bed_df.classes})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
#根据控制文件(记录染色体号和长度,tab间隔)过滤样本
def sample_control(bed_df,genome_control_df):
#Python内置函数locals(),返回记录当前所有局部变量的字典
#此处用于局部变量的动态创建
ld = locals()
for i in range(len(genome_control_df)):
bed_chr_ = bed_df[bed_df.chr == genome_control_df.iloc[i,0]]
bed_chr = bed_chr_[bed_chr_.end<genome_control_df.iloc[i,1]]
ld['sample_' + str(genome_control_df.iloc[i,0])] =bed_chr[bed_chr.start>0]
bed_df_all = []
for j in genome_control_df.chr:
bed_df_all.append(ld['sample_' + j])
bed_df_control = pd.concat(bed_df_all,axis=0)
print(len(bed_df)-len(bed_df_control),'个样本被筛除,因为它们不在control文件中或超越了control文件中规定的界限')
return bed_df_control
#这里的第二个参数可以给bed_df而非bed原始文件,即只作为辅助函数,不独立
#约91秒可以完成10万1024长度序列的onehot编码并返回
def get_one_hot_seq_list(fasta_path,sample_bed_df):
print('对',str(len(sample_bed_df)),'条序列进行one-hot编码')
start_time = time.time()
fasta_file = pysam.FastaFile( fasta_path) #打开fasta文件,需要对应目录下有fasta文件的fai索引
seq=[]
for index,data in sample_bed_df.iterrows():
seq.append(one_hot_dna(fasta_file.fetch(data.chr,data.start,data.end) ))
end_time = time.time()
print('序列one-hot编码完成,该步骤累计耗时:',end_time-start_time,'秒')
return seq
def get_regression(bw_path,sample_bed_df):
print('对',str(len(sample_bed_df)),'个样本进行信息提取')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_regression = []
for index,data in sample_bed_df.iterrows():
#bw.stats在范围内取均值
bw_regression.append(bw_file.stats(str(data.chr),int(data.start),int(data.end))[0])
bw_file.close()
end_time = time.time()
print('信息提取完成,该步骤累计耗时:',end_time-start_time,'秒')
return bw_regression
def get_base_regression(bw_path,sample_bed_df):
print('对',str(len(sample_bed_df)),'个样本进行碱基分辨率信息提取')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_base_regression = []
for index,data in sample_bed_df.iterrows():
#bw.values取范围内每个点的值
bw_base_regression.append(bw_file.values(str(data.chr),int(data.start),int(data.end)))
bw_file.close()
end_time = time.time()
print('信息提取完成,该步骤累计耗时:',end_time-start_time,'秒')
return bw_base_regression
#获取反向互补序列,用于数据增强
def DNA_complement(sequence):
trantab = str.maketrans('ACGTacgt', 'TGCAtgca') #翻译表
com_sequence = sequence[::-1].translate(trantab) # 反向、转换互补
return com_sequence
#获取序列而不是one-hot的序列
def get_seq_list(fasta_path,sample_bed_df):
fasta_file = pysam.FastaFile( fasta_path) #打开fasta文件,需要对应目录下有fasta文件的fai索引
seq_=[]
for index,data in sample_bed_df.iterrows():
seq_.append(fasta_file.fetch(data.chr,data.start,data.end))
return seq_
#为了针对分类问题,应该给定bed第四列作为类别标签!
def genome_dataset(bed_path,fasta_path,seq_len=1024,genome_size_control=None,dataset_type='regression',bw_path=None,Data_Augmentation=False):
#注意匹配,chr01或chr1,有没有0,大小写,和fasta、bw文件比较
#注意容错机制:文件头有无均可读(待实现)
with open(bed_path,'r')as f:
#第一行第一个数据单位(应该是chrom或者chrxx)
chek_bed = f.readline().strip().split('\t')[0]
if chek_bed[:3]!='chr' and chek_bed[:3]!='CHR' and chek_bed[:3]!='Chr':
raise IOError("bed文件格式不合规范,请检查!\n 注:各列需以tab间隔,无文件头")
else:
print('bed文件检查通过')
if dataset_type=='regression' or dataset_type=='base_regression':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:3]
bed_df.columns = ['chr','start','end']
#得到定长的bed文件
new_bed_df = get_new_bed_df(bed_df,seq_len)
elif dataset_type=='classification':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:4]
bed_df.columns = ['chr','start','end','classes']
new_bed_df = get_new_bed4_df(bed_df,seq_len)
else:
raise Exception("请选择正确的模式")
#样本控制
    if genome_size_control is not None:
print('使用控制文件,将过滤起止位点不合文件要求的序列和未在文件中出现的染色体对应的序列')
genome_control = pd.read_csv(genome_size_control,sep='\t',names=['chr','control'])
control_bed_df = sample_control(new_bed_df,genome_control)
else:
control_bed_df = new_bed_df
print('您选择不使用控制文件')
#获取原始序列的one-hot编码结果
seq = get_one_hot_seq_list(fasta_path,control_bed_df)
sample_name = []
#获取sample_name,原始
for k in range(len(control_bed_df)):
sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2]))
if dataset_type=='classification':
if Data_Augmentation:
print('通过取反向互补序列进行数据增强,经此步骤,您最终得到的样本量将翻倍')
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
#reverse_com_seq要在此处进行one-hot编码
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
classes_all = list(control_bed_df.classes)+list(control_bed_df.classes)
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'classes':classes_all})
return dataset_df
else:
print('您选择不使用数据增强')
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'classes':list(control_bed_df.classes)})
return dataset_df
if dataset_type=='regression':
if Data_Augmentation:
print('通过取反向互补序列进行数据增强,经此步骤,您最终得到的样本量将翻倍')
#map返回迭代器,需要list转换
#返回反向互补序列
#这里错了,放进来的不能是seq
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
#reverse_com_seq要在此处进行one-hot编码
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
bw_regression = get_regression(bw_path,control_bed_df)
print('获取反向互补序列的信号')
bw_regression_all = bw_regression+bw_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_regression_all})
return dataset_df
else:
print('您选择不使用数据增强')
bw_regression = get_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_regression})
return dataset_df
if dataset_type=='base_regression':
if Data_Augmentation:
print('通过取反向互补序列进行数据增强,经此步骤,您最终得到的样本量将翻倍')
#map返回迭代器,需要list转换
#返回反向互补序列
#这里错了,放进来的不能是seq
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
#reverse_com_seq要在此处进行one-hot编码
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
bw_base_regression = get_base_regression(bw_path,control_bed_df)
print('获取反向互补序列的信号')
bw_base_regression_all = bw_base_regression+bw_base_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_base_regression_all})
return dataset_df
else:
print('您选择不使用数据增强')
bw_base_regression = get_base_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_base_regression})
return dataset_df
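# --- Usage sketch (not part of the original module) --------------------------
# Hedged example call; every path below is a placeholder and must point to a
# real BED / indexed FASTA / bigWig / chromosome-size file before it will run.
if __name__ == '__main__':
    demo_df = genome_dataset(bed_path='./peaks.bed',                # placeholder BED, tab separated, no header
                             fasta_path='./genome.fa',              # placeholder FASTA with a .fai index
                             seq_len=1024,
                             genome_size_control='./genome.sizes',  # placeholder chrom<TAB>length file
                             dataset_type='regression',
                             bw_path='./signal.bw',                 # placeholder bigWig signal track
                             Data_Augmentation=True)
    print(demo_df.head())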
|
zzx-deep-genome
|
/zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/get_dataset.py
|
get_dataset.py
|
import pyBigWig
import numpy as np
import pandas as pd
from scipy.ndimage import gaussian_filter1d
#已测试通过
#用于对bw文件进行标准化和平滑,输出新bw文件
def bw_scale(in_path,out_path,standard=32 ):
in_bw = pyBigWig.open(in_path)
out_bw = pyBigWig.open(out_path,'w')
out_bw.addHeader([(k,v) for k,v in in_bw.chroms().items()])
for i in in_bw.chroms():
scale_ = sum(in_bw.values(i,0,in_bw.chroms()[i]))/in_bw.chroms()[i]
scale_bw_list = np.array(in_bw.values(i,0,in_bw.chroms()[i]))/scale_
#sigma设置了高斯核的标准差
fil_scale_bw_list = gaussian_filter1d(list(scale_bw_list), sigma=standard, truncate=3)
out_bw.addEntries(i, 0, values = fil_scale_bw_list.astype('float16'), span=1, step=1)
out_bw.close()
in_bw.close()
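# --- Usage sketch (not part of the original module) --------------------------
# Hedged example: scale a coverage track by its per-chromosome mean and smooth
# it with a Gaussian kernel (sigma=32). Both paths are placeholders.
if __name__ == '__main__':
    bw_scale(in_path='./raw_signal.bw',      # placeholder input bigWig
             out_path='./scaled_signal.bw',  # placeholder output bigWig
             standard=32)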
|
zzx-deep-genome
|
/zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/Standardization.py
|
Standardization.py
|
import os
import re
import numpy as np
import pandas as pd
import torch
import time #内置模块
import pysam
import pyBigWig
from pysam import FastaFile
from scipy.ndimage import gaussian_filter1d
#辅助函数,用于one-hot编码
#用此函数对100万条1000长度的序列编码需要约700秒(GPU02节点)
def one_hot_dna(dna):
dna_dict={'A':[1.0,0.,0.,0.],'C':[0.,1.0,0.,0.],'G':[0.,0.,1.0,0.],'T':[0.,0.,0.,1.0],'N':[0.,0.,0.,0.],
'a':[1.0,0.,0.,0.],'c':[0.,1.0,0.,0.],'g':[0.,0.,1.0,0.],'t':[0.,0.,0.,1.0],'n':[0.,0.,0.,0.]}
return np.array([dna_dict[k] for k in dna])
#辅助函数,控制获得固定长度开放区序列
def get_new_bed_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
#到此处为止,未破坏bed文件原始结构
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
def get_new_bed4_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
#到此处为止,未破坏bed文件原始结构
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list,'classes':bed_df.classes})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
#根据控制文件(记录染色体号和长度,tab间隔)过滤样本
def sample_control(bed_df,genome_control_df):
#Python内置函数locals(),返回记录当前所有局部变量的字典
#此处用于局部变量的动态创建
ld = locals()
for i in range(len(genome_control_df)):
bed_chr_ = bed_df[bed_df.chr == genome_control_df.iloc[i,0]]
bed_chr = bed_chr_[bed_chr_.end<genome_control_df.iloc[i,1]]
ld['sample_' + str(genome_control_df.iloc[i,0])] =bed_chr[bed_chr.start>0]
bed_df_all = []
for j in genome_control_df.chr:
bed_df_all.append(ld['sample_' + j])
bed_df_control = pd.concat(bed_df_all,axis=0)
print(len(bed_df)-len(bed_df_control),'个样本被筛除,因为它们不在control文件中或超越了control文件中规定的界限')
return bed_df_control
#这里的第二个参数可以给bed_df而非bed原始文件,即只作为辅助函数,不独立
#约91秒可以完成10万1024长度序列的onehot编码并返回
def get_one_hot_seq_list(fasta_path,sample_bed_df):
print('对',str(len(sample_bed_df)),'条序列进行one-hot编码')
start_time = time.time()
fasta_file = pysam.FastaFile( fasta_path) #打开fasta文件,需要对应目录下有fasta文件的fai索引
seq=[]
for index,data in sample_bed_df.iterrows():
seq.append(one_hot_dna(fasta_file.fetch(data.chr,data.start,data.end) ))
end_time = time.time()
print('序列one-hot编码完成,该步骤累计耗时:',end_time-start_time,'秒')
return seq
def get_regression(bw_path,sample_bed_df):
print('对',str(len(sample_bed_df)),'个样本进行信息提取')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_regression = []
for index,data in sample_bed_df.iterrows():
#bw.stats在范围内取均值
bw_regression.append(bw_file.stats(str(data.chr),int(data.start),int(data.end))[0])
bw_file.close()
end_time = time.time()
print('信息提取完成,该步骤累计耗时:',end_time-start_time,'秒')
return bw_regression
def get_base_regression(bw_path,sample_bed_df):
print('对',str(len(sample_bed_df)),'个样本进行碱基分辨率信息提取')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_base_regression = []
for index,data in sample_bed_df.iterrows():
#bw.values取范围内每个点的值
bw_base_regression.append(bw_file.values(str(data.chr),int(data.start),int(data.end)))
bw_file.close()
end_time = time.time()
print('信息提取完成,该步骤累计耗时:',end_time-start_time,'秒')
return bw_base_regression
#获取反向互补序列,用于数据增强
def DNA_complement(sequence):
trantab = str.maketrans('ACGTacgt', 'TGCAtgca') #翻译表
com_sequence = sequence[::-1].translate(trantab) # 反向、转换互补
return com_sequence
#获取序列而不是one-hot的序列
def get_seq_list(fasta_path,sample_bed_df):
fasta_file = pysam.FastaFile( fasta_path) #打开fasta文件,需要对应目录下有fasta文件的fai索引
seq_=[]
for index,data in sample_bed_df.iterrows():
seq_.append(fasta_file.fetch(data.chr,data.start,data.end))
return seq_
#为了针对分类问题,应该给定bed第四列作为类别标签!
def genome_dataset(bed_path,fasta_path,seq_len=1024,genome_size_control=None,dataset_type='regression',bw_path=None,Data_Augmentation=False):
#注意匹配,chr01或chr1,有没有0,大小写,和fasta、bw文件比较
#注意容错机制:文件头有无均可读(待实现)
with open(bed_path,'r')as f:
#第一行第一个数据单位(应该是chrom或者chrxx)
chek_bed = f.readline().strip().split('\t')[0]
if chek_bed[:3]!='chr' and chek_bed[:3]!='CHR' and chek_bed[:3]!='Chr':
raise IOError("bed文件格式不合规范,请检查!\n 注:各列需以tab间隔,无文件头")
else:
print('bed文件检查通过')
if dataset_type=='regression' or dataset_type=='base_regression':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:3]
bed_df.columns = ['chr','start','end']
#build a fixed-length bed dataframe
new_bed_df = get_new_bed_df(bed_df,seq_len)
elif dataset_type=='classification':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:4]
bed_df.columns = ['chr','start','end','classes']
new_bed_df = get_new_bed4_df(bed_df,seq_len)
else:
raise Exception("请选择正确的模式")
#样本控制
if genome_size_control!=None:
print('使用控制文件,将过滤起止位点不合文件要求的序列和未在文件中出现的染色体对应的序列')
genome_control = pd.read_csv(genome_size_control,sep='\t',names=['chr','control'])
control_bed_df = sample_control(new_bed_df,genome_control)
else:
control_bed_df = new_bed_df
print('No control file will be used')
#one-hot encode the original sequences
seq = get_one_hot_seq_list(fasta_path,control_bed_df)
sample_name = []
#build sample_name for the original samples
for k in range(len(control_bed_df)):
sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2]))
if dataset_type=='classification':
if Data_Augmentation:
print('Data augmentation via reverse complements; this step doubles the final number of samples')
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
#one-hot encode reverse_com_seq here
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
classes_all = list(control_bed_df.classes)+list(control_bed_df.classes)
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'classes':classes_all})
return dataset_df
else:
print('Data augmentation disabled')
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'classes':list(control_bed_df.classes)})
return dataset_df
if dataset_type=='regression':
if Data_Augmentation:
print('Data augmentation via reverse complements; this step doubles the final number of samples')
#map returns an iterator, so convert it to a list
#get the reverse-complement sequences
#note: the raw sequences, not the one-hot seq list, must be passed here
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
#one-hot encode reverse_com_seq here
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
bw_regression = get_regression(bw_path,control_bed_df)
print('Fetching the signal for the reverse-complement sequences')
bw_regression_all = bw_regression+bw_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_regression_all})
return dataset_df
else:
print('Data augmentation disabled')
bw_regression = get_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_regression})
return dataset_df
if dataset_type=='base_regression':
if Data_Augmentation:
print('Data augmentation via reverse complements; this step doubles the final number of samples')
#map returns an iterator, so convert it to a list
#get the reverse-complement sequences
#note: the raw sequences, not the one-hot seq list, must be passed here
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
#one-hot encode reverse_com_seq here
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
bw_base_regression = get_base_regression(bw_path,control_bed_df)
print('Fetching the signal for the reverse-complement sequences')
bw_base_regression_all = bw_base_regression+bw_base_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_base_regression_all})
return dataset_df
else:
print('Data augmentation disabled')
bw_base_regression = get_base_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_base_regression})
return dataset_df
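# --- Added illustrative sketch (not part of the original file): a hedged example of how
# genome_dataset might be called. The file names 'peaks.bed', 'genome.fa', 'signal.bw' and
# 'genome.size' are placeholders, not files shipped with the package.
# regression_df = genome_dataset('peaks.bed', 'genome.fa', seq_len=1024,
# genome_size_control='genome.size', dataset_type='regression',
# bw_path='signal.bw', Data_Augmentation=True)
# classification_df = genome_dataset('labeled_peaks.bed', 'genome.fa', seq_len=1024,
# dataset_type='classification')
# Both calls return a DataFrame with a 'sample' column, a 'seq_one_hot' column and either a
# 'target' (regression) or 'classes' (classification) column, as built above.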
def get_torch_pfm(bed_file_path,fasta_file_path,model_path,weight_name='module.model.conv_net.0.weight',
filter_size=8,channel_num=320,seq_len=1000,
genome_size_control_path=None,lim__=0.75,data_batch=320,all_batch=45,stochastic_control=False):
print('The threshold is set to '+str(lim__*100)+'% of the maximum activation')
model_analysis = torch.load(model_path)
#inspect the state dict keys (i.e. which layers the network has)
#model_analysis['state_dict'].keys()
weight0 = model_analysis['state_dict'][weight_name]
#this could also be parameterised
motif_ocr_seq = genome_dataset(bed_file_path,fasta_file_path,seq_len=seq_len,
genome_size_control=genome_size_control_path,dataset_type='classification',bw_path=None,Data_Augmentation=False)
#torch places the channel axis differently from tf
#several axis swaps below convert everything to the tf layout before building the pfm matrices
motif_ocr_hot = np.array(list(motif_ocr_seq.seq_one_hot)).swapaxes(1,2)
ocr_tensor = torch.from_numpy(motif_ocr_hot).cuda().float()
# define a conv layer; make sure the kernel count matches the number of filters
w=torch.nn.Conv1d(4,channel_num,filter_size).cuda()
# assign the tensor as the conv layer weight; it must first be wrapped in torch.nn.Parameter
w.weight=torch.nn.Parameter(weight0)
conv_list = []
for i in range(all_batch):
#keep the batch order
comput_ocr = ocr_tensor[int(i*data_batch):int((i+1)*data_batch)]
conv_out = w(comput_ocr)
conv_out=torch.Tensor.cpu(conv_out).detach().numpy()
conv_list.append(conv_out)
conv_array = np.array(conv_list)
final_conv_out = np.concatenate(conv_array,axis=0).swapaxes(1,2)
weight_use = torch.Tensor.cpu(weight0).detach().numpy()
weight_use = weight_use.swapaxes(0,2)
filter_all_position = []
for i in range(weight_use.shape[2]):
filter_position = []
for j in range(final_conv_out.shape[0]):
one_filter_weight = weight_use[:,:,i]
one_filter_scan_out = final_conv_out[j,:,i]
MAX = np.sum(np.max(one_filter_weight,1))
position = list(np.where(one_filter_scan_out >= MAX*lim__))
#after the loop, filter_position holds every activated position of one filter
#empty entries are kept too, because the sequence index must be preserved
filter_position.append(position)
filter_all_position.append(filter_position)
filter_all_position=np.array(filter_all_position)
#tensor to numpy, then swap axes
ocr_numpy = ocr_tensor.detach().cpu().numpy()
ocr_numpy= ocr_numpy.swapaxes(1,2)
#collect the corresponding sub-sequences
filter_seq=[]
#outer loop over the filters
for i in filter_all_position:
seq_ = []
#inner loop over the sequences
for j in range(len(i)):
if len(i[j][0])>=1:
for k in i[j][0]:
#note: the slice length corresponds to filter_size
seq_.append(ocr_numpy[j,k:k+filter_size,:])
filter_seq.append(seq_)
filter_seq_used = []
for i in filter_seq:
if stochastic_control==True:
if len(i)>= (seq_len-filter_size+1)*data_batch*all_batch*((0.25)**(np.floor(filter_size*lim__))):
filter_seq_used.append(i)
if stochastic_control==False:
if len(i)>= 10:
filter_seq_used.append(i)
final_pfm = []
for i in filter_seq_used:
final_pfm.append(np.sum(np.array(i),axis=0).T)
print('Obtained '+str(len(final_pfm))+' pfm matrices whose number of activating sequences is above the specified value')
return final_pfm
#expects an array-like input
def get_meme_input_file(pfms_,meme_path):
pfms_ = np.array(pfms_)
ppm = []
for i in pfms_:
ppm.append(i/np.sum(i,axis=0))
with open(meme_path,'w')as f:
f.write('MEME version 5.3.3')
f.write('\n')
f.write('\n')
f.write('ALPHABET = ACGT')
f.write('\n')
f.write('\n')
f.write('strands: + -')
f.write('\n')
f.write('\n')
f.write('Background letter frequencies')
f.write('\n')
f.write('A 0.25 C 0.25 G 0.25 T 0.25')
f.write('\n')
f.write('\n')
for i in range(len(ppm)):
f.write('MOTIF\tmotif_ppm'+str(i))
f.write('\n')
f.write('letter-probability matrix: alength= 4 w= '+str(len(ppm[i].T)))
f.write('\n')
for j in ppm[i].T:
f.write(str(j[0])+'\t'+str(j[1])+'\t'+str(j[2])+'\t'+str(j[3]))
f.write('\n')
f.write('\n')
print('The result file has been saved to '+meme_path+'\n'+'It is in the input format required by the MEME suite tomtom tool')
def get_torch_motif(meme_path,bed_file_path,fasta_file_path,model_path,weight_name='module.model.conv_net.0.weight',
filter_size=8,channel_num=320,seq_len=1000,
genome_size_control_path=None,lim__=0.75,data_batch=320,all_batch=45,stochastic_control=False):
get_meme_input_file(get_torch_pfm(bed_file_path,fasta_file_path,model_path,weight_name=weight_name,
filter_size=filter_size,channel_num=channel_num,seq_len=seq_len,
genome_size_control_path=None,lim__=lim__,data_batch=data_batch,all_batch=all_batch,stochastic_control=stochastic_control),meme_path=meme_path)
|
zzx-deep-genome
|
/zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/cnn_torch_motif_1d.py
|
cnn_torch_motif_1d.py
|
import numpy as np
import wget
import pandas as pd
from random import randint, sample
#fetch PFMs (from JASPAR or a local file)
def get_pfm(taxonomic_groups=str('plants'),data_local = None):
if data_local == None:
if taxonomic_groups=='plants':
DATA_URL = 'http://jaspar.genereg.net/download/CORE/JASPAR2020_CORE_plants_non-redundant_pfms_jaspar.txt'
out_fname='./plants.txt'
wget.download(DATA_URL, out=out_fname)
pre_pfm = pd.read_csv('./plants.txt',
sep='\t',
header=None)
elif taxonomic_groups=='fungi':
DATA_URL = 'http://jaspar.genereg.net/download/CORE/JASPAR2020_CORE_fungi_non-redundant_pfms_jaspar.txt'
out_fname='./plants.txt'
wget.download(DATA_URL, out=out_fname)
pre_pfm = pd.read_csv('./plants.txt',
sep='\t',
header=None)
elif taxonomic_groups=='vertebrates':
DATA_URL = 'http://jaspar.genereg.net/download/CORE/JASPAR2020_CORE_vertebrates_non-redundant_pfms_jaspar.txt'
out_fname='./plants.txt'
wget.download(DATA_URL, out=out_fname)
pre_pfm = pd.read_csv('./plants.txt',
sep='\t',
header=None)
elif taxonomic_groups=='insects':
DATA_URL = 'http://jaspar.genereg.net/download/CORE/JASPAR2020_CORE_insects_non-redundant_pfms_jaspar.txt'
out_fname='./plants.txt'
wget.download(DATA_URL, out=out_fname)
pre_pfm = pd.read_csv('./plants.txt',
sep='\t',
header=None)
else:
#accept a file downloaded from JASPAR as input
pre_pfm = pd.read_csv(str(data_local),
sep='\t',
header=None)
pfm = []
for i in range(0,len(pre_pfm),5):
pfm_sample = []
for j in range(i+1,i+5):
str_pfm = pre_pfm.iloc[j,0][4:-1].strip().split()
int_pfm = [int(k) for k in str_pfm]
pfm_sample.append(np.array(int_pfm))
pfm.append(np.array(pfm_sample).astype('float32'))
#return a list of pfm arrays, each of shape 4*L (L is the motif length)
print('There are '+str(len(pfm))+ ' PFMs '+str('!'))
print('You need to consider whether the number of CNN filters you use is suitable for this initialization method.')
return pfm
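# --- Added illustrative note (not part of the original file): get_pfm assumes a JASPAR-style
# 5-line block layout roughly like the sketch below (the values are made up). Each matrix row
# is parsed with line[4:-1], i.e. the leading 'A  [' and the trailing ']' are stripped before splitting.
# >MA0004.1	Arnt
# A  [     4    19     0     0     0 ]
# C  [    16     0    20     0     0 ]
# G  [     0    20     0    20     0 ]
# T  [     0     1     0     0    20 ]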
#convert PFMs to PPMs
def get_ppm(pfm_ ):
ppm = []
for k in pfm_:
ppm.append(k)
for i in range(len(ppm)):
for j in range(len(ppm[i][0])):
a = ppm[i][0][j] / (ppm[i][0][j] + ppm[i][1][j] +
ppm[i][2][j] + ppm[i][3][j])
b = ppm[i][1][j] / (ppm[i][0][j] + ppm[i][1][j] +
ppm[i][2][j] + ppm[i][3][j])
c = ppm[i][2][j] / (ppm[i][0][j] + ppm[i][1][j] +
ppm[i][2][j] + ppm[i][3][j])
d = ppm[i][3][j] / (ppm[i][0][j] + ppm[i][1][j] +
ppm[i][2][j] + ppm[i][3][j])
ppm[i][0][j] = a
ppm[i][1][j] = b
ppm[i][2][j] = c
ppm[i][3][j] = d
return ppm
#compute information entropy (smaller means more informative); 1e-5 is added to avoid zeros
def compute_Information_entropy(acgt):
return -1*np.sum(np.log2(np.array(acgt)+1e-5)* (np.array(acgt)+1e-5))
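# --- Added illustrative note (not part of the original file): with the 1e-5 smoothing above,
# compute_Information_entropy([0.25, 0.25, 0.25, 0.25]) is roughly 2.0 bits (a maximally uncertain column),
# while compute_Information_entropy([1.0, 0.0, 0.0, 0.0]) is close to 0 bits (a fully conserved column),
# so lower values indicate more informative positions.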
#get a set of fixed-length PPMs
def get_ppm_L(ppm,L_ = 8):
ppm_L = []
num_drop = 0
num_L = 0
f = lambda x: compute_Information_entropy(x)
for i in ppm:
#shorter than the specified length: drop it
if len(i[0])<L_:
num_drop+=1
#exactly the specified length: keep it as is
elif len(i[0])==L_:
ppm_L.append(i)
num_L+=1
#longer than the specified length: keep the lowest-entropy (most informative) window of length L_
else:
min_Information_entropy = float('inf')
final_number = 0
#i has shape 4 x L, so the window index must run over the columns (len(i[0])), not the rows (len(i))
for j in range(0,len(i[0])-L_+1):
if np.sum([f(a) for a in i.T[j:j+L_]])<min_Information_entropy:
min_Information_entropy = np.sum([f(a) for a in i.T[j:j+L_]])
final_number = j
else:
pass
ppm_L.append(i[:,final_number:final_number+L_])
print(str(num_drop)+ ' PFMs with lengths less than the specified length have been screened out.')
print( 'All '+str(num_L) + ' PFMs of length exactly equal to the specified length are retained.')
print('For PFMs with lengths greater than the specified length, the window with the lowest information entropy (highest information content) is kept.')
return ppm_L
#get fixed-length PPMs with 0.25 subtracted from every value, so the mean is close to 0
def get_ppm_rp25_L(ppm,L_ = 8):
ppm_L = []
num_drop = 0
num_L = 0
f = lambda x: compute_Information_entropy(x)
for i in ppm:
#shorter than the specified length: drop it
if len(i[0])<L_:
num_drop+=1
#exactly the specified length: keep it as is
elif len(i[0])==L_:
ppm_L.append(i-0.25)
num_L+=1
#longer than the specified length: keep the lowest-entropy (most informative) window of length L_
else:
min_Information_entropy = float('inf')
final_number = 0
#i has shape 4 x L, so the window index must run over the columns (len(i[0])), not the rows (len(i))
for j in range(0,len(i[0])-L_+1):
if np.sum([f(a) for a in i.T[j:j+L_]])<min_Information_entropy:
min_Information_entropy = np.sum([f(a) for a in i.T[j:j+L_]])
final_number = j
else:
pass
ppm_L.append(i[:,final_number:final_number+L_]-0.25)
print(str(num_drop)+ ' PFMs with lengths less than the specified length have been screened out.')
print( 'All '+str(num_L) + ' PFMs of length exactly equal to the specified length are retained.')
print('For PFMs with lengths greater than the specified length, the window with the lowest information entropy (highest information content) is kept.')
return ppm_L
def get_pwm(ppm_L,background_acgt = [0.25,0.25,0.25,0.25]):
pwm_L = []
for i in range(len(ppm_L)):
pwm_L_sample = []
for j in range(4):
#1e-2 is added to avoid zeros in the result
#caveat: positive values are capped around 2, while negative values can approach -inf (a very large negative number)
#consider revising this, or simply using the ppm instead?
#this may cause some issues
pwm_L_sample.append(list(np.log2((ppm_L[i][j]+1e-2)/background_acgt[j])))
pwm_L.append(np.array(pwm_L_sample))
return np.array(pwm_L)
def filter_initialization_matrix(taxonomic_groups='plants',data_local = None,
filters=64,
L_=8,
pattern='ppm_rp25',
background_acgt=[0.25, 0.25, 0.25, 0.25]):
print('Note that the base order in the return result matrix is ACGT')
#only the pwm pattern needs background_acgt
if pattern == 'ppm_rp25':
print(
'You will get the PPM_R25 matrix (the PPM matrix with 0.25 subtracted from every position) with the specified number and length.'
)
pfm = get_pfm(taxonomic_groups,data_local )
ppm = get_ppm(pfm)
ppm_L = get_ppm_rp25_L(ppm, L_)
sample_number = [randint(0, len(ppm_L) - 1) for _ in range(filters)]
ppm_r25_filters = []
for i in sample_number:
ppm_r25_filters.append(ppm_L[i])
print('You will get the numpy array with shape ',
str(np.array(ppm_r25_filters).shape))
print(
"You can use numpy's swaaxes function to make the dimension transformation suitable for initializing your parameters"
)
return np.array(ppm_r25_filters)
elif pattern == 'ppm':
print(
'You will get the PPM matrix with the specified number and length.'
)
pfm = get_pfm(taxonomic_groups,data_local )
ppm = get_ppm(pfm)
ppm_L = get_ppm_L(ppm, L_)
sample_number = [randint(0, len(ppm_L)-1) for _ in range(filters)]
ppm_filters = []
for i in sample_number:
ppm_filters.append(ppm_L[i])
print('You will get the numpy array with shape ',
str(np.array(ppm_filters).shape))
print(
"You can use numpy's swaaxes function to make the dimension transformation suitable for initializing your parameters"
)
return np.array(ppm_filters)
elif pattern == 'pwm':
print(
'You will get the PWM matrix of the specified length and the specified number calculated with '
+ str(background_acgt) + ' as the background.',
'To prevent negative infinity, the value of 1e-2 is added to all positions.'
)
pfm = get_pfm(taxonomic_groups,data_local )
ppm = get_ppm(pfm)
ppm_L = get_ppm_L(ppm, L_)
pwm = get_pwm(ppm_L, background_acgt)
sample_number = [randint(0, len(pwm)-1) for _ in range(filters)]
pwm_filters = []
for i in sample_number:
pwm_filters.append(pwm[i])
print('You will get the numpy array with shape ',
str(np.array(pwm_filters).shape))
print(
"You can use numpy's swaaxes function to make the dimension transformation suitable for initializing your parameters"
)
return np.array(pwm_filters)
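# --- Added illustrative sketch (not part of the original file): a hedged example of using
# filter_initialization_matrix to initialise a first convolution layer. The variable names and
# the framework-specific shape conventions are assumptions, not part of the package.
# init = filter_initialization_matrix(taxonomic_groups='plants', filters=64, L_=8, pattern='ppm_rp25')
# init.shape is (64, 4, 8), which already matches a torch Conv1d(4, 64, 8) weight
# (out_channels, in_channels, kernel_size); for a tf/keras Conv1D kernel of shape
# (kernel_size, in_channels, filters) one would use np.swapaxes(init, 0, 2) instead.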
|
zzx-deep-genome
|
/zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/filter_initialization.py
|
filter_initialization.py
|
import numpy as np
import pandas as pd
#expects an array-like input
def get_meme_input_file(pfms_,meme_path):
pfms_ = np.array(pfms_)
ppm = []
for i in pfms_:
ppm.append(i/np.sum(i,axis=0))
with open(meme_path,'w')as f:
f.write('MEME version 5.3.3')
f.write('\n')
f.write('\n')
f.write('ALPHABET = ACGT')
f.write('\n')
f.write('\n')
f.write('strands: + -')
f.write('\n')
f.write('\n')
f.write('Background letter frequencies')
f.write('\n')
f.write('A 0.25 C 0.25 G 0.25 T 0.25')
f.write('\n')
f.write('\n')
for i in range(len(ppm)):
f.write('MOTIF\tmotif_ppm'+str(i))
f.write('\n')
f.write('letter-probability matrix: alength= 4 w= '+str(len(ppm[i].T)))
f.write('\n')
for j in ppm[i].T:
f.write(str(j[0])+'\t'+str(j[1])+'\t'+str(j[2])+'\t'+str(j[3]))
f.write('\n')
f.write('\n')
|
zzx-deep-genome
|
/zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/get_meme_file.py
|
get_meme_file.py
|
import numpy as np
import pandas as pd
#build pfm, ppm and pwm matrices from a set of sequence files; correctness still needs to be checked
def one_hot_dna(dna):
dna_dict={'A':[1.0,0.,0.,0.],'C':[0.,1.0,0.,0.],'G':[0.,0.,1.0,0.],'T':[0.,0.,0.,1.0],'N':[0.,0.,0.,0.]}
return np.array([dna_dict[j] for j in dna])
#get the pfm, ppm or pwm matrices
def get_pwm(file_path,background_acgt = [0.25,0.25,0.25,0.25],type_='pfm'):
seq_all_list = []
#note: reading this way alone would miss the last record!
with open(file_path,'r')as f:
seq_list = []
seq_file = f.readlines()
for i in seq_file:
if i[0]=='>':
seq_all_list.append(seq_list)
seq_list=[]
elif i[0]=='A'or i[0]=='C' or i[0]== 'G' or i[0]=='T' or i[0]=='a'or i[0]=='c'or i[0]=='g'or i[0]=='t':
seq_list.append(i.strip())
else:
pass
#do not forget the block belonging to the last '>' header
seq_all_list.append(seq_list)
pfm_all = []
ppm_all = []
pwm_all = []
for i in seq_all_list[1:]:
one_hot_seq = []
for j in i:
one_hot_seq.append(one_hot_dna(j))
pfm = np.sum(np.array(one_hot_seq),axis=0).T
ppm = pfm/np.sum(pfm,axis=0)
pwm = []
for k in range(4):
pwm.append(list(np.log2((ppm[k])/background_acgt[k])))
pfm_all.append(pfm)
ppm_all.append(ppm)
pwm_all.append(pwm)
if type_ == 'pfm':
return pfm_all
if type_ == 'ppm':
return ppm_all
if type_ == 'pwm':
return pwm_all
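# --- Added illustrative sketch (not part of the original file): a hedged example of calling get_pwm.
# 'binding_sites.fa' is a placeholder FASTA-like file in which every '>' record groups
# equal-length aligned sequences; each record yields one matrix.
# pfms = get_pwm('binding_sites.fa', type_='pfm')
# ppms = get_pwm('binding_sites.fa', type_='ppm')
# pwms = get_pwm('binding_sites.fa', background_acgt=[0.25, 0.25, 0.25, 0.25], type_='pwm')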
|
zzx-deep-genome
|
/zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/get_pwm.py
|
get_pwm.py
|
print('Note: if you need the model or motif modules, tensorflow 2.x or pytorch 1.x must be installed in your current environment')
print('For the convenience of users who only need the preprocessing modules, tensorflow and pytorch are not listed as dependencies of this package')
print('If you do not need the model and motif modules, please ignore this message')
|
zzx-deep-genome
|
/zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/__init__.py
|
__init__.py
|
import os
import re
import numpy as np
import pandas as pd
import tensorflow as tf
import time #built-in module
import pysam
import pyBigWig
from pysam import FastaFile
from scipy.ndimage import gaussian_filter1d
#helper for one-hot encoding
#encoding one million sequences of length 1000 with this function takes about 700 seconds (GPU02 node)
def one_hot_dna(dna):
dna_dict={'A':[1.0,0.,0.,0.],'C':[0.,1.0,0.,0.],'G':[0.,0.,1.0,0.],'T':[0.,0.,0.,1.0],'N':[0.,0.,0.,0.],
'a':[1.0,0.,0.,0.],'c':[0.,1.0,0.,0.],'g':[0.,0.,1.0,0.],'t':[0.,0.,0.,1.0],'n':[0.,0.,0.,0.]}
return np.array([dna_dict[k] for k in dna])
#helper that yields fixed-length open-region intervals
def get_new_bed_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
#up to this point the original bed file structure is untouched
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
def get_new_bed4_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
#up to this point the original bed file structure is untouched
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list,'classes':bed_df.classes})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
#filter samples according to a control file (chromosome name and length, tab separated)
def sample_control(bed_df,genome_control_df):
#the built-in locals() returns a dict of all current local variables
#it is used here to create local variables dynamically
ld = locals()
for i in range(len(genome_control_df)):
bed_chr_ = bed_df[bed_df.chr == genome_control_df.iloc[i,0]]
bed_chr = bed_chr_[bed_chr_.end<genome_control_df.iloc[i,1]]
ld['sample_' + str(genome_control_df.iloc[i,0])] =bed_chr[bed_chr.start>0]
bed_df_all = []
for j in genome_control_df.chr:
bed_df_all.append(ld['sample_' + j])
bed_df_control = pd.concat(bed_df_all,axis=0)
print(len(bed_df)-len(bed_df_control),'samples were filtered out because their chromosome is missing from the control file or their coordinates exceed the bounds it defines')
return bed_df_control
#the second argument should be a bed_df rather than a raw bed file, i.e. this is only a helper, not standalone
#one-hot encoding 100k sequences of length 1024 takes roughly 91 seconds
def get_one_hot_seq_list(fasta_path,sample_bed_df):
print('One-hot encoding',str(len(sample_bed_df)),'sequences')
start_time = time.time()
fasta_file = pysam.FastaFile( fasta_path) #open the fasta file; a .fai index must exist next to it
seq=[]
for index,data in sample_bed_df.iterrows():
seq.append(one_hot_dna(fasta_file.fetch(data.chr,data.start,data.end) ))
end_time = time.time()
print('One-hot encoding finished; this step took',end_time-start_time,'seconds')
return seq
def get_regression(bw_path,sample_bed_df):
print('Extracting signal for',str(len(sample_bed_df)),'samples')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_regression = []
for index,data in sample_bed_df.iterrows():
#bw.stats returns the mean value over the interval
bw_regression.append(bw_file.stats(str(data.chr),int(data.start),int(data.end))[0])
bw_file.close()
end_time = time.time()
print('Signal extraction finished; this step took',end_time-start_time,'seconds')
return bw_regression
def get_base_regression(bw_path,sample_bed_df):
print('Extracting base-resolution signal for',str(len(sample_bed_df)),'samples')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_base_regression = []
for index,data in sample_bed_df.iterrows():
#bw.values returns the per-base values over the interval
bw_base_regression.append(bw_file.values(str(data.chr),int(data.start),int(data.end)))
bw_file.close()
end_time = time.time()
print('Signal extraction finished; this step took',end_time-start_time,'seconds')
return bw_base_regression
#reverse-complement a sequence, used for data augmentation
def DNA_complement(sequence):
trantab = str.maketrans('ACGTacgt', 'TGCAtgca') #translation table
com_sequence = sequence[::-1].translate(trantab) # reverse, then complement
return com_sequence
#fetch raw sequences instead of one-hot encoded ones
def get_seq_list(fasta_path,sample_bed_df):
fasta_file = pysam.FastaFile( fasta_path) #open the fasta file; a .fai index must exist next to it
seq_=[]
for index,data in sample_bed_df.iterrows():
seq_.append(fasta_file.fetch(data.chr,data.start,data.end))
return seq_
#for classification, the fourth bed column must provide the class label!
def genome_dataset(bed_path,fasta_path,seq_len=1024,genome_size_control=None,dataset_type='regression',bw_path=None,Data_Augmentation=False):
#make sure chromosome naming (chr01 vs chr1, leading zeros, case) matches the fasta and bw files
#TODO: tolerate bed files both with and without a header line
with open(bed_path,'r')as f:
#first field of the first line (expected to be chrom or chrXX)
chek_bed = f.readline().strip().split('\t')[0]
if chek_bed[:3]!='chr' and chek_bed[:3]!='CHR' and chek_bed[:3]!='Chr':
raise IOError("bed文件格式不合规范,请检查!\n 注:各列需以tab间隔,无文件头")
else:
print('bed file check passed')
if dataset_type=='regression' or dataset_type=='base_regression':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:3]
bed_df.columns = ['chr','start','end']
#build a fixed-length bed dataframe
new_bed_df = get_new_bed_df(bed_df,seq_len)
elif dataset_type=='classification':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:4]
bed_df.columns = ['chr','start','end','classes']
new_bed_df = get_new_bed4_df(bed_df,seq_len)
else:
raise Exception("请选择正确的模式")
#样本控制
if genome_size_control!=None:
print('使用控制文件,将过滤起止位点不合文件要求的序列和未在文件中出现的染色体对应的序列')
genome_control = pd.read_csv(genome_size_control,sep='\t',names=['chr','control'])
control_bed_df = sample_control(new_bed_df,genome_control)
else:
control_bed_df = new_bed_df
print('No control file will be used')
#one-hot encode the original sequences
seq = get_one_hot_seq_list(fasta_path,control_bed_df)
sample_name = []
#build sample_name for the original samples
for k in range(len(control_bed_df)):
sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2]))
if dataset_type=='classification':
if Data_Augmentation:
print('Data augmentation via reverse complements; this step doubles the final number of samples')
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
#one-hot encode reverse_com_seq here
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
classes_all = list(control_bed_df.classes)+list(control_bed_df.classes)
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'classes':classes_all})
return dataset_df
else:
print('Data augmentation disabled')
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'classes':list(control_bed_df.classes)})
return dataset_df
if dataset_type=='regression':
if Data_Augmentation:
print('Data augmentation via reverse complements; this step doubles the final number of samples')
#map returns an iterator, so convert it to a list
#get the reverse-complement sequences
#note: the raw sequences, not the one-hot seq list, must be passed here
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
#one-hot encode reverse_com_seq here
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
bw_regression = get_regression(bw_path,control_bed_df)
print('Fetching the signal for the reverse-complement sequences')
bw_regression_all = bw_regression+bw_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_regression_all})
return dataset_df
else:
print('Data augmentation disabled')
bw_regression = get_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_regression})
return dataset_df
if dataset_type=='base_regression':
if Data_Augmentation:
print('Data augmentation via reverse complements; this step doubles the final number of samples')
#map returns an iterator, so convert it to a list
#get the reverse-complement sequences
#note: the raw sequences, not the one-hot seq list, must be passed here
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
#one-hot encode reverse_com_seq here
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
bw_base_regression = get_base_regression(bw_path,control_bed_df)
print('Fetching the signal for the reverse-complement sequences')
bw_base_regression_all = bw_base_regression+bw_base_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_base_regression_all})
return dataset_df
else:
print('Data augmentation disabled')
bw_base_regression = get_base_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_base_regression})
return dataset_df
#extract the parameters of the model's first conv layer
def get_tf_conv0_weight(model_path,weight_name):
tf1_model_best_path = model_path
reader = tf.compat.v1.train.NewCheckpointReader(tf1_model_best_path)
var_to_shape_map = reader.get_variable_to_shape_map()
'''
#this snippet can be used to find the name of the first conv layer
for key in var_to_shape_map:
print("tensor name: ", key)
print(reader.get_tensor(key)) # print the tensor's value
'''
#trained weights of basenji's first conv layer
cnn0_weight = reader.get_tensor(weight_name)
return cnn0_weight
#lim__ = 0.75 is the activation threshold
def get_basenji_motif_pfms(bed_file_path,fasta_file_path,model_path,seq_len=1000,kernel_size=22,
weight_name='',weight_ = None,
genome_size_control_path=None,lim__ = 0.75,data_batch=320,all_batch=45,stochastic_control=False):
if weight_ is None:
weight0 = get_tf_conv0_weight(model_path,weight_name)
else:
weight0 = weight_
print('The threshold is set to '+str(lim__*100)+'% of the maximum activation')
##fetch the data (one-hot encoded sequences); this could also be parameterised
motif_ocr_seq = genome_dataset(bed_file_path,fasta_file_path,seq_len=seq_len,
genome_size_control=genome_size_control_path,dataset_type='classification',bw_path=None,Data_Augmentation=False)
motif_ocr_hot = np.array(list(motif_ocr_seq.seq_one_hot))
ocr_tensor = tf.convert_to_tensor(motif_ocr_hot)
ocr_tensor = tf.cast(ocr_tensor, dtype = tf.float32)
#convolution
conv_list = []
for i in range(all_batch):
#keep the batch order
comput_ocr = ocr_tensor[int(i*data_batch):int((i+1)*data_batch)]
conv_out = tf.compat.v1.nn.conv1d(comput_ocr, filters=weight0, padding='VALID').numpy()
conv_list.append(conv_out)
conv_array = np.array(conv_list)
final_conv_out = np.concatenate(conv_array,axis=0)
#locate the activated positions
filter_all_position = []
for i in range(weight0.shape[2]):
filter_position = []
for j in range(final_conv_out.shape[0]):
one_filter_weight = weight0[:,:,i]
one_filter_scan_out = final_conv_out[j,:,i]
MAX = np.sum(np.max(one_filter_weight,1))
position = list(np.where(one_filter_scan_out >= MAX*lim__))
#after the loop, filter_position holds every activated position of one filter
#empty entries are kept too, because the sequence index must be preserved
filter_position.append(position)
filter_all_position.append(filter_position)
#collect the corresponding sub-sequences
filter_seq=[]
#outer loop over the filters
for i in filter_all_position:
seq_ = []
#inner loop over the sequences
for j in range(len(i)):
if len(i[j][0])>=1:
for k in i[j][0]:
#note: the slice length corresponds to the kernel size
seq_.append(ocr_tensor[j,k:k+int(kernel_size),:])
filter_seq.append(seq_)
#drop filters that matched too few sequences
#would using the 25th-75th percentile range work better here?
filter_seq_used = []
for i in filter_seq:
if stochastic_control==True:
if len(i)>= (seq_len-kernel_size+1)*data_batch*all_batch*((0.25)**(np.floor(kernel_size*lim__))):
filter_seq_used.append(i)
if stochastic_control==False:
if len(i)>= 10:
filter_seq_used.append(i)
final_pfm = []
for i in filter_seq_used:
final_pfm.append(np.sum(np.array(i),axis=0).T)
print('Obtained '+str(len(final_pfm))+' pfm matrices whose number of activating sequences is above the specified value')
return final_pfm
#expects an array-like input
def get_meme_input_file(pfms_,meme_path):
pfms_ = np.array(pfms_)
ppm = []
for i in pfms_:
ppm.append(i/np.sum(i,axis=0))
with open(meme_path,'w')as f:
f.write('MEME version 5.3.3')
f.write('\n')
f.write('\n')
f.write('ALPHABET = ACGT')
f.write('\n')
f.write('\n')
f.write('strands: + -')
f.write('\n')
f.write('\n')
f.write('Background letter frequencies')
f.write('\n')
f.write('A 0.25 C 0.25 G 0.25 T 0.25')
f.write('\n')
f.write('\n')
for i in range(len(ppm)):
f.write('MOTIF\tmotif_ppm'+str(i))
f.write('\n')
f.write('letter-probability matrix: alength= 4 w= '+str(len(ppm[i].T)))
f.write('\n')
for j in ppm[i].T:
f.write(str(j[0])+'\t'+str(j[1])+'\t'+str(j[2])+'\t'+str(j[3]))
f.write('\n')
f.write('\n')
print('The result file has been saved to '+meme_path+'\n'+'It is in the input format required by the MEME suite tomtom tool')
def get_tf_motif(meme_path,bed_file_path,fasta_file_path,model_path,seq_len=1000,kernel_size=22,
weight_name='',weight_ = None,
genome_size_control_path=None,lim__ = 0.75,data_batch=320,all_batch=45,stochastic_control=False):
get_meme_input_file(get_basenji_motif_pfms(bed_file_path,fasta_file_path,model_path,seq_len=seq_len,kernel_size=kernel_size,
weight_name=weight_name,weight_ = weight_,
genome_size_control_path=genome_size_control_path,lim__ = lim__,data_batch=data_batch,all_batch=all_batch,stochastic_control=stochastic_control
),meme_path=meme_path)
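# --- Added illustrative sketch (not part of the original file): a hedged call of get_tf_motif.
# The paths are placeholders, and weight_name must be whatever tensor name the checkpoint uses for
# the first conv kernel; alternatively weight_ may be passed directly as an array of shape
# (kernel_size, 4, n_filters) so that no checkpoint is read.
# get_tf_motif('motifs.meme', 'peaks.bed', 'genome.fa', model_path='model_best.tf1',
# seq_len=1000, kernel_size=22, weight_name='first_conv_kernel_name',
# data_batch=320, all_batch=45, lim__=0.75)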
|
zzx-deep-genome
|
/zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/cnn_tf_motif_1d.py
|
cnn_tf_motif_1d.py
|
import torch
import numpy as np
import torch
import torch.nn as nn
#DeeperDeepSEA variant whose last pooling layer collapses the length dimension to 1
class DeeperDeepSEA_pool(nn.Module):
def __init__(self, sequence_length, n_targets):
super(DeeperDeepSEA_pool, self).__init__()
conv_kernel_size = 8
pool_kernel_size = 4
self.conv_net = nn.Sequential(
nn.Conv1d(4, 320, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.Conv1d(320, 320, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.MaxPool1d(
kernel_size=pool_kernel_size, stride=pool_kernel_size),
nn.BatchNorm1d(320),
nn.Conv1d(320, 480, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.Conv1d(480, 480, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.MaxPool1d(
kernel_size=pool_kernel_size, stride=pool_kernel_size),
nn.BatchNorm1d(480),
nn.Dropout(p=0.2),
nn.Conv1d(480, 960, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.MaxPool1d(
kernel_size=44, stride=44),
nn.BatchNorm1d(960),
nn.Dropout(p=0.2)
)
self.classifier = nn.Sequential(
nn.Linear(960 , n_targets),
nn.ReLU(inplace=True),
nn.BatchNorm1d(n_targets),
nn.Linear(n_targets, n_targets),
nn.Sigmoid())
def forward(self, x):
"""
Forward propagation of a batch.
"""
out = self.conv_net(x)
reshape_out = out.view(out.size(0), 960 )
predict = self.classifier(reshape_out)
return predict
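# --- Added illustrative sketch (not part of the original file): a hedged forward pass through
# DeeperDeepSEA_pool. Given the fixed kernel/pool sizes above, an input length of 1000 reduces
# to 1 before the final reshape, so (batch, 4, 1000) inputs are assumed here.
# model = DeeperDeepSEA_pool(sequence_length=1000, n_targets=24)
# x = torch.randn(2, 4, 1000)
# y = model(x) # shape (2, 24), values in (0, 1) because of the final Sigmoid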
#res_attention_model
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
#residual block
class Bottleneck(nn.Module):
expansion = 1 #
def __init__(self, inplanes, planes, stride=1, downsample=None,use_1x1conv=False):
super(Bottleneck, self).__init__()
self.conv_1 = nn.Conv1d(inplanes, planes, kernel_size=1, bias=False)
self.bn_1 = nn.BatchNorm1d(planes)
self.conv_2 = nn.Conv1d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn_2 = nn.BatchNorm1d(planes)
self.conv_3 = nn.Conv1d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn_3 = nn.BatchNorm1d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
if use_1x1conv:
self.conv_4 = nn.Conv1d(inplanes, planes,kernel_size = 1, stride=stride)
else:
self.conv_4 = False
self.bn_res = nn.BatchNorm1d(planes)
def forward(self, x):
if self.conv_4:
residual = self.conv_4(x)
residual = self.bn_res(residual)
else:
residual = x
residual = self.bn_res(residual)
out = self.conv_1(x)
out = self.bn_1(out)
out = self.relu(out)
out = self.conv_2(out)
out = self.bn_2(out)
out = self.relu(out)
out = self.conv_3(out)
out = self.bn_3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
#channel attention module
class ChannelAttention(nn.Module):
def __init__(self, in_channel):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool1d(1)
self.max_pool = nn.AdaptiveMaxPool1d(1)
self.fc1 = nn.Conv1d(in_channel, in_channel // 16, 1, bias=False)
self.relu1 = nn.ReLU()
self.fc2 = nn.Conv1d(in_channel // 16, in_channel, 1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
out = avg_out + max_out
return self.sigmoid(out)
#spatial attention module
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=3):
super(SpatialAttention, self).__init__()
self.conv1 = nn.Conv1d(2, 1, kernel_size=3, padding=1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
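# --- Added illustrative sketch (not part of the original file): how the two attention modules are
# meant to be chained (channel attention first, then spatial attention), as done in
# RES_ATTENTION.forward below. The shapes are illustrative only.
# ca = ChannelAttention(160)
# sa = SpatialAttention()
# feat = torch.randn(2, 160, 994)
# feat = ca(feat) * feat # reweight channels
# feat = sa(feat) * feat # reweight positions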
class RES_ATTENTION(nn.Module):
def __init__(self, sequence_length, n_targets):
super(RES_ATTENTION, self).__init__()
self.conv_h1 = nn.Sequential(
nn.Conv1d(4, 32, kernel_size= 7),
nn.ReLU(inplace=True)
)
self.conv_h2 = nn.Sequential(
nn.Conv1d(4, 32, kernel_size=9,padding=1),
nn.ReLU(inplace=True)
)
self.conv_h3 = nn.Sequential(
nn.Conv1d(4, 32, kernel_size=11,padding=2),
nn.ReLU(inplace=True)
)
self.conv_h4 = nn.Sequential(
nn.Conv1d(4, 32, kernel_size=13,padding=3),
nn.ReLU(inplace=True)
)
self.conv_h5 = nn.Sequential(
nn.Conv1d(4, 32, kernel_size=15,padding=4),
nn.ReLU(inplace=True)
)
#takes the n-channel merged input
self.ca = ChannelAttention(160)
self.sa = SpatialAttention()
self.conv = nn.Sequential(
Bottleneck(160,160),
Bottleneck(160,320,use_1x1conv=True),
#reduce the number of channels
nn.Conv1d( 320,160, kernel_size=1),
nn.Conv1d( 160,24, kernel_size=1),
nn.ReLU(inplace=True),
#
nn.Conv1d(24,24,498),
nn.ReLU(inplace=True),
nn.Conv1d(24,24,497),
)
self.classifier = nn.Sequential(
nn.Sigmoid()
)
def forward(self, x):
#reshape directly; no fully connected layer is used anywhere in the network
out1 = self.conv_h1(x)
out2 = self.conv_h2(x)
out3 = self.conv_h3(x)
out4 = self.conv_h4(x)
out5 = self.conv_h5(x)
out_merge = torch.cat((out1,out2,out3,out4,out5),dim=1)
out_merge_ca = self.ca(out_merge) * out_merge
out_merge_sa = self.sa(out_merge_ca) * out_merge_ca
out_ = self.conv(out_merge_sa)
reshape_out = out_.view(out_.size(0), 24 )
predict = self.classifier(reshape_out)
return predict
class more_cnn_dilation(nn.Module):
def __init__(self, sequence_length, n_targets):
super(more_cnn_dilation, self).__init__()
self.conv_h1 = nn.Sequential(
nn.Conv1d(4, 64, kernel_size= 7),
nn.ReLU(inplace=True)
)
self.conv_h2 = nn.Sequential(
nn.Conv1d(4, 64, kernel_size=9,padding=1),
nn.ReLU(inplace=True)
)
self.conv_h3 = nn.Sequential(
nn.Conv1d(4, 64, kernel_size=11,padding=2),
nn.ReLU(inplace=True)
)
self.conv_h4 = nn.Sequential(
nn.Conv1d(4, 64, kernel_size=13,padding=3),
nn.ReLU(inplace=True)
)
self.conv_h5 = nn.Sequential(
nn.Conv1d(4, 64, kernel_size=15,padding=4),
nn.ReLU(inplace=True)
)
self.conv = nn.Sequential(
nn.Conv1d(320, 320, kernel_size=7,stride=4),
nn.ReLU(inplace=True),
nn.BatchNorm1d(320),
nn.Conv1d(320, 480, kernel_size=1),
nn.Conv1d(480, 480, kernel_size=3,dilation=2,padding=1),
nn.ReLU(inplace=True),
nn.Conv1d(480, 480, kernel_size=3,dilation=4,padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(480, 480, kernel_size=3,dilation=8,padding=7),
nn.ReLU(inplace=True),
nn.Conv1d(480, 480, kernel_size=7,stride =4),
nn.ReLU(inplace=True),
nn.BatchNorm1d(480),
nn.Dropout(p=0.2),
nn.Conv1d(480, 960, kernel_size=1),
nn.Conv1d(960, 960, kernel_size=3,dilation=2,padding=1),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=3,dilation=4,padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=3,dilation=8,padding=7),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=3,dilation=16,padding=15),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=3,dilation=32,padding=31),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=3,dilation=64,padding=63),
nn.ReLU(inplace=True),
nn.BatchNorm1d(960),
nn.Dropout(p=0.2),
#reduce the number of channels
nn.Conv1d( 960,480, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv1d( 480,120, kernel_size=1),
nn.Conv1d( 120,24, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv1d(24,24,47)
)
self.classifier = nn.Sequential(
nn.Sigmoid()
)
def forward(self, x):
#reshape directly; no fully connected layer is used anywhere in the network
out1 = self.conv_h1(x)
out2 = self.conv_h2(x)
out3 = self.conv_h3(x)
out4 = self.conv_h4(x)
out5 = self.conv_h5(x)
out_merge = torch.cat((out1,out2,out3,out4,out5),dim=1)
out_ = self.conv(out_merge)
reshape_out = out_.view(out_.size(0), 24 )
predict = self.classifier(reshape_out)
return predict
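# --- Added illustrative sketch (not part of the original file): hedged forward passes through the
# two multi-branch models above. Working backwards through the layer arithmetic, both appear to
# expect inputs of length 1000 so that the length dimension collapses to 1 before the final
# reshape to 24.
# x = torch.randn(2, 4, 1000)
# y1 = RES_ATTENTION(sequence_length=1000, n_targets=24)(x) # shape (2, 24)
# y2 = more_cnn_dilation(sequence_length=1000, n_targets=24)(x) # shape (2, 24)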
|
zzx-deep-genome
|
/zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/torch_model.py
|
torch_model.py
|
from setuptools import setup,find_packages
setup(
name="zzy-AxCat",
version="0.1",
author="zzy",
description="曾子毅",
packages = find_packages("zzy"),
package_dir = {"":"zzy"},
package_data = {
"":[".txt",".info","*.properties",".py"],
"":["data/*.*"],
},
exclude = ["*.test","*.test.*","test.*","test"]
)
|
zzy-AxCat
|
/zzy-AxCat-0.1.tar.gz/zzy-AxCat-0.1/setup.py
|
setup.py
|
def get_info():
return "My blog : Anoxia-Cat.github.io"
|
zzy-AxCat
|
/zzy-AxCat-0.1.tar.gz/zzy-AxCat-0.1/zzy/AxCat/Github/message.py
|
message.py
|
=====
zzyzx
=====
Do you believe in the cloud? It's in fact only somebody else's computer.
Those might fail or get hacked.
Do you believe in bug-free software? Nah, it's more likely every now and
then a crash, a bug, a race condition or some other bad luck will lead
to data corruption of the things that you work on.
Do you think you'll be able to access your notes in thirty years? It's
likely the data format they're stored in is going to be hard to read.
This is why I store all my notes in my e-mail. It's been there since the
1970s, it's going to be there in the 2050s. MIME and IMAP ensure the
data is more or less plaintext and easily human-readable even without any
tool support. Apple Notes support it on both OS X and iOS. Pure win.
But wait, what about software failure? What if a bug erases my notes or
there's a data center fire and the data restored from a backup is in
a state from two days ago? What about bitrot?
Enter ``zzyzx``.
This is the most primitive backup system ever. Set it up in cron on your
laptop or a server you control and it will create incremental backups
with history between runs (setting up a Mercurial repository). It also
creates useful symlinks to human-readable note titles so you can find
them more easily.
Installation
------------
It requires Python 3.5+ and Click. Just install it from PyPI::
$ pip install zzyzx
$ cat >~/.zzyzx
[server]
host=mail.example.com
[email protected]
pass=secret
[backup]
repo_path=~/Notes
ignore_prefix=INBOX.Notes
$ zzyzx backup
Markdown export
---------------
If you installed ``zzyzx[markdown]`` from PyPI, you can also run::
$ zzyzx md
This will generate a list of files that are a textual representation
of the notes' contents. This is useful for exporting Apple Notes into
systems that expect Markdown files, like
`Bear <http://www.bear-writer.com/>`_.
Configure your Markdown support adding a section like the following
to your `.zzyzx` config::
[markdown]
path=~/Dropbox/Notes
extension=.txt
headings=atx
Headings can be "atx" (simple hashes), "atx_closed" (symmetrical
hashes), or "underlined" (ReST-like).
Why the name ``zzyzx``?
-----------------------
It's the last place on Earth. It's the end of the world.
Known issues
------------
Don't put the repo path in Dropbox as it doesn't support symlinks and
your other computers will see a lot of duplicate files.
The Markdown export is not perfect because the HTML syntax used by
Apple Notes is pretty strange. I did what I could, tested against a few
hundred notes on macOS Sierra and iOS 10.2 (they are not consistent
between each other either).
Changes
-------
2017.1.0
~~~~~~~~
* the Markdown export update: generally sucks less
* also update the creation and modification date in the Markdown export
* allow customization of the Markdown export file extensions
* allow exporting folder-based hashtags (for example for use with Bear
editor)
2016.6.0
~~~~~~~~
* bugfix: slashes and backslashes weren't properly escaped for title
symlinks
2016.4.1
~~~~~~~~
* backwards incompatible: ``zzyzx`` functionality now available as
``zzyzx backup``
* new functionality: ``zzyzx md`` unpacks .eml into text files and
attachments, translating HTML into Markdown
* bugfix: existing and newly created filenames are normalized to NFD;
existing file tracking won't be so eager to delete files anymore on
OS X
2016.4.0
~~~~~~~~
* first published version
Authors
-------
Glued together by `Łukasz Langa <[email protected]>`.
|
zzyzx
|
/zzyzx-2017.1.0.tar.gz/zzyzx-2017.1.0/README.rst
|
README.rst
|
import ast
import os
import re
from setuptools import setup
current_dir = os.path.abspath(os.path.dirname(__file__))
ld_file = open(os.path.join(current_dir, 'README.rst'))
try:
long_description = ld_file.read()
finally:
ld_file.close()
_version_re = re.compile(r'__version__\s+=\s+(?P<version>.*)')
with open('zzyzx/__init__.py', 'rb') as f:
version = _version_re.search(f.read().decode('utf-8')).group('version')
version = str(ast.literal_eval(version))
setup(
name='zzyzx',
author='Łukasz Langa',
author_email='[email protected]',
version=version,
url='http://github.com/ambv/zzyzx',
packages=['zzyzx'],
package_data={
'': ['*.txt', '*.rst'],
},
entry_points={
'console_scripts': ['zzyzx=zzyzx.cli:main'],
},
description='A simple IMAP Notes backup tool.',
long_description=long_description,
zip_safe=False,
platforms=['any'],
install_requires=[
'click',
],
extras_require={
'collation': ["PyICU"],
'markdown': ["beautifulsoup4", "html5lib", "python-magic"],
},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
],
)
|
zzyzx
|
/zzyzx-2017.1.0.tar.gz/zzyzx-2017.1.0/setup.py
|
setup.py
|
# zzz
Python library that waits until something happens.
## Benefit
You will no longer have to write annoying `while`/`time.sleep()` checks
to wait until a variable is equal to a certain value.
## Usage
It's real simple.
All you gotta do is just have an import statement:
```
from zzz import z
```
After that, you use the `z` function on any
variable/object/function/method/thing ("VOFMT"). You pass the aforementioned
VOFMT as the first argument (`variable`), a `value` that the VOFMT should be
equal to, and lastly an optional `delay` argument, which determines how long to
wait between the checks for the aforementioned conditional equivalence.
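A minimal, illustrative sketch (the object and value below are made up; only the
`z(variable, value, delay)` usage described above is assumed):
```
from zzz import z

# Block until door_state equals "open", re-checking every 0.5 seconds.
z(door_state, "open", delay=0.5)
```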
## Author
`zzz` was written by David Gay.
## License
AGPLv3+. See `LICENSE` file for full text.
## A note about formatting
I believe that Markdown is superior to ReStructured Text and do not care that
PyPI only parses ReStructured Text. You will have to deal with it. You are a
smart person. Crack the readme open in your text editor, toss it through a
Markdown renderer, or deal with it. You will have many worse moments before
your continual decay leads to your inevitable final breath and the collection
of atoms called "you" disperse and take their place within other beings.
|
zzz
|
/zzz-0.0.2.tar.gz/zzz-0.0.2/README.md
|
README.md
|
#!/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='zzz',
version='0.0.2',
description='Wait for something to happen',
classifiers=[
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
],
keywords='wait sleep time zzz trigger',
author='David Gay',
author_email='[email protected]',
url='https://github.com/oddshocks/zzz',
license='AGPLv3+',
include_package_data=True,
zip_safe=False,
install_requires=[],
packages=[],
entry_points="""
""",
)
|
zzz
|
/zzz-0.0.2.tar.gz/zzz-0.0.2/setup.py
|
setup.py
|
# -*-coding:UTF-8 -*-
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
from setuptools import setup, find_packages
setup(
name="zzz001",
version="0.0.4",
author="Jasper Jin",
author_email="[email protected]",
description="Singleton Client",
license="MIT",
url="https://github.com/vmware/singleton/tree/g11n-python-client",
packages=['sgtnclient'],
install_requires=[
"PyYAML"
],
classifiers=[
"Operating System :: OS Independent"
]
)
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/setup.py
|
setup.py
|
# -*-coding:UTF-8 -*-
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import sys
import os
import re
import time
import copy
from threading import Thread
from collections import OrderedDict
from sgtn_properties import Properties
from sgtn_util import FileUtil, NetUtil, SysUtil
from sgtn_util import LOG_TYPE_INFO, KEY_RESULT, KEY_HEADERS
from sgtn_bykey import SingletonByKey
from sgtn_locale import SingletonLocaleUtil
from sgtn_py_base import SgtnException
from I18N import Config, Release, Translation
KEY_LOCALE = 'locale'
KEY_SOURCE = 'source'
KEY_ITEMS = 'format_items'
KEY_RESPONSE = 'response'
KEY_CODE = 'code'
KEY_DATA = 'data'
KEY_MESSAGES = 'messages'
KEY_PRODUCT = 'product'
KEY_VERSION = 'l10n_version'
KEY_SERVICE_URL = 'online_service_url'
KEY_OFFLINE_URL = 'offline_resources_base_url'
KEY_LOCAL_PATH = 'offline_resources_path'
KEY_DEFAULT_LOCALE = 'default_locale'
KEY_SOURCE_LOCALE = 'source_locale'
KEY_TRYDELAY = 'try_delay'
KEY_INTERVAL = 'cache_expired_time'
KEY_CACHEPATH = 'cache_path'
KEY_CACHETYPE = 'cache_type'
KEY_LOGPATH = 'log_path'
KEY_COMPONENTS = 'components'
KEY_LOCALES = 'locales'
KEY_LANG_TAG = 'language_tag'
KEY_COMPONENT_TAG = 'name'
KEY_COMPONENT_TEMPLATE = "component_template"
KEY_LOCALES_REFER = "locales_refer"
KEY_TEMPLATE = "template"
HEADER_REQUEST_ETAG = "If-None-Match"
LOCALE_DEFAULT = 'en-US'
MAX_THREAD = 1000
LOCAL_TYPE_FILE = 'file'
LOCAL_TYPE_HTTP = 'http'
RES_TYPE_PROPERTIES = '.properties'
RES_TYPE_SGTN = '.json'
class ClientUtil:
@classmethod
def check_response_valid(cls, dict):
if dict and KEY_RESULT in dict:
status = dict[KEY_RESULT].get(KEY_RESPONSE)
if status and KEY_CODE in status:
code = status[KEY_CODE]
if code == 200 or code == 604:
return True
return False
@classmethod
def read_resource_files(cls, local_type, file_list):
props = OrderedDict()
try:
for prop_file in file_list:
if prop_file.endswith(RES_TYPE_PROPERTIES):
text = None
if local_type == LOCAL_TYPE_HTTP:
text = NetUtil.http_get_text(prop_file)
else:
text = FileUtil.read_text_file(prop_file)
if text:
m = Properties().parse(text)
props.update(m)
elif prop_file.endswith(RES_TYPE_SGTN):
m = None
if local_type == LOCAL_TYPE_HTTP:
code, dt = NetUtil.http_get(prop_file, None)
if code == 200:
m = dt.get(KEY_RESULT)
else:
m = FileUtil.read_json_file(prop_file)
if m:
m = m.get(KEY_MESSAGES)
props.update(m)
except Exception as error:
raise IOError('Error in loading property file. Check file(s) = ', file_list, ' ', error)
return props
class SingletonConfig(Config):
def __init__(self, base_path, config_data):
self.base = base_path
self.config_data = config_data
self.product = config_data.get(KEY_PRODUCT)
self.version = '{0}'.format(config_data.get(KEY_VERSION))
self.remote_url = config_data.get(KEY_SERVICE_URL)
self.local_url = config_data.get(KEY_OFFLINE_URL)
if self.local_url:
parts = self.local_url.split('/')
self.local_type = parts[0][:-1]
if self.local_type == LOCAL_TYPE_FILE:
start = 2
needBasePath = False
if len(parts) > 3:
if parts[3] == '..' or parts[3] == '.':
start = 3
needBasePath = True
if parts[3].endswith(':'):
start = 3
self.local_url = '/'.join(parts[start:])
if needBasePath:
self.local_url = os.path.join(base_path, self.local_url)
self.log_path = self.get_path(KEY_LOGPATH) # log path
self.cache_path = self.get_path(KEY_CACHEPATH) # cache path
self.cache_type = self.get_item(KEY_CACHETYPE, 'default') # cache type
self.cache_expired_time = self.get_item(KEY_INTERVAL, 3600) # cache expired time
self.try_delay = self.get_item(KEY_TRYDELAY, 10) # try delay
self.default_locale = self.get_item(KEY_DEFAULT_LOCALE, LOCALE_DEFAULT)
self.source_locale = self.get_item(KEY_SOURCE_LOCALE, self.default_locale)
self._expand_components()
def _expand_locales(self, locales_def_array, template):
locales = {}
for one in locales_def_array:
locale_def = copy.deepcopy(one)
locales[locale_def.get(KEY_LANG_TAG)] = locale_def
if KEY_LOCAL_PATH not in locale_def and template:
locale_def[KEY_LOCAL_PATH] = copy.deepcopy(template.get(KEY_LOCAL_PATH))
return locales
def _expand_components(self):
self.components = None
components = self.config_data.get(KEY_COMPONENTS)
if not components:
return
expand = {}
self.components = {}
for component in components:
if KEY_LOCALES in component:
component[KEY_LOCALES] = self._expand_locales(component[KEY_LOCALES], None)
self.components[component.get(KEY_COMPONENT_TAG)] = copy.deepcopy(component)
continue
template_name = component.get(KEY_TEMPLATE)
if not template_name:
template_name = KEY_COMPONENT_TEMPLATE
if template_name not in expand:
t = self.config_data.get(template_name)
refer_name = t.get(KEY_LOCALES_REFER)
refer = self.config_data.get(refer_name)
if not refer:
continue
expand[template_name] = self._expand_locales(refer, t)
component[KEY_LOCALES] = expand[template_name]
self.components[component.get(KEY_COMPONENT_TAG)] = copy.deepcopy(component)
def get_config_data(self):
# method of Config
return self.config_data
def get_info(self):
# method of Config
info = {'product': self.product, 'version': self.version,
'remote': self.remote_url, 'local': self.local_url,
'source_locale': self.source_locale, 'default_locale': self.default_locale}
return info
def extract_list(self, key, key_name, key_refer, refer):
_dict = {}
_define = self.config_data.get(key)
if not _define:
return None
for one in _define:
dup = copy.deepcopy(one)
del dup[key_name]
_dict[one[key_name]] = dup
if key_refer not in dup and refer:
dup[key_refer] = copy.deepcopy(refer)
return _dict
def get_item(self, key, default_value):
value = self.config_data.get(key)
if value is None:
value = default_value
return value
def get_path(self, key):
path = self.config_data.get(key)
if path:
if path.startswith('./') or path.startswith('../'):
path = os.path.realpath(os.path.join(self.base, path))
return path
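# --- Added illustrative sketch (not part of the original file): a hedged example of the kind of
# config_data dict SingletonConfig reads. The key names come from the KEY_* constants above; the
# concrete values are made up.
# config_data = {
# 'product': 'SampleProduct',
# 'l10n_version': '1.0.0',
# 'online_service_url': 'https://singleton.example.com:8090',
# 'offline_resources_base_url': 'file://./resources',
# 'default_locale': 'en-US',
# 'source_locale': 'en-US',
# 'components': [{'name': 'about', 'locales': [{'language_tag': 'en-US'}]}]
# }
# cfg = SingletonConfig(base_path=os.path.dirname(__file__), config_data=config_data)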
class SingletonApi:
VIP_PATH_HEAD = '/i18n/api/v2/translation/products/{0}/versions/{1}/'
VIP_PARAMETER = 'pseudo=false&machineTranslation=false&checkTranslationStatus=false'
VIP_GET_COMPONENT = 'locales/{0}/components/{1}?'
def __init__(self, release_obj):
self.rel = release_obj
self.cfg = release_obj.cfg
self.addr = self.cfg.remote_url
def get_component_api(self, component, locale):
head = self.VIP_PATH_HEAD.format(self.cfg.product, self.cfg.version)
path = self.VIP_GET_COMPONENT.format(locale, component)
return '{0}{1}{2}{3}'.format(self.addr, head, path, self.VIP_PARAMETER)
def get_localelist_api(self):
head = self.VIP_PATH_HEAD.format(self.cfg.product, self.cfg.version)
return '{0}{1}localelist'.format(self.addr, head)
def get_componentlist_api(self):
head = self.VIP_PATH_HEAD.format(self.cfg.product, self.cfg.version)
return '{0}{1}componentlist'.format(self.addr, head)
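# --- Added illustrative note (not part of the original file): with the templates above,
# get_component_api('about', 'de') would produce a URL of roughly this shape
# (host, PRODUCT and VERSION are placeholders taken from the config):
# https://host/i18n/api/v2/translation/products/PRODUCT/versions/VERSION/locales/de/components/about?pseudo=false&machineTranslation=false&checkTranslationStatus=false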
class SingletonUpdateThread(Thread):
def __init__(self, obj):
Thread.__init__(self)
self.obj = obj
def run(self):
self.obj.get_from_remote()
class SingletonAccessRemoteTask:
def __init__(self, release_obj, obj):
self.rel = release_obj
self.obj = obj
self.last_time = 0
self.querying = False
self.interval = self.rel.interval
def set_retry(self, current):
# try again after try_delay seconds
self.last_time = current - self.interval + self.rel.try_delay
def check(self):
if not self.rel.cfg.remote_url:
return
access_remote = False
if self.interval > 0:
current = time.time()
if current > self.last_time + self.interval:
access_remote = True
else:
if self.last_time == 0:
access_remote = True
if not access_remote:
return
if self.querying:
if self.obj.get_data_count() == 0:
while self.querying:
time.sleep(0.1)
return
self.querying = True
if self.obj.get_data_count() == 0:
self.obj.get_from_remote()
else:
th = SingletonUpdateThread(self.obj)
th.start()
class SingletonComponent:
def __init__(self, release_obj, locale, component, isLocalSource):
self.rel = release_obj
self.locale = locale
self.localeItem = self.rel.bykey.get_locale_item(locale, isLocalSource)
self.componentIndex = self.rel.bykey.get_component_index(component)
self.component = component
self.isLocalSource = isLocalSource
self.countOfMessages = 0
self.etag = None
self.cache_path = None
self.task = None if isLocalSource else SingletonAccessRemoteTask(release_obj, self)
if self.task and self.rel.cache_path:
self.cache_path = os.path.join(self.rel.cache_path, component, 'messages_{0}.json'.format(locale))
self.rel.log('--- cache file --- {0} ---'.format(self.cache_path))
if os.path.exists(self.cache_path):
dt = FileUtil.read_json_file(self.cache_path)
if KEY_MESSAGES in dt:
self.task.last_time = os.path.getmtime(self.cache_path)
self.set_messages(dt[KEY_MESSAGES])
def set_messages(self, messages):
for key in messages:
text = messages[key]
self.rel.bykey.set_string(key, self, self.componentIndex, self.localeItem, text)
self.countOfMessages = len(messages)
def get_messages(self):
return self.rel.bykey.get_messages(self.componentIndex, self.localeItem)
def get_message(self, key):
return self.rel.bykey.get_string(key, self.componentIndex, self.localeItem)
def is_messages_same(self, messages):
for key in messages:
text = messages[key]
message = self.rel.bykey.get_string(key, self.componentIndex, self.localeItem)
if message != text:
return False
return True
def get_from_remote(self):
current = time.time()
try:
# get messages
addr = self.rel.api.get_component_api(self.component, self.locale)
headers = {}
if self.etag:
headers[HEADER_REQUEST_ETAG] = self.etag
code, dt = NetUtil.http_get(addr, headers)
if code == 200 and ClientUtil.check_response_valid(dt):
self.etag, interval = NetUtil.get_etag_maxage(dt.get(KEY_HEADERS))
if interval:
self.task.interval = interval
messages = dt[KEY_RESULT][KEY_DATA][KEY_MESSAGES]
if self.cache_path:
if os.path.exists(self.cache_path) and self.is_messages_same(messages):
os.utime(self.cache_path, (current, current))
else:
self.rel.log('--- save --- {0} ---'.format(self.cache_path))
FileUtil.save_json_file(self.cache_path, dt[KEY_RESULT][KEY_DATA])
self.set_messages(messages)
self.task.last_time = current
elif code == 304:
self.task.last_time = current
else:
self.task.set_retry(current)
except SgtnException as e:
self.task.set_retry(current)
self.task.querying = False
def get_data_count(self):
return self.countOfMessages
class SingletonUseLocale:
def __init__(self, singletonLocale, sourceLocale, isLocalSource, bykey):
self.singletonLocale = singletonLocale
self.locale = self.singletonLocale.get_original_locale()
self.isLocalSource = isLocalSource
singletonSourceLocale = SingletonLocaleUtil.get_singleton_locale(sourceLocale)
self.isSourceLocale = self.locale in singletonSourceLocale.get_near_locale_list()
self.localeItem = bykey.get_locale_item(self.locale, True) if isLocalSource and bykey else None
self.components = {}
class SingletonReleaseBase:
def __init__(self, cfg):
self.cfg = cfg
self.cache_path = None
self.scope = None
self.logger = None
self.interval = 0
self.try_delay = 0
self.detach = False
self.locale_list = []
self.component_list = []
self.remote_pool = {}
self.source_pool = {}
self.local_handled = {}
self.component_handled = {}
if not cfg:
return
if cfg.log_path:
log_file = os.path.join(cfg.log_path, '{0}_{1}.log'.format(self.cfg.product, self.cfg.version))
self.init_logger(log_file)
if cfg.cache_path:
self.cache_path = os.path.join(cfg.cache_path, self.cfg.product, self.cfg.version)
self.log('--- cache path --- {0} ---'.format(self.cache_path))
self.interval = cfg.cache_expired_time
self.try_delay = cfg.try_delay
self.task = SingletonAccessRemoteTask(self, self)
self.get_scope()
self.remote_default_locale = self.get_locale_supported(self.cfg.default_locale)
self.remote_source_locale = self.get_locale_supported(self.cfg.source_locale)
self.isDifferent = self.remote_default_locale != self.remote_source_locale
self.bykey = SingletonByKey(self.cfg.source_locale, self.cfg.default_locale, self.isDifferent, self.cfg.cache_type)
self.useSourceLocale = self.get_use_locale(self.cfg.source_locale, True)
self._get_local_resource(self.useSourceLocale, self.cfg.source_locale)
self.useDefaultLocale = None
if self.isDifferent:
self.useDefaultLocale = self.get_use_locale(self.cfg.default_locale, False)
def get_use_locale(self, locale, asSource):
pool = self.source_pool if asSource else self.remote_pool
useLocale = pool.get(locale)
if useLocale is None:
singletonLocale = SingletonLocaleUtil.get_singleton_locale(locale)
useLocale = singletonLocale.find_item(pool, 1)
if useLocale is None:
useLocale = SingletonUseLocale(singletonLocale, self.cfg.source_locale, asSource, self.bykey)
for one in useLocale.singletonLocale.get_near_locale_list():
if one not in pool:
pool[one] = useLocale
return useLocale
def get_scope(self):
self.api = SingletonApi(self)
if self.cache_path:
self.locale_list = FileUtil.read_json_file(os.path.join(self.cache_path, 'locale_list.json'))
self.component_list = FileUtil.read_json_file(os.path.join(self.cache_path, 'component_list.json'))
if not self.cfg.remote_url:
return
if not self.locale_list:
self.get_from_remote()
else:
th = SingletonUpdateThread(self)
th.start()
def get_from_remote(self):
self.task.last_time = time.time()
try:
# get locale list
scope = self._get_scope_item(self.api.get_localelist_api(), KEY_LOCALES, 'locale_list.json')
if scope:
self.locale_list = scope
# get component list
scope = self._get_scope_item(self.api.get_componentlist_api(), KEY_COMPONENTS, 'component_list.json')
if scope:
self.component_list = scope
except SgtnException as e:
pass
self.task.querying = False
def get_data_count(self):
if not self.locale_list or not self.component_list:
return 0
return len(self.locale_list) + len(self.component_list)
def init_logger(self, log_file):
self.logger = SysUtil.init_logger(log_file, 'sgtn_{0}_{1}'.format(self.cfg.product, self.cfg.version))
self.log('--- release --- {0} --- {1} --- {2} ---'.format(self.cfg.product, self.cfg.version, time.time()))
def log(self, text, log_type=LOG_TYPE_INFO):
SysUtil.log(self.logger, text, log_type)
def _load_one_local(self, component, locale, path_define):
if not path_define:
return None
for i, v in enumerate(path_define):
path = v.replace('$COMPONENT', component).replace('$LOCALE', locale)
path_define[i] = os.path.join(self.cfg.local_url, path)
return ClientUtil.read_resource_files(self.cfg.local_type, path_define)
def _get_scope_item(self, addr, key, keep_name):
code, dt = NetUtil.http_get(addr, None)
if code == 200 and ClientUtil.check_response_valid(dt):
_, interval = NetUtil.get_etag_maxage(dt.get(KEY_HEADERS))
if interval:
self.task.interval = interval
scope = dt[KEY_RESULT][KEY_DATA][key]
if scope and self.cache_path:
FileUtil.save_json_file(os.path.join(self.cache_path, keep_name), scope)
return scope
return None
def _extract_info_from_dir(self, root):
if self.cfg.local_type != LOCAL_TYPE_FILE:
return
components = {}
dir_list, _ = FileUtil.get_dir_info(root)
for component in dir_list:
components[component] = {}
component_obj = components[component]
component_obj[KEY_LOCALES] = {}
locales_cfg = component_obj.get(KEY_LOCALES)
component_path = os.path.join(self.cfg.local_url, component)
_, file_list = FileUtil.get_dir_info(component_path)
for res_file in file_list:
parts = re.split(r"messages(.*)\.", res_file)
if len(parts) == 3:
if parts[1].startswith('_'):
locale = parts[1][1:]
elif parts[1] == '':
locale = self.cfg.source_locale
locales_cfg[locale] = {KEY_LOCAL_PATH: [os.path.join(component, res_file)]}
return components
def _get_local_resource(self, useLocale, locale):
if useLocale is None:
return
locale_item = useLocale.components
if not self.cfg.local_url:
return
if not self.cfg.components:
self.cfg.components = self._extract_info_from_dir(self.cfg.local_url)
if not self.cfg.components:
return
for component in self.cfg.components:
locales_cfg = self.cfg.components[component].get(KEY_LOCALES)
locale_define = None
if locales_cfg:
singletonLocale = SingletonLocaleUtil.get_singleton_locale(locale)
locale_define = singletonLocale.find_item(locales_cfg, 0)
combineKey = locale + '_!_' + component
if locale_define and combineKey not in self.local_handled:
path_define = locale_define.get(KEY_LOCAL_PATH)
map = self._load_one_local(component, locale, path_define)
component_obj = SingletonComponent(self, locale, component, useLocale.isLocalSource)
component_obj.set_messages(map)
locale_item[component] = component_obj
self.local_handled[combineKey] = True
def _get_remote_resource(self, locale, component):
if not self.locale_list or not self.component_list:
return None
singletonLocale = SingletonLocaleUtil.get_singleton_locale(locale)
if not singletonLocale.is_in_locale_list(self.locale_list):
return None
if component not in self.component_list:
return None
components = self.get_use_locale(locale, False).components
component_obj = components.get(component)
if component_obj is None:
self.log('--- component --- {0} ---'.format(component))
component_obj = SingletonComponent(self, locale, component, False)
components[component] = component_obj
component_obj.task.check()
return component_obj
def _get_component(self, locale, component):
component_remote = self._get_remote_resource(locale, component)
if component_remote:
return component_remote
component_obj = None
if self.cfg.local_url:
useLocale = self.get_use_locale(locale, False)
combineKey = locale + '_!_' + component
if combineKey not in self.local_handled:
self._get_local_resource(useLocale, locale)
self.local_handled[combineKey] = True
if useLocale:
component_obj = useLocale.components.get(component)
if component_obj is None and useLocale.isSourceLocale:
component_obj = self.useSourceLocale.components.get(component)
return component_obj
def _get_message(self, component, key, source, locale):
message = source if source is not None else key
if not key or not locale:
return message
if not self.bykey._onlyByKey and not component:
return message
self.task.check()
componentIndex = self.bykey.get_component_index(component)
if componentIndex >= 0:
combineKey = locale + '_!_' + component
if combineKey not in self.component_handled:
self._get_component(self.remote_source_locale, component)
componentObj = self._get_component(locale, component)
if self.isDifferent:
self._get_component(self.remote_default_locale, component)
if componentObj:
self.component_handled[combineKey] = True
localeItem = self.bykey.get_locale_item(locale, False)
message = self.bykey.get_string(key, componentIndex, localeItem, True)
return message
class SingletonRelease(SingletonReleaseBase, Release, Translation):
def get_config(self):
# method of Release
return self.cfg
def get_translation(self):
# method of Release
return self
def get_locale_strings(self, locale, asSource):
# method of Translation
collect = {}
useLocale = self.get_use_locale(locale, asSource)
if useLocale and useLocale.components:
components = useLocale.components
for component in components:
collect[component] = components[component].get_messages()
return collect
def get_source(self, component, key, sourceInCode):
componentIndex = self.bykey.get_component_index(component)
source = self.bykey.get_string(key, componentIndex, self.useSourceLocale.localeItem, False)
if source is not None:
return source
source = self._get_message(component, key, sourceInCode, self.cfg.source_locale)
return source
def get_raw(self, component, key, sourceInCode, locale, items):
useLocale = self.get_use_locale(locale, False)
if useLocale.isSourceLocale:
if sourceInCode is not None:
return sourceInCode
return self.get_source(component, key, sourceInCode)
source = self.get_source(component, key, sourceInCode)
if sourceInCode is not None and source is not None and source != sourceInCode:
return sourceInCode
return self._get_message(component, key, source, locale)
def get_string(self, component, key, **kwargs):
# method of Translation
sourceInCode = kwargs.get(KEY_SOURCE) if kwargs else None
locale = kwargs.get(KEY_LOCALE) if kwargs else None
items = kwargs.get(KEY_ITEMS) if kwargs else None
if not locale:
locale = SingletonClientManager().get_current_locale()
text = self.get_raw(component, key, sourceInCode, locale, items)
if text and items:
if isinstance(items, list):
text = self.format_by_array(text, items)
elif isinstance(items, dict):
text = self.format_by_map(text, items)
if text is None:
text = key
return text
def format_by_array(self, text, array):
return text.format(*array)
def format_by_map(self, text, map):
return text.format(**map)
def get_locale_supported(self, locale):
# method of Translation
return SysUtil.get_fallback_locale(locale)
class SingletonClientManager(object):
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = object.__new__(cls)
cls._instance.init()
return cls._instance
def init(self):
self._products = {}
def add_config_file(self, config_file, replaceMap=None):
config_text = FileUtil.read_text_file(config_file)
if replaceMap:
for key in replaceMap:
config_text = config_text.replace(key, replaceMap[key])
config_data = FileUtil.parse_datatree(config_text)
base_path = os.path.dirname(os.path.realpath(config_file))
cfg = self.add_config(base_path, config_data)
return cfg
def add_config(self, base_path, config_data):
if not config_data:
return
cfg = SingletonConfig(base_path, config_data)
release_obj = self.get_release(cfg.product, cfg.version)
if release_obj is None:
self.create_release(cfg)
return cfg
def get_release(self, product, version):
if not product or not version:
return None
releases = self._products.get(product)
if releases is None:
return None
return releases.get(version)
def create_release(self, cfg):
if not cfg or not cfg.product or not cfg.version:
return
releases = self._products.get(cfg.product)
if releases is None:
self._products[cfg.product] = {}
releases = self._products.get(cfg.product)
release_obj = releases.get(cfg.version)
if release_obj is None:
release_obj = SingletonRelease(cfg)
releases[cfg.version] = release_obj
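# set_current_locale/get_current_locale avoid thread-local storage: the chosen locale is
# written under the key '_singleton_locale_' into the local variables of up to ten caller
# stack frames, and get_current_locale walks the same frames upward to find it, falling
# back to LOCALE_DEFAULT when nothing has been set.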
def set_current_locale(self, locale):
current = sys._getframe().f_back.f_back
for i in range(10):
if not hasattr(current, 'f_locals'):
break
locals = current.f_locals
locals['_singleton_locale_'] = locale
if not hasattr(current, 'f_back'):
break
current = current.f_back
def get_current_locale(self):
current = sys._getframe().f_back.f_back
for i in range(10):
if not hasattr(current, 'f_locals'):
break
locals = current.f_locals
if '_singleton_locale_' in locals:
return locals['_singleton_locale_']
if not hasattr(current, 'f_back'):
break
current = current.f_back
return LOCALE_DEFAULT
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_client.py
|
sgtn_client.py
|
# -*-coding:UTF-8 -*-
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import os
import sys
_libPath = os.path.dirname(__file__)
if _libPath not in sys.path:
sys.path.append(_libPath)
NOT_IMP_EXCEPTION = 'NotImplementedException'
_client_manager = None
def _get_client_manager():
global _client_manager
if _client_manager is None:
from sgtn_client import SingletonClientManager
_client_manager = SingletonClientManager()
return _client_manager
class Config(object):
"""Config interface"""
def get_config_data(self):
raise Exception(NOT_IMP_EXCEPTION)
def get_info(self):
raise Exception(NOT_IMP_EXCEPTION)
class Translation(object):
"""Translation interface"""
def get_string(self, component, key, **kwargs):
raise Exception(NOT_IMP_EXCEPTION)
def get_locale_strings(self, locale, asSource):
raise Exception(NOT_IMP_EXCEPTION)
def get_locale_supported(self, locale):
raise Exception(NOT_IMP_EXCEPTION)
class Release(object):
"""Release interface"""
def get_config(self):
"""get config interface Config"""
raise Exception(NOT_IMP_EXCEPTION)
def get_translation(self):
"""get translation interface Translation"""
raise Exception(NOT_IMP_EXCEPTION)
def add_config_file(config_file, replaceMap=None):
return _get_client_manager().add_config_file(config_file, replaceMap)
def add_config(base_path, config_data):
return _get_client_manager().add_config(base_path, config_data)
def set_current_locale(locale):
_get_client_manager().set_current_locale(locale)
def get_current_locale():
"""get string of locale"""
return _get_client_manager().get_current_locale()
def get_release(product, version):
"""get release interface Release"""
return _get_client_manager().get_release(product, version)
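if __name__ == '__main__':
    # Illustrative sketch only: the config file name, product, version, component and key
    # below are hypothetical and assume a matching Singleton configuration is available.
    add_config_file('sgtn_client.yml')
    set_current_locale('de-DE')
    release = get_release('SampleProduct', '1.0.0')
    if release:
        translation = release.get_translation()
        print(translation.get_string('about', 'about.message'))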
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/I18N.py
|
I18N.py
|
# coding=utf-8
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import urllib2 as httplib
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class SgtnPyBase:
@staticmethod
def int_to_unicode(value):
return unichr(value)
@staticmethod
def get_httplib():
return httplib
@staticmethod
def open_file(file_name, mode):
return open(file_name, mode)
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_py2_base.py
|
sgtn_py2_base.py
|
# coding=utf-8
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import urllib.request as httplib
class SgtnPyBase:
@staticmethod
def int_to_unicode(value):
return chr(value)
@staticmethod
def get_httplib():
return httplib
@staticmethod
def open_file(file_name, mode):
return open(file_name, mode, encoding='utf-8')
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_py3_base.py
|
sgtn_py3_base.py
|
# coding=utf-8
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import sys
if sys.version_info.major == 2:
from sgtn_py2_base import SgtnPyBase
else:
from sgtn_py3_base import SgtnPyBase
pybase = SgtnPyBase()
class SgtnException(Exception):
pass
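if __name__ == '__main__':
    # Minimal sketch of the version shim: pybase resolves to the Python 2 or Python 3
    # implementation, so callers never branch on sys.version_info themselves.
    print(pybase.int_to_unicode(0x0041))   # 'A'
    httplib = pybase.get_httplib()         # urllib2 on Python 2, urllib.request on Python 3
    print(httplib)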
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_py_base.py
|
sgtn_py_base.py
|
# coding=utf-8
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
from collections import OrderedDict
from sgtn_py_base import pybase
MAX_LINE_BUFFER = 1024
class LineReader:
def __init__(self, inCharBuf):
self.lineBuf = [None] * MAX_LINE_BUFFER
self.inLimit = 0
self.inOff = 0
self.inCharBuf = inCharBuf
if self.inCharBuf:
self.inLimit = len(self.inCharBuf)
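# read_line copies the next logical line into self.lineBuf and returns its length, or -1 at
# end of input. It follows the java.util.Properties line rules: leading whitespace is
# skipped, lines starting with '#' or '!' are comments, and a line ending with an unescaped
# backslash is joined with the following line.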
def read_line(self):
length = 0
c = 0
skipWhiteSpace = True
isCommentLine = False
isNewLine = True
appendedLineBegin = False
precedingBackslash = False
skipLF = False
while True:
if self.inOff >= self.inLimit:
if length == 0 or isCommentLine:
return -1
if precedingBackslash:
length -= 1
return length
#The line below is equivalent to calling an ISO8859-1 decoder.
c = self.inCharBuf[self.inOff]
self.inOff += 1
if skipLF:
skipLF = False
if c == '\n':
continue
if skipWhiteSpace:
if c == ' ' or c == '\t' or c == '\f':
continue
if not appendedLineBegin and (c == '\r' or c == '\n'):
continue
skipWhiteSpace = False
appendedLineBegin = False
if isNewLine:
isNewLine = False
if c == '#' or c == '!':
isCommentLine = True
continue
if c != '\n' and c != '\r':
self.lineBuf[length] = c
length += 1
if length == len(self.lineBuf):
buf = [None] * length
self.lineBuf.extend(buf)
#flip the preceding backslash flag
if c == '\\':
precedingBackslash = not precedingBackslash
else:
precedingBackslash = False
else:
#reached end of line
if isCommentLine or length == 0:
isCommentLine = False
isNewLine = True
skipWhiteSpace = True
length = 0
continue
if self.inOff >= self.inLimit:
if precedingBackslash:
length -= 1
return length
if precedingBackslash:
length -= 1
#skip the leading whitespace characters in the following line
skipWhiteSpace = True
appendedLineBegin = True
precedingBackslash = False
if c == '\r':
skipLF = True
else:
return length
class Properties:
def __init__(self):
self.kvTable = None
def parse(self, text):
self.kvTable = OrderedDict()
reader = LineReader(text)
self.load(reader)
return self.kvTable
def put(self, key, value):
oldValue = self.kvTable.get(key)
self.kvTable[key] = value
return oldValue
def load(self, lr):
convtBuf = [None] * MAX_LINE_BUFFER
while True:
limit = lr.read_line()
if limit < 0:
break
c = 0
keyLen = 0
valueStart = limit
hasSep = False
precedingBackslash = False
while True:
if keyLen >= limit:
break
c = lr.lineBuf[keyLen]
#need to check whether the separator is escaped.
if (c == '=' or c == ':') and not precedingBackslash:
valueStart = keyLen + 1
hasSep = True
break
elif (c == ' ' or c == '\t' or c == '\f') and not precedingBackslash:
valueStart = keyLen + 1
break
if c == '\\':
precedingBackslash = not precedingBackslash
else:
precedingBackslash = False
keyLen += 1
while True:
if valueStart >= limit:
break
c = lr.lineBuf[valueStart]
if c != ' ' and c != '\t' and c != '\f':
if not hasSep and (c == '=' or c == ':'):
hasSep = True
else:
break
valueStart += 1
key = self.load_convert(lr.lineBuf, 0, keyLen, convtBuf)
value = self.load_convert(lr.lineBuf, valueStart, limit-valueStart, convtBuf)
self.put(key, value)
def load_convert(self, inText, off, length, convtBuf):
if len(convtBuf) < length:
newLen = length * 2
convtBuf = [None] * newLen
outText = convtBuf
outLen = 0
end = off + length
while True:
if off >= end:
break
aChar = inText[off]
off += 1
if aChar == '\\':
aChar = inText[off]
off += 1
if aChar == 'u':
#Read the four hex digits of the unicode escape after \u
value = 0
for i in range(4):
aChar = inText[off]
off += 1
if aChar >= '0' and aChar <= '9':
value = (value << 4) + ord(aChar) - ord('0')
elif aChar >= 'a' and aChar <= 'f':
value = (value << 4) + 10 + ord(aChar) - ord('a')
elif aChar >= 'A' and aChar <= 'F':
value = (value << 4) + 10 + ord(aChar) - ord('A')
else:
return None
outText[outLen] = pybase.int_to_unicode(value)
outLen += 1
else:
if aChar == 't':
aChar = '\t'
elif aChar == 'r':
aChar = '\r'
elif aChar == 'n':
aChar = '\n'
elif aChar == 'f':
aChar = '\f'
outText[outLen] = aChar
outLen += 1
else:
outText[outLen] = aChar
outLen += 1
return ''.join(outText[:outLen])
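if __name__ == '__main__':
    # Illustrative sketch: parse() returns an OrderedDict and accepts '=', ':' or whitespace
    # as the key/value separator, decoding \uXXXX escapes in the process.
    props = Properties()
    table = props.parse(u'greeting=hello\n#comment\ntitle: Sample App\nname \\u4e2d')
    print(table['greeting'], table['title'])
    assert table['name'] == u'\u4e2d'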
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_properties.py
|
sgtn_properties.py
|
# coding=utf-8
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import os
import sys
import json
import re
import logging
from collections import OrderedDict
from sgtn_py_base import pybase, SgtnException
from sgtn_debug import SgtnDebug
import ssl
if hasattr(ssl, '_create_unverified_context'): # for python 2.7
ssl._create_default_https_context = ssl._create_unverified_context
PY_VER = sys.version_info.major
UTF8 = 'utf-8'
httplib = pybase.get_httplib()
KEY_RESULT = 'result'
KEY_HEADERS = 'headers'
KEY_ERROR = 'error'
LOG_TYPE_INFO = 'info'
LOG_TYPE_ERROR = 'error'
# the keys below are in lower case
LOCALE_MAP = {
'zh-hant': 'zh-Hant',
'zh-tw': 'zh-Hant',
'zh-hans': 'zh-Hans',
'zh-cn': 'zh-Hans'
}
class FileUtil:
LOG_INTERNAL = ''
@classmethod
def read_text_file(cls, file_name):
SgtnDebug.log_text('util', 'read file {0} / exist: {1}'.format(
file_name, os.path.exists(file_name)))
if os.path.exists(file_name) and os.path.isfile(file_name):
f = open(file_name, 'rb')
file_data = f.read()
f.close()
try:
file_data = file_data.decode(UTF8)
return file_data
except UnicodeDecodeError as e:
return None
return None
@classmethod
def parse_json_from_text(cls, text):
try:
dict_data = json.loads(text, object_pairs_hook=OrderedDict)
return dict_data
except json.decoder.JSONDecodeError as e:
raise SgtnException(str(e))
@classmethod
def parse_json(cls, text):
if text:
try:
return cls.parse_json_from_text(text)
except SgtnException as e:
return None
return None
@classmethod
def parse_yaml_from_text(cls, text):
try:
import yaml
dict_data = yaml.load(text, Loader=yaml.FullLoader)
return dict_data
except yaml.YAMLError as e:
raise SgtnException(str(e))
@classmethod
def parse_yaml(cls, text):
if text:
try:
return cls.parse_yaml_from_text(text)
except SgtnException as e:
return None
return None
@classmethod
def parse_datatree(cls, text):
if text:
data = cls.parse_yaml(text)
if data is None:
data = cls.parse_json(text)
return data
return None
@classmethod
def read_json_file(cls, file_name):
file_data = cls.read_text_file(file_name)
return cls.parse_json(file_data)
@classmethod
def read_datatree(cls, file_name):
file_data = cls.read_text_file(file_name)
return cls.parse_datatree(file_data)
@classmethod
def save_json_file(cls, file_name, dict):
dir = os.path.dirname(file_name)
if not os.path.exists(dir):
os.makedirs(dir)
f = pybase.open_file(file_name, 'w')
text = json.dumps(dict, ensure_ascii=False, indent=2)
f.write(text)
f.close()
@classmethod
def get_dir_info(cls, dir_name):
dir_list = []
file_list = []
try:
ls = os.listdir(dir_name)
except IOError as e:
pass
else:
for fn in ls:
temp = os.path.join(dir_name, fn)
if os.path.isdir(temp):
dir_list.append(fn)
else:
file_list.append(fn)
return dir_list, file_list
class NetUtil:
simulate_data = None
record_data = {'enable': False, 'records': {}}
@classmethod
def _get_data(cls, url, request_headers):
if not cls.simulate_data:
req = httplib.Request(url)
if request_headers:
for key in request_headers:
req.add_header(key, request_headers[key])
try:
res_data = httplib.urlopen(req)
except IOError as e:
raise SgtnException(str(e))
headers = {}
for h in res_data.headers:
headers[h.lower()] = res_data.headers[h].lower()
try:
result = res_data.read()
except IOError as e:
raise SgtnException(str(e))
try:
text = result.decode(UTF8)
except UnicodeDecodeError as e:
raise SgtnException(str(e))
if cls.record_data['enable']:
header_part = json.dumps(request_headers) if request_headers else request_headers
key = '{0}<<headers>>{1}'.format(url, header_part) if header_part else url
cls.record_data['records'][key] = {'text': text, 'headers': headers}
return text, headers
else:
header_part = json.dumps(request_headers) if request_headers else request_headers
key = '{0}<<headers>>{1}'.format(url, header_part) if header_part else url
kept = cls.simulate_data.get(key)
if kept:
if 'code' in kept:
if kept['code'] == 304:
raise SgtnException('Error 304:')
return kept['text'], kept['headers']
return None, None
@classmethod
def http_get_text(cls, url):
text = None
try:
text, _ = cls._get_data(url, None)
except SgtnException as e:
pass
return text
@classmethod
def http_get(cls, url, request_headers):
ret = {}
code = 400
try:
text, headers = cls._get_data(url, request_headers)
ret[KEY_RESULT] = FileUtil.parse_json_from_text(text)
ret[KEY_HEADERS] = headers
code = 200
except SgtnException as e:
err_msg = str(e)
parts = re.split("Error ([0-9]*):", err_msg)
if len(parts) > 1:
code = int(parts[1])
if code != 304:
ret[KEY_ERROR] = 'HTTP ERROR: {0}'.format(str(e))
return code, ret
@classmethod
def get_etag_maxage(cls, headers):
if headers is None:
return None, None
etag = headers.get('etag')
text = headers.get('cache-control')
if text is None:
return etag, None
parts = re.split("max\\-age[ ]*\\=[ ]*([0-9]*)[ ]*", text)
if len(parts) < 2:
return etag, None
return etag, float(parts[1])
class SysUtil:
@classmethod
def init_logger(cls, log_file, log_name):
handler = logging.FileHandler(log_file)
formatter = logging.Formatter('%(asctime)s %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger(log_name)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
cls.log(logger, '')
cls.log(logger, '--- start --- python --- {0}'.format(sys.version.split('\n')[0]))
return logger
@classmethod
def log(cls, logger, text, log_type=LOG_TYPE_INFO):
if logger:
if log_type == LOG_TYPE_INFO:
logger.info(text)
return
elif log_type == LOG_TYPE_ERROR:
logger.error(text)
return
print(text)
@classmethod
def get_fallback_locale(cls, locale):
parts = re.split(r"[\-_]", locale)
parts[0] = parts[0].lower()
if len(parts) > 1:
parts[1] = parts[1].upper()
locale = '-'.join(parts)
fallback = LOCALE_MAP.get(locale.lower())
if fallback:
return fallback
return parts[0]
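if __name__ == '__main__':
    # Illustrative sketch of the helpers above; no network request is made here.
    print(SysUtil.get_fallback_locale('zh_CN'))    # 'zh-Hans' via LOCALE_MAP
    print(SysUtil.get_fallback_locale('de-DE'))    # falls back to the language part: 'de'
    data = FileUtil.parse_json('{"product": "SampleProduct"}')
    print(data['product'])
    etag, max_age = NetUtil.get_etag_maxage({'etag': 'W/"abc"', 'cache-control': 'max-age=60'})
    print(etag, max_age)                           # W/"abc" 60.0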
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_util.py
|
sgtn_util.py
|
# -*-coding:UTF-8 -*-
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
from collections import OrderedDict
import threading
lock = threading.Lock()
from sgtn_locale import SingletonLocaleUtil
_indexLocaleItem = 0
class SingletonByKeyItem(object):
def __init__(self, componentIndex, itemIndex):
self._componentIndex = componentIndex
self._pageIndex = itemIndex // SingletonByKey.PAGE_MAX_SIZE
self._indexInPage = itemIndex % SingletonByKey.PAGE_MAX_SIZE
self._sourceStatus = 0x01
self._next = None
class SingletonByKeyTable(object):
def __init__(self, max):
self._max = max
self._table = [None] * max
def get_page(self, id):
return self._table[id]
def new_page(self, id):
array = [None] * self._max
self._table[id] = array
return array
def get_item(self, pageIndex, indexInPage):
array = self.get_page(pageIndex)
if array is None:
return None
return array[indexInPage]
def set_item(self, pageIndex, indexInPage, item):
array = self.get_page(pageIndex)
if array is None:
array = self.new_page(pageIndex)
array[indexInPage] = item
def get_item_by_one_index(self, index):
pageIndex = index // self._max
indexInPage = index % self._max
return self.get_item(pageIndex, indexInPage)
def set_item_by_one_index(self, index, item):
pageIndex = index // self._max
indexInPage = index % self._max
self.set_item(pageIndex, indexInPage, item)
class SingletonByKeyComponents(object):
def __init__(self):
self._count = 0
self._componentTable = []
self._componentIndexTable = {}
def get_id(self, component):
if not component:
return -1
componentIndex = self._componentIndexTable.get(component)
if componentIndex is not None:
return componentIndex
self._componentTable.append(component)
self._componentIndexTable[component] = self._count
self._count += 1
return self._count - 1
def get_name(self, id):
if id < 0 or id >= self._count:
return None
return self._componentTable[id]
class SingletonByKeyLocale(object):
def __init__(self, bykey, locale, asSource):
global _indexLocaleItem
_indexLocaleItem += 1
self._indexLocaleItem = _indexLocaleItem
self._bykey = bykey
self._locale = locale
self._singletonLocale = SingletonLocaleUtil.get_singleton_locale(locale)
self._asSource = asSource
self._isSourceLocale = self._singletonLocale.compare(bykey._singletonLocaleSource)
self._messages = SingletonByKeyTable(SingletonByKey.PAGE_MAX_SIZE)
self._components = SingletonByKeyTable(SingletonByKey.COMPONENT_PAGE_MAX_SIZE)
def check_task(self, componentIndex, needCheck):
if componentIndex >= 0 and needCheck:
componentObj = self._components.get_item_by_one_index(componentIndex)
if componentObj is not None and componentObj.task is not None:
componentObj.task.check()
def get_message(self, componentIndex, pageIndex, indexInPage, needCheck=True):
self.check_task(componentIndex, needCheck)
return self._messages.get_item(pageIndex, indexInPage)
def set_message(self, message, componentObject, componentIndex, pageIndex, indexInPage):
if componentObject:
self._components.set_item_by_one_index(componentIndex, componentObject)
self._messages.set_item(pageIndex, indexInPage, message)
return True
class SingletonLookup(object):
def __init__(self, key, componentIndex, message):
self._key = key
self._componentIndex = componentIndex
self._message = message
self._add = 0
self._aboveItem = None
self._currentItem = None
class SingletonByKey(object):
PAGE_MAX_SIZE = 1024
COMPONENT_PAGE_MAX_SIZE = 128
def __init__(self, localeSource, localeDefault, isDifferent, cacheType):
self._itemCount = 0
self._keyAttrTable = {}
self._items = SingletonByKeyTable(SingletonByKey.PAGE_MAX_SIZE)
self._componentTable = SingletonByKeyComponents()
self._singletonLocaleSource = SingletonLocaleUtil.get_singleton_locale(localeSource)
self._sources = {}
self._locales = {}
self._onlyByKey = (cacheType == 'by_key')
self._isDifferent = isDifferent
self._sourceLocal = None
self._sourceRemote = None
self._defaultLocale = localeDefault
self._defaultRemote = None
def set_item(self, item, pageIndex, indexInPage):
self._items.set_item(pageIndex, indexInPage, item)
return True
def get_and_add_itemcount(self):
count = self._itemCount
self._itemCount += 1
return count
def get_locale_item(self, locale, asSource):
table = self._sources if asSource else self._locales
item = table.get(locale)
if item is None:
singletonLocale = SingletonLocaleUtil.get_singleton_locale(locale)
for oneLocale in table:
oneSingletonLocale = SingletonLocaleUtil.get_singleton_locale(oneLocale)
if singletonLocale.compare(oneSingletonLocale):
item = table[oneLocale]
break
if item is None:
item = SingletonByKeyLocale(self, locale, asSource)
table[locale] = item
return item
def get_component_index(self, component):
return self._componentTable.get_id(component)
def get_string(self, key, componentIndex, localeItem, needFallback=False):
if componentIndex < 0 and not self._onlyByKey:
return None
item = self._keyAttrTable.get(key)
if componentIndex >= 0:
while item:
if item._componentIndex == componentIndex:
break
item = item._next
if item is None:
localeItem.check_task(componentIndex, needFallback)
return None
if not needFallback:
message = localeItem.get_message(componentIndex, item._pageIndex, item._indexInPage, False)
return message
message = None
if item._sourceStatus & 0x01 == 0x01:
message = localeItem.get_message(componentIndex, item._pageIndex, item._indexInPage)
if message is not None:
return message
if self._isDifferent:
if not self._defaultRemote:
self._defaultRemote = self.get_locale_item(self._defaultLocale, False)
message = self._defaultRemote.get_message(componentIndex, item._pageIndex, item._indexInPage)
if message is None:
if item._sourceStatus & 0x04 == 0x04:
message = self._sourceLocal.get_message(componentIndex, item._pageIndex, item._indexInPage)
elif item._sourceStatus & 0x03 == 0x03:
message = self._sourceRemote.get_message(componentIndex, item._pageIndex, item._indexInPage)
if message is None:
message = key
return message
def new_key_item(self, componentIndex):
itemIndex = self.get_and_add_itemcount()
item = SingletonByKeyItem(componentIndex, itemIndex)
self.set_item(item, item._pageIndex, item._indexInPage)
return item
def _find_or_add(self, lookup):
item = self._keyAttrTable.get(lookup._key)
if item is None: # This is new
lookup._currentItem = self.new_key_item(lookup._componentIndex)
lookup._add = 1
return
while item:
if item._componentIndex == lookup._componentIndex: # Found
lookup._currentItem = item
return
lookup._aboveItem = item
item = item._next
lookup._currentItem = self.new_key_item(lookup._componentIndex)
lookup._add = 2
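# do_set_string tracks the origin of a key's source text in item._sourceStatus bit flags:
# 0x04 is set once the local source bundle provides the text, 0x02 once the remote source
# locale provides it, and 0x01 once the two agree (or only one of them is known yet), which
# is the condition get_string checks before serving the translation of the requested locale.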
def do_set_string(self, key, componentObject, componentIndex, localeItem, message):
lookup = SingletonLookup(key, componentIndex, message)
self._find_or_add(lookup)
item = lookup._currentItem
if item is None:
return False
done = localeItem.set_message(message, componentObject, componentIndex, item._pageIndex, item._indexInPage)
if done and localeItem._isSourceLocale:
status = item._sourceStatus
if localeItem._asSource:
self._sourceLocal = localeItem
status |= 0x04
elif localeItem._isSourceLocale:
self._sourceRemote = localeItem
status |= 0x02
if (status & 0x06) != 0x06:
status |= 0x01
else:
localSource = self._sourceLocal.get_message(componentIndex, item._pageIndex, item._indexInPage, False)
remoteSource = self._sourceRemote.get_message(componentIndex, item._pageIndex, item._indexInPage, False)
if localSource == remoteSource:
status |= 0x01
else:
status &= 0x06
item._sourceStatus = status
# Finally, the item is linked into the table only after it is fully prepared, so concurrent readers never see a half-initialized entry.
if lookup._add == 1:
self._keyAttrTable[key] = lookup._currentItem
elif lookup._add == 2:
lookup._aboveItem._next = lookup._currentItem
return done
def set_string(self, key, componentObject, componentIndex, localeItem, message):
if message is None or key is None or localeItem is None:
return False
text = self.get_string(key, componentIndex, localeItem)
if message != text:
with lock:
text = self.get_string(key, componentIndex, localeItem)
if message != text:
return self.do_set_string(key, componentObject, componentIndex, localeItem, message)
return False
def get_key_item(self, pageIndex, indexInPage):
array = self._items.get_page(pageIndex)
if array is None:
return None
return array[indexInPage]
def get_messages(self, componentIndex, localeItem):
messages = OrderedDict()
if componentIndex >= 0 and localeItem:
pages = {}
for i in range(SingletonByKey.PAGE_MAX_SIZE):
array = localeItem._messages.get_page(i)
if array is None:
continue
for k in range(SingletonByKey.PAGE_MAX_SIZE):
text = array[k]
if text is not None:
item = self.get_key_item(i, k)
if item:
if i not in pages:
pages[i] = {}
pages[i][k] = ''
for key in self._keyAttrTable:
item = self._keyAttrTable.get(key)
if item._pageIndex in pages:
array = pages[item._pageIndex]
if item._indexInPage in array:
messages[key] = self.get_string(key, componentIndex, localeItem)
return messages
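if __name__ == '__main__':
    # Illustrative sketch, following the pattern of test/test_sgtn_bykey.py: register a
    # component, store a source and a translated message, then read them back.
    bykey = SingletonByKey('en', 'en', False, 'by_key')
    component_id = bykey.get_component_index('sample')
    source_locale = bykey.get_locale_item('en', True)
    target_locale = bykey.get_locale_item('de-DE', False)
    bykey.set_string('app.title', None, component_id, source_locale, 'Sample App')
    bykey.set_string('app.title', None, component_id, target_locale, 'Beispiel-App')
    print(bykey.get_string('app.title', component_id, target_locale))   # Beispiel-App
    print(bykey.get_messages(component_id, target_locale))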
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_bykey.py
|
sgtn_bykey.py
|
# -*-coding:UTF-8 -*-
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/__init__.py
|
__init__.py
|
# -*-coding:UTF-8 -*-
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import re
class SingletonLocale(object):
def __init__(self, locale):
self._localeList = [locale]
def get_near_locale_list(self):
return self._localeList
def add_near_locale(self, locale):
if locale in self._localeList:
return False
self._localeList.append(locale)
return True
def get_count(self):
return len(self._localeList)
def get_near_locale(self, index):
if index < 0 or index >= self.get_count():
return None
return self._localeList[index]
def get_original_locale(self):
return self.get_near_locale(0)
def compare(self, singletonLocale):
if singletonLocale is None:
return False
return self.is_in_locale_list(singletonLocale.get_near_locale_list())
def is_in_locale_list(self, checkList):
if checkList is None:
return False
for i in range(self.get_count()):
if self.get_near_locale(i) in checkList:
return True
return False
def find_item(self, items, start):
for i in range(start, self.get_count()):
nearLocale = self.get_near_locale(i)
item = items.get(nearLocale)
if item:
return item
return None
def set_items(self, items, item):
for i in range(self.get_count()):
nearLocale = self.get_near_locale(i)
items[nearLocale] = item
class SingletonLocaleUtil(object):
DEFAULT_LOCALE = "en-US"
FALLBACK = {
'zh-CN': 'zh-Hans',
'zh-TW': 'zh-Hant',
'zh-HANS': 'zh-Hans',
'zh-HANT': 'zh-Hant'
}
LocaleFallbackMap = {}
SystemLocale = None
@classmethod
def get_singleton_locale(cls, locale):
if locale is None:
return cls.get_singleton_locale(SingletonLocaleUtil.DEFAULT_LOCALE)
singletonLocale = cls.LocaleFallbackMap.get(locale.lower())
if singletonLocale:
return singletonLocale
parts = re.split(r'[\_|\-]', locale)
parts[0] = parts[0].lower()
if len(parts) > 1:
parts[1] = parts[1].upper()
original = '-'.join(parts)
singletonLocale = SingletonLocale(original)
fallback = cls.FALLBACK.get(original)
if fallback:
singletonLocale.add_near_locale(fallback)
elif len(parts) > 1:
singletonLocale.add_near_locale(parts[0])
cls.LocaleFallbackMap[locale.lower()] = singletonLocale
return singletonLocale
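if __name__ == '__main__':
    # Illustrative sketch, matching test/test_sgtn_bykey.py: each locale expands into a small
    # list of "near" locales that is used for lookup fallback.
    print(SingletonLocaleUtil.get_singleton_locale('zh_cn').get_near_locale_list())   # ['zh-CN', 'zh-Hans']
    print(SingletonLocaleUtil.get_singleton_locale('de_de').get_near_locale_list())   # ['de-DE', 'de']
    print(SingletonLocaleUtil.get_singleton_locale(None).get_near_locale_list())      # ['en-US', 'en']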
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_locale.py
|
sgtn_locale.py
|
# coding=utf-8
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import os
class SgtnDebug:
LOG_INTERNAL = ''
@classmethod
def set_internal_log(cls, file_name):
cls.LOG_INTERNAL = file_name
cls.log_text('debug', '--- internal log --- {0} ---'.format(file_name))
@classmethod
def log_text(cls, desc, data):
if cls.LOG_INTERNAL:
if not os.path.exists(cls.LOG_INTERNAL):
f = open(cls.LOG_INTERNAL, 'w')
else:
f = open(cls.LOG_INTERNAL, 'a')
text = '[{0}] {1}\n'.format(desc, data)
f.write(text)
f.close()
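if __name__ == '__main__':
    # Illustrative sketch: nothing is written until set_internal_log names a file, after
    # which log_text appends '[desc] data' lines to it. The file name is hypothetical.
    SgtnDebug.set_internal_log('sgtn_internal.log')
    SgtnDebug.log_text('demo', 'internal logging enabled')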
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_debug.py
|
sgtn_debug.py
|
# -*-coding:UTF-8 -*-
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import sys
import unittest
sys.path.append('../sgtnclient')
from sgtn_locale import SingletonLocale, SingletonLocaleUtil
from sgtn_bykey import SingletonByKey, SingletonByKeyLocale, SingletonByKeyComponents
class TestByKey(unittest.TestCase):
def check_one(self, bykey, key, message, idComponent, localeObj):
bykey.set_string(key, None, idComponent, localeObj, message)
msg = bykey.get_string(key, -1, localeObj)
print('--- message --- %s --- %s' % (key, msg))
def show_one(self, bykey, key, localeObj):
msg = bykey.get_string(key, -1, localeObj)
print('--- message --- %s --- %s' % (key, msg))
def test_release(self):
bykey = SingletonByKey('en', 'en', False, 'by_key')
id1 = bykey.get_component_index('first')
id2 = bykey.get_component_index('second')
print('--- component id --- %s' % [id1, id2])
localeObj1 = bykey.get_locale_item('en', True)
localeObj2 = bykey.get_locale_item('en', False)
localeObj3 = bykey.get_locale_item('de_de', False)
localeObj4 = bykey.get_locale_item('de-DE', False)
item = bykey.get_key_item(0, 0)
bykey.set_string("key1", None, id1, localeObj1, "en_message")
bykey.set_string("key1", None, id1, localeObj2, "en_message")
self.check_one(bykey, "key1", "de_message", id1, localeObj3)
self.check_one(bykey, "key2", "de_message3", id1, localeObj3)
self.check_one(bykey, "key1", "de_message2", id1, localeObj3)
self.show_one(bykey, "key1", localeObj4)
messages = bykey.get_messages(id1, localeObj3)
print('--- messages --- %s' % messages)
def test_locale(self):
sgtnLocale = SingletonLocaleUtil.get_singleton_locale(None)
print('--- locale --- %s' % sgtnLocale.get_near_locale_list())
sgtnLocale = SingletonLocaleUtil.get_singleton_locale('zh_cn')
print('--- locale --- %s' % sgtnLocale.get_near_locale_list())
sgtnLocale = SingletonLocaleUtil.get_singleton_locale('de_de')
print('--- locale --- %s' % sgtnLocale.get_near_locale_list())
if __name__ == '__main__':
unittest.main()
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/test/test_sgtn_bykey.py
|
test_sgtn_bykey.py
|
# -*-coding:UTF-8 -*-
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import sys
import unittest
sys.path.append('../sgtnclient')
from sgtn_properties import Properties
from sgtn_util import FileUtil
class TestClient(unittest.TestCase):
def test_properties_parser(self):
print('\n--- unittest --- %s --- python %s\n' % (
sys._getframe().f_code.co_name, sys.version_info.major))
text = u'qq1=qqq\n#the middle one\nbb2:bbb #tail\ncc3 abc好abc好'
print(text)
p = Properties()
m = p.parse(text)
self.assertEqual(m['bb2'], 'bbb #tail')
print('--- map1 --- %s ---' % m)
import json
t = json.dumps(m, indent=2, ensure_ascii=False)
print(t)
text = FileUtil.read_text_file('data/data_utf8.txt')
m = p.parse(text)
self.assertEqual(m['username'], u'Username用户名')
print('--- map2 --- %s ---' % m)
text = FileUtil.read_text_file('data/data_ascii.txt')
m = p.parse(text)
self.assertEqual(m['username'], u'Username用户名')
print('--- map3 --- %s ---' % m)
if __name__ == '__main__':
unittest.main()
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/test/test_properties_parser.py
|
test_properties_parser.py
|