code | package | path | filename
---|---|---|---|
# encoding: utf-8
'''
@author: zyl
@file: image_utils.py
@time: 2021/11/2 18:10
@desc:
'''
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/data_utils/image_utils.py
|
image_utils.py
|
# encoding: utf-8
'''
@author: zyl
@file: __init__.py.py
@time: 2021/11/2 16:48
@desc:
'''
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/data_utils/__init__.py
|
__init__.py
|
# encoding: utf-8
"""
@author: zyl
@file: my_utils.py
@time: ~~
@desc: zyl utils
"""
import re
import langid
import pandas as pd
class MyTokenizer:
def __init__(self):
# split tokens joined by '-'
self.sentences_tokenizer_zh = self._cut_paragraph_to_sentences_zh
self.sentences_tokenizer_en = self._cut_paragraph_to_sentences_en().tokenize
self.words_tokenizer_zh = self._cut_sentence_to_words_zh
self.words_tokenizer_en = self._cut_sentence_to_words_en().tokenize
def _cut_paragraph_to_sentences_zh(self, para: str, drop_empty_line=True, strip=True, deduplicate=False):
"""
Args:
para: the input text
drop_empty_line: whether to drop empty lines
strip: whether to strip each sentence
deduplicate: whether to collapse repeated punctuation, which helps split sentences that end with repeated punctuation
Returns:
sentences: list of str
"""
if deduplicate:
para = re.sub(r"([。!?\!\?])\1+", r"\1", para)
para = re.sub('([。!?\?!])([^”’])', r"\1\n\2", para) # single-character sentence terminators
para = re.sub('(\.{6})([^”’])', r"\1\n\2", para) # English ellipsis
para = re.sub('(\…{2})([^”’])', r"\1\n\2", para) # Chinese ellipsis
para = re.sub('([。!?\?!][”’])([^,。!?\?])', r'\1\n\2', para)
# a closing quote ends the sentence only when a terminator precedes it, so the \n goes after the quote; the rules above carefully keep the quotes
para = para.rstrip() # drop any extra trailing \n at the end of the paragraph
# many rule sets also consider the semicolon ';', but it is ignored here, as are dashes and English double quotes; adjust if needed.
sentences = para.split("\n")
if strip:
sentences = [sent.strip() for sent in sentences]
if drop_empty_line:
sentences = [sent for sent in sentences if len(sent.strip()) > 0]
return sentences
def _cut_paragraph_to_sentences_en(self):
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
punkt_param = PunktParameters()
abbreviation = ['et al.', 'i.e.', 'e.g.', 'etc.', 'i.e', 'e.g', 'etc', ' et al']
punkt_param.abbrev_types = set(abbreviation)
tokenizer = PunktSentenceTokenizer(punkt_param)
return tokenizer
def _cut_sentence_to_words_zh(self, sentence: str):
english = 'abcdefghijklmnopqrstuvwxyz0123456789αγβδεζηθικλμνξοπρστυφχψω'
output = []
buffer = ''
for s in sentence:
if s in english or s in english.upper(): # English letter, digit, or Greek letter
buffer += s
else: # Chinese or other character
if buffer:
output.append(buffer)
buffer = ''
output.append(s)
if buffer:
output.append(buffer)
return output
def _cut_sentence_to_words_en(self):
from nltk import WordPunctTokenizer
# from transformers import BasicTokenizer
# BasicTokenizer(do_lower_case=False).tokenize()
return WordPunctTokenizer()
def cut_sentence_to_words(self, sentence: str):
if langid.classify(sentence)[0] == 'zh':
return self.words_tokenizer_zh(sentence)
else:
return self.words_tokenizer_en(sentence)
def cut_paragraph_to_sentences(self, paragraph: str):
if langid.classify(paragraph)[0] == 'zh':
return self.sentences_tokenizer_zh(paragraph)
else:
return self.sentences_tokenizer_en(paragraph)
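# A minimal usage sketch (not part of the original file; assumes `langid`, `nltk` and its punkt data are installed):
#   tokenizer = MyTokenizer()
#   tokenizer.cut_paragraph_to_sentences('今天下雨了。我们没有出门。')  # -> ['今天下雨了。', '我们没有出门。']
#   tokenizer.cut_sentence_to_words('NLP is fun.')  # -> ['NLP', 'is', 'fun', '.']
# The language is detected per call with langid: Chinese input goes through the rule-based splitters,
# everything else falls back to the NLTK tokenizers.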
class NlpUtils:
def __init__(self):
pass
@staticmethod
def show_all():
import pandas as pd
# set the max display width of each value to 250 (default is 50)
pd.set_option('max_colwidth', 250)
# show all columns
pd.set_option('display.max_columns', None)
# show all rows
pd.set_option('display.max_rows', None)
@staticmethod
def df_clean_language(df, column_name, language_list=('en', 'zh')):
# filter a dataframe to rows whose text in the given column is in language_list
import langid
df['language'] = df[column_name].apply(lambda x: langid.classify(str(x))[0])
df = df[df['language'].isin(language_list)]
df = df.drop(['language'], axis=1)
return df
@staticmethod
def split_data_evenly(dt, num):
dt_length = len(dt)
step = int(dt_length / num)
other_dt = dt_length % num
if dt_length <= num:
print('dt_length <= dt_num')
return dt
if other_dt == 0:
return [dt[i:i + step] for i in range(0, dt_length, step)]
else:
first_dt = [dt[i:i + step + 1] for i in range(0, int((step + 1) * other_dt), step + 1)]
second_list = [dt[i:i + step] for i in range(int((step + 1) * other_dt), dt_length, step)]
first_dt.extend(second_list)
return first_dt
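# Worked example (illustrative, not from the original file): the remainder `len(dt) % num`
# is spread over the first chunks, which therefore hold one extra element each.
#   NlpUtils.split_data_evenly(list(range(10)), 3)
#   # -> [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]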
@staticmethod
def clean_text(text):
import re
text = re.sub('<[^<]+?>', '', text).replace('\n', '').strip() # strip html <...> tags
text = ' '.join(text.split()).strip()
return text
@staticmethod
def cut_train_eval(all_df):
from sklearn.utils import resample
raw_df = resample(all_df, replace=False)
cut_point = min(5000, int(0.2 * len(raw_df)))
eval_df = raw_df[0:cut_point]
train_df = raw_df[cut_point:]
return train_df, eval_df
@staticmethod
def two_classification_sampling(train_df, column='labels', pos_label=1, mode='up_sampling'):
import pandas as pd
from sklearn.utils import resample
negative_df = train_df[train_df[column] != pos_label]
neg_len = negative_df.shape[0]
positive_df = train_df[train_df[column] == pos_label]
pos_len = positive_df.shape[0]
if neg_len > pos_len:
if mode == 'down_sampling':
down_sampling_df = resample(negative_df, replace=False, n_samples=pos_len, random_state=242)
return pd.concat([positive_df, down_sampling_df], ignore_index=True)
else:
up_sampling_df = resample(positive_df, replace=True, n_samples=(neg_len - pos_len), random_state=242)
return pd.concat([train_df, up_sampling_df], ignore_index=True)
elif neg_len < pos_len:
if mode == 'down_sampling':
down_sampling_df = resample(positive_df, replace=False, n_samples=neg_len, random_state=242)
return pd.concat([down_sampling_df, negative_df], ignore_index=True)
else:
up_sampling_df = resample(negative_df, replace=True, n_samples=(pos_len - neg_len), random_state=242)
return pd.concat([train_df, up_sampling_df], ignore_index=True)
else:
return train_df
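# Usage sketch (illustrative; `train_df` is any dataframe with a binary 'labels' column):
#   balanced_df = NlpUtils.two_classification_sampling(train_df, column='labels',
#                                                      pos_label=1, mode='up_sampling')
# 'up_sampling' resamples the minority class with replacement until both classes have the same size,
# while 'down_sampling' drops rows from the majority class instead.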
@staticmethod
def find_index(raw_text, find_text, label='label'):
# special_character = set(re.findall('\W', str(raw_text)))
# for i in special_character:
# raw_text = raw_text.replace(i, '\\' + i)
re_result = re.finditer(find_text, raw_text)
starts = []
for i in re_result:
starts.append(i.span()[0])
return [{'label': label, 'start': s, 'offset': len(find_text)} for s in starts]
@staticmethod
def ner_find(text: str, entities: dict, ignore_nested=True):
"""
find the locations of entities in a text
Args:
text: a text, like '我爱吃苹果、大苹果,小苹果,苹果【II】,梨子,中等梨子,雪梨,梨树。'
entities: {'entity_type1':{entity_str1,entity_str2...},
'entity_type2':{entity_str1,entity_str2...},
...}
like : {'apple': ['苹果', '苹果【II】'], 'pear': ['梨', '梨子'],}
ignore_nested: if True, drop entities whose span is contained in a longer matched entity
#>>>IndexedRuleNER().ner(text, entities, False)
Returns:
indexed_entities:{'entity_type1':[[start_index,end_index,entity_str],
[start_index,end_index,entity_str]...]
'entity_type2':[[start_index,end_index,entity_str],
[start_index,end_index,entity_str]...]
...}
#>>>{'apple': [[3, 5, '苹果'], [7, 9, '苹果'], [11, 13, '苹果'], [14, 16, '苹果'], [14, 20, '苹果【II】']],
'pear': [[21, 22, '梨'], [26, 27, '梨'], [30, 31, '梨'], [32, 33, '梨'], [21, 23, '梨子'], [26, 28, '梨子']]}
"""
indexed_entities = dict()
for every_type, every_value in entities.items():
every_type_value = []
for every_entity in list(every_value):
special_character = set(re.findall('\W', str(every_entity)))
for i in special_character:
every_entity = every_entity.replace(i, '\\' + i)
re_result = re.finditer(every_entity, text)
for i in re_result:
every_type_value.append([i.span()[0], i.span()[1], i.group()])
indexed_entities[every_type] = every_type_value
if ignore_nested:
# keep an entity only if its span is not strictly contained in another matched entity's span
for key, value in indexed_entities.items():
all_indexs = [set(range(i[0], i[1])) for i in value]
keep = []
for i in range(len(all_indexs)):
nested = any(i != j and all_indexs[i] < all_indexs[j] for j in range(len(all_indexs)))
if not nested:
keep.append(value[i])
indexed_entities[key] = keep
return indexed_entities
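# Usage sketch, mirroring the docstring example above (illustrative only):
#   text = '我爱吃苹果、大苹果,小苹果,苹果【II】,梨子,中等梨子,雪梨,梨树。'
#   entities = {'apple': ['苹果', '苹果【II】'], 'pear': ['梨', '梨子']}
#   NlpUtils.ner_find(text, entities, ignore_nested=False)  # -> {entity_type: [[start, end, entity_str], ...]}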
@staticmethod
def remove_some_model_files(args):
import os
if os.path.isdir(args.output_dir):
cmd = 'rm -rf ' + args.output_dir.split('outputs')[0] + 'outputs/'
os.system(cmd)
if os.path.isdir(args.output_dir.split('outputs')[0] + '__pycache__/'):
cmd = 'rm -rf ' + args.output_dir.split('outputs')[0] + '__pycache__/'
os.system(cmd)
if os.path.isdir(args.output_dir.split('outputs')[0] + 'cache/'):
cmd = 'rm -rf ' + args.output_dir.split('outputs')[0] + 'cache/'
os.system(cmd)
# @staticmethod
# def sunday_match(target, pattern):
# """
#
# Args:
# target:
# pattern:
#
# Returns:
#
# """
# len_target = len(target)
# len_pattern = len(pattern)
#
# if len_pattern > len_target:
# return list()
#
# index = 0
# starts = []
# while index < len_target:
# if pattern == target[index:index + len_pattern]:
# starts.append(index)
# index += 1
# else:
# if (index + len(pattern)) >= len_target:
# return starts
# else:
# if target[index + len(pattern)] not in pattern:
# index += (len_pattern + 1)
# else:
# index += 1
# return starts
# @staticmethod
# def transfomer_data_format_from_t5_to_ner(df: pd.DataFrame, delimiter='|',
# keep_addition_info=('id', 'text_type')):
# """
#
# Args:
# df: dataframe,must have the columns-['prefix','input_text','target_text']
#
# Returns:
#
# """
# all_cls = df.value_counts('prefix').index.to_list()
# custom_labels = ['O']
# for c in all_cls:
# custom_labels.append('B-' + c.upper())
# custom_labels.append('I-' + c.upper())
# sentence_id = 0
# res_li = []
# my_tokenizer = MyTokenizer()
#
# df = df.drop_duplicates(subset=['input_text'])
# for input_text, sub_df in tqdm(df.groupby('input_text', sort=False)):
# words = my_tokenizer.cut_sentence_to_word_piece(input_text)
# labels = ['O'] * len(words)
#
# for _, d in sub_df.iterrows():
# if keep_addition_info:
# for k in range(len(keep_addition_info)):
# exec(f'info_{k} = d[keep_addition_info[{k}]]')
#
# cls = d['prefix']
# sub_label = set(d['target_text'].split(delimiter))
# while '' in sub_label:
# sub_label.remove('')
# if sub_label:
# for every_entity in sub_label:
# entity = my_tokenizer.cut_sentence_to_word_piece(every_entity)
# res_starts = sunday_match(target=words, pattern=entity)
# if res_starts:
# for r in res_starts:
# labels[r] = 'B-' + cls.upper()
# if len(entity) > 1:
# labels[r + 1: r + len(entity)] = ['I-' + cls.upper()] * (len(entity) - 1)
#
# sentence_ner = []
# for w, l in zip(words, labels):
# r = {'sentence_id': sentence_id, 'words': w, 'labels': l}
# if keep_addition_info:
# for k in range(len(keep_addition_info)):
# r.update({keep_addition_info[k]: eval(f'info_{k}')})
# sentence_ner.append(r)
#
# res_li.extend(sentence_ner)
# sentence_id += 1
#
# df = pd.DataFrame(res_li)
# return df
if __name__ == '__main__':
test_df = pd.read_excel("/home/zyl/disk/PharmAI/pharm_ai/panel/data/v2.4.c/processed_0820.xlsx", 'eval')[0:100]
print('1')
# DTUtils.transfomer_data_format_from_t5_to_ner(test_df)
pass
# class Project(MyModel):
# def __init__(self):
# super(Project, self).__init__()
# self.start_time = '...'
# self.end_time = '...'
#
# self.wandb_proj = 'test'
# self.use_model = 'classification' # mt5 /classification
# self.model_type = 'bert'
# self.pretrained_model = ConfigFilePaths.bert_dir_remote
#
# def run(self):
# self.train_test()
#
# def train_test(self):
# self.model_version = 'vtest'
# self.pretrained_model = '/home/zyl/disk/PharmAI/pharm_ai/po/best_model/v4.2.0.4/'
# self.args = MyModel.set_model_parameter(model_version=self.model_version,
# args=ClassificationArgs(),
# save_dir='po')
# os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3"
# self.cuda_device = 0
# self.args.n_gpu = 3
#
# self.args.num_train_epochs = 1
# self.args.learning_rate = 5e-5
# self.args.train_batch_size = 64 # 512
# self.args.eval_batch_size = 32 # 256
# self.args.max_seq_length = 512
# self.args.gradient_accumulation_steps = 8 # 256
#
# train_df = pd.read_excel('./data/processed_0825.xlsx', 'train')
# eval_df = pd.read_excel('./data/processed_0825.xlsx', 'test')
# self.train(train_df=train_df, eval_df=eval_df)
#
#
# pass
# # d = range(0, 10)
# # num = 5
# # print(DTUtils.split_data_evenly(d, 5))
# # print('1')
# r = ['a',' ','','df','x',]
# f = ['','df']
# g = DTUtils.find_index(r, f)
# print(g)
# for i in g:
# print(r[i['start']:i['start']+i['offset']])
# print(r[22:25])
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/data_utils/nlp_utils.py
|
nlp_utils.py
|
# encoding: utf-8
'''
@author: zyl
@file: api_utils.py
@time: 2021/11/8 17:48
@desc:
'''
from enum import Enum
from typing import List, Set, Optional, Dict, Union
from fastapi import Body, FastAPI, Query
from fastapi import Depends # Depends: dependency injection
from fastapi import File, UploadFile
from fastapi import Form
from fastapi.responses import HTMLResponse
from pydantic import BaseModel, Field, EmailStr
# use html #############################################
# app = FastAPI()
# app.mount("/static", StaticFiles(directory="static"), name="static")
# templates = Jinja2Templates(directory="templates")
# @app.get("/items/{id}", response_class=HTMLResponse)
# async def read_item(request: Request, id: str):
# return templates.TemplateResponse("demo.html", {"request": request, "id": id})
# #####################################
# 1. Instantiate the app #################################
app = FastAPI(title="Fastapi",
version="0.0.1",
contact={
"name": "张玉良",
"url": "https://github.com/ZYuliang/",
"email": "[email protected]",
},
license_info={
"name": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0.html",
},
description="Project description, interface notes, and changelog",
openapi_tags=[
{
"name": "interface1",
"description": "Description of interface 1",
},
{
"name": "interface2",
"description": "Description of interface 2",
"externalDocs": {
"description": "Additional external documentation",
"url": "https://fastapi.tiangolo.com/",
},
},
],
)
# 2. Define inputs and outputs ##############################
class RequestItem(BaseModel): # request schema declared with pydantic Field/Query constraints
name: str = Field(..., example="Foo", title="The description of the item", max_length=300, alias="other_name",
description="Query string for the items to search in the database that have a good match",
regex=None)
num: Optional[float] = Query(..., min_length=3)
# image: Optional[List[Image]] = None
tags: Set[str] = set()
class ResponseItem(BaseModel):
url: str
name: str
class ModelName(str, Enum):
alexnet = "alexnet"
resnet = "resnet"
lenet = "lenet"
class Image(BaseModel):
url: str
name: str
# Request body: parameter types, defaults, constraints, descriptions
class Item(BaseModel):
# an attribute with a default value is optional; otherwise it is required. item.dict() serializes the model.
name: str = Field(..., example="Foo")
description: Optional[str] = None # optional parameter, defaults to None
price: float
tax: Optional[float] = None
q: str = Query(..., min_length=3) # ... marks a required parameter
q2: List[str] = Query(["foo", "bar"]) # Query validation
q3: list = Query([])
q4: Optional[str] = Query(
None,
alias="item-query", # alias
title="Query string", # title
description="Query string for the items to search in the database that have a good match", # description
min_length=3,
deprecated=True, # mark this parameter as deprecated
regex="^fixedquery$" # regex constraint on the string
)
size: float = Query(..., gt=0, lt=10.5) # numeric (int/float) bounds via gt / lt
description2: Optional[str] = Field(
None, title="The description of the item", max_length=300
)
price: float = Field(..., gt=0, description="The price must be greater than zero")
tags: Set[str] = set()
image: Optional[List[Image]] = None # nested request body
# example
class Config:
schema_extra = {
"example": {
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
}
}
class User(BaseModel):
username: str
full_name: Optional[str] = None
class UserIn(BaseModel):
username: str
password: str
email: EmailStr
full_name: Optional[str] = None
class BaseItem(BaseModel):
description: str
type: str
class CarItem(BaseItem):
type = "car"
class PlaneItem(BaseItem):
type = "plane"
size: int
# 3. Endpoint functions #####################################
# response_model_exclude_unset=True: the response only contains values that were actually set, not defaults; alternatively use response_model_include={"name", "description"}
@app.post("/items/", response_model=UserIn, response_model_exclude_unset=True)
async def create_item(item: Item, img: List[Image], weights: Dict[int, float], importance: int = Body(...),
response_model=Union[PlaneItem, CarItem], status_code=201):
print(item.dict())
return item
@app.post("/login/")
async def login(username: str = Form(...), password: str = Form(...)):
# username and password are sent as form fields
return {"username": username}
@app.post("/files/")
async def create_file(file: bytes = File(...)): # the file content is read and received as bytes
return {"file_size": len(file)}
@app.post("/uploadfile/")
async def create_upload_file(file: UploadFile = File(...)): # better for large files such as images, video and binaries; the whole file is not held in memory
# filename: the uploaded file name as a str, e.g. myimage.jpg
# content_type: the content type (MIME type / media type) as a str, e.g. image/jpeg
# file: a SpooledTemporaryFile (file-like object), i.e. an actual Python file that can be passed directly to functions or libraries expecting a file-like object
# UploadFile supports the following async methods, which call the corresponding methods of the internal SpooledTemporaryFile:
# write(data): write data (str or bytes) to the file
# read(size): read size (int) bytes or characters of the file content
# seek(offset): move to byte position offset (int) in the file; e.g. await myfile.seek(0) moves to the start,
# which is especially useful when you need to re-read content after an earlier await myfile.read()
# close(): close the file
contents = await file.read() # or contents = myfile.file.read()
return {"filename": file.filename}
@app.post("/files/", tags=["items"], summary="Create an item",
description="Create an item with all the information, name, description, price, tax and a set of unique tags",
response_description="The created item", deprecated=True)
# tags: groups the url into its own section/type in the docs
# summary: a short summary of the url
# description: a description of the url
# response_description: description of the response
# deprecated=True: marks the endpoint as deprecated
async def create_files(files: List[bytes] = File(...)):
"""
The text written here is the endpoint description, rendered as markdown.
Create an item with all the information:
- **name**: each item must have a name
- **description**: a long description
- **price**: required
- **tax**: if the item doesn't have tax, you can omit this
- **tags**: a set of unique tag strings for this item
"""
return {"file_sizes": [len(file) for file in files]}
@app.get("/")
async def main():
content = """
<body>
<form action="/files/" enctype="multipart/form-data" method="post">
<input name="files" type="file" multiple>
<input type="submit">
</form>
<form action="/uploadfiles/" enctype="multipart/form-data" method="post">
<input name="files" type="file" multiple>
<input type="submit">
</form>
</body>
"""
return HTMLResponse(content=content)
async def common_parameters(q: Optional[str] = None, skip: int = 0, limit: int = 100):
return {"q": q, "skip": skip, "limit": limit}
@app.get("/items/")
async def read_items(commons: dict = Depends(common_parameters)):
return commons
# 4. Testing ###############################
# from fastapi.testclient import TestClient
#
# from .main import app
#
# client = TestClient(app)
#
# def test_read_main():
# response = client.get("/")
# assert response.status_code == 200
# assert response.json() == {"msg": "Hello World"}
if __name__ == '__main__':
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=3243)
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/api_utils/api_utils.py
|
api_utils.py
|
# encoding: utf-8
"""
@author: zyl
@file: test_api.py
@time: 2021/12/10 17:06
@desc:
"""
import asyncio
import time
from typing import List
import aiohttp
import requests
class TestAPI:
def __init__(self):
pass
@staticmethod
def test_api_sample(url, data):
"""
Args:
url: str
data: a JSON-serializable dict
Returns:
>>> to_predict = {"sentences": [...]}  # a JSON-serializable dict
>>> url = "http://0.0.0.0:3245/predict/"
>>> TestAPI.test_api_sample(url=url, data=to_predict)
"""
t1 = time.time()
response = requests.post(url, json=data)
print(response)
t2 = time.time()
print('spend time:' + str((t2 - t1) / 60) + 'minutes.')
return response
@staticmethod
async def one_request(client, url, json):
resp = await client.post(url=url, json=json)
result = await resp.json() # aiohttp's json() is a coroutine
return result
@staticmethod
async def parallel_request(url, json: List[list]):
"""
Send requests concurrently.
Args:
url: url
json: the prepared batches of data; each batch can be requested on its own
Returns:
>>> url = "http://0.0.0.0:3245/predict/"
>>> json = [list(range(10)), list(range(10)), list(range(10))]
>>> asyncio.run(TestAPI.parallel_request(url,json))
"""
# timeout = aiohttp.ClientTimeout(total=200)
async with aiohttp.ClientSession() as client:
start_time = time.time()
task_list = []
for i in json:
req = TestAPI.one_request(client, url, [i])
task = asyncio.create_task(req)
task_list.append(task)
res = await asyncio.gather(*task_list)
end_time = time.time()
print('spend time:' + str((end_time - start_time) / 60) + 'minutes.')
return res
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/api_utils/test_api.py
|
test_api.py
|
# encoding: utf-8
'''
@author: zyl
@file: __init__.py.py
@time: 2021/11/8 17:47
@desc:
'''
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/api_utils/__init__.py
|
__init__.py
|
# encoding: utf-8
"""
@author: zyl
@file: __init__.py.py
@time: 2021/12/10 16:31
@desc:
"""
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/others/__init__.py
|
__init__.py
|
# # encoding: utf-8
# """
# @author: zyl
# @file: others.py
# @time: 2021/12/10 16:01
# @desc:
# """
# import os
# import ast
# import time
# import pandas as pd
#
#
# def list_all_logs(logs_dir):
# res = []
# for file in os.listdir(logs_dir):
# if file.endswith('.log'):
# file_path = os.path.join(logs_dir, file)
# if os.path.getsize(file_path) != 0:
# res.append(file_path)
# return res
#
#
# def list_all_records_in_a_log_file(log_file):
# with open(log_file, mode='r') as f:
# all_lines = f.readlines()
# records = []
#
# for i in range(0, len(all_lines), 3):
# new_record_list = all_lines[i:i + 3]
# new_record_key = new_record_list[1].split('|')[0] # type:str
# start_time = new_record_list[1].split('||')[0].strip()
# end_time = new_record_list[-1].split('||')[0].strip()
# start_time_stamp = time.mktime(time.strptime(start_time.split('.')[0], "%Y-%m-%d %H:%M:%S")) + float(
# start_time.split('.')[-1]) / 1000
# end_time_stamp = time.mktime(time.strptime(end_time.split('.')[0], "%Y-%m-%d %H:%M:%S")) + float(
# end_time.split('.')[-1]) / 1000
# spend_time = round(end_time_stamp - start_time_stamp, 3)
#
# input_data = ast.literal_eval(new_record_list[1].split('Input data: ')[-1].strip('\n'))
# input_data_length = len(input_data['sentences'])
# avg_time = spend_time / input_data_length
#
# if isinstance(new_record_key, str):
# records.append({'start_time': start_time,
# 'start_time_stamp': start_time_stamp,
# 'end_time': end_time,
# 'end_time_stamp': end_time_stamp,
# 'spent_time': spend_time,
# 'input_data_length': input_data_length,
# 'avg_time': avg_time})
# df = pd.DataFrame(records)
# df.to_excel('./data/test.xlsx')
#
# @staticmethod
# def send_to_me(message):
# sender_email = "[email protected]"
# sender_password = "SYPZFDNDNIAWQJBL" # This is authorization password, actual password: pharm_ai163
# sender_smtp_server = "smtp.163.com"
# send_to = "[email protected]"
# Utilfuncs.send_email_notification(sender_email, sender_password, sender_smtp_server,
# send_to, message)
# if __name__ == '__main__':
# # logs_dir = '/home/zyl/disk/PharmAI/pharm_ai/panel/data/logs/'
# # print(list_all_logs(logs_dir))
# # log_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/data/logs/result_2021-06-09_16-47-49_961924.log"
# # list_all_records_in_a_log_file(log_file)
# avg_time = 8832.294 / 8000
# print(avg_time)
#
# pass
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/others/others.py
|
others.py
|
# encoding: utf-8
'''
@author: zyl
@file: T5_model.py
@time: 2021/11/11 10:54
@desc:
'''
import copy
from concurrent.futures import ThreadPoolExecutor, as_completed
import torch
from simpletransformers.t5 import T5Model, DDPT5Model
from zyl_utils.data_utils.nlp_utils import NlpUtils as DTUtils  # nlp_utils defines the class as NlpUtils
class MyT5(T5Model):
"""
add function: use-multi-gpu
"""
def __init__(self, model_type, model_name, args=None, tokenizer=None, use_cuda=True, cuda_device=-1, **kwargs):
super(MyT5, self).__init__(model_type=model_type, model_name=model_name, args=args,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
self.funcs = None  # populated by get_funcs() before multi-gpu prediction
def get_funcs(self, gpus):
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
def predict_gpu(self, to_predict, gpus: list = None):
# gpus can be like: ["1","2"]; fall back to single-gpu predict when gpus is None,
# has a single entry, or there are no more samples than gpus
if gpus and ((len(to_predict) <= len(gpus)) or (len(gpus) == 1)):
gpus = None
if not gpus:
outputs = self.predict(to_predict=to_predict)
else:
if not self.funcs:
self.get_funcs(gpus)
print('Start processing data...')
max_workers = len(gpus)
sub_data_sets = DTUtils.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
for f in as_completed(futures): # yields each future as it completes
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
outputs = []
for i in sorted(res.keys()):
for j in res[i]:
outputs.append(j)
return outputs
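# Usage sketch (illustrative; the checkpoint path, args and gpu ids are placeholders):
#   model = MyT5(model_type='mt5', model_name='./best_model/', args=my_t5_args)
#   preds = model.predict_gpu(['prefix: some text', 'prefix: other text'], gpus=['0', '1'])
# Each gpu gets a deep copy of the model and an even slice of `to_predict`; the thread-pool results
# are gathered and re-assembled in the original order.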
class MyDDPT5(DDPT5Model):
"""
add function: use-multi-gpu
"""
def __init__(self, model_type, model_name, args=None, tokenizer=None, use_cuda=True, cuda_device=-1, **kwargs):
super(MyDDPT5, self).__init__(model_type=model_type, model_name=model_name, args=args,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
self.funcs = None  # populated by get_funcs() before multi-gpu prediction
def get_funcs(self, gpus):
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
def predict_gpu(self, to_predict, gpus: list = None):
# gpus can be like: ["1","2"]; fall back to single-gpu predict when gpus is None,
# has a single entry, or there are no more samples than gpus
if gpus and ((len(to_predict) <= len(gpus)) or (len(gpus) == 1)):
gpus = None
if not gpus:
outputs = self.predict(to_predict=to_predict)
else:
if not self.funcs:
self.get_funcs(gpus)
print('Start processing data...')
max_workers = len(gpus)
sub_data_sets = DTUtils.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
for f in as_completed(futures): # yields each future as it completes
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
outputs = []
for i in sorted(res.keys()):
for j in res[i]:
outputs.append(j)
return outputs
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/my_T5model.py
|
my_T5model.py
|
# encoding: utf-8
"""
@author: zyl
@file: ner_utils.py
@time: 2021/9/14 14:03
@desc: ner utils for simple-transformer mt5 model, eval and predict
"""
import pandas as pd
class NERUtils:
# ner utils for mt5 model
def __init__(self):
# eval_entity_recognition ------ evaluation
# revise_target_texts
# revise_target_text
# keep_entities_in_input_text
# predict_entity_recognition ------- prediction
# split_texts_with_sliding_window
# model.predict_gpu
# combine_pred_target_texts_by_ids
# revise_target_texts
# revise_target_text
# keep_entities_in_input_text
# entity_recognition_v2 ----- the metric
pass
@staticmethod
def eval_entity_recognition(model, eval_df: pd.DataFrame, check_in_input_text: bool, delimiter='|', tokenizer=None,
use_sliding_window=False, sliding_window=512, stride=0.8, pos_neg_ratio=None,
use_multi_gpus=None, self_metric=False):
"""eval entity recognition in mt5 model, version-v2 , reference: https://docs.qq.com/doc/DYXRYQU1YbkVvT3V2
Args:
model: an mt5 model
eval_df: a pd.DataFrame, must have the columns ['prefix', 'input_text', 'target_text']
check_in_input_text: whether to keep only entities that appear in the input_texts
delimiter: the delimiter in target_text that separates different entities
use_sliding_window: whether to split the input text with a sliding window when predicting
sliding_window: truncating size (max token length per window)
stride: overlapping size
use_multi_gpus: list of gpus to use, or None
pos_neg_ratio: the ratio of positive and negative sample importance
self_metric: self_metric
tokenizer: tokenizer used to split sentences
Returns:
prints a report and returns res, {prefix: res_df}, type: dict
"""
prefixes = eval_df['prefix'].to_list()
input_texts = eval_df['input_text'].tolist()
target_texts = eval_df['target_text'].tolist()
revised_target_texts = NERUtils.revise_target_texts(target_texts=target_texts,
input_texts=input_texts, delimiter=delimiter,
check_in_input_text=check_in_input_text)
pred_target_texts = NERUtils.predict_entity_recognition(model, prefixes, input_texts, tokenizer=tokenizer,
use_sliding_window=use_sliding_window,
sliding_window=sliding_window, stride=stride,
delimiter=delimiter, use_multi_gpus=use_multi_gpus)
revised_pred_target_texts = NERUtils.revise_target_texts(target_texts=pred_target_texts,
input_texts=input_texts, delimiter=delimiter,
check_in_input_text=check_in_input_text)
eval_df['true_target_text'] = revised_target_texts
eval_df['pred_target_text'] = revised_pred_target_texts
eval_res = {}
for prefix in set(prefixes):
prefix_df = eval_df[eval_df['prefix'] == prefix]
y_true = prefix_df['true_target_text'].tolist()
y_pred = prefix_df['pred_target_text'].tolist()
print(f'{prefix} report:')
res_df = NERUtils.entity_recognition_v2(y_true, y_pred, pos_neg_ratio=pos_neg_ratio,
self_metric=self_metric)
eval_res[prefix] = res_df
print(f'sum report:')
res_df = NERUtils.entity_recognition_v2(revised_target_texts, revised_pred_target_texts,
pos_neg_ratio=pos_neg_ratio, self_metric=self_metric)
eval_res['sum'] = res_df
return eval_res # {prefix:res_df},type:dict
@staticmethod
def predict_entity_recognition(model, prefixes: list, input_texts: list, use_sliding_window=False,
sliding_window=512, stride=0.8, tokenizer=None,
delimiter='|', use_multi_gpus=None) -> list:
"""predict entity recognition in mt5 model,
Args:
model: an mt5 model
prefixes: prefixes
input_texts: input_texts
use_sliding_window: whether to split long input texts with a sliding window
sliding_window: the max token length for the model input (max_sequence_length)
tokenizer: tokenizer
stride: stride; when stride < 1, (1-stride)*sliding_window tokens overlap between windows
delimiter: the delimiter in target_text that separates different entities, default: '|'
use_multi_gpus: list of gpus to use, or None
Returns:
pred_target_texts: list; every element corresponds to one prefix and one input_text
"""
if len(input_texts) == 1:
use_multi_gpus = None
assert len(prefixes) == len(input_texts)
if use_sliding_window:
t_ids, t_prefixes, t_input_texts = NERUtils.split_texts_with_sliding_window(input_texts, prefixes,
tokenizer=tokenizer,
sliding_window=sliding_window,
stride=stride)
to_predict_texts = [i + ': ' + j for i, j in zip(t_prefixes, t_input_texts)]
if not use_multi_gpus:
pred_target_texts = model.predict(to_predict_texts)
else:
pred_target_texts = model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
pred_target_texts = NERUtils.combine_pred_target_texts_by_ids(pred_target_texts, t_ids, delimiter)
else:
to_predict_texts = [i + ': ' + j for i, j in zip(prefixes, input_texts)]
if not use_multi_gpus:
pred_target_texts = model.predict(to_predict_texts)
else:
pred_target_texts = model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
assert len(pred_target_texts) == len(input_texts)
return pred_target_texts # type:list[str]
@staticmethod
def split_text_with_sliding_window(text: str, sliding_window=128, tokenizer=None, stride=0.8) -> list:
""" any sequence exceeding the max_seq_length will be split into several windows (sub-sequences),
each of length max_seq_length. The windows will typically overlap each other to a certain degree to
minimize any information loss that may be caused by hard cutoffs.
Args:
text: a str text
sliding_window: the window size (max_seq_length) used for truncation
tokenizer: tokenizer
stride: The amount of overlap between the windows,The stride can be specified in terms of either a fraction
of the max_seq_length, or as an absolute number of tokens.
Returns:
truncated_input_text: the list of truncated_input_text
"""
if not isinstance(text, str):
text = str(text)
if not tokenizer:
try:
from transformers.models.t5 import T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained("mt5-base")
except Exception:
print('no tokenizer....')
tokens = tokenizer.tokenize(text)
if len(tokens) <= sliding_window:
return [text]
else:
split_text = []
if stride < 1:
step_size = int(sliding_window * stride)
else:
step_size = int(stride)
steps = int(len(tokens) / step_size)
for i in range(0, steps + 1):
text_i_tokens = tokens[i * step_size:i * step_size + sliding_window]
if text_i_tokens:
text_i = ''.join(text_i_tokens).replace('▁', ' ').strip()
split_text.append(text_i)
if (len(split_text) > 1) and (
len(tokenizer.tokenize(split_text[-1])) < (sliding_window - step_size)):
split_text = split_text[0:-1]
return split_text
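# Worked example (illustrative): with sliding_window=128 and stride=0.8, step_size = int(128 * 0.8) = 102,
# so windows start at token 0, 102, 204, ... and consecutive windows overlap by 128 - 102 = 26 tokens;
# a trailing window shorter than sliding_window - step_size is dropped because the previous window already covers it.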
@staticmethod
def split_texts_with_sliding_window(input_texts: list, prefixes: list, tokenizer=None,
sliding_window=512, stride=0.8):
""" for every input_text in input_texts, split it and record the split_ids for combining
Args:
input_texts: the list of many input_text
prefixes: the prefix list of the input_texts list
sliding_window: sliding_window,the max token length for the model input(max_sequence_length)
tokenizer: tokenizer
stride: stride,(1-stride)*sliding_window for overlapping
Returns:
split_ids, split_prefixes, split_input_texts
"""
assert len(input_texts) == len(prefixes) # every input_text corresponds a prefix
input_texts_ids = range(len(input_texts))
split_ids = []
split_prefixes = []
split_input_texts = []
if not tokenizer:
try:
from transformers.models.t5 import T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained("mt5-base")
except Exception:
print('no tokenizer....')
for i_t_d, p, i_t in zip(input_texts_ids, prefixes, input_texts):
split_input_text = NERUtils.split_text_with_sliding_window(i_t, sliding_window, tokenizer, stride)
for t_i_t in split_input_text:
split_ids.append(i_t_d)
split_input_texts.append(t_i_t)
split_prefixes.append(p)
return split_ids, split_prefixes, split_input_texts # type:tuple[list[int],list[str],list[str]]
@staticmethod
def combine_pred_target_texts_by_ids(pred_target_texts, split_ids, delimiter: str = '|') -> list:
"""combine truncated_predicted_target_texts split_ids
Args:
pred_target_texts: the result of predicting the truncated input_texts
split_ids: get the truncated_ids when truncating input_texts
delimiter: the delimiter in target_text to split different entities
Returns:
pred_target_texts: predicted target_texts
"""
ids_target_text_dict = dict()
for i, j in zip(split_ids, pred_target_texts):
if not ids_target_text_dict.get(i):
ids_target_text_dict[i] = delimiter + j + delimiter
else:
ids_target_text_dict[i] = ids_target_text_dict[i] + j + delimiter
pred_target_texts = [ids_target_text_dict[k] for k in sorted(ids_target_text_dict.keys())]
return pred_target_texts # type:list
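# Worked example (illustrative): window-level predictions of the same original text are
# glued back together using the split ids recorded at truncation time.
#   NERUtils.combine_pred_target_texts_by_ids(['a', 'b', 'c'], [0, 0, 1], '|')
#   # -> ['|a|b|', '|c|']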
@staticmethod
def revise_target_texts(target_texts: list, input_texts: list, check_in_input_text: bool = False, delimiter='|'):
"""revise the target texts,
Args:
target_texts: the list of the target_texts
input_texts: the list of the input_texts
check_in_input_text: if check the entities in input_text
delimiter: the delimiter in target_text to split different entities
Returns:
revised_target_texts = list[set]
"""
revised_target_texts = [NERUtils.revise_target_text(t_t, return_format='set', delimiter=delimiter) for
t_t in target_texts] # type:list[set,...]
if check_in_input_text:
revised_target_texts = NERUtils.keep_entities_in_input_text(input_texts, revised_target_texts)
return revised_target_texts # type:list[set]
@staticmethod
def revise_target_text(target_text: str, delimiter: str = '|', return_format='set'):
""" revise the target text
Args:
target_text: str, target_text
return_format: 'set' means:'every entity is an element in a set', 'str' means: different entities are split
by the delimiter
delimiter: the delimiter in target_text to split different entities
Returns:
revised_target_text : set or list
"""
assert isinstance(target_text, str)
target_text = target_text.split(delimiter)
target_text = set([' '.join(e.strip().split()) for e in target_text])
if '' in target_text:
target_text.remove('')
if return_format == 'set':
revised_target_text = target_text
elif return_format == 'list':
revised_target_text = list(target_text)
else: # return_format == 'str'
revised_target_text = '|'
if target_text != set():
for entity in list(target_text):
revised_target_text += (str(entity) + '|')
return revised_target_text
@staticmethod
def keep_entities_in_input_text(input_texts: list, target_texts: list):
"""for each sample, keep only the entities that appear in the input text and remove the others
Args:
input_texts: the list of input_texts; every input text is a string
target_texts: the list of target_texts; every target text is a set
Returns:
revised_target_texts: list[set]
"""
revised_target_texts = []
for input_text, target_text in zip(input_texts, target_texts):
if target_text != set():
elements = list(target_text)
for e in elements:
if str(e) not in input_text:
target_text.remove(e) # type:set
revised_target_texts.append(target_text)
return revised_target_texts # type:list[set]
@staticmethod
def entity_recognition_v2(y_true: list, y_pred: list, pos_neg_ratio: str = None, self_metric=False):
"""the metric of entity_recognition, version-v2, reference: https://docs.qq.com/doc/DYXRYQU1YbkVvT3V2
Args:
y_true: list[set],the list of true target texts,each element is a set
y_pred: list[set],the list of pred target texts,each element is a set
pos_neg_ratio: the ratio of positive and negative sample importance, default: the ratio of positive and
negative sample sizes, you can set it,like"7:3"
self_metric: self_metric
Returns:
show report and res
"""
neg_data = 0
neg_correct_dt = 0
neg_wrong_dt = 0
neg_redundant_entities = 0
pos_data = 0
pos_correct_dt = 0
pos_wrong_dt = 0
pos_correct_entities = 0
pos_wrong_entities = 0
pos_omitted_entities = 0
pos_redundant_entities = 0
for i, j in zip(y_true, y_pred):
if i == set():
neg_data += 1
if j == set():
neg_correct_dt += 1
else:
neg_wrong_dt += 1
neg_redundant_entities += len(j)
else:
pos_data += 1
true_pred = len(i & j)
pos_correct_entities += true_pred
if i == j:
pos_correct_dt += 1
elif len(i) >= len(j):
pos_wrong_dt += 1
pos_wrong_entities += (len(j) - true_pred)
pos_omitted_entities += (len(i) - len(j))
else:
pos_wrong_dt += 1
pos_redundant_entities += (len(j) - len(i))
pos_wrong_entities += (len(i) - true_pred)
all_pos_entities = pos_correct_entities + pos_wrong_entities + pos_omitted_entities + pos_redundant_entities
if neg_data == 0:
neg_metric = 0
else:
neg_metric = neg_correct_dt / (neg_correct_dt + neg_redundant_entities)
if pos_data == 0:
pos_metric = 0
else:
pos_metric = pos_correct_entities / all_pos_entities
sum_metric_micro = (pos_correct_entities + neg_correct_dt) / (
neg_correct_dt + neg_redundant_entities + all_pos_entities)
# sum_metric_macro = neg_metric * 0.5 + pos_metric * 0.5
if pos_neg_ratio:
pos_all = float(pos_neg_ratio.split(':')[0])
neg_all = float(pos_neg_ratio.split(':')[1])
pos_ratio = pos_all / (pos_all + neg_all)
neg_ratio = neg_all / (pos_all + neg_all)
else:
pos_ratio = pos_data / (pos_data + neg_data)
neg_ratio = neg_data / (pos_data + neg_data)
sum_metric_weighted = pos_ratio * pos_metric + neg_ratio * neg_metric
# pos_precision = pos_correct_dt / (neg_correct_dt + pos_correct_dt)
# recall = pos_correct_dt / pos_data
tp = pos_correct_dt
fn = pos_wrong_dt
fp = neg_wrong_dt
tn = neg_correct_dt
accuracy = (tp + tn) / (tp + fn + fp + tn)
# precision = tp / (tp + fp)
# recall = tp / (tp + fn)
# f1 = 2 / (1 / precision + 1 / recall)
r = {
'positive data': [str(pos_data), pos_correct_dt, pos_wrong_dt, pos_correct_entities,
pos_wrong_entities, pos_omitted_entities, pos_redundant_entities, pos_metric],
'negative data': [neg_data, neg_correct_dt, neg_wrong_dt, '-', '-', '-', neg_redundant_entities,
neg_metric],
'all data ': [str(pos_data + neg_data), neg_correct_dt + pos_correct_dt, neg_wrong_dt + pos_wrong_dt,
pos_correct_entities, pos_wrong_entities, pos_omitted_entities,
pos_redundant_entities + neg_redundant_entities,
sum_metric_micro],
# 'precision': ['', '', '', '', '', '', '', precision],
# 'recall': ['', '', '', '', '', '', '', recall],
# 'f1 score': ['', '', '', '', '', '', '', (2 * precision * recall) / (precision + recall)],
# 'accuracy score': ['', '', '', '', '', '', '', (neg_correct_dt + pos_correct_dt) / (pos_data + neg_data)],
# 'micro score': ['', '', '', '', '', '', '', sum_metric_micro],
# 'macro score': ['', '', '', '', '', '', '', sum_metric_macro],
'weighted score': ['', '', '', '', '', '', '', sum_metric_weighted],
}
index = ['| data_num', '| correct_data', '| wrong_data', '| correct_entities', '| wrong_entities',
'| omitted_entities', '| redundant_entities', '| score']
res_df = pd.DataFrame(r, index=index).T
pd.set_option('precision', 4)
pd.set_option('display.width', None)
pd.set_option('display.max_columns', None)
pd.set_option("colheader_justify", "center")
print(res_df)
print(
f"positive-sample score: {pos_correct_entities} / "
f"({pos_correct_entities}+{pos_wrong_entities}+{pos_omitted_entities}+"
f"{pos_redundant_entities}) = {round(pos_metric, 4)}, negative-sample score: {neg_correct_dt} / ({neg_correct_dt} + "
f"{neg_redundant_entities}) = {round(neg_metric, 4)},",
f"overall score: ({pos_correct_entities} + {neg_correct_dt}) / "
f"({all_pos_entities}+{neg_correct_dt + neg_redundant_entities}) = {round(sum_metric_micro, 4)}",
f"accuracy: {accuracy}",
)
print('\n')
if self_metric:
more_not_error_pos = (pos_correct_entities + pos_redundant_entities) / (
pos_correct_entities + pos_wrong_entities + pos_omitted_entities + pos_redundant_entities)
print(
f"self-metric - positive-sample score: {pos_correct_entities + pos_redundant_entities} / "
f"({pos_correct_entities}+{pos_wrong_entities}+{pos_omitted_entities}+"
f"{pos_redundant_entities}) = {round(more_not_error_pos, 4)}, negative-sample score: {round(1, 4)},")
print('\n')
return res_df # type:pd.DataFrame
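# Usage sketch (illustrative): each element of y_true / y_pred is the set of entities for one sample,
# with an empty set marking a negative sample.
#   y_true = [{'apple'}, set(), {'pear', 'peach'}]
#   y_pred = [{'apple'}, {'pear'}, {'pear'}]
#   res_df = NERUtils.entity_recognition_v2(y_true, y_pred)  # prints the report and returns a DataFrame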
if __name__ == '__main__':
pass
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/ner_utils.py
|
ner_utils.py
|
# encoding: utf-8
'''
@author: zyl
@file: entry_match.py
@time: 2021/11/11 9:58
@desc:
'''
pass
# ##################################################################
# @staticmethod
# def eval_entry_match(model, eval_df: pd.DataFrame, my_dict, delimiter='|', use_dict_match=True,
# pos_neg_ratio=None, keep_entry_in_dict=True, use_multi_gpus=None):
# prefixes = eval_df['prefix'].tolist()
# input_texts = eval_df['input_text'].tolist()
# target_texts = eval_df['target_text'].tolist()
#
# revised_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# pred_target_texts = NERUtils.predict_entry_match(em_model=model, prefix_match_dict=my_dict.prefix_match_dict,
# prefixes=prefixes, input_texts=input_texts,
# use_multi_gpus=use_multi_gpus,
# use_dict_match=use_dict_match)
#
# revised_pred_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=pred_target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# eval_df['true_target_text'] = revised_target_texts
# eval_df['pred_target_text'] = revised_pred_target_texts
#
# eval_res = {}
# for prefix in set(prefixes):
# prefix_df = eval_df[eval_df['prefix'] == prefix]
# y_true = prefix_df['true_target_text'].tolist()
# y_pred = prefix_df['pred_target_text'].tolist()
# print(f'{prefix} report:')
# res_df = NERUtils.entity_recognition_v2(y_true, y_pred, pos_neg_ratio=pos_neg_ratio)
# eval_res[prefix] = res_df
#
# print(f'sum report:')
# res_df = NERUtils.entity_recognition_v2(revised_target_texts, revised_pred_target_texts,
# pos_neg_ratio=pos_neg_ratio)
# eval_res['sum'] = res_df
# return eval_res
#
#
# @staticmethod
# def predict_entry_match(em_model, prefix_match_dict, prefixes: list, input_texts: list, use_dict_match=True,
# use_multi_gpus=None):
# if len(input_texts) == 1:
# use_multi_gpus = None
# if use_dict_match:
# pred_by_dict = []
# for p, i in zip(prefixes, input_texts):
# pred_by_dict.append(
# NERUtils.predict_entry_match_by_dict_match(str(i).strip(), dictionary=prefix_match_dict.get(p),
# use_edit_distance=False))
#
# # i = i.lower() # modify
#
# # if p == 'disease_em':
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=di_dict, use_edit_distance=False))
# # else:
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=tar_dict, use_edit_distance=False))
# else:
# pred_by_dict = [None] * len(input_texts)
#
# to_predict_texts = [i + ': ' + j for i, j in zip(prefixes, input_texts)]
# if not use_multi_gpus:
# pred_by_model = em_model.predict(to_predict_texts)
# else:
# pred_by_model = em_model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
# # pred_by_model = em_model.predict(to_predict_texts)
# assert len(pred_by_model) == len(pred_by_dict)
# pred_target_texts = [d if d else m for d, m in zip(pred_by_dict, pred_by_model)]
# return pred_target_texts
#
#
# @staticmethod
# def predict_entry_match_by_dict_match(input_text: str, dictionary: dict, use_edit_distance: bool = False):
# """predict the entry of a string by using dictionary match
#
# Args:
# input_text: a string
# dictionary: the dict, {entity:entry}
# use_edit_distance: True or False
#
# Returns:
# None or entry(str)
# """
# entry = dictionary.get(input_text)
# if not entry:
# if use_edit_distance:
# import Levenshtein
# max_score = 0
# for every_entity in dictionary.keys():
# score = Levenshtein.ratio(every_entity, input_text)
# if score >= max_score and score > 0.80: # 42-->43-->52
# max_score = score
# entry = dictionary.get(every_entity)
# return entry # None or entry
#
#
# @staticmethod
# def em_revise_target_texts(prefixes, target_texts, prefix_dict, delimiter='|', keep_entry_in_dict=False):
# revised_target_texts = [NERUtils.revise_target_text(t_t, return_format='set', delimiter=delimiter) for
# t_t in target_texts] # type:list[set,...]
#
# if keep_entry_in_dict:
# result = []
# for p, r_t_t in zip(prefixes, revised_target_texts):
# res = set()
# if r_t_t:
# for j in list(r_t_t):
# if j in prefix_dict.get(p):
# res.add(j)
# result.append(res)
# return result
# return revised_target_texts # type:list[set]
# @staticmethod
# def eval_by_auto_batch_size(job, eval_df, initial_eval_batch_size=600):
# """
#
# Args:
# job: you function. if run error, return None.
# eval_df: eval dataframe
# initial_eval_batch_size:
#
# Returns:
#
# """
# eval_batch_size = initial_eval_batch_size
# q = mp.Queue()
# pl = {'eval_batch_size': eval_batch_size}
# res = None
# while not res:
# eval_batch_size = int(eval_batch_size * 0.8)
# print(f'try eval_batch_size: {eval_batch_size}')
# pl['eval_batch_size'] = eval_batch_size
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# res = q.get()
# print(res)
#
# @staticmethod
# def eval_by_different_parameters(job, parameter_cfg: dict, eval_df):
# q = mp.Queue()
# parameters_list = NERUtils.get_parameters_list(parameter_cfg)
# for pl in parameters_list:
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# print(q.get())
#
# @staticmethod
# def get_parameters_list(parameter_cfg: dict):
# """
#
# Args:
# parameter_cfg: like:{'truncating_size': [100,10], 'overlapping_size': [10],'max_seq_length':[100,30]}
#
# Returns:[{'truncating_size': 100, 'overlapping_size': 10, 'max_seq_length': 100}, {'truncating_size': 100,
# 'overlapping_size': 10, 'max_seq_length': 30}, {'truncating_size': 10, 'overlapping_size': 10,
# 'max_seq_length': 100}, {'truncating_size': 10, 'overlapping_size': 10, 'max_seq_length': 30}]
#
# """
# parameters_list = []
# keys = []
# values = []
# for i, j in parameter_cfg.items():
# keys.append(i)
# values.append(j)
# for para in product(*values): # Cartesian product of the value iterables
# cfg = dict(zip(keys, para))
# parameters_list.append(cfg)
# return parameters_list # type:list
# @staticmethod
# def cut_entities(input_entities: list, prefixes: list):
# assert len(input_entities) == len(prefixes) # a input_text corresponds a prefix
# input_texts_ids = range(len(input_entities))
#
# cut_ids = []
# cut_input_entities = []
# cut_prefixes = []
# for id, i_e, p in zip(input_texts_ids, input_entities, prefixes):
# if not isinstance(i_e, set):
# cut_i_e = NERUtils.revise_target_text(target_text=i_e, return_format='set', delimiter='|')
# else:
# cut_i_e = i_e
# if cut_i_e != set():
# for c_i_t in cut_i_e:
# cut_ids.append(id)
# cut_input_entities.append(c_i_t)
# cut_prefixes.append(p)
# return cut_ids, cut_input_entities, cut_prefixes # type:list
#
# @staticmethod
# def combine_cut_entities(input_entities: list, cut_entities: list, cut_ids: list):
# dic = dict()
# for i, j in zip(cut_ids, cut_entities):
# if i not in dic.keys():
# dic[i] = j
# else:
# if isinstance(j, str):
# dic[i] = dic[i] + '|' + j
# else:
# dic[i].update(j)
#
# res = []
# all_keys = list(dic.keys())
# for i in range(len(input_entities)):
# if i in all_keys:
# res.append(dic[i])
# else:
# res.append(set())
# return res
###################################
# eval_entry_match
# em_revise_target_texts
# predict_entry_match
# predict_entry_match_by_dict_match
# model.predict_gpu
#
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/entry_match.py
|
entry_match.py
|
# encoding: utf-8
"""
@author: zyl
@file: ner_model.py
@time: 2021/11/25 13:59
@desc:
"""
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.ner import NERModel
class NerModel:
"""
ner model for train and eval
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.describe = " use simple-transformers--ner-model"
self.show_running_loss = False
self.wandb_proj = 'ner'
self.save_dir = './'
self.model_version = 'v0.0.0.0' # to save model or best model
# like a,b,c,d : a - raw data batch, b - model/method batch (e.g. mt5 vs classification),
# c - data-processing batch for the model (e.g. same input, output as text vs index), d - hyperparameter-tuning iteration
self.model_type = 'roberta'
self.pretrained_model = 'roberta-base' # pretrained model location (model_name)
self.use_cuda = True
self.cuda_device = 0
self.labels = ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
self.model_args = self.my_config()
def my_config(self):
return {
'train_batch_size': 8,
'use_multiprocessing': False,
'use_multiprocessing_for_evaluation': False,
# multiprocess
# base config
'reprocess_input_data': True,
'use_cached_eval_features': False,
'fp16': False,
'manual_seed': 234,
'gradient_accumulation_steps': 1, # effectively increases the batch size, trading time for memory
# save
'no_save': False,
'save_eval_checkpoints': False,
'save_model_every_epoch': False,
'save_optimizer_and_scheduler': True,
'save_steps': -1,
# eval
'evaluate_during_training': True,
'evaluate_during_training_verbose': True,
'no_cache': False,
'use_early_stopping': False,
'encoding': None,
'do_lower_case': False,
'dynamic_quantize': False,
'quantized_model': False,
'silent': False,
'overwrite_output_dir': True,
'output_dir': self.save_dir + 'outputs/' + self.model_version + '/',
'cache_dir': self.save_dir + 'cache/' + self.model_version + '/',
'best_model_dir': self.save_dir + 'best_model/' + self.model_version + '/',
'tensorboard_dir': self.save_dir + 'runs/' + self.model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/',
}
@staticmethod
def deal_with_df(df):
df = df[["sentence_id", "words", "labels"]]
df = df.astype({'sentence_id': 'int', 'words': 'str', 'labels': 'str'})
return df
def train(self, train_data: pd.DataFrame, eval_data: pd.DataFrame):
# deal with dt
train_data = NerModel.deal_with_df(train_data)
eval_data = NerModel.deal_with_df(eval_data)
train_size = len(set(train_data['sentence_id'].tolist()))
eval_size = len(set(eval_data['sentence_id'].tolist()))
all_steps = train_size / self.model_args.get('train_batch_size')
self.model_args.update(
{
'train_size': train_size,
'eval_size': eval_size,
'logging_steps': int(max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'evaluate_during_training_steps': int(
max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train']
}
}
)
# get model
model = NERModel(model_type=self.model_type, model_name=self.pretrained_model, labels=self.labels,
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device)
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version}')
model.train_model(train_data=train_data, eval_data=eval_data)
logger.info('training finished!!!')
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
def train_example(self):
train_file = './test.xlsx'
eval_file = './test.xlsx'
train_data = pd.read_excel(train_file)
eval_data = pd.read_excel(eval_file)
self.save_dir = './'
self.model_version = 'erv4.2.0.2'
self.model_type = 'bert'
self.pretrained_model = 'bert-base-multilingual-cased' # pretrained model location (model_name)
self.use_cuda = True
self.cuda_device = 0
self.labels = ["O", "B-DISEASE", "I-DISEASE"]
self.model_args = self.my_config()
self.model_args.update(
{
'train_file': train_file,
'eval_file': eval_file,
'num_train_epochs': 3,
'learning_rate': 1e-3,
'train_batch_size': 24, # 28
'gradient_accumulation_steps': 16,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.train(train_data,eval_data)
@staticmethod
def eval_decoration(eval_func):
# #############################################################
# examples: should set : self.wandb_proj , self.ver , self.args.hyper_args
# >>> @eval_decoration
# >>> def eval(eval_df,a,b):
# >>> eval_res = func... a,b
# >>> return eval_res
# ############################################################
def eval_method(self, eval_df, *args, **kwargs):
evel_size = self.model_args.get('eval_size')
# wand_b
wandb.init(project=self.wandb_proj, config=self.model_args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
try:
start_time = time.time()
logger.info(f'start eval: model_version---{self.model_version},eval size---{evel_size}')
eval_res = eval_func(self, eval_df, *args, **kwargs) # type:dict
logger.info('eval finished!!!')
end_time = time.time()
need_time = round((end_time - start_time) / evel_size, 5)
eval_time = round(need_time * evel_size, 4)
print(f'eval results: {eval_res}')
logger.info(f'eval time: {need_time} s * {evel_size} = {eval_time} s')
assert isinstance(eval_res, dict) == True
eval_res.update({"eval_length": evel_size})
wandb.log(eval_res)
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
eval_res = dict()
finally:
wandb.finish()
return eval_res
return eval_method
@staticmethod
def get_entity(pred_list, label='DISEASE'):
if not label:
label = ''
entities = []
e = ''
is_entity = 0
for index, p in enumerate(pred_list):
if p == 'O': # the outside tag closes the current entity
if is_entity == 1:
entities.append(e)
is_entity = 0
elif p.startswith('B-' + label):
if is_entity == 1:
if e:
entities.append(e)
e = '-' + str(index)
is_entity = 1
elif p.startswith('I-' + label):
e = e + ('-' + str(index))
if is_entity == 1:
entities.append(e)
return entities
def eval(self, eval_df: pd.DataFrame):
eval_data = NerModel.deal_with_df(eval_df)
eval_size = len(set(eval_df['sentence_id'].tolist()))
self.model_args.update(
{
'eval_size': eval_size,
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'eval']
}
}
)
model = NERModel(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device)
result, model_outputs, preds_list = model.eval_model(eval_data)
wandb.init(
project=self.wandb_proj,
config = self.model_args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval']
)
wandb.log({"f1_score": result.get('f1_score')})
return result
def eval_sample(self):
eval_file = './test.xlsx'
eval_data = pd.read_excel(eval_file)
self.save_dir = './'
self.model_version = 'erv4.2.0.2'
self.model_type = 'bert'
self.use_cuda = True
self.cuda_device = 1
self.model_args = self.my_config()
self.model_args.update(
{
'eval_file': eval_file,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.eval(eval_data)
if __name__ == '__main__':
s = ['O', 'O', 'O', 'B-DISEASE', 'I-DISEASE', 'O', 'B-DISEASE', 'B-DISEASE', 'B-DISEASE', 'I-DISEASE',
'I-DISEASE', 'O', 'B-DISEASE', 'O', 'I-DISEASE', 'I-DISEASE', 'B-DISEASE', 'I-DISEASE']
print(NerModel.get_entity(s))
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/ner_model.py
|
ner_model.py
|
# encoding: utf-8
'''
@author: zyl
@file: my_model.py
@time: 2021/11/11 10:56
@desc:
'''
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.classification import ClassificationModel, ClassificationArgs, DDPClassificationModel
from simpletransformers.t5 import T5Args
from zyl_utils.model_utils.my_T5model import MyT5, MyDDPT5
class MyModel:
"""
my model for train and eval
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.wandb_proj = 'test'
self.model_version = 'test' # to save model or best model
# version like a.b.c.d: a = raw-data batch, b = modelling-approach batch (e.g. mt5 vs. classification),
# c = data-preparation batch for the model (e.g. same input, but text vs. index as output), d = hyper-parameter tuning iteration
self.use_model = 'classification' # mt5 /classification
self.model_type = 'bert'
self.pretrained_model = './best/v1.1.1.1/'  # path of the pre-trained model
self.use_cuda = True
self.cuda_device = 0
self.num_labels = 2
self.args = MyModel.set_model_parameter(model_version=self.model_version,
args=self._set_args(), save_dir='./')
def _set_args(self):
if self.use_model == 't5' or self.use_model == 'mt5':
return T5Args()
else:
return ClassificationArgs()
@staticmethod
def set_model_parameter(model_version='test', args=ClassificationArgs(), save_dir='./'):
# multiprocess
args.use_multiprocessing = False
args.use_multiprocessing_for_evaluation = False
# base config
args.reprocess_input_data = True
args.use_cached_eval_features = False
args.fp16 = False
args.manual_seed = 234
args.gradient_accumulation_steps = 2  # effectively increases the batch size, trading time for memory
# save
args.no_save = False
args.save_eval_checkpoints = False
args.save_model_every_epoch = False
args.save_optimizer_and_scheduler = True
args.save_steps = -1
# eval
args.evaluate_during_training = True
args.evaluate_during_training_verbose = True
args.no_cache = False
args.use_early_stopping = False
args.encoding = None
args.do_lower_case = False
args.dynamic_quantize = False
args.quantized_model = False
args.silent = False
args.overwrite_output_dir = True
args.output_dir = save_dir + 'outputs/' + model_version + '/'
args.cache_dir = save_dir + 'cache/' + model_version + '/'
args.best_model_dir = save_dir + 'best_model/' + model_version + '/'
args.tensorboard_dir = save_dir + 'runs/' + model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/'
return args
def get_train_model(self):
if self.args.n_gpu <= 1:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyT5(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args)
else:
return ClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyDDPT5(model_type=self.model_type, model_name=self.pretrained_model, use_cuda=True,
cuda_device=-1, args=self.args)
elif self.use_model == 'classification':
return ClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
return DDPClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=True, args=self.args, num_labels=self.num_labels)
@staticmethod
def deal_with_df(df, use_model='cls'):
if use_model == 't5' or use_model == 'mt5':
df = df[['prefix', 'input_text', 'target_text']]
df = df.astype('str')
elif use_model == 'sentence_pair':
df = df[['text_a', 'text_b', 'labels']]
df = df.astype({'text_a': 'str', 'text_b': 'str', 'labels': 'int'})
else:
df = df.astype({'text': 'str', 'labels': 'int'})
df = df[['text', 'labels']]
return df
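# Usage sketch (column values below are made up for illustration). For the default
# classification mode the frame only needs 'text' and 'labels' columns:
# >>> df = pd.DataFrame({'text': ['some sentence', 'another sentence'], 'labels': [1, 0]})
# >>> MyModel.deal_with_df(df, use_model='classification')
# For 't5'/'mt5' the columns are 'prefix', 'input_text', 'target_text'; for 'sentence_pair'
# they are 'text_a', 'text_b', 'labels'.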
def train(self, train_df: pd.DataFrame, eval_df: pd.DataFrame, if_send_message=False):
# deal with dt
train_df = MyModel.deal_with_df(train_df, use_model=self.use_model)
eval_df = MyModel.deal_with_df(eval_df, use_model=self.use_model)
# config some parameters
train_size = train_df.shape[0]
self.args.update_from_dict({'train_length': train_size})
all_steps = train_size / self.args.train_batch_size
self.args.logging_steps = int(max(all_steps / 10 / self.args.gradient_accumulation_steps, 1))
self.args.evaluate_during_training_steps = int(
max(all_steps / 10 / self.args.gradient_accumulation_steps, 1))
self.args.wandb_project = self.wandb_proj
self.args.wandb_kwargs = {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train']}
# get model
model = self.get_train_model()
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version},train length---{train_size}')
if self.use_model == 't5' or self.use_model == 'mt5':
model.train_model(train_data=train_df, eval_data=eval_df)
else:
model.train_model(train_df=train_df, eval_df=eval_df)
logger.info('training finished!!!')
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
if if_send_message:
print(f'train failed!!! ERROR:{error}')
# ModelUtils.send_to_me(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
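# Usage sketch (a minimal, hypothetical flow; the data frames and the pretrained model
# name are placeholders, not values shipped with this package):
# >>> my_model = MyModel()
# >>> my_model.pretrained_model = 'bert-base-uncased'
# >>> my_model.train(train_df, eval_df)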
def get_predict_model(self):
if self.args.n_gpu <= 1:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyT5(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args)
else:
return ClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyDDPT5(model_type=self.model_type, model_name=self.args.best_model_dir, use_cuda=True,
cuda_device=-1, args=self.args)
elif self.use_model == 'sentence_pair':
return ClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
return DDPClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=True, args=self.args, num_labels=self.num_labels)
@staticmethod
def eval_decoration(eval_func):
# #############################################################
# example: requires self.wandb_proj, self.model_version and self.args to be set
# >>> @eval_decoration
# >>> def eval(eval_df,a,b):
# >>> eval_res = func... a,b
# >>> return eval_res
# ############################################################
def eval_method(self, eval_df, *args, **kwargs):
eval_length = eval_df.shape[0]
# wandb
wandb.init(project=self.wandb_proj, config=self.args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
try:
start_time = time.time()
logger.info(f'start eval: model_version---{self.model_version},eval length---{eval_length}')
eval_res = eval_func(self, eval_df, *args, **kwargs) # type:dict
logger.info('eval finished!!!')
end_time = time.time()
need_time = round((end_time - start_time) / eval_length, 5)
eval_time = round(need_time * eval_length, 4)
print(f'eval results: {eval_res}')
logger.info(f'eval time: {need_time} s * {eval_length} = {eval_time} s')
assert isinstance(eval_res, dict)
eval_res.update({"eval_length": eval_length})
wandb.log(eval_res)
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
eval_res = dict()
finally:
wandb.finish()
return eval_res
return eval_method
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/my_model.py
|
my_model.py
|
# encoding: utf-8
'''
@author: zyl
@file: DDPT5model.py
@time: 2021/11/11 11:00
@desc:
'''
import logging
import math
import os
import random
from dataclasses import asdict
import pandas as pd
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from simpletransformers.t5.t5_model import T5Model
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm, trange
from transformers.optimization import AdamW, Adafactor
from transformers.optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
class DDPT5Model(T5Model):
"""The DDP version of T5Model"""
def __init__(
self,
model_type,
model_name,
args=None,
tokenizer=None,
use_cuda=True,
cuda_device=-1,
**kwargs,
):
"""
Initializes a DDP T5Model model. Turn off multi-processing settings.
Args:
model_type: The type of model (t5, mt5)
model_name: The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
super().__init__(model_type, model_name, args, tokenizer, use_cuda, cuda_device, **kwargs)
self.args.use_multiprocessing = False
self.args.use_multiprocessing_for_evaluation = False
if self.args.n_gpu == 1:
raise ValueError("You are using DDP with single GPU.")
def train_model(
self,
train_data,
output_dir=None,
show_running_loss=True,
args=None,
eval_data=None,
verbose=True,
**kwargs,
):
"""
Trains the model using 'train_data'
Args:
train_data: Pandas DataFrame containing the 3 columns - `prefix`, `input_text`, `target_text`.
- `prefix`: A string indicating the task to perform. (E.g. `"question"`, `"stsb"`)
- `input_text`: The input text sequence. `prefix` is automatically prepended to form the full input. (<prefix>: <input_text>)
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
verbose (optional): whether to print training progress output.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.
Returns:
""" # noqa: ignore flake8"
if args:
self.args.update_from_dict(args)
if self.args.evaluate_during_training and eval_data is None:
raise ValueError(
"evaluate_during_training is enabled but eval_data is not specified."
" Pass eval_data to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args.overwrite_output_dir = True to overcome.".format(output_dir)
)
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
os.environ['MASTER_ADDR'] = 'localhost'
port = random.randint(10000, 20000)
os.environ['MASTER_PORT'] = str(port)
mp.spawn(self.train_each_proc, nprocs=self.args.n_gpu,
args=(train_dataset, output_dir,
show_running_loss, eval_data, verbose, kwargs))
# self.save_model(model=self.model)
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args.model_name, output_dir))
def train_each_proc(self, process_index, train_dataset, *train_args):
"""
A wrapper function of train() for each process of DDP.
:param process_index: the rank of the current process, passed in by mp.spawn().
:param train_dataset: The training set.
:param train_args: other positional arguments passed to train().
:return: The same as train().
"""
self._local_rank = process_index
self._world_size = self.args.n_gpu
self.train(train_dataset, *train_args[:-1], **train_args[-1])
def train(
self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
args = self.args
self.device = torch.device(f"cuda:{self._local_rank}")
self._move_model_to_device()
torch.distributed.init_process_group(
backend='nccl',
init_method='env://',
world_size=self._world_size,
rank=self._local_rank
)
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self._local_rank])
model = self.model
if self._local_rank == 0:
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
train_sampler = DistributedSampler(
train_dataset,
num_replicas=self._world_size,
rank=self._local_rank
)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size // self._world_size,
pin_memory=True
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop("params")
custom_parameter_names.update(params)
param_group = {**group}
param_group["params"] = [p for n, p in model.named_parameters() if n in params]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop("layer")
layer = f"layer.{layer_number}."
group_d = {**group}
group_nd = {**group}
group_nd["weight_decay"] = 0.0
params_d = []
params_nd = []
for n, p in model.named_parameters():
if n not in custom_parameter_names and layer in n:
if any(nd in n for nd in no_decay):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d["params"] = params_d
group_nd["params"] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if not self.args.train_custom_parameters_only:
optimizer_grouped_parameters.extend(
[
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
)
warmup_steps = math.ceil(t_total * args.warmup_ratio)
args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps
if 0 < args.save_after < 1:
args.save_after = math.ceil(t_total * args.save_after)
if args.optimizer == "AdamW":
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
elif args.optimizer == "Adafactor":
optimizer = Adafactor(
optimizer_grouped_parameters,
lr=args.learning_rate,
eps=args.adafactor_eps,
clip_threshold=args.adafactor_clip_threshold,
decay_rate=args.adafactor_decay_rate,
beta1=args.adafactor_beta1,
weight_decay=args.weight_decay,
scale_parameter=args.adafactor_scale_parameter,
relative_step=args.adafactor_relative_step,
warmup_init=args.adafactor_warmup_init,
)
if self._local_rank == 0:
print("Using Adafactor for T5")
else:
raise ValueError(
"{} is not a valid optimizer class. Please use one of ('AdamW', 'Adafactor') instead.".format(
args.optimizer
)
)
if args.scheduler == "constant_schedule":
scheduler = get_constant_schedule(optimizer)
elif args.scheduler == "constant_schedule_with_warmup":
scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps)
elif args.scheduler == "linear_schedule_with_warmup":
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
elif args.scheduler == "cosine_schedule_with_warmup":
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "cosine_with_hard_restarts_schedule_with_warmup":
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "polynomial_decay_schedule_with_warmup":
scheduler = get_polynomial_decay_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
lr_end=args.polynomial_decay_schedule_lr_end,
power=args.polynomial_decay_schedule_power,
)
else:
raise ValueError("{} is not a valid scheduler.".format(args.scheduler))
if (
args.model_name
and os.path.isfile(os.path.join(args.model_name, "optimizer.pt"))
and os.path.isfile(os.path.join(args.model_name, "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name, "scheduler.pt")))
if self._local_rank == 0:
logger.info(" Training started")
global_step = 0
training_progress_scores = None
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch",
disable=args.silent or self._local_rank != 0, mininterval=0)
epoch_number = 0
best_eval_metric = None
current_loss = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
stop_training = False
if args.model_name and os.path.exists(args.model_name):
try:
# set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name.split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args.wandb_project and self._local_rank == 0:
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
wandb.watch(self.model)
if args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
for epoch in train_iterator:
model.train()
train_sampler.set_epoch(epoch)
if epochs_trained > 0:
epochs_trained -= 1
continue
if self._local_rank == 0:
train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")
batch_iterator = tqdm(
train_dataloader,
desc=f"Running Epoch {epoch_number} of {args.num_train_epochs} on process {self._local_rank}",
disable=args.silent or self._local_rank != 0,
mininterval=0,
)
for step, batch in enumerate(batch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs = self._get_inputs_dict(batch)
if args.fp16:
with amp.autocast():
loss = self.compute_loss(model, args, inputs)
else:
loss = self.compute_loss(model, args, inputs)
loss_ = loss.clone()
torch.distributed.barrier()
torch.distributed.reduce(loss_, 0)
current_loss = loss_.item() / self._world_size
if show_running_loss and self._local_rank == 0:
batch_iterator.set_description(
f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
scaler.unscale_(optimizer)
if args.optimizer == "AdamW":
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0 and self._local_rank == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.wandb_project or self.is_sweeping:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_last_lr()[0]
},
step=global_step
)
if args.save_steps > 0 and global_step % args.save_steps == 0 and self._local_rank == 0:
# Save model checkpoint
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and (
args.evaluate_during_training_steps > 0
and global_step % args.evaluate_during_training_steps == 0
):
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent or self._local_rank != 0,
**kwargs,
)
if self._local_rank == 0:
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args.save_eval_checkpoints:
self.save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
stop_training, best_eval_metric, early_stopping_counter = self.logging_and_saving(
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter)
torch.distributed.barrier()
stop_training_tensor = torch.tensor([stop_training], device=self.device)
torch.distributed.broadcast(stop_training_tensor, src=0)
stop_training = bool(stop_training_tensor.cpu()[0])
if stop_training:
break
model.train()
if stop_training:
break
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if (args.save_model_every_epoch or args.evaluate_during_training) and self._local_rank == 0:
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch and self._local_rank == 0:
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and args.evaluate_each_epoch:
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent or self._local_rank != 0,
**kwargs,
)
if self._local_rank == 0:
if args.save_eval_checkpoints:
self.save_model(output_dir_current, optimizer, scheduler, results=results)
stop_training, best_eval_metric, early_stopping_counter = self.logging_and_saving(
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter)
torch.distributed.barrier()
stop_training_tensor = torch.tensor([stop_training], device=self.device)
torch.distributed.broadcast(stop_training_tensor, src=0)
stop_training = bool(stop_training_tensor.cpu()[0])
if stop_training:
break
# close tensorboard writer to avoid EOFError.
if self._local_rank == 0:
tb_writer.close()
wandb.finish()
def eval_model(
self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs
):
"""
Evaluates the model on eval_data. Saves results to output_dir.
Args:
eval_data: Pandas DataFrame containing the 3 columns - `prefix`, `input_text`, `target_text`.
- `prefix`: A string indicating the task to perform. (E.g. `"question"`, `"stsb"`)
- `input_text`: The input text sequence. `prefix` is automatically prepended to form the full input. (<prefix>: <input_text>)
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
results: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args.output_dir
eval_dataset = self.load_and_cache_examples(
eval_data, evaluate=True, verbose=verbose, silent=silent
)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(
eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs
)
self.results.update(result)
if self.args.evaluate_generated_text:
if self.args.preprocess_inputs:
to_predict = [
prefix + ": " + input_text
for prefix, input_text in zip(
eval_data["prefix"], eval_data["input_text"]
)
]
else:
to_predict = [
prefix + input_text
for prefix, input_text in zip(
eval_data["prefix"], eval_data["input_text"]
)
]
preds = self.predict(to_predict)
result = self.compute_metrics(
eval_data["target_text"].tolist(), preds, **kwargs
)
self.results.update(result)
if verbose:
logger.info(self.results)
return self.results
def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_dataset.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
eval_sampler = DistributedSampler(
eval_dataset,
num_replicas=self._world_size,
rank=self._local_rank
)
eval_dataloader = DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=args.eval_batch_size // self._world_size,
pin_memory=True
)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
if self.args.fp16:
from torch.cuda import amp
for batch in tqdm(
eval_dataloader,
disable=args.silent or silent,
desc="Running Evaluation"
):
inputs = self._get_inputs_dict(batch)
with torch.no_grad():
if self.args.fp16:
with amp.autocast():
outputs = model(**inputs)
loss = outputs[0]
else:
outputs = model(**inputs)
loss = outputs[0]
torch.distributed.barrier()
torch.distributed.reduce(loss, 0)
eval_loss += loss.item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps / self._world_size
if self._local_rank == 0:
print(eval_loss)
results["eval_loss"] = eval_loss
if self._local_rank == 0:
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
return results
def logging_and_saving(
self,
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter):
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args.output_dir, "training_progress_scores.csv"), index=False,
)
if args.wandb_project or self.is_sweeping:
wandb.log(self._get_last_metrics(training_progress_scores), step=global_step)
stop_training = False
if global_step > args.save_after:
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if args.early_stopping_metric_minimize:
if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
stop_training, early_stopping_counter = \
self.check_early_stopping(early_stopping_counter, args, train_iterator, verbose)
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
stop_training, early_stopping_counter = \
self.check_early_stopping(early_stopping_counter, args, train_iterator, verbose)
return stop_training, best_eval_metric, early_stopping_counter
def check_early_stopping(self, early_stopping_counter, args, train_iterator, verbose):
stop_training = False
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
stop_training = True
return stop_training, early_stopping_counter
def compute_loss(self, model, args, inputs):
outputs = model(**inputs)
if args.r_drop:
outputs_ = model(**inputs)
loss = self.compute_r_drop_loss(
outputs['loss'],
outputs_['loss'],
outputs['logits'],
outputs_['logits'],
inputs['attention_mask'],
args.r_drop_alpha
)
else:
loss = outputs[0]
return loss
def compute_kl_loss(self, p, q, pad_mask=None, reduction='mean'):
p_loss = F.kl_div(F.log_softmax(p, dim=-1), F.softmax(q, dim=-1), reduction='none')
q_loss = F.kl_div(F.log_softmax(q, dim=-1), F.softmax(p, dim=-1), reduction='none')
if pad_mask is not None:
p_loss.masked_fill_(pad_mask.to(bool).unsqueeze(-1), 0.)
q_loss.masked_fill_(pad_mask.to(bool).unsqueeze(-1), 0.)
if reduction == 'mean':
p_loss = p_loss.mean()
q_loss = q_loss.mean()
elif reduction == 'sum':
p_loss = p_loss.sum()
q_loss = q_loss.sum()
else:
raise ValueError('Only mean or sum reduction is supported in computing KL Divergence!')
loss = (p_loss + q_loss) / 2
return loss
def compute_r_drop_loss(self, ce1, ce2, logit1, logit2, attention_mask, alpha, reduction='mean'):
kl_loss = self.compute_kl_loss(logit1, logit2, attention_mask, reduction=reduction)
ce_loss = 0.5 * (ce1 + ce2)
return ce_loss + alpha * kl_loss
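# Minimal sketch (not part of the original module) of the symmetric KL term that
# compute_kl_loss() builds for the R-Drop objective, using random logits. The tensor
# shapes are made up for illustration; the snippet relies on the torch and
# torch.nn.functional imports at the top of this file.
# >>> p = torch.randn(2, 5, 8)  # logits from the first forward pass (batch, seq, vocab)
# >>> q = torch.randn(2, 5, 8)  # logits from the second forward pass
# >>> p_loss = F.kl_div(F.log_softmax(p, dim=-1), F.softmax(q, dim=-1), reduction='none').mean()
# >>> q_loss = F.kl_div(F.log_softmax(q, dim=-1), F.softmax(p, dim=-1), reduction='none').mean()
# >>> sym_kl = (p_loss + q_loss) / 2  # what compute_kl_loss() returns with reduction='mean' and no mask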
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/DDPT5model.py
|
DDPT5model.py
|
# encoding: utf-8
'''
@author: zyl
@file: __init__.py
@time: 2021/11/2 16:48
@desc:
'''
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/__init__.py
|
__init__.py
|
import copy
from concurrent.futures import ThreadPoolExecutor, as_completed
import torch
from loguru import logger
from ..data_utils.processing import Processor
class ModelUtils:
def __init__(self):
pass
@staticmethod
def get_best_cuda_device(gpu_num=1):
"""
Get the ids of the `gpu_num` GPUs with the most free memory.
Args:
gpu_num: how many GPU ids to return
Returns:
deviceMemory, a comma-separated string of GPU ids, like: '1,2'
"""
import pynvml
import numpy as np
pynvml.nvmlInit()
deviceCount = pynvml.nvmlDeviceGetCount()
deviceMemory = dict()
for i in range(deviceCount):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
deviceMemory.update({i: mem_info.free / 1024 / 1024})  # free memory in MB
deviceMemory = sorted(deviceMemory.items(), key=lambda x: x[1], reverse=True)
deviceMemory = np.array(deviceMemory, dtype=np.int64).tolist()
deviceMemory_tuple = deviceMemory[0:gpu_num]
deviceMemory = ','.join([str(d[0]) for d in deviceMemory_tuple])
logger.info(f'Use (gpu id, free memory in MB): {deviceMemory_tuple}')
return deviceMemory
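# Usage sketch (assumes pynvml is installed and at least one NVIDIA GPU is visible):
# >>> import os
# >>> os.environ['CUDA_VISIBLE_DEVICES'] = ModelUtils.get_best_cuda_device(gpu_num=2)  # e.g. '1,2'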
@staticmethod
def fix_torch_multiprocessing():
"""
This function will close the shared memory of pytorch,
to fix `OSError: [Errno 12] Cannot allocate memory` ,
when multiprocessing is used to convert data into transformers features.
Add this function to the top of `train.py` ,or before loading a transformer model.
Reference:
- https://github.com/huaweicloud/dls-example/issues/26#issuecomment-411990039
- https://github.com/pytorch/fairseq/issues/1171#issuecomment-549345884
"""
import sys
import torch
from torch.utils.data import dataloader
from torch.multiprocessing.reductions import ForkingPickler
default_collate_func = dataloader.default_collate
def default_collate_override(batch):
dataloader._use_shared_memory = False
return default_collate_func(batch)
setattr(dataloader, 'default_collate', default_collate_override)
for t in torch._storage_classes:
if sys.version_info[0] == 2:
if t in ForkingPickler.dispatch:
del ForkingPickler.dispatch[t]
else:
if t in ForkingPickler._extra_reducers:
del ForkingPickler._extra_reducers[t]
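# Usage sketch: call this once at the top of a training script, before any DataLoader
# with multiple workers starts converting data into features.
# >>> ModelUtils.fix_torch_multiprocessing()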
@staticmethod
def predict_with_multi_gpus(self, to_predict, gpus: list = None):
"""
Predict with multiple GPUs. The host model class must set "self.funcs = None" in its __init__.
Args:
self: a model instance (e.g. a simple-transformers model)
to_predict: the items to predict, list
gpus: the GPU ids to use, list, e.g. ["1","2"]
Returns:
the predictions, in the same order as to_predict
"""
if gpus and len(to_predict) <= len(gpus):  # fall back to single-GPU prediction for tiny inputs
gpus = None
if gpus and (len(gpus) == 1):
gpus = None
if not gpus:
outputs = self.predict(to_predict=to_predict)
else:
if not self.funcs:
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
print('Start processing data...')
max_workers = len(gpus)
sub_data_sets = Processor.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
for f in as_completed(futures): # not block,iterator
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
outputs = []
for i in sorted(res.keys()):
for j in res[i]:
outputs.append(j)
return outputs
from simpletransformers.t5 import T5Model
from simpletransformers.ner import NERModel
class MyT5(T5Model):
def __init__(self, model_type, model_name, args=None, tokenizer=None, use_cuda=True, cuda_device=-1, **kwargs):
super(MyT5, self).__init__(model_type=model_type, model_name=model_name, args=args,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
self.funcs = []
def predict_with_multi_gpus(self, to_predict, gpus: list = None):
return ModelUtils.predict_with_multi_gpus(self, to_predict, gpus)
class MyNer(NERModel):
def __init__(self, model_type, model_name, args=None, labels=None, tokenizer=None, use_cuda=True, cuda_device=-1,
**kwargs):
super(MyNer, self).__init__(model_type=model_type, model_name=model_name, args=args, labels=labels,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
self.funcs = []
def predict_with_multi_gpus(self, to_predict, gpus: list = None):
return ModelUtils.predict_with_multi_gpus(self, to_predict, gpus)
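# Usage sketch (the model path and GPU ids are placeholders): split the inputs across
# two GPUs and merge the predictions back in the original order.
# >>> model = MyT5('mt5', './best_model/', cuda_device=1)
# >>> preds = model.predict_with_multi_gpus(['entry: text one', 'entry: text two', 'entry: text three'],
# ...                                       gpus=['1', '2'])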
# ##################################################################
# @staticmethod
# def eval_entry_match(model, eval_df: pd.DataFrame, my_dict, delimiter='|', use_dict_match=True,
# pos_neg_ratio=None, keep_entry_in_dict=True, use_multi_gpus=None):
# prefixes = eval_df['prefix'].tolist()
# input_texts = eval_df['input_text'].tolist()
# target_texts = eval_df['target_text'].tolist()
#
# revised_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# pred_target_texts = NERUtils.predict_entry_match(em_model=model, prefix_match_dict=my_dict.prefix_match_dict,
# prefixes=prefixes, input_texts=input_texts,
# use_multi_gpus=use_multi_gpus,
# use_dict_match=use_dict_match)
#
# revised_pred_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=pred_target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# eval_df['true_target_text'] = revised_target_texts
# eval_df['pred_target_text'] = revised_pred_target_texts
#
# eval_res = {}
# for prefix in set(prefixes):
# prefix_df = eval_df[eval_df['prefix'] == prefix]
# y_true = prefix_df['true_target_text'].tolist()
# y_pred = prefix_df['pred_target_text'].tolist()
# print(f'{prefix} report:')
# res_df = NERUtils.entity_recognition_v2(y_true, y_pred, pos_neg_ratio=pos_neg_ratio)
# eval_res[prefix] = res_df
#
# print(f'sum report:')
# res_df = NERUtils.entity_recognition_v2(revised_target_texts, revised_pred_target_texts,
# pos_neg_ratio=pos_neg_ratio)
# eval_res['sum'] = res_df
# return eval_res
#
#
# @staticmethod
# def predict_entry_match(em_model, prefix_match_dict, prefixes: list, input_texts: list, use_dict_match=True,
# use_multi_gpus=None):
# if len(input_texts) == 1:
# use_multi_gpus = None
# if use_dict_match:
# pred_by_dict = []
# for p, i in zip(prefixes, input_texts):
# pred_by_dict.append(
# NERUtils.predict_entry_match_by_dict_match(str(i).strip(), dictionary=prefix_match_dict.get(p),
# use_edit_distance=False))
#
# # i = i.lower() # modify
#
# # if p == 'disease_em':
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=di_dict, use_edit_distance=False))
# # else:
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=tar_dict, use_edit_distance=False))
# else:
# pred_by_dict = [None] * len(input_texts)
#
# to_predict_texts = [i + ': ' + j for i, j in zip(prefixes, input_texts)]
# if not use_multi_gpus:
# pred_by_model = em_model.predict(to_predict_texts)
# else:
# pred_by_model = em_model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
# # pred_by_model = em_model.predict(to_predict_texts)
# assert len(pred_by_model) == len(pred_by_dict)
# pred_target_texts = [d if d else m for d, m in zip(pred_by_dict, pred_by_model)]
# return pred_target_texts
#
#
# @staticmethod
# def predict_entry_match_by_dict_match(input_text: str, dictionary: dict, use_edit_distance: bool = False):
# """predict the entry of a string by using dictionary match
#
# Args:
# input_text: a string
# dictionary: the dict, {entity:entry}
# use_edit_distance: True or False
#
# Returns:
# None or entry(str)
# """
# entry = dictionary.get(input_text)
# if not entry:
# if use_edit_distance:
# import Levenshtein
# max_score = 0
# for every_entity in dictionary.keys():
# score = Levenshtein.ratio(every_entity, input_text)
# if score >= max_score and score > 0.80: # 42-->43-->52
# max_score = score
# entry = dictionary.get(every_entity)
# return entry # None or entry
#
#
# @staticmethod
# def em_revise_target_texts(prefixes, target_texts, prefix_dict, delimiter='|', keep_entry_in_dict=False):
# revised_target_texts = [NERUtils.revise_target_text(t_t, return_format='set', delimiter=delimiter) for
# t_t in target_texts] # type:list[set,...]
#
# if keep_entry_in_dict:
# result = []
# for p, r_t_t in zip(prefixes, revised_target_texts):
# res = set()
# if r_t_t:
# for j in list(r_t_t):
# if j in prefix_dict.get(p):
# res.add(j)
# result.append(res)
# return result
# return revised_target_texts # type:list[set]
# @staticmethod
# def eval_by_auto_batch_size(job, eval_df, initial_eval_batch_size=600):
# """
#
# Args:
# job: you function. if run error, return None.
# eval_df: eval dataframe
# initial_eval_batch_size:
#
# Returns:
#
# """
# eval_batch_size = initial_eval_batch_size
# q = mp.Queue()
# pl = {'eval_batch_size': eval_batch_size}
# res = None
# while not res:
# eval_batch_size = int(eval_batch_size * 0.8)
# print(f'try eval_batch_size: {eval_batch_size}')
# pl['eval_batch_size'] = eval_batch_size
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# res = q.get()
# print(res)
#
# @staticmethod
# def eval_by_different_parameters(job, parameter_cfg: dict, eval_df):
# q = mp.Queue()
# parameters_list = NERUtils.get_parameters_list(parameter_cfg)
# for pl in parameters_list:
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# print(q.get())
#
# @staticmethod
# def get_parameters_list(parameter_cfg: dict):
# """
#
# Args:
# parameter_cfg: like:{'truncating_size': [100,10], 'overlapping_size': [10],'max_seq_length':[100,30]}
#
# Returns:[{'truncating_size': 100, 'overlapping_size': 10, 'max_seq_length': 100}, {'truncating_size': 100,
# 'overlapping_size': 10, 'max_seq_length': 30}, {'truncating_size': 10, 'overlapping_size': 10,
# 'max_seq_length': 100}, {'truncating_size': 10, 'overlapping_size': 10, 'max_seq_length': 30}]
#
# """
# parameters_list = []
# keys = []
# values = []
# for i, j in parameter_cfg.items():
# keys.append(i)
# values.append(j)
# for para in product(*values):  # Cartesian product of multiple iterables
# cfg = dict(zip(keys, para))
# parameters_list.append(cfg)
# return parameters_list # type:list
# @staticmethod
# def cut_entities(input_entities: list, prefixes: list):
# assert len(input_entities) == len(prefixes) # a input_text corresponds a prefix
# input_texts_ids = range(len(input_entities))
#
# cut_ids = []
# cut_input_entities = []
# cut_prefixes = []
# for id, i_e, p in zip(input_texts_ids, input_entities, prefixes):
# if not isinstance(i_e, set):
# cut_i_e = NERUtils.revise_target_text(target_text=i_e, return_format='set', delimiter='|')
# else:
# cut_i_e = i_e
# if cut_i_e != set():
# for c_i_t in cut_i_e:
# cut_ids.append(id)
# cut_input_entities.append(c_i_t)
# cut_prefixes.append(p)
# return cut_ids, cut_input_entities, cut_prefixes # type:list
#
# @staticmethod
# def combine_cut_entities(input_entities: list, cut_entities: list, cut_ids: list):
# dic = dict()
# for i, j in zip(cut_ids, cut_entities):
# if i not in dic.keys():
# dic[i] = j
# else:
# if isinstance(j, str):
# dic[i] = dic[i] + '|' + j
# else:
# dic[i].update(j)
#
# res = []
# all_keys = list(dic.keys())
# for i in range(len(input_entities)):
# if i in all_keys:
# res.append(dic[i])
# else:
# res.append(set())
# return res
###################################
# eval_entry_match
# em_revise_target_texts
# predict_entry_match
# predict_entry_match_by_dict_match
# model.predict_gpu
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/model_utils.py
|
model_utils.py
|
# encoding: utf-8
"""
@author: zyl
@file: re_ranker_cross_encoder.py
@time: 2021/12/16 9:46
@desc:
"""
class ReRanker_CrossEncoder:
def __init__(self):
pass
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/re_ranker_cross_encoder.py
|
re_ranker_cross_encoder.py
|
# encoding: utf-8
'''
@author: zyl
@file: my_T5model.py
@time: 2021/11/11 10:54
@desc:
'''
import copy
from concurrent.futures import ThreadPoolExecutor, as_completed
import torch
from simpletransformers.t5 import T5Model
try:
from zyl_utils.model_utils.models.DDPT5model import DDPT5Model
except ImportError as error:
print(f'DDPT5Model import failed: {error}')
from zyl_utils.data_utils.nlp_utils import DTUtils
class MyT5(T5Model):
"""
add function: use-multi-gpu
"""
def __init__(self, model_type, model_name, args=None, tokenizer=None, use_cuda=True, cuda_device=-1, **kwargs):
super(MyT5, self).__init__(model_type=model_type, model_name=model_name, args=args,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
self.funcs = None  # required by predict_gpu(); filled lazily by get_funcs()
def get_funcs(self, gpus):
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
def predict_gpu(self, to_predict, gpus: list = None):
# gpus can be like: ["1","2"]
if gpus and len(to_predict) <= len(gpus):  # fall back to single-GPU prediction for tiny inputs
gpus = None
if gpus and (len(gpus) == 1):
gpus = None
if not gpus:
outputs = self.predict(to_predict=to_predict)
else:
if not self.funcs:
self.get_funcs(gpus)
print('Start processing data...')
max_workers = len(gpus)
sub_data_sets = DTUtils.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
for f in as_completed(futures): # not block,iterator
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
outputs = []
for i in sorted(res.keys()):
for j in res[i]:
outputs.append(j)
return outputs
class MyDDPT5(DDPT5Model):
"""
add function: use-multi-gpu
"""
def __init__(self, model_type, model_name, args=None, tokenizer=None, use_cuda=True, cuda_device=-1, **kwargs):
super(MyDDPT5, self).__init__(model_type=model_type, model_name=model_name, args=args,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
self.funcs = None  # required by predict_gpu(); filled lazily by get_funcs()
def get_funcs(self, gpus):
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
def predict_gpu(self, to_predict, gpus: list = None):
# gpus can be like: ["1","2"]
if gpus and len(to_predict) <= len(gpus):  # fall back to single-GPU prediction for tiny inputs
gpus = None
if gpus and (len(gpus) == 1):
gpus = None
if not gpus:
outputs = self.predict(to_predict=to_predict)
else:
if not self.funcs:
self.get_funcs(gpus)
print('Start processing data...')
max_workers = len(gpus)
sub_data_sets = DTUtils.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
for f in as_completed(futures): # not block,iterator
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
outputs = []
for i in sorted(res.keys()):
for j in res[i]:
outputs.append(j)
return outputs
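# Usage sketch (placeholders only; behaves like MyT5.predict_gpu): both wrappers fall back
# to plain predict() when the input is smaller than the GPU list or only one GPU is given.
# >>> model = MyT5('mt5', './best_model/v1/', cuda_device=1)
# >>> outputs = model.predict_gpu(['entry: text a', 'entry: text b', 'entry: text c'], gpus=['1', '2'])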
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/my_T5model.py
|
my_T5model.py
|
# encoding: utf-8
'''
@author: zyl
@file: entry_match.py
@time: 2021/11/11 9:58
@desc:
'''
pass
# ##################################################################
# @staticmethod
# def eval_entry_match(model, eval_df: pd.DataFrame, my_dict, delimiter='|', use_dict_match=True,
# pos_neg_ratio=None, keep_entry_in_dict=True, use_multi_gpus=None):
# prefixes = eval_df['prefix'].tolist()
# input_texts = eval_df['input_text'].tolist()
# target_texts = eval_df['target_text'].tolist()
#
# revised_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# pred_target_texts = NERUtils.predict_entry_match(em_model=model, prefix_match_dict=my_dict.prefix_match_dict,
# prefixes=prefixes, input_texts=input_texts,
# use_multi_gpus=use_multi_gpus,
# use_dict_match=use_dict_match)
#
# revised_pred_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=pred_target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# eval_df['true_target_text'] = revised_target_texts
# eval_df['pred_target_text'] = revised_pred_target_texts
#
# eval_res = {}
# for prefix in set(prefixes):
# prefix_df = eval_df[eval_df['prefix'] == prefix]
# y_true = prefix_df['true_target_text'].tolist()
# y_pred = prefix_df['pred_target_text'].tolist()
# print(f'{prefix} report:')
# res_df = NERUtils.entity_recognition_v2(y_true, y_pred, pos_neg_ratio=pos_neg_ratio)
# eval_res[prefix] = res_df
#
# print(f'sum report:')
# res_df = NERUtils.entity_recognition_v2(revised_target_texts, revised_pred_target_texts,
# pos_neg_ratio=pos_neg_ratio)
# eval_res['sum'] = res_df
# return eval_res
#
#
# @staticmethod
# def predict_entry_match(em_model, prefix_match_dict, prefixes: list, input_texts: list, use_dict_match=True,
# use_multi_gpus=None):
# if len(input_texts) == 1:
# use_multi_gpus = None
# if use_dict_match:
# pred_by_dict = []
# for p, i in zip(prefixes, input_texts):
# pred_by_dict.append(
# NERUtils.predict_entry_match_by_dict_match(str(i).strip(), dictionary=prefix_match_dict.get(p),
# use_edit_distance=False))
#
# # i = i.lower() # modify
#
# # if p == 'disease_em':
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=di_dict, use_edit_distance=False))
# # else:
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=tar_dict, use_edit_distance=False))
# else:
# pred_by_dict = [None] * len(input_texts)
#
# to_predict_texts = [i + ': ' + j for i, j in zip(prefixes, input_texts)]
# if not use_multi_gpus:
# pred_by_model = em_model.predict(to_predict_texts)
# else:
# pred_by_model = em_model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
# # pred_by_model = em_model.predict(to_predict_texts)
# assert len(pred_by_model) == len(pred_by_dict)
# pred_target_texts = [d if d else m for d, m in zip(pred_by_dict, pred_by_model)]
# return pred_target_texts
#
#
# @staticmethod
# def predict_entry_match_by_dict_match(input_text: str, dictionary: dict, use_edit_distance: bool = False):
# """predict the entry of a string by using dictionary match
#
# Args:
# input_text: a string
# dictionary: the dict, {entity:entry}
# use_edit_distance: True or False
#
# Returns:
# None or entry(str)
# """
# entry = dictionary.get(input_text)
# if not entry:
# if use_edit_distance:
# import Levenshtein
# max_score = 0
# for every_entity in dictionary.keys():
# score = Levenshtein.ratio(every_entity, input_text)
# if score >= max_score and score > 0.80: # 42-->43-->52
# max_score = score
# entry = dictionary.get(every_entity)
# return entry # None or entry
#
#
# @staticmethod
# def em_revise_target_texts(prefixes, target_texts, prefix_dict, delimiter='|', keep_entry_in_dict=False):
# revised_target_texts = [NERUtils.revise_target_text(t_t, return_format='set', delimiter=delimiter) for
# t_t in target_texts] # type:list[set,...]
#
# if keep_entry_in_dict:
# result = []
# for p, r_t_t in zip(prefixes, revised_target_texts):
# res = set()
# if r_t_t:
# for j in list(r_t_t):
# if j in prefix_dict.get(p):
# res.add(j)
# result.append(res)
# return result
# return revised_target_texts # type:list[set]
# @staticmethod
# def eval_by_auto_batch_size(job, eval_df, initial_eval_batch_size=600):
# """
#
# Args:
# job: you function. if run error, return None.
# eval_df: eval dataframe
# initial_eval_batch_size:
#
# Returns:
#
# """
# eval_batch_size = initial_eval_batch_size
# q = mp.Queue()
# pl = {'eval_batch_size': eval_batch_size}
# res = None
# while not res:
# eval_batch_size = int(eval_batch_size * 0.8)
# print(f'try eval_batch_size: {eval_batch_size}')
# pl['eval_batch_size'] = eval_batch_size
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# res = q.get()
# print(res)
#
# @staticmethod
# def eval_by_different_parameters(job, parameter_cfg: dict, eval_df):
# q = mp.Queue()
# parameters_list = NERUtils.get_parameters_list(parameter_cfg)
# for pl in parameters_list:
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# print(q.get())
#
# @staticmethod
# def get_parameters_list(parameter_cfg: dict):
# """
#
# Args:
# parameter_cfg: like:{'truncating_size': [100,10], 'overlapping_size': [10],'max_seq_length':[100,30]}
#
# Returns:[{'truncating_size': 100, 'overlapping_size': 10, 'max_seq_length': 100}, {'truncating_size': 100,
# 'overlapping_size': 10, 'max_seq_length': 30}, {'truncating_size': 10, 'overlapping_size': 10,
# 'max_seq_length': 100}, {'truncating_size': 10, 'overlapping_size': 10, 'max_seq_length': 30}]
#
# """
# parameters_list = []
# keys = []
# values = []
# for i, j in parameter_cfg.items():
# keys.append(i)
# values.append(j)
# for para in product(*values):  # Cartesian product of multiple iterables
# cfg = dict(zip(keys, para))
# parameters_list.append(cfg)
# return parameters_list # type:list
# @staticmethod
# def cut_entities(input_entities: list, prefixes: list):
# assert len(input_entities) == len(prefixes) # a input_text corresponds a prefix
# input_texts_ids = range(len(input_entities))
#
# cut_ids = []
# cut_input_entities = []
# cut_prefixes = []
# for id, i_e, p in zip(input_texts_ids, input_entities, prefixes):
# if not isinstance(i_e, set):
# cut_i_e = NERUtils.revise_target_text(target_text=i_e, return_format='set', delimiter='|')
# else:
# cut_i_e = i_e
# if cut_i_e != set():
# for c_i_t in cut_i_e:
# cut_ids.append(id)
# cut_input_entities.append(c_i_t)
# cut_prefixes.append(p)
# return cut_ids, cut_input_entities, cut_prefixes # type:list
#
# @staticmethod
# def combine_cut_entities(input_entities: list, cut_entities: list, cut_ids: list):
# dic = dict()
# for i, j in zip(cut_ids, cut_entities):
# if i not in dic.keys():
# dic[i] = j
# else:
# if isinstance(j, str):
# dic[i] = dic[i] + '|' + j
# else:
# dic[i].update(j)
#
# res = []
# all_keys = list(dic.keys())
# for i in range(len(input_entities)):
# if i in all_keys:
# res.append(dic[i])
# else:
# res.append(set())
# return res
###################################
# eval_entry_match
# em_revise_target_texts
# predict_entry_match
# predict_entry_match_by_dict_match
# model.predict_gpu
#
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/entry_match.py
|
entry_match.py
|
import copy
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import pandas as pd
import torch
import wandb
from loguru import logger
from simpletransformers.ner import NERModel
from zyl_utils.data_utils.processing import Processor
from ..metrics.ner_metric import entity_recognition_metrics
from tqdm import tqdm
class NerBIO:
"""
NER model (BIO tagging scheme) for training and evaluation, built on simple-transformers
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.describe = " use simple-transformers--ner-model"
self.wandb_proj = 'ner'
self.save_dir = './'
self.model_version = 'v0.0.0.0' # to save model or best model
# version like a.b.c.d: a = raw-data batch, b = modelling-approach batch, c = data-preparation batch for the model, d = hyper-parameter tuning iteration
self.model_type = 'roberta'
self.pretrained_model = 'roberta-base'  # path or name of the pre-trained model (model_name)
self.labels = ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
self.use_cuda = True
self.cuda_device = 0
self.model_args = self.my_config()
self.funcs = None
self.model = None
        self.my_tokenizer = None
def my_config(self):
return {
'train_batch_size': 8,
'use_multiprocessing': False,
'use_multiprocessing_for_evaluation': False,
# multiprocess
# base config
'reprocess_input_data': True,
'use_cached_eval_features': False,
'fp16': False,
'manual_seed': 234,
            'gradient_accumulation_steps': 1,  # effectively increases the batch size, trading time for memory
# save
'no_save': False,
'save_eval_checkpoints': False,
'save_model_every_epoch': False,
'save_optimizer_and_scheduler': True,
'save_steps': -1,
# eval
'evaluate_during_training': True,
'evaluate_during_training_verbose': True,
'no_cache': False,
'use_early_stopping': False,
'encoding': None,
'do_lower_case': False,
'dynamic_quantize': False,
'quantized_model': False,
'silent': False,
'overwrite_output_dir': True,
'output_dir': self.save_dir + 'outputs/' + self.model_version + '/',
'cache_dir': self.save_dir + 'cache/' + self.model_version + '/',
'best_model_dir': self.save_dir + 'best_model/' + self.model_version + '/',
'tensorboard_dir': self.save_dir + 'runs/' + self.model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/',
}
@staticmethod
def deal_with_df(df: pd.DataFrame):
df = df[["sentence_id", "words", "labels"]]
df = df.astype({'sentence_id': 'int', 'words': 'str', 'labels': 'str'})
return df
def train(self, train_data: pd.DataFrame, eval_data: pd.DataFrame, wandb_log=None):
# deal with dt
train_data = NerBIO.deal_with_df(train_data)
eval_data = NerBIO.deal_with_df(eval_data)
train_size = len(set(train_data['sentence_id'].tolist()))
eval_size = len(set(eval_data['sentence_id'].tolist()))
# update args
all_steps = train_size / self.model_args.get('train_batch_size')
self.model_args.update(
{
'logging_steps': int(max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'evaluate_during_training_steps': int(
max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train']
}
}
)
# get model
model = NERModel(model_type=self.model_type, model_name=self.pretrained_model, labels=self.labels,
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device)
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version},train_size---{train_size}')
model.train_model(train_data=train_data, eval_data=eval_data)
logger.info('training finished!!!')
wandb.log({'train_size': train_size, 'eval_size': eval_size})
if wandb_log:
wandb.log(wandb_log)
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
@staticmethod
def get_id_entity(pred_list, label='DISEASE'):
"""
        Extract entity index spans from a BIO-tagged sequence, e.g. ['O', 'O', 'O', 'B-DISEASE', 'I-DISEASE', 'O', ] ----> ['-3-4']
Args:
pred_list: ['O', 'O', 'O', 'B-DISEASE', 'I-DISEASE', 'O', ]
label: DISEASE
Returns:
['-3-4']
"""
if not label:
label = ''
entities = []
e = ''
is_entity = 0
for index, p in enumerate(pred_list):
if p == 'O':
if is_entity == 1:
entities.append(e)
is_entity = 0
elif p.startswith('B-' + label):
if is_entity == 1:
if e:
entities.append(e)
e = '-' + str(index)
is_entity = 1
elif p.startswith('I-' + label):
e = e + ('-' + str(index))
if is_entity == 1:
entities.append(e)
return entities # list or []
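    # Illustrative sketch of get_id_entity when a sequence mixes several entity classes;
    # the returned strings encode token positions, not the tokens themselves.
    # >>> tags = ['B-DISEASE', 'I-DISEASE', 'O', 'B-DRUG', 'O', 'B-DISEASE']
    # >>> NerBIO.get_id_entity(tags, label='DISEASE')
    # ['-0-1', '-5']
    # >>> NerBIO.get_id_entity(tags, label='DRUG')
    # ['-3']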
def eval(self, eval_df: pd.DataFrame, ner_t5_metric=False, wandb_log=None):
eval_data = NerBIO.deal_with_df(eval_df)
eval_size = len(set(eval_df['sentence_id'].tolist()))
# wand_b
wandb.init(project=self.wandb_proj, config=self.model_args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
model = NERModel(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device,
labels=self.labels)
result, model_outputs, preds_list = model.eval_model(eval_data)
if wandb_log:
wandb.log(wandb_log)
wandb.log({"f1_score": result.get('f1_score'), 'eval_size': eval_size})
if ner_t5_metric:
all_entities_cls = set()
for c in self.labels:
if c.startswith('B'):
all_entities_cls.add(c.split('-')[-1])
labels = eval_data.groupby(by=['sentence_id'], sort=False)
labels = labels.apply(lambda x: x['labels'].tolist())
for c in all_entities_cls:
y_pred = [set(NerBIO.get_id_entity(p, label=c)) for p in preds_list]
y_true = [set(NerBIO.get_id_entity(l, label=c)) for l in labels]
print(c + ": \n")
res_df = entity_recognition_metrics(y_true, y_pred)
wandb.log({c + "_" + "ner_t5_metric": res_df.iloc[2, -1]})
def predict_with_multi_gpus(self, to_predict, gpus: list = None, **kwargs):
"""
        Predict with multiple GPUs; intended for evaluating large datasets. Requires "self.funcs = None" in __init__.
        Args:
            self: the model wrapper instance
            to_predict: the samples to predict, a list
            gpus: a list of GPU ids, e.g. ["1", "2"]. If the model's own cuda device is not in the list,
                it is not used; only the GPUs listed in `gpus` are used for prediction.
        Returns:
            the prediction results
"""
if not self.model:
self.model = NERModel(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device,
labels=self.labels)
if len(to_predict) <= len(gpus):
gpus = None
if gpus and (len(gpus) == 1):
gpus = None
if not gpus:
preds, model_outputs = self.model.predict(to_predict=to_predict, **kwargs)
else:
if not self.funcs:
self.funcs = []
for i in gpus:
if i != self.model.device.index:
other_m = copy.deepcopy(self.model)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.model.predict)
max_workers = len(gpus)
sub_data_sets = Processor.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt, **kwargs): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
for f in as_completed(futures): # not block,iterator
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
preds = []
model_outputs = []
for i in sorted(res.keys()):
preds.extend(res[i][0])
model_outputs.extend(res[i][1])
return preds, model_outputs
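    # Hedged usage sketch: the gpu ids below are placeholders and a trained model is
    # assumed under model_args['best_model_dir']. Note that the multi-GPU split only
    # kicks in when len(to_predict) > len(gpus); otherwise the call falls back to the
    # single-GPU predict of the loaded model.
    # ner = NerBIO()
    # preds, outputs = ner.predict_with_multi_gpus(list_of_word_lists, gpus=["0", "1"])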
def predict_texts(self, to_predict,split_on_space=False,if_cut_sentences=False):
if not self.model:
self.model = NERModel(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device,
labels=self.labels)
if not self.my_tokenizer:
from zyl_utils.data_utils.text_processing import MyTokenizer
self.my_tokenizer = MyTokenizer()
        predict_ids = list(range(len(to_predict)))  # sample ids
        sentence_ids = []  # the owning sample id for each split sentence
sentences = []
if if_cut_sentences:
for t,i in zip(to_predict,predict_ids):
tmp_sentences = self.my_tokenizer.cut_paragraph_to_sentences(t) # [str]
for s in tmp_sentences:
words = self.my_tokenizer.cut_sentence_to_words(s, return_starts=False)
sentences.append(words)
sentence_ids.append(i)
else:
for t,i in zip(to_predict,predict_ids):
words = self.my_tokenizer.cut_sentence_to_words(t, return_starts=False)
sentences.append(words)
sentence_ids.append(i)
pred_res, _ = self.model.predict(sentences, split_on_space=split_on_space)
labels = set()
for l in self.labels:
if l!='O':
labels.add(l.split('-')[-1])
if split_on_space:
split_symbol = ' '
else:
split_symbol = ''
results = []
for p_i in predict_ids:
res = {l:set() for l in labels}
for p_r,s_i in zip(pred_res,sentence_ids):
if p_i == s_i:
words = [list(_.keys())[0] for _ in p_r]
pred = [list(_.values())[0] for _ in p_r] # ['B-DISEASE','I'....]
for l in labels:
entities_ids = NerBIO.get_id_entity(pred, label=l) # ['-0-1-2','-3-4'...]
for entity_id in entities_ids:
starts_id = int(entity_id.split('-')[1])
end_id = int(entity_id.split('-')[-1])
res[l].add(split_symbol.join(words[starts_id:end_id+1]))
results.append(res)
return results # [{'TENDEREE': {'临沂市人民医院'}}]
# pred = NerBIO.get_id_entity(pred, label=label)
# pred = [list(p.values())[0] for p in pred[0]]
# preds = []
# for text in tqdm(to_predict):
# if if_cut_sentences:
#
# else:
# sentences = [text]
# entities_in_one_text = []
# for sentence in sentences:
# words, starts = self.my_tokenizer.cut_sentence_to_words(sentence, return_starts=True)
#
# pred, _ = self.predict_with_multi_gpus([words], split_on_space=split_on_space) # [{'entity':'B-DISEASE'...}]
# pred = [list(p.values())[0] for p in pred[0]] # ['B-DISEASE','I'....]
# pred = NerBIO.get_id_entity(pred, label=label) # ['-0-1-2','-3-5'...]
#
# entities_in_one_sentence = []
# if pred:
# for entity in pred:
# starts_id = int(entity.split('-')[1])
# end_id = int(entity.split('-')[-1])
# entities_in_one_sentence.append(sentence[starts[starts_id]:
# starts[end_id] + len(words[end_id])]) # ['癌症'...]
# entities_in_one_text.extend(entities_in_one_sentence)
# preds.append(entities_in_one_text)
# return preds
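    # Hedged usage sketch for predict_texts; the labels and input strings below are
    # placeholders. Each element of the returned list is a dict mapping every entity
    # class in self.labels to the set of strings found for that class in the
    # corresponding input text.
    # ner = NerBIO()
    # ner.labels = ["O", "B-DISEASE", "I-DISEASE"]
    # ner.model_args = ner.my_config()
    # results = ner.predict_texts(["EGFR mutations in lung cancer", "no entity here"])
    # # e.g. [{'DISEASE': {'lung cancer'}}, {'DISEASE': set()}]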
class NerBIOModel(NERModel):
def __init__(self, model_type, model_name, labels=None, weight=None, args=None, use_cuda=True, cuda_device=-1,
onnx_execution_provider=None, **kwargs, ):
super(NerBIOModel, self).__init__(model_type, model_name, labels=labels, weight=weight, args=args,
use_cuda=use_cuda,
cuda_device=cuda_device, onnx_execution_provider=onnx_execution_provider,
**kwargs)
self.funcs = None
from zyl_utils.data_utils.text_processing import MyTokenizer
self.my_tokenizer = MyTokenizer()
def predict_with_multi_gpus(self, to_predict, gpus: list = None, **kwargs):
"""
        Predict with multiple GPUs. Requires "self.funcs = None" in __init__.
        Args:
            self: the model instance
            to_predict: the samples to predict, a list
            gpus: a list of GPU ids, e.g. ["1", "2"]
        Returns:
            the prediction results
"""
if len(to_predict) <= len(gpus):
gpus = None
if gpus and (len(gpus) == 1):
gpus = None
if not gpus:
preds, model_outputs = self.predict(to_predict=to_predict, **kwargs)
else:
if not self.funcs:
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
max_workers = len(gpus)
sub_data_sets = Processor.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt, **kwargs): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
for f in as_completed(futures): # not block,iterator
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
preds = []
model_outputs = []
for i in sorted(res.keys()):
preds.extend(res[i][0])
model_outputs.extend(res[i][1])
return preds, model_outputs
def predict_texts(self, to_predict, split_on_space=False, label='DISEASE'):
from tqdm import tqdm
preds = []
for text in tqdm(to_predict):
sentences = self.my_tokenizer.cut_paragraph_to_sentences(text)
entities_in_one_text = []
for sentence in sentences:
words, starts = self.my_tokenizer.cut_sentence_to_words(sentence, return_starts=True)
pred, _ = self.predict([words], split_on_space=split_on_space) # [{'entity':'B-DISEASE'...}]
pred = [list(p.values())[0] for p in pred[0]] # ['B-DISEASE','I'....]
pred = NerBIO.get_id_entity(pred, label=label) # ['-0-1-2','-3-5'...]
entities_in_one_sentence = []
if pred:
for entity in pred:
starts_id = int(entity.split('-')[1])
end_id = int(entity.split('-')[-1])
entities_in_one_sentence.append(sentence[starts[starts_id]:
starts[end_id] + len(words[end_id])]) # ['癌症'...]
entities_in_one_text.extend(entities_in_one_sentence)
preds.append(entities_in_one_text)
return preds
if __name__ == '__main__':
from zyl_utils import get_best_cuda_device
class M(NerBIO):
def __init__(self):
super(M, self).__init__()
self.wandb_proj = 'test'
self.use_cuda = True
self.cuda_device = get_best_cuda_device()
self.save_dir = './'
def train_sample(self):
train_file = './test.xlsx'
eval_file = './test.xlsx'
train_df = pd.read_excel(train_file) # type:pd.DataFrame
eval_df = pd.read_excel(eval_file) # type:pd.DataFrame
self.model_version = 'v0.0.0.0'
self.model_type = 'bert'
            self.pretrained_model = 'bert-base-multilingual-cased'  # pretrained model path (model_name)
self.model_args = self.my_config()
self.model_args.update(
{
'num_train_epochs': 3,
'learning_rate': 3e-4,
'train_batch_size': 24, # 28
'gradient_accumulation_steps': 16,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.labels = ["O", "B-DISEASE", "I-DISEASE"]
self.train(train_df, eval_df, wandb_log=None)
def eval_sample(self):
eval_file = './test.xlsx'
eval_data = pd.read_excel(eval_file)
self.model_version = 'erv4.2.0.2'
self.model_type = 'bert'
self.model_args = self.my_config()
self.model_args.update(
{
# 'best_model_dir':'./',
'eval_batch_size': 16,
}
)
self.eval(eval_data, ner_t5_metric=True, wandb_log={'eval_file': eval_file})
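    # Hedged usage sketch: ./test.xlsx is a placeholder file that must provide the
    # sentence_id / words / labels columns consumed by NerBIO.deal_with_df.
    # m = M()
    # m.train_sample()  # fine-tune and log the run to wandb
    # m.eval_sample()   # evaluate the saved best model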
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/ner_bio.py
|
ner_bio.py
|
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.t5 import T5Model
from ..metrics.ner_metric import entity_recognition_metrics
class NerT5:
"""
    NER model for training and evaluation --- T5, built on simple-transformers
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.describe = " use simple-transformers--t5-model"
self.wandb_proj = 'mt5'
self.save_dir = './' # save output_file
self.model_version = 'v0.0.0.0' # to save model or best model
        # version scheme a.b.c.d: a = raw-data batch, b = model/method batch, c = data-processing batch, d = hyper-parameter tuning iteration
self.model_type = 't5'
        self.pretrained_model = 't5-base'  # pretrained model path (model_name)
self.use_cuda = True
self.cuda_device = 0
self.model_args = self.my_config()
def my_config(self):
return {
'train_batch_size': 8,
'max_seq_length': 256,
# multiprocess
'use_multiprocessing': False,
'use_multiprocessing_for_evaluation': False,
# base config
'reprocess_input_data': True,
'use_cached_eval_features': False,
'fp16': False,
'manual_seed': 234,
            'gradient_accumulation_steps': 1,  # effectively increases the batch size, trading time for memory
# save
'no_save': False,
'save_eval_checkpoints': False,
'save_model_every_epoch': False,
'save_optimizer_and_scheduler': True,
'save_steps': -1,
# eval
'evaluate_during_training': True,
'evaluate_during_training_verbose': True,
# normal
'no_cache': False,
'use_early_stopping': False,
'encoding': None,
'do_lower_case': False,
'dynamic_quantize': False,
'quantized_model': False,
'silent': False,
# save
'overwrite_output_dir': True,
'output_dir': self.save_dir + 'outputs/' + self.model_version + '/',
'cache_dir': self.save_dir + 'cache/' + self.model_version + '/',
'best_model_dir': self.save_dir + 'best_model/' + self.model_version + '/',
'tensorboard_dir': self.save_dir + 'runs/' + self.model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/',
# t5 args
'use_multiprocessed_decoding': False,
'num_beams': 1,
'length_penalty': 2.0,
'max_length': 20,
'num_return_sequences': 1,
'preprocess_inputs': True,
'repetition_penalty': 1.0,
'special_tokens_list': [],
'top_k': None,
'top_p': None,
}
def _deal_with_df(self, data, sliding_window=False, delimiter='|', up_sampling=False):
data = data[['prefix', 'input_text', 'target_text']]
data = data.astype('str')
if sliding_window:
from transformers import T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained(self.pretrained_model)
data['input_text'] = data['input_text'].apply(NerT5._split_text_with_sliding_window,
args=(self.model_args.get('max_seq_length'),
tokenizer, 0.8))
data = data.explode('input_text')
res = []
for i, t in zip(data['input_text'].tolist(), data['target_text'].tolist()):
if t != delimiter:
all_entities = list(set(t.split(delimiter)))
if '' in all_entities:
all_entities.remove('')
r = delimiter
if all_entities:
for e in all_entities:
if str(e) in str(i):
r = r + str(e) + delimiter
res.append(r)
else:
res.append(t)
data['target_text'] = res
if up_sampling:
pos_data = data[data['target_text'] != '|']
from sklearn.utils import resample
up_sampling_data = resample(pos_data, replace=True, n_samples=(len(data) - len(pos_data) - len(pos_data)))
data = pd.concat([data, up_sampling_data], ignore_index=True)
data = resample(data, replace=False)
data.dropna(inplace=True)
return data
def train(self, train_data: pd.DataFrame, eval_data: pd.DataFrame, sliding_window=False, up_sampling=False,
wandb_log=None):
# deal with dt
train_raw_size = train_data.shape[0]
eval_raw_size = eval_data.shape[0]
logger.info('processing data...')
train_data = self._deal_with_df(train_data, sliding_window=sliding_window, delimiter='|',
up_sampling=up_sampling)
eval_data = self._deal_with_df(eval_data, sliding_window=sliding_window, delimiter='|')
train_size = train_data.shape[0]
all_steps = train_size / self.model_args.get('train_batch_size')
self.model_args.update(
{
'logging_steps': int(max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'evaluate_during_training_steps': int(
max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train'],
}
}
)
model = T5Model(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.model_args)
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version},train_size---{train_raw_size}')
model.train_model(train_data=train_data, eval_data=eval_data)
logger.info('training finished!!!')
wandb.log({"eval_size": eval_raw_size, 'train_size': train_raw_size})
if wandb_log:
wandb.log(wandb_log)
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
def eval(self, eval_data: pd.DataFrame, check_in_input_text: bool = False, delimiter='|',
tokenizer=None, use_sliding_window=False, sliding_window=None, stride=0.8,
pos_neg_ratio=None, use_multi_gpus=None, self_metric=False, wandb_log=None):
# deal_with_dt
eval_data = self._deal_with_df(eval_data, sliding_window=False)
eval_size = eval_data.shape[0]
# wand_b
wandb.init(project=self.wandb_proj, config=self.model_args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
try:
start_time = time.time()
logger.info(f'start eval: model_version---{self.model_version},eval size---{eval_size}')
model = T5Model(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.model_args)
eval_res = NerT5._eval_entity_recognition(model, eval_data=eval_data, delimiter=delimiter,
check_in_input_text=check_in_input_text,
tokenizer=tokenizer, use_sliding_window=use_sliding_window,
sliding_window=sliding_window, stride=stride,
pos_neg_ratio=pos_neg_ratio, use_multi_gpus=use_multi_gpus,
self_metric=self_metric)
if wandb_log:
wandb.log(wandb_log)
wandb_log = {"eval_size": eval_size}
for k, v in eval_res.items():
wandb_log.update({k: v.iloc[2, -1]})
wandb.log(wandb_log)
logger.info('eval finished!!!')
end_time = time.time()
need_time = round((end_time - start_time) / eval_size, 5)
eval_time = round(need_time * eval_size, 4)
print(f'eval results: {eval_res}')
logger.info(f'eval time: {need_time} s * {eval_size} = {eval_time} s')
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
finally:
wandb.finish()
@staticmethod
def _eval_entity_recognition(model, eval_data: pd.DataFrame, check_in_input_text: bool, delimiter='|',
tokenizer=None, use_sliding_window=False, sliding_window=512, stride=0.8,
pos_neg_ratio=None, use_multi_gpus=None, self_metric=False):
"""eval entity recognition in mt5 model, version-v2 , reference: https://docs.qq.com/doc/DYXRYQU1YbkVvT3V2
Args:
model: a mt5 model
eval_data: a pd.Dataframe , must have columns ['prefix','input_text','target_text']
check_in_input_text: if the entities are in input_texts
delimiter: the delimiter in target_text to split different entities
            use_sliding_window: whether to split the input text into overlapping windows when predicting
sliding_window: truncating_size
stride: overlapping_size
use_multi_gpus:use_multi_gpus
pos_neg_ratio : the ratio of positive and negative sample importance
self_metric:self_metric
tokenizer: tokenizer to split sentence
Returns:
show report and res, {prefix:res_df},type:dict
"""
eval_data = eval_data[['prefix', 'input_text', 'target_text']]
eval_data = eval_data.astype('str')
prefixes = eval_data['prefix'].to_list()
input_texts = eval_data['input_text'].tolist()
target_texts = eval_data['target_text'].tolist()
revised_target_texts = NerT5._revise_target_texts(target_texts=target_texts,
input_texts=input_texts, delimiter=delimiter,
check_in_input_text=check_in_input_text)
pred_target_texts = NerT5.predict_entity_recognition(model, prefixes, input_texts, tokenizer=tokenizer,
use_sliding_window=use_sliding_window,
sliding_window=sliding_window, stride=stride,
delimiter=delimiter, use_multi_gpus=use_multi_gpus)
revised_pred_target_texts = NerT5._revise_target_texts(target_texts=pred_target_texts,
input_texts=input_texts, delimiter=delimiter,
check_in_input_text=check_in_input_text)
eval_data['true_target_text'] = revised_target_texts
eval_data['pred_target_text'] = revised_pred_target_texts
eval_res = {}
for prefix in set(prefixes):
prefix_df = eval_data[eval_data['prefix'] == prefix]
y_true = prefix_df['true_target_text'].tolist()
y_pred = prefix_df['pred_target_text'].tolist()
print(f'{prefix} report:')
res_df = entity_recognition_metrics(y_true, y_pred, pos_neg_ratio=pos_neg_ratio,
self_metric=self_metric)
eval_res[prefix] = res_df
print(f'sum report:')
res_df = entity_recognition_metrics(revised_target_texts, revised_pred_target_texts,
pos_neg_ratio=pos_neg_ratio, self_metric=self_metric)
eval_res['ner_t5_metric'] = res_df
return eval_res # {prefix:res_df},type:dict
@staticmethod
def predict_entity_recognition(model, prefixes: list, input_texts: list, use_sliding_window=False,
sliding_window=None, stride=0.8, tokenizer=None,
delimiter='|', use_multi_gpus=None) -> list:
"""predict entity recognition in mt5 model,
Args:
model: a mt5 model
prefixes: prefixes
input_texts: input_texts
use_sliding_window: if use_sliding_window
sliding_window: sliding_window,the max token length for the model input(max_sequence_length)
tokenizer: tokenizer
stride: stride,(1-stride)*sliding_window for overlapping
delimiter: the delimiter in target_text to split different entities,default: '|'
use_multi_gpus: use_multi_gpus
Returns:
pred_target_texts:list,every element in pred_target_texts corresponds a prefix and an input_text
"""
if not sliding_window:
sliding_window = model.args.max_seq_length
if len(input_texts) == 1:
use_multi_gpus = None
assert len(prefixes) == len(input_texts)
if use_sliding_window:
t_ids, t_prefixes, t_input_texts = NerT5._split_texts_with_sliding_window(input_texts, prefixes,
tokenizer=tokenizer,
sliding_window=sliding_window,
stride=stride)
to_predict_texts = [i + ': ' + j for i, j in zip(t_prefixes, t_input_texts)]
if not use_multi_gpus:
pred_target_texts = model.predict(to_predict_texts)
else:
pred_target_texts = model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
pred_target_texts = NerT5._combine_pred_target_texts_by_ids(pred_target_texts, t_ids, delimiter)
else:
to_predict_texts = [i + ': ' + j for i, j in zip(prefixes, input_texts)]
if not use_multi_gpus:
pred_target_texts = model.predict(to_predict_texts)
else:
pred_target_texts = model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
assert len(pred_target_texts) == len(input_texts)
return pred_target_texts # type:list[str]
@staticmethod
def _split_text_with_sliding_window(text: str, sliding_window=128, tokenizer=None, stride=0.8) -> list:
""" any sequence exceeding the max_seq_length will be split into several windows (sub-sequences),
each of length max_seq_length. The windows will typically overlap each other to a certain degree to
minimize any information loss that may be caused by hard cutoffs.
Args:
text: a str text
sliding_window: truncating_size:sliding window, max_seq_length
tokenizer: tokenizer
stride: The amount of overlap between the windows,The stride can be specified in terms of either a fraction
of the max_seq_length, or as an absolute number of tokens.
Returns:
truncated_input_text: the list of truncated_input_text
"""
        sliding_window = sliding_window - 8  # reserve room for special tokens such as </s>, <sep>, etc.
if not isinstance(text, str):
text = str(text)
if not tokenizer:
try:
from simpletransformers.t5 import T5Model
tokenizer = T5Model('mt5', 'google/mt5-base').tokenizer
except Exception:
print('no tokenizer....')
tokens = tokenizer.tokenize(text)
if len(tokens) <= sliding_window:
return [text]
else:
split_text = []
if stride < 1:
step_size = int(sliding_window * stride)
else:
step_size = int(stride)
steps = int(len(tokens) / step_size)
for i in range(0, steps + 1):
text_i_tokens = tokens[i * step_size:i * step_size + sliding_window]
if text_i_tokens:
text_i = ''.join(text_i_tokens).replace('▁', ' ').strip()
split_text.append(text_i)
if (len(split_text) > 1) and (
len(tokenizer.tokenize(split_text[-1])) < (sliding_window - step_size)):
split_text = split_text[0:-1]
return split_text
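    # Illustrative sketch of the windowing arithmetic above: with sliding_window=128 and
    # stride=0.8, the effective window is 128 - 8 = 120 tokens and the step size is
    # int(120 * 0.8) = 96 tokens, so consecutive windows overlap by 24 tokens. A ~300-token
    # text therefore yields windows over tokens [0, 120), [96, 216) and [192, 300); a
    # trailing window shorter than 120 - 96 = 24 tokens would be dropped. Exact counts
    # depend on the tokenizer.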
@staticmethod
def _split_texts_with_sliding_window(input_texts: list, prefixes: list, tokenizer=None,
sliding_window=512, stride=0.8):
""" for every input_text in input_texts, split it and record the split_ids for combining
Args:
input_texts: the list of many input_text
prefixes: the prefix list of the input_texts list
sliding_window: sliding_window,the max token length for the model input(max_sequence_length)
tokenizer: tokenizer
stride: stride,(1-stride)*sliding_window for overlapping
Returns:
split_ids, split_prefixes, split_input_texts
"""
assert len(input_texts) == len(prefixes) # every input_text corresponds a prefix
input_texts_ids = range(len(input_texts))
split_ids = []
split_prefixes = []
split_input_texts = []
if not tokenizer:
try:
from transformers.models.t5 import T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained("google/mt5-base")
except Exception:
print('no tokenizer....')
for i_t_d, p, i_t in zip(input_texts_ids, prefixes, input_texts):
split_input_text = NerT5._split_text_with_sliding_window(i_t, sliding_window, tokenizer, stride)
for t_i_t in split_input_text:
split_ids.append(i_t_d)
split_input_texts.append(t_i_t)
split_prefixes.append(p)
return split_ids, split_prefixes, split_input_texts # type:tuple[list[int],list[str],list[str]]
@staticmethod
def _combine_pred_target_texts_by_ids(pred_target_texts, split_ids, delimiter: str = '|') -> list:
"""combine truncated_predicted_target_texts split_ids
Args:
pred_target_texts: the result of predicting the truncated input_texts
split_ids: get the truncated_ids when truncating input_texts
delimiter: the delimiter in target_text to split different entities
Returns:
pred_target_texts: predicted target_texts
"""
ids_target_text_dict = dict()
for i, j in zip(split_ids, pred_target_texts):
if not ids_target_text_dict.get(i):
ids_target_text_dict[i] = delimiter + j + delimiter
else:
ids_target_text_dict[i] = ids_target_text_dict[i] + j + delimiter
pred_target_texts = [ids_target_text_dict[k] for k in sorted(ids_target_text_dict.keys())]
return pred_target_texts # type:list
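    # Illustrative sketch: window-level predictions are re-joined by the id of the text
    # they came from, separated by the delimiter.
    # >>> NerT5._combine_pred_target_texts_by_ids(['a', 'b', 'c'], [0, 0, 1], '|')
    # ['|a|b|', '|c|']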
@staticmethod
def _revise_target_texts(target_texts: list, input_texts: list, check_in_input_text: bool = False, delimiter='|'):
"""revise the target texts,
Args:
target_texts: the list of the target_texts
input_texts: the list of the input_texts
check_in_input_text: if check the entities in input_text
delimiter: the delimiter in target_text to split different entities
Returns:
revised_target_texts = list[set]
"""
revised_target_texts = [NerT5._revise_target_text(t_t, return_format='set', delimiter=delimiter) for
t_t in target_texts] # type:list[set,...]
if check_in_input_text:
revised_target_texts = NerT5._keep_entities_in_input_text(input_texts, revised_target_texts)
return revised_target_texts # type:list[set]
@staticmethod
def _revise_target_text(target_text: str, delimiter: str = '|', return_format='set'):
""" revise the target text
Args:
target_text: str, target_text
return_format: 'set' means:'every entity is an element in a set', 'str' means: different entities are split
by the delimiter
delimiter: the delimiter in target_text to split different entities
Returns:
revised_target_text : set or list
"""
assert isinstance(target_text, str)
target_text = target_text.split(delimiter)
target_text = set([' '.join(e.strip().split()) for e in target_text])
if '' in target_text:
target_text.remove('')
if return_format == 'set':
revised_target_text = target_text
elif return_format == 'list':
revised_target_text = list(target_text)
else: # return_format == 'str'
revised_target_text = '|'
if target_text != set():
for entity in list(target_text):
revised_target_text += (str(entity) + '|')
return revised_target_text
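    # Illustrative sketch: entities are split on the delimiter, whitespace-normalised
    # and de-duplicated.
    # >>> NerT5._revise_target_text('|gastric  cancer| NSCLC |', return_format='set')
    # {'gastric cancer', 'NSCLC'}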
@staticmethod
def _keep_entities_in_input_text(input_texts: list, target_texts: list):
"""for each sample, for every entity ,keep the entities that are in the input text,and remove other entities
Args:
input_texts: the list of many input_text,and every input text is a string
target_texts: the list of many target_text,and evert target text is a set
Returns:
revise_target_texts: list[str]
"""
revised_target_texts = []
for input_text, target_text in zip(input_texts, target_texts):
if target_text != set():
elements = list(target_text)
for e in elements:
if str(e) not in input_text:
target_text.remove(e) # type:set
revised_target_texts.append(target_text)
return revised_target_texts # type:list[set]
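    # Illustrative sketch: predicted entities that do not literally occur in the input
    # text are discarded.
    # >>> NerT5._keep_entities_in_input_text(['EGFR mutations in NSCLC'],
    # ...                                    [{'NSCLC', 'gastric cancer'}])
    # [{'NSCLC'}]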
if __name__ == '__main__':
from zyl_utils import get_best_cuda_device
class M(NerT5):
def __init__(self):
super(M, self).__init__()
self.wandb_proj = 'test'
self.save_dir = './'
self.model_type = 'mt5' # t5
self.use_cuda = True
self.cuda_device = get_best_cuda_device()
def train_sample(self):
train_file = './test.xlsx'
eval_file = './test.xlsx'
train_df = pd.read_excel(train_file) # type:pd.DataFrame
eval_df = pd.read_excel(eval_file) # type:pd.DataFrame
self.model_version = 'v0.0.0.0'
            self.pretrained_model = 'google/mt5-base'  # pretrained model path (model_name)
self.model_args = self.my_config()
self.model_args.update(
{
'num_train_epochs': 3,
'learning_rate': 3e-4,
'train_batch_size': 24, # 28
'gradient_accumulation_steps': 16,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.train(train_df, eval_df, sliding_window=True,
wandb_log={'train_file': train_file, 'eval_file': eval_file})
def eval_sample(self):
eval_file = './test.xlsx'
eval_data = pd.read_excel(eval_file)
self.model_version = 'erv0.0.0.0'
self.model_args = self.my_config()
self.model_args.update(
{
'eval_batch_size': 16,
# 'best_model_dir':'./'
}
)
self.eval(eval_data, check_in_input_text=False, delimiter='|',
tokenizer=None, use_sliding_window=False)
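    # Hedged usage sketch: ./test.xlsx is a placeholder file with the
    # prefix / input_text / target_text columns expected above, where target_text
    # lists the entities separated by '|'.
    # m = M()
    # m.train_sample()  # fine-tune mt5 with sliding-window preprocessing
    # m.eval_sample()   # evaluate the saved best model with the entity-recognition metric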
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/ner_t5.py
|
ner_t5.py
|
# encoding: utf-8
"""
@author: zyl
@file: utils.py
@time: 2021/11/29 15:18
@desc:
"""
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.ner import NERModel
class Utils:
def __init__(self):
pass
@staticmethod
def eval_decoration(eval_func):
# #############################################################
# examples: should set : self.wandb_proj , self.ver , self.args.hyper_args
# >>> @eval_decoration
# >>> def eval(eval_df,a,b):
# >>> eval_res = func... a,b
# >>> return eval_res
# ############################################################
def eval_method(self, eval_df, *args, **kwargs):
evel_size = self.model_args.get('eval_size')
# wand_b
wandb.init(project=self.wandb_proj, config=self.model_args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
try:
start_time = time.time()
logger.info(f'start eval: model_version---{self.model_version},eval size---{evel_size}')
eval_res = eval_func(self, eval_df, *args, **kwargs) # type:dict
logger.info('eval finished!!!')
end_time = time.time()
need_time = round((end_time - start_time) / evel_size, 5)
eval_time = round(need_time * evel_size, 4)
print(f'eval results: {eval_res}')
logger.info(f'eval time: {need_time} s * {evel_size} = {eval_time} s')
assert isinstance(eval_res, dict) == True
eval_res.update({"eval_length": evel_size})
wandb.log(eval_res)
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
eval_res = dict()
finally:
wandb.finish()
return eval_res
return eval_method
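    # Hedged usage sketch; the class below is a placeholder. The decorator assumes the
    # instance defines wandb_proj, model_version and model_args['eval_size'], as the
    # comment block above notes.
    #
    #     class MyEvaluator(SomeModelWrapper):
    #         @Utils.eval_decoration
    #         def eval(self, eval_df):
    #             ...                        # run the real evaluation here
    #             return {'f1_score': 0.9}   # must return a dict so it can be logged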
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/utils.py
|
utils.py
|
# encoding: utf-8
"""
@author: zyl
@file: ner_model.py
@time: 2021/11/25 13:59
@desc:
"""
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.ner import NERModel
class NerModel:
"""
ner model for train and eval
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.describe = " use simple-transformers--ner-model"
self.show_running_loss = False
self.wandb_proj = 'ner'
self.save_dir = '../'
self.model_version = 'v0.0.0.0' # to save model or best model
        # version scheme a.b.c.d: a = raw-data batch, b = model/method batch (e.g. mt5 vs classification),
        # c = data-processing batch (e.g. same input, output as text vs as index), d = hyper-parameter tuning iteration
self.model_type = 'roberta'
        self.pretrained_model = 'roberta-base'  # pretrained model path (model_name)
self.use_cuda = True
self.cuda_device = 0
self.labels = ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
self.model_args = self.my_config()
def my_config(self):
return {
'train_batch_size': 8,
'use_multiprocessing': False,
'use_multiprocessing_for_evaluation': False,
# multiprocess
# base config
'reprocess_input_data': True,
'use_cached_eval_features': False,
'fp16': False,
'manual_seed': 234,
            'gradient_accumulation_steps': 1,  # effectively increases the batch size, trading time for memory
# save
'no_save': False,
'save_eval_checkpoints': False,
'save_model_every_epoch': False,
'save_optimizer_and_scheduler': True,
'save_steps': -1,
# eval
'evaluate_during_training': True,
'evaluate_during_training_verbose': True,
'no_cache': False,
'use_early_stopping': False,
'encoding': None,
'do_lower_case': False,
'dynamic_quantize': False,
'quantized_model': False,
'silent': False,
'overwrite_output_dir': True,
'output_dir': self.save_dir + 'outputs/' + self.model_version + '/',
'cache_dir': self.save_dir + 'cache/' + self.model_version + '/',
'best_model_dir': self.save_dir + 'best_model/' + self.model_version + '/',
'tensorboard_dir': self.save_dir + 'runs/' + self.model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/',
}
@staticmethod
def deal_with_df(df):
df = df[["sentence_id", "words", "labels"]]
df = df.astype({'sentence_id': 'int', 'words': 'str', 'labels': 'str'})
return df
def train(self, train_data: pd.DataFrame, eval_data: pd.DataFrame):
# deal with dt
train_data = NerModel.deal_with_df(train_data)
eval_data = NerModel.deal_with_df(eval_data)
train_size = len(set(train_data['sentence_id'].tolist()))
eval_size = len(set(eval_data['sentence_id'].tolist()))
all_steps = train_size / self.model_args.get('train_batch_size')
self.model_args.update(
{
'train_size': train_size,
'eval_size': eval_size,
'logging_steps': int(max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'evaluate_during_training_steps': int(
max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train']
}
}
)
# get model
model = NERModel(model_type=self.model_type, model_name=self.pretrained_model, labels=self.labels,
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device)
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version}')
model.train_model(train_data=train_data, eval_data=eval_data)
logger.info('training finished!!!')
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
def train_example(self):
train_file = './test.xlsx'
eval_file = './test.xlsx'
train_data = pd.read_excel(train_file)
eval_data = pd.read_excel(eval_file)
self.save_dir = '../'
self.model_version = 'erv4.2.0.2'
self.model_type = 'bert'
        self.pretrained_model = 'bert-base-multilingual-cased'  # pretrained model path (model_name)
self.use_cuda = True
self.cuda_device = 0
self.labels = ["O", "B-DISEASE", "I-DISEASE"]
self.model_args = self.my_config()
self.model_args.update(
{
'train_file': train_file,
'eval_file': eval_file,
'num_train_epochs': 3,
'learning_rate': 1e-3,
'train_batch_size': 24, # 28
'gradient_accumulation_steps': 16,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.train(train_data, eval_data)
@staticmethod
def eval_decoration(eval_func):
# #############################################################
# examples: should set : self.wandb_proj , self.ver , self.args.hyper_args
# >>> @eval_decoration
# >>> def eval(eval_df,a,b):
# >>> eval_res = func... a,b
# >>> return eval_res
# ############################################################
def eval_method(self, eval_df, *args, **kwargs):
evel_size = self.model_args.get('eval_size')
# wand_b
wandb.init(project=self.wandb_proj, config=self.model_args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
try:
start_time = time.time()
logger.info(f'start eval: model_version---{self.model_version},eval size---{evel_size}')
eval_res = eval_func(self, eval_df, *args, **kwargs) # type:dict
logger.info('eval finished!!!')
end_time = time.time()
need_time = round((end_time - start_time) / evel_size, 5)
eval_time = round(need_time * evel_size, 4)
print(f'eval results: {eval_res}')
logger.info(f'eval time: {need_time} s * {evel_size} = {eval_time} s')
assert isinstance(eval_res, dict) == True
eval_res.update({"eval_length": evel_size})
wandb.log(eval_res)
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
eval_res = dict()
finally:
wandb.finish()
return eval_res
return eval_method
@staticmethod
def get_entity(pred_list, label='DISEASE'):
if not label:
label = ''
entities = []
e = ''
is_entity = 0
for index, p in enumerate(pred_list):
            if p == 'O':  # the outside tag is the letter O, not the digit zero
if is_entity == 1:
entities.append(e)
is_entity = 0
elif p.startswith('B-' + label):
if is_entity == 1:
if e:
entities.append(e)
e = '-' + str(index)
is_entity = 1
elif p.startswith('I-' + label):
e = e + ('-' + str(index))
if is_entity == 1:
entities.append(e)
return entities
    def eval(self, eval_df: pd.DataFrame, use_t5_metric=False):
eval_data = NerModel.deal_with_df(eval_df)
eval_size = len(set(eval_df['sentence_id'].tolist()))
self.model_args.update(
{
'eval_size': eval_size,
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'eval']
}
}
)
model = NERModel(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device)
result, model_outputs, preds_list = model.eval_model(eval_data)
        if use_t5_metric:
            labels = eval_data.groupby(by=['sentence_id'], sort=False)
labels = labels.apply(lambda x: x['labels'].tolist())
preds_list = [set(NerModel.get_entity(p)) for p in preds_list]
labels = [set(NerModel.get_entity(l)) for l in labels]
from zyl_utils.model_utils.ner_utils import NERUtils
NERUtils.entity_recognition_v2(labels,preds_list)
print('1')
# # wandb updata
# wandb.init(
# project=self.wandb_proj,
# config = self.model_args,
# name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
# tags=[self.model_version, 'eval']
# )
# wandb.log({"f1_score": result.get('f1_score')})
def eval_sample(self):
eval_file = './test.xlsx'
eval_data = pd.read_excel(eval_file)
self.save_dir = '../'
self.model_version = 'erv4.2.0.2'
self.model_type = 'bert'
self.use_cuda = True
self.cuda_device = 1
self.model_args = self.my_config()
self.model_args.update(
{
'eval_file': eval_file,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.eval(eval_data)
if __name__ == '__main__':
s = ['O', 'O', 'O', 'B-DISEASE', 'I-DISEASE', 'O', 'B-DISEASE', 'B-DISEASE', 'B-DISEASE', 'I-DISEASE',
'I-DISEASE', 'O', 'B-DISEASE', 'O', 'I-DISEASE', 'I-DISEASE', 'B-DISEASE', 'I-DISEASE']
print(NerModel.get_entity(s))
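    # Since get_entity treats 'O' as the outside tag, the demo above should print roughly:
    # ['-3-4', '-6', '-7', '-8-9-10', '-12', '-16-17']
    # (the 'I-DISEASE' tags at positions 14-15 extend a buffer that was already flushed
    # as '-12', so they never surface as a separate span).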
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/ner_model.py
|
ner_model.py
|
# encoding: utf-8
'''
@author: zyl
@file: my_model.py
@time: 2021/11/11 10:56
@desc:
'''
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.classification import ClassificationModel, ClassificationArgs, DDPClassificationModel
from simpletransformers.t5 import T5Args
from zyl_utils.model_utils.models.my_T5model import MyT5, MyDDPT5
class MyModel:
"""
my model for train and eval
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.wandb_proj = 'test'
self.model_version = 'test' # to save model or best model
        # version scheme a.b.c.d: a = raw-data batch, b = model/method batch (e.g. mt5 vs classification),
        # c = data batch for the model (e.g. same input, output as text vs as index), d = hyper-parameter tuning iteration
self.use_model = 'classification' # mt5 /classification
self.model_type = 'bert'
        self.pretrained_model = './best/v1.1.1.1/'  # pretrained model path
self.use_cuda = True
self.cuda_device = 0
self.num_labels = 2
self.args = MyModel.set_model_parameter(model_version=self.model_version,
args=self._set_args(), save_dir='../')
def _set_args(self):
if self.use_model == 't5' or self.use_model == 'mt5':
return T5Args()
else:
return ClassificationArgs()
@staticmethod
def set_model_parameter(model_version='test', args=ClassificationArgs(), save_dir='./'):
# multiprocess
args.use_multiprocessing = False
args.use_multiprocessing_for_evaluation = False
# base config
args.reprocess_input_data = True
args.use_cached_eval_features = False
args.fp16 = False
args.manual_seed = 234
        args.gradient_accumulation_steps = 2  # effectively increases the batch size, trading time for memory
# save
args.no_save = False
args.save_eval_checkpoints = False
args.save_model_every_epoch = False
args.save_optimizer_and_scheduler = True
args.save_steps = -1
# eval
args.evaluate_during_training = True
args.evaluate_during_training_verbose = True
args.no_cache = False
args.use_early_stopping = False
args.encoding = None
args.do_lower_case = False
args.dynamic_quantize = False
args.quantized_model = False
args.silent = False
args.overwrite_output_dir = True
args.output_dir = save_dir + 'outputs/' + model_version + '/'
args.cache_dir = save_dir + 'cache/' + model_version + '/'
args.best_model_dir = save_dir + 'best_model/' + model_version + '/'
args.tensorboard_dir = save_dir + 'runs/' + model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/'
return args
def get_train_model(self):
if self.args.n_gpu <= 1:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyT5(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args)
else:
return ClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyDDPT5(model_type=self.model_type, model_name=self.pretrained_model, use_cuda=True,
cuda_device=-1, args=self.args)
elif self.use_model == 'classification':
return ClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
return DDPClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=True, args=self.args, num_labels=self.num_labels)
@staticmethod
def deal_with_df(df, use_model='cls'):
if use_model == 't5' or use_model == 'mt5':
df = df[['prefix', 'input_text', 'target_text']]
df = df.astype('str')
elif use_model == 'sentence_pair':
df = df[['text_a', 'text_b', 'labels']]
df = df.astype({'text_a': 'str', 'text_b': 'str', 'labels': 'int'})
else:
df = df.astype({'text': 'str', 'labels': 'int'})
df = df[['text', 'labels']]
return df
def train(self, train_df: pd.DataFrame, eval_df: pd.DataFrame, if_send_message=False):
# deal with dt
train_df = MyModel.deal_with_df(train_df, use_model=self.use_model)
eval_df = MyModel.deal_with_df(eval_df, use_model=self.use_model)
# config some parameters
train_size = train_df.shape[0]
self.args.update_from_dict({'train_length': train_size})
all_steps = train_size / self.args.train_batch_size
self.args.logging_steps = int(max(all_steps / 10 / self.args.gradient_accumulation_steps, 1))
self.args.evaluate_during_training_steps = int(
max(all_steps / 10 / self.args.gradient_accumulation_steps, 1))
self.args.wandb_project = self.wandb_proj
self.args.wandb_kwargs = {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train']}
# get model
model = self.get_train_model()
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version},train length---{train_size}')
if self.use_model == 't5' or self.use_model == 'mt5':
model.train_model(train_data=train_df, eval_data=eval_df)
else:
model.train_model(train_df=train_df, eval_df=eval_df)
logger.info('training finished!!!')
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
if if_send_message:
print(f'train failed!!! ERROR:{error}')
# ModelUtils.send_to_me(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
def get_predict_model(self):
if self.args.n_gpu <= 1:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyT5(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args)
else:
return ClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyDDPT5(model_type=self.model_type, model_name=self.args.best_model_dir, use_cuda=True,
cuda_device=-1, args=self.args)
elif self.use_model == 'sentence_pair':
return ClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
return DDPClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=True, args=self.args, num_labels=self.num_labels)
@staticmethod
def eval_decoration(eval_func):
# #############################################################
# examples: should set : self.wandb_proj , self.ver , self.args.hyper_args
# >>> @eval_decoration
# >>> def eval(eval_df,a,b):
# >>> eval_res = func... a,b
# >>> return eval_res
# ############################################################
def eval_method(self, eval_df, *args, **kwargs):
eval_length = eval_df.shape[0]
# wand_b
wandb.init(project=self.wandb_proj, config=self.args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
try:
start_time = time.time()
logger.info(f'start eval: model_version---{self.model_version},eval length---{eval_length}')
eval_res = eval_func(self, eval_df, *args, **kwargs) # type:dict
logger.info('eval finished!!!')
end_time = time.time()
need_time = round((end_time - start_time) / eval_length, 5)
eval_time = round(need_time * eval_length, 4)
print(f'eval results: {eval_res}')
logger.info(f'eval time: {need_time} s * {eval_length} = {eval_time} s')
assert isinstance(eval_res, dict) == True
eval_res.update({"eval_length": eval_length})
wandb.log(eval_res)
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
eval_res = dict()
finally:
wandb.finish()
return eval_res
return eval_method
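    # Hedged usage sketch: train_df / eval_df are placeholder DataFrames with the columns
    # expected by deal_with_df for the chosen use_model ('text'/'labels' for classification,
    # 'prefix'/'input_text'/'target_text' for t5/mt5).
    # m = MyModel()
    # m.use_model = 'classification'
    # m.train(train_df, eval_df)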
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/my_model.py
|
my_model.py
|
# encoding: utf-8
"""
@author: zyl
@file: re_ranker_cross_encoder.py
@time: 2021/12/16 9:46
@desc:
methods for selecting the candidate set (re-ranking with a cross-encoder)
data format: {'mention': str, 'entries': list}
"""
import math
from dataclasses import dataclass, field
from typing import Dict
from sentence_transformers import InputExample
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import \
CESoftmaxAccuracyEvaluator, CECorrelationEvaluator, CEBinaryClassificationEvaluator
from simpletransformers.config.model_args import ModelArgs
from torch.utils.data import DataLoader
from zyl_utils import get_best_cuda_device
MODEL_TYPE = [
    'two_classification',  # outputs 0 or 1
    'sts',  # semantic textual similarity: a continuous, unordered score in 0-1
    'nli'  # natural language inference: the ordered relation between the two sentences, output 0, 1 or 2
]
@dataclass
class ReRankerCrossEncoderArgs(ModelArgs):
"""
Model args for a ReRankerCrossEncoder
    num_labels: Number of labels of the classifier. If 1, the CrossEncoder is a regression model that outputs a
        continuous score 0...1. If > 1, it outputs several scores that can be soft-maxed to get probability
        scores for the different classes.
"""
cuda_device: str = get_best_cuda_device(gpu_num=1)
train_batch_size: int = 16
max_seq_length: int = 128
    tokenizer_args: Dict = field(default_factory=dict)  # an empty dict per instance; a bare `dict` class is not a usable default
    default_activation_function = None
    num_labels: int = 1
class ReRankerCrossEncoderModel:
def __init__(self, model_type='two_classification',
model_name="sentence-transformers/distiluse-base-multilingual-cased-v1", args=None):
"""
Args:
            model_type: 'two_classification' (outputs 0 or 1), 'sts' (semantic similarity: a continuous,
                unordered score in 0-1) or 'nli' (natural language inference: ordered relation, output 0, 1 or 2)
model_name: "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
args: dict
"""
self.args = self._load_model_args(model_name)
self.args.model_type = model_type
self.args.model_name = model_name
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, ReRankerCrossEncoderArgs):
self.args = args
if self.args.model_type == 'sts':
self.args.num_labels = 1
elif self.args.model_type == 'two_classification':
self.args.num_labels = 1
else:
self.args.num_labels = 3
# loss_fct = nn.BCEWithLogitsLoss() if self.config.num_labels == 1 else nn.CrossEntropyLoss()
        # num_labels: int = 1  # number of soft-maxed classes; the default 1 means a single continuous score
self.model = self.get_model()
def get_model(self):
return CrossEncoder(model_name=self.args.model_name, num_labels=self.args.num_labels,
max_length=self.args.max_seq_length, device=f'cuda:{self.args.cuda_device}',
tokenizer_args=self.args.tokenizer_args,
default_activation_function=self.args.default_activation_function)
def _load_model_args(self, input_dir):
args = ReRankerCrossEncoderArgs()
args.load(input_dir)
return args
def train(self, train_dt, eval_dt):
"""
loss_fct = nn.BCEWithLogitsLoss() if self.config.num_labels == 1 else nn.CrossEntropyLoss()
Args:
train_dt: df,['mention','entries'],'mention' is string text,'entries' is a list of entries.
eval_dt:
Returns:
"""
self.model = self.get_model()
train_samples = self.get_samples(train_dt)
print(f'train_sample_length:{len(train_samples)}')
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=self.args.train_batch_size)
eval_samples = self.get_samples(eval_dt)
evaluator = self.get_evaluator(eval_samples)
warmup_steps = math.ceil(
len(train_dataloader) * self.args.num_train_epochs * 0.1) # 10% of train data for warm-up
evaluation_steps = math.ceil(len(train_dataloader) * 0.1)
self.model.fit(train_dataloader=train_dataloader, evaluator=evaluator, epochs=self.args.num_train_epochs,
warmup_steps=warmup_steps, evaluation_steps=evaluation_steps, save_best_model=True,
output_path=self.args.best_model_dir, use_amp=False, callback=self.call_back,
show_progress_bar=True, optimizer_params={'lr': self.args.learning_rate})
def get_samples(self, df):
samples = []
if self.args.model_type == 'nli':
for _, sub_df in df.iterrows():
candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
if sub_df['entries']:
entries_length = len(sub_df['entries'])
if entries_length > 1:
                        label_id = 1  # entailment
else:
                        label_id = 2  # equivalence
for e in sub_df['entries']:
samples.append(InputExample(texts=[sub_df['mention'], e], label=label_id))
if e in candidate_entries:
candidate_entries.remove(e)
for c_e in candidate_entries:
samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
elif self.args.model_type == 'sts':
for _, sub_df in df.iterrows():
candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
if sub_df['entries']:
entries_length = len(sub_df['entries'])
if 'label' in sub_df.index:
score = sub_df['label']
else:
score = round(1 / entries_length, 4)
for e in sub_df['entries']:
samples.append(InputExample(texts=[sub_df['mention'], e], label=score))
samples.append(InputExample(texts=[e, sub_df['mention']], label=score))
if e in candidate_entries:
candidate_entries.remove(e)
for c_e in candidate_entries:
samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
else:
for _, sub_df in df.iterrows():
candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
if sub_df['entries']:
for e in sub_df['entries']:
samples.append(InputExample(texts=[sub_df['mention'], e], label=1))
samples.append(InputExample(texts=[e, sub_df['mention']], label=1))
for c_e in candidate_entries:
samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
return samples
def get_candidade_entries(self, query):
candidate_entries = query
return candidate_entries # type:list
def get_evaluator(self, eval_samples):
if self.args.model_type == 'nli':
return CECorrelationEvaluator.from_input_examples(eval_samples, name='eval')
elif self.args.model_type == 'two_classification':
return CEBinaryClassificationEvaluator.from_input_examples(eval_samples, name='eval')
else:
return CESoftmaxAccuracyEvaluator.from_input_examples(eval_samples, name='eval')
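    # Hedged usage sketch: the DataFrame and model choice below are placeholders. Two
    # caveats in the current class: get_candidade_entries is a stub that simply echoes
    # the query, so a real candidate-retrieval step has to be plugged in, and train()
    # passes callback=self.call_back although no call_back method is defined here (one
    # exists only in the commented-out RerankerTrainer below), so a small callback
    # method would need to be added before training.
    # import pandas as pd
    # df = pd.DataFrame([{'mention': 'gastric cancer', 'entries': ['Stomach Neoplasms']}])
    # m = ReRankerCrossEncoderModel(model_type='two_classification')
    # m.train(df, df)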
# class RerankerTrainer:
# def __init__(self):
# self.model_path = "distiluse-base-multilingual-cased-v1"
# self.dimensions = 512
# self.cuda_device = get_best_cuda_device(gpu_num=1)
# self.max_seqence_length = 128
# self.use_st_model = True
# self.train_batch_size = 16
# self.epoch = 5
# self.learning_rate = 1e-5
# self.all_scores = []
# self.best_score = 0
# self.label2int = {"contradiction": 0, "entailment": 1, "neutral": 1}
# self.train_num_labels = len(set(self.label2int.values()))
# pass
#
# def train(self, train_df, dev_df, save_model="./best_model/test/", loss_func='SoftmaxLoss',
# evaluator_func='MyEvaluator2', top_k=30):
#
# self.save_model = save_model
# model = self.get_model()
#
# train_dataloader, train_loss = self.get_train_objectives(train_df, model, loss_func=loss_func,
# top_k=top_k)
#
# evaluator = self.get_evaluator(dev_df, evaluator_func=evaluator_func)
#
# warmup_steps = math.ceil(len(train_dataloader) * self.epoch * 0.1) # 10% of train data for warm-up
# evaluation_steps = math.ceil(len(train_dataloader) * 0.1)
#
# print('start train...')
# # Which loss function to use for training. If None, will use nn.BCEWithLogitsLoss() if self.config.num_labels == 1 else nn.CrossEntropyLoss()
# model.fit(train_dataloader=train_dataloader, epochs=self.epoch, warmup_steps=warmup_steps,
# evaluator=evaluator, save_best_model=True,
# output_path=save_model,
# evaluation_steps=evaluation_steps,
# callback=self.call_back,
# loss_fct=train_loss,
# optimizer_params={'lr': self.learning_rate})
#
# df = pd.DataFrame(self.all_scores)
# df.to_excel(save_model + 'my_score.xlsx')
# RerankerTrainer.save_parameters(self, save_model=f'{save_model}parameters.json')
#
# def get_retrieval_model(self):
# from sentence_transformers import SentenceTransformer
# model = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/v2/"
# model = SentenceTransformer(self.model_path, device=f'cuda:{self.cuda_device}')
# return model
#
# def get_evaluator(self, dev_df, evaluator_func='MyEvaluator2', collection='t1'):
# from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
# from sklearn.utils import resample
#
# self.evaluator_func = evaluator_func
# dev_df = resample(dev_df, replace=False)
#
# if evaluator_func == 'MyEvaluator':
# from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator
# from sentence_transformers import InputExample
# dev_df = dev_df[dev_df['label'] != 0.0] # type:pd.DataFrame
# dev_df = dev_df.groupby('entity').apply(lambda x: x['entry'].tolist())
# scores = dev_df.index.tolist()
# eval_examples = []
# dev_samples = []
# for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# eval_examples.append(InputExample(texts=[t, r]))
# evaluator = MyEvaluator.from_input_examples(eval_examples, name='sts-eval', collection=collection)
#
# elif evaluator_func == 'EmbeddingSimilarityEvaluator':
# sentences_1 = []
# sentences_2 = []
# scores = []
# dev_samples = []
# for _, sub_df in dev_df.iterrows():
# if sub_df['label'] != 0.0:
# sentences_1.append(sub_df['entity'])
# sentences_2.append(sub_df['entry'])
# scores.append(sub_df['label'])
#
# evaluator = EmbeddingSimilarityEvaluator(sentences_1, sentences_2, scores)
# else:
# from sentence_transformers import InputExample
# from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator2
# dev_samples = []
# for _, sub_df in dev_df.iterrows():
# if sub_df['label'] == 1:
# dev_samples.append(
# InputExample(texts=[sub_df['entity'], sub_df['entry']], label=1))
# elif sub_df['label'] > 0:
# dev_samples.append(
# InputExample(texts=[sub_df['entity'], sub_df['entry']], label=1))
# else:
# dev_samples.append(
# InputExample(texts=[sub_df['entity'], sub_df['entry']], label=0))
# evaluator = MyEvaluator2.from_input_examples(dev_samples, name='AllNLI-dev')
#
# print(f'dev_length:{len(dev_samples)}')
# self.dev_length = len(dev_samples)
# return evaluator
#
# @staticmethod
# def save_parameters(para_obj, save_model='./test.json'):
# """
# 存储一个对象的参数,对象参数可以是模型参数或超参数
# Args:
# para_obj: 要存储的参数的对象
# save_model: 保存路径
#
# Returns:
#
# """
# para_list = para_obj.__dir__()
# # save_para_list = ['best_score','device','max_seq_length','tokenizer']
# para = {}
# for p in para_list:
# if not p.startswith('_'):
# # if p in save_para_list:
# r = getattr(para_obj, p)
# if isinstance(r, int) or isinstance(r, str) or isinstance(r, float) or isinstance(r, list) \
# or isinstance(r, bool):
# para[p] = r
#
# with open(save_model, "w", encoding='utf-8') as f:
# # indent 超级好用,格式化保存字典,默认为None,小于0为零个空格
# # f.write(json.dumps(para,indent=4))
# json.dump(para, f, indent=4) # 传入文件描述符,和dumps一样的结果
#
# para.pop("all_scores")
# with open(log_file, "a", encoding='utf-8') as f:
# json.dump(para, f, indent=4)
# f.write('\n')
#
# def call_back(self, score, epoch, steps):
# self.all_scores.append({str(epoch) + '-' + str(steps): score})
# if score > self.best_score:
# self.best_score = score
# print(f'epoch:{epoch}: score:{score} ')
#
# class TrainerV1(RerankerTrainer):
# def __init__(self):
# super(TrainerV1, self).__init__()
#
# def run(self):
# self.train_1011()
#
# def train_1011(self):
# def deal_with_df(df, corpus):
# df['entry'] = df['entry'].astype('str')
# df['entity'] = df['entity'].astype('str')
# m = self.get_retrieval_model()
# qs = df['entity'].tolist()
# res = RetrievalEvaluator.query_result(model=m, corpus=corpus, queries=qs, top_k=10)
# li = []
# for i, r in zip(qs, res):
# for _ in r:
# li.append({'entity': i, 'entry': _, 'label': 0})
# df_ = pd.DataFrame(li)
# print(len(df))
# df = pd.concat([df, df_], ignore_index=True)
# print(len(df))
# df.drop_duplicates(subset=['entity', 'entry'], keep='first', inplace=True)
# print(len(df))
# return df
#
# self.train_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5"
# train_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
# 'train')
# corpus = list(set(train_df['entry'].tolist()))
# corpus = [str(c) for c in corpus]
# train_df = deal_with_df(train_df, corpus=corpus)
#
# self.dev_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5"
# dev_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
# 'eval')
# dev_df = deal_with_df(dev_df, corpus=corpus)
#
# self.model_path = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# # self.model_path = "./best_model/di_reranker_v2.0/"
#
# # self.model_path = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/em9/"
# # self.model_path = '/large_files/pretrained_pytorch/mt5_zh_en/'
#
# # self.model_path = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
# # self.model_path = "./best_model/v2/v2.2.1/"
#
# # self.model_path = "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
#
# self.cuda_device = get_best_cuda_device(gpu_num=1)
# self.dimensions = 768
# self.max_seqence_length = 64
# self.use_st_model = True
# self.train_batch_size = 32
# self.epoch = 3
# self.learning_rate = 1e-5
# self.train(train_df, dev_df, save_model="./best_model/di_reranker_2/",
# loss_func='CrossEntropyLoss', # CrossEntropyLoss,BCEWithLogitsLoss,nli
# evaluator_func="MyEvaluator2",
# top_k=10)
#
# # def train_cross_model(self):
# # self.train_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # train_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # 'train')
# # m = self.get_retrieval_model()
# # RetrievalEvaluator.query_result(model=model, corpus=corpus, queries=queries, top_k=1)
# #
# # self.dev_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # dev_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # 'eval')
# #
# # # self.train_file = "./data/v2/train_2.csv.gz"
# # # train_df = pd.read_csv(self.train_file, compression='gzip', sep='|')
# # # self.dev_file = "./data/v2/eval.csv.gz"
# # # dev_df = pd.read_csv(self.dev_file, compression='gzip', sep='|')
# #
# #
# # # self.model_path = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# # self.model_path = "./best_model/di_reranker_v2.0/"
# #
# # # self.model_path = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/em9/"
# # # self.model_path = '/large_files/pretrained_pytorch/mt5_zh_en/'
# #
# # # self.model_path = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
# # # self.model_path = "./best_model/v2/v2.2.1/"
# #
# # # self.model_path = "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
# #
# #
# #
# # self.dimensions = 768
# # self.max_seqence_length = 128
# # self.use_st_model = True
# # self.train_batch_size = 32
# # self.epoch = 3
# # self.learning_rate = 2e-5
# # self.train(train_df, dev_df, save_model="./best_model/v2/v2.2.2/",
# # loss_func='CrossEntropyLoss', # CrossEntropyLoss,BCEWithLogitsLoss,nli
# # evaluator_func="MyEvaluator2",
# # top_k=10)
def call_back(self, score, epoch, steps):
print(f'epoch:{epoch}----step:{steps}----score:{score} ')
if __name__ == '__main__':
import pandas as pd
class Test(ReRankerCrossEncoderModel):
def __init__(self):
super(Test, self).__init__()
def get_candidade_entries(self, query):
candidate_entries = []
# fuzzy search
# semantic search
return candidate_entries
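# A rough sketch of how the fuzzy and semantic steps above could be filled in.
# It assumes the optional `rapidfuzz` package, an `all_entries` list and a
# trained `semantic_searcher` bi-encoder wrapper -- none of these are provided
# by this module, they are placeholders for illustration:
#
# from rapidfuzz import process, fuzz
# fuzzy_hits = [m for m, _, _ in process.extract(query, all_entries, scorer=fuzz.WRatio, limit=10)]
# semantic_hits = semantic_searcher.query([query], return_format='result')[0]
# candidate_entries = list(dict.fromkeys(fuzzy_hits + semantic_hits)) # merge, deduplicate, keep order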
def test_train(self):
train_file = './test.xlsx'
eval_file = './test.xlsx'
train_df = pd.read_excel(train_file) # type:pd.DataFrame
eval_df = pd.read_excel(eval_file) # type:pd.DataFrame
self.model_version = 'v0.0.0.0'
self.args.update_from_dict(
{
'model_type' : 'two_classification',
'model_name' : "sentence-transformers/distiluse-base-multilingual-cased-v1",
'num_train_epochs': 3,
'learning_rate': 3e-4,
'train_batch_size': 24, # 28
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.train(train_df, eval_df)
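# For reference, a toy example of the DataFrame format that train()/get_samples()
# expect (the mentions and entries below are invented for illustration):
#
# train_df = pd.DataFrame([
# {'mention': 'NSCLC', 'entries': ['non-small cell lung cancer']},
# {'mention': 'liver ca.', 'entries': ['liver cancer', 'hepatocellular carcinoma']},
# {'mention': 'unmatched term', 'entries': []},
# ])
# eval_df = train_df.copy()
# Test().train(train_df, eval_df)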
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/reranker_cross_encoder.py
|
reranker_cross_encoder.py
|
# encoding: utf-8
"""
@author: zyl
@file: retrieval_bi_encoder.py
@time: 2021/12/16 9:45
@desc:
"""
import math
from dataclasses import dataclass, field
from typing import Dict
import pandas as pd
from sentence_transformers import datasets
from sentence_transformers import losses
from sentence_transformers import models
from simpletransformers.config.model_args import ModelArgs
from tqdm import tqdm
from zyl_utils import get_best_cuda_device
MODEL_TYPE = [
'sts', # semantic textual similarity between two texts
'nli', # sentence relations, many-to-many, only entailment and contradiction
'paraphrase', # paraphrase mining (finding sentences with similar meaning in a collection)
'duplicate_text', # duplicate text sets, many-to-one
'information retrieval' # information retrieval
]
# from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances
# from scipy.stats import pearsonr, spearmanr
# from sentence_transformers.readers import InputExample
from sentence_transformers import SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import BinaryClassificationEvaluator
@dataclass
class ReTrievalBiEncoderArgs(ModelArgs):
"""
Model args for a ReTrievalBiEncoderArgs
num_labels:Number of labels of the classifier. If 1, the CrossEncoder is a regression model that outputs a
continous score 0...1. If > 1, it output several scores that can be soft-maxed to get probability
scores for the different classes.
"""
cuda_device: str = get_best_cuda_device(gpu_num=1)
train_batch_size: int = 16
max_seq_length: int = 128
use_sbert_model: bool = True
tokenizer_args: Dict = field(default_factory=dict)
default_activation_function = None
num_labels: int = 1
output_path: str = './'
model_version: str = 'test'
loss_func: str = 'MultipleNegativesRankingLossHard'
evaluator_func: str = 'BinaryClassificationEvaluator'
show_encode_progress_bar: bool = True
learning_rate: float = 1e-4
query_chunk_size: int = 100
retrieval_top_k: int = 10 # maximum number of entries to retrieve from the corpus per query
retrieval_score: float = -1 # only corpus hits scoring above this threshold are returned
at_least_top_k: int = -1 # minimum number of entries to return per query
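# A small usage illustration (sketch only): `update_from_dict` is inherited from
# simpletransformers' ModelArgs, and the override values below are arbitrary examples.
#
# args = ReTrievalBiEncoderArgs()
# args.update_from_dict({
# 'train_batch_size': 32,
# 'retrieval_top_k': 50,
# 'at_least_top_k': 5,
# 'model_version': 'v0.1',
# })
# model = ReTrievalBiEncoderModel(args=args) # a plain dict can also be passed as `args`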
# class RecallEvaluator(SentenceEvaluator):
# """
# Evaluate a model based on the similarity of the embeddings by calculating the Spearman and Pearson rank correlation
# in comparison to the gold standard labels.
# The metrics are the cosine similarity as well as euclidean and Manhattan distance
# The returned score is the Spearman correlation with a specified metric.
#
# The results are written in a CSV. If a CSV already exists, then values are appended.
# """
#
# def __init__(self, to_predict_texts: List[str], labels: List[str], corpus, batch_size: int = 16,
# main_similarity: SimilarityFunction = None, name: str = '', show_progress_bar: bool = False,
# write_csv: bool = True, top_k=100, encode_batch_size=128):
# """
# Constructs an evaluator based for the dataset
#
# The labels need to indicate the similarity between the sentences.
#
# :param to_predict_texts: List with the first sentence in a pair
# :param labels: List with the second sentence in a pair
# :param scores: Similarity score between to_predict_texts[i] and labels[i]
# :param write_csv: Write results to a CSV file
# """
# self.corpus = corpus
# self.to_predict_texts = to_predict_texts
# self.labels = labels
# self.write_csv = write_csv
# self.top_k = top_k
# self.encode_batch_size = encode_batch_size
# assert len(self.to_predict_texts) == len(self.labels)
#
# self.main_similarity = main_similarity
# self.name = name
#
# self.batch_size = batch_size
# if show_progress_bar is None:
# show_progress_bar = (
# logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG)
# self.show_progress_bar = show_progress_bar
#
# self.csv_file = "similarity_evaluation" + ("_" + name if name else '') + "_results.csv"
# self.csv_headers = ["epoch", "steps", "score"]
#
# @classmethod
# def from_input_examples(cls, examples: List[InputExample], **kwargs):
# to_predict_texts = []
# labels = []
#
# for example in examples:
# to_predict_texts.append(example.texts[0])
# labels.append(example.texts[1])
# return cls(to_predict_texts, labels, **kwargs)
#
# @staticmethod
# def caculate_recall(y_true, y_pred):
# recall = 0
# for t, p in zip(y_true, y_pred):
# if len(t) == 0:
# recall += 1
# else:
# recall += (len(set(t) & set(p)) / len(t))
# return recall / len(y_true)
#
# def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1):
# res = RetrievalEvaluator.query_result(model, queries=self.to_predict_texts, corpus=self.corpus,
# corpus_embeddings=None, top_k=self.top_k, return_format='result')
# y_true = [set(i) for i in self.labels]
#
# res_1 = [r[0:1] for r in res]
#
# res_10 = [r[0:10] for r in res]
# res_50 = [r[0:50] for r in res]
# res_100 = [r[0:100] for r in res]
#
# recall_1 = RecallEvaluator.caculate_recall(y_true, res_1)
# recall_10 = RecallEvaluator.caculate_recall(y_true, res_10)
# recall_50 = RecallEvaluator.caculate_recall(y_true, res_50)
# recall_100 = RecallEvaluator.caculate_recall(y_true, res_100)
# print(f'\nrecall@1 {recall_1}\n'
# f'recall@10 {recall_10}\n'
# f'recall@50 {recall_50}\n'
# f'recall@100 {recall_100}\n')
# return recall_10
import random
class ReTrievalBiEncoderModel:
def __init__(self, model_name="sentence-transformers/distiluse-base-multilingual-cased-v1", args=None):
"""
Args:
model_name: a sentence-transformers model name or path,
e.g. "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
args: a dict of arg overrides or a ReTrievalBiEncoderArgs instance
"""
self.score_function = util.dot_score
self.args = self._load_model_args(model_name)
self.args.model_name = model_name
self.corpus_embeddings = None
self.mention_corpus = self.get_mention_corpus()
self.entries_corpus = self.get_entries_corpus()
self.corpus_dict = self.get_corpus_dict()
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, ReTrievalBiEncoderArgs):
self.args = args
self.model = None
def _load_model_args(self, input_dir):
args = ReTrievalBiEncoderArgs()
args.load(input_dir)
return args
def train(self, train_dt, eval_dt):
"""
Args:
train_dt: df with columns ['mention', 'entries']; 'mention' is a string text and 'entries' is a list of gold entries.
eval_dt: df with the same format as train_dt, used for evaluation during training.
Returns:
"""
self.model = self.get_model()
self.args.best_model_dir = self.args.output_dir + 'best_model/' + self.args.model_version + '/'
train_objectives = self.get_train_objects(train_dt) # type:list
evaluator = self.get_evaluator(eval_dt)
warmup_steps = math.ceil(
len(train_objectives[0]) * self.args.num_train_epochs * 0.1) # 10% of train data for warm-up
evaluation_steps = math.ceil(len(train_objectives[0]) * 0.1)
self.model.fit(train_objectives=train_objectives, evaluator=evaluator, epochs=self.args.num_train_epochs,
warmup_steps=warmup_steps, evaluation_steps=evaluation_steps, save_best_model=True,
output_path=self.args.best_model_dir, use_amp=False, callback=self.call_back,
show_progress_bar=True, optimizer_params={'lr': self.args.learning_rate})
def get_model(self):
if self.args.use_sbert_model:
# used for prediction, and for training an off-the-shelf sentence-transformers model
model = SentenceTransformer(self.args.model_name, device=f'cuda:{str(self.args.cuda_device)}')
else:
# used when the model structure needs to be customized for training (e.g. pooling/output layers); still yields a SentenceTransformer model
# other Transformer kwargs: max_seq_length, model_args, cache_dir, tokenizer_args, do_lower_case, tokenizer_name_or_path
word_embedding_model = models.Transformer(self.args.model_name, max_seq_length=self.args.max_seq_length, )
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode='mean')
model = SentenceTransformer(modules=[word_embedding_model, pooling_model],
device=f'cuda:{str(self.args.cuda_device)}')
# dense_layer = models.Dense(in_features=pooling_model.get_sentence_embedding_dimension(),
# out_features=self.output_dimension, activation_function=nn.Tanh())
# normalize_layer = models.Normalize()
# model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dense_layer, normalize_layer],
# device=f'cuda:{str(self.cuda_device)}')
# from sentence_transformers.models.T5 import T5
# word_embedding_model = T5(self.model_path,max_seq_length=self.max_seqence_length)
# dense_model = models.Dense(in_features=word_embedding_model.get_word_embedding_dimension(),
# out_features=word_embedding_model.get_word_embedding_dimension(),
# activation_function=nn.Tanh())
return model
def get_train_objects(self, df):
"""
Args:
df: input df with columns ['mention', 'entries']; 'mention' is a string text and 'entries' is a list of gold entries.
Returns: a list of (train_dataloader, train_loss) tuples
"""
if self.args.loss_func == 'MultipleNegativesRankingLossHard':
df = df[df['entries'].apply(len).gt(0)] # drop rows with an empty entries list
train_samples = []
for _, sub_df in tqdm(df.iterrows()):
contradiction_entries = self.get_candidate_entries(sub_df['mention'])
contradiction_entries = [c for c in contradiction_entries if c not in sub_df['entries']]
for e in sub_df['entries']:
train_samples.append(
InputExample(texts=[sub_df['mention'], e, random.choice(contradiction_entries)]))
train_samples.append(
InputExample(texts=[e, sub_df['mention'], random.choice(contradiction_entries)]))
train_dataloader = datasets.NoDuplicatesDataLoader(train_samples, batch_size=self.args.train_batch_size)
train_loss = losses.MultipleNegativesRankingLoss(model=self.model, scale=20.0,
similarity_fct=util.dot_score)
train_obj = [(train_dataloader, train_loss)]
elif self.args.loss_func == 'MultipleNegativesRankingLoss':
# note: this branch expects the legacy ['entity', 'entry'] column format
df = df[df['entry'].apply(len).gt(0)] # drop rows with an empty entry list
df = df.explode('entry')
train_samples = []
for _, sub_df in tqdm(df.iterrows()):
train_samples.append(InputExample(texts=[sub_df['entry'], sub_df['entity']]))
print(len(train_samples))
# Special data loader that avoid duplicates within a batch
train_dataloader = datasets.NoDuplicatesDataLoader(train_samples, batch_size=self.args.train_batch_size)
train_loss = losses.MultipleNegativesRankingLoss(model=self.model, scale=20.0,
similarity_fct=util.dot_score)
train_obj = [(train_dataloader, train_loss)]
else:
# default branch: currently identical to 'MultipleNegativesRankingLoss' (legacy ['entity', 'entry'] format)
df = df[df['entry'].apply(len).gt(0)] # drop rows with an empty entry list
df = df.explode('entry')
train_samples = []
for _, sub_df in tqdm(df.iterrows()):
train_samples.append(InputExample(texts=[sub_df['entry'], sub_df['entity']]))
print(len(train_samples))
# Special data loader that avoid duplicates within a batch
train_dataloader = datasets.NoDuplicatesDataLoader(train_samples, batch_size=self.args.train_batch_size)
train_loss = losses.MultipleNegativesRankingLoss(model=self.model, scale=20.0,
similarity_fct=util.dot_score)
train_obj = [(train_dataloader, train_loss)]
return train_obj
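# For reference, MultipleNegativesRankingLoss consumes (anchor, positive[, hard negative])
# InputExamples; every other positive in the batch additionally serves as an in-batch negative.
# A made-up triplet for illustration:
#
# InputExample(texts=['liver ca.', # anchor: the raw mention
# 'hepatocellular carcinoma', # positive: a gold entry
# 'pancreatic carcinoma']) # hard negative from get_candidate_entries()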
def get_mention_corpus(self):
# corpus of entity mentions (not dictionary entries) used at evaluation time: all mentions
mention_corpus = []
return mention_corpus
def get_entries_corpus(self):
# corpus of all dictionary entries
entries_corpus = []
return entries_corpus
def get_corpus_dict(self):
# mapping from each corpus entity to its dictionary entry, used at evaluation time
# evaluation uses the dictionary built from the training set; the serving interface uses the dictionary built from all data
corpus_dict = {'entity': 'entry'}
return corpus_dict
def get_candidate_entries(self, text):
# for a given text, retrieve the most similar entries from the full dictionary
candidate_entries = []
return candidate_entries
def call_back(self, score, epoch, steps):
print(f'epoch:{epoch}: score:{score}, steps:{steps} ')
def query(self, queries, return_format='result'):
if not self.model:
self.model = self.get_model()
# search the corpus for the most similar items
if not self.mention_corpus:
self.mention_corpus = self.get_mention_corpus()
if not self.corpus_embeddings:
self.corpus_embeddings = self.model.encode(self.mention_corpus, self.args.eval_batch_size,
self.args.show_encode_progress_bar,
'sentence_embedding',
True, True, f'cuda:{self.args.cuda_device}', False)
self.corpus_embeddings = util.normalize_embeddings(self.corpus_embeddings)
queries_embeddings = self.model.encode(queries, self.args.eval_batch_size,
self.args.show_encode_progress_bar,
'sentence_embedding',
True, True, f'cuda:{self.args.cuda_device}', False)
queries_embeddings = util.normalize_embeddings(queries_embeddings)
hits = util.semantic_search(queries_embeddings, self.corpus_embeddings,
top_k=self.args.retrieval_top_k,
corpus_chunk_size=len(self.mention_corpus),
query_chunk_size=self.args.query_chunk_size,
score_function=self.score_function) # hits are sorted by score in descending order
if return_format == 'result':
res = []
for h in hits:
r = []
for i in h:
if i['score'] > self.args.retrieval_score:
r.append(self.mention_corpus[i['corpus_id']])
if len(r) < self.args.at_least_top_k:
# pad with the next-best hits so that at least `at_least_top_k` results are returned
for j in range(len(r), min(self.args.at_least_top_k, len(h))):
r.append(self.mention_corpus[h[j]['corpus_id']])
res.append(r)
return res
else:
return hits
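# The retrieval above boils down to normalized embeddings plus dot-product search.
# A stripped-down standalone sketch (model name, corpus and query are placeholders):
#
# from sentence_transformers import SentenceTransformer, util
# st_model = SentenceTransformer('sentence-transformers/distiluse-base-multilingual-cased-v1')
# corpus = ['liver cancer', 'lung cancer', 'gastric cancer']
# corpus_emb = util.normalize_embeddings(st_model.encode(corpus, convert_to_tensor=True))
# query_emb = util.normalize_embeddings(st_model.encode(['hepatic tumour'], convert_to_tensor=True))
# hits = util.semantic_search(query_emb, corpus_emb, top_k=2, score_function=util.dot_score)
# print([corpus[h['corpus_id']] for h in hits[0]])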
@staticmethod
def caculate_recall(y_true, y_pred):
recall = 0
for t, p in zip(y_true, y_pred):
if len(t) == 0:
recall += 1
else:
recall += (len(set(t) & set(p)) / len(t))
return recall / len(y_true)
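# Worked example for caculate_recall (values invented for illustration):
# y_true = [{'a', 'b'}, {'c'}, set()]
# y_pred = [['a', 'x'], ['c'], ['y']]
# per-query recall: 1/2, 1/1, 1 (an empty gold set counts as fully recalled)
# => caculate_recall(y_true, y_pred) == (0.5 + 1.0 + 1.0) / 3 ≈ 0.8333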
def eval(self, to_predicts, labels, batch_size=16, retrieval_top_k=100, at_least_top_k=10,
retrieval_score=0.1):
# query() reads its settings from self.args, so update them before retrieving
self.args.eval_batch_size = batch_size
self.args.retrieval_top_k = retrieval_top_k
self.args.at_least_top_k = at_least_top_k
self.args.retrieval_score = retrieval_score
pred = self.query(to_predicts, return_format='result')
res_1 = [r[0:1] for r in pred]
res_10 = [r[0:10] for r in pred]
res_50 = [r[0:50] for r in pred]
res_100 = [r[0:100] for r in pred]
recall_1 = ReTrievalBiEncoderModel.caculate_recall(labels, res_1)
recall_10 = ReTrievalBiEncoderModel.caculate_recall(labels, res_10)
recall_50 = ReTrievalBiEncoderModel.caculate_recall(labels, res_50)
recall_100 = ReTrievalBiEncoderModel.caculate_recall(labels, res_100)
print(f'\nrecall@1 {recall_1}\n'
f'recall@10 {recall_10}\n'
f'recall@50 {recall_50}\n'
f'recall@100 {recall_100}\n')
return recall_10
def get_evaluator(self, dev_df):
if self.args.evaluator_func == 'BinaryClassificationEvaluator':
eval_samples = []
for _, sub_df in tqdm(dev_df.iterrows()):
for e in sub_df['entries']:
eval_samples.append(InputExample(texts=[sub_df['mention'], e], label=1))
contradiction_entries = self.get_candidate_entries(sub_df['mention'])
contradiction_entries = [c for c in contradiction_entries if c not in sub_df['entries']]
for e in contradiction_entries:
eval_samples.append(InputExample(texts=[sub_df['mention'], e], label=0))
evaluator = BinaryClassificationEvaluator.from_input_examples(examples=eval_samples,
name='eval',
batch_size=self.args.eval_batch_size,
show_progress_bar=True)
else:
# default branch: currently identical to the BinaryClassificationEvaluator branch above
eval_samples = []
for _, sub_df in tqdm(dev_df.iterrows()):
for e in sub_df['entries']:
eval_samples.append(InputExample(texts=[sub_df['mention'], e], label=1))
contradiction_entries = self.get_candidate_entries(sub_df['mention'])
contradiction_entries = [c for c in contradiction_entries if c not in sub_df['entries']]
for e in contradiction_entries:
eval_samples.append(InputExample(texts=[sub_df['mention'], e], label=0))
evaluator = BinaryClassificationEvaluator.from_input_examples(examples=eval_samples,
name='eval',
batch_size=self.args.eval_batch_size,
show_progress_bar=True,
)
return evaluator
if __name__ == '__main__':
class Test(ReTrievalBiEncoderModel):
def __init__(self):
super(Test, self).__init__()
def get_mention_corpus(self):
# disease_dict = pd.read_excel("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_v2_1221.xlsx")
disease_dict = pd.read_hdf(
"/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
'train')
corpus = disease_dict['entity'].tolist()
return [str(c) for c in set(corpus)]
def get_entries_corpus(self):
disease_dict = pd.read_hdf(
"/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
'train')
corpus = disease_dict['entity'].tolist()
return [str(c) for c in set(corpus)]
def get_corpus_dict(self):
disease_dict = pd.read_hdf(
"/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
'train')
disease_dict = dict(zip(disease_dict['entity'].tolist(), disease_dict['entry'].tolist()))
return disease_dict
def get_candidate_entries(self, one_text):
# for a given text, retrieve the most similar entries from the full dictionary
candidate_entries = self.query(one_text, return_format='result')[0]
return candidate_entries
def test_train(self):
self.args.update_from_dict(
{
'model_name':"sentence-transformers/distiluse-base-multilingual-cased-v1",
'cuda_device': '1',
'train_batch_size': 16,
'max_seq_length': 128,
'loss_func': 'MultipleNegativesRankingLossHard',
'evaluator_func': 'BinaryClassificationEvaluator',
'learning_rate': 1e-4,
'output_path':'/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/',
'model_version': 'test',
}
)
train_dt = pd.read_hdf('/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5',
'train')
train_dt.rename(columns={'entity':'mention','entry':'entries'},inplace=True)
eval_dt = pd.read_hdf('/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5',
'eval')
eval_dt.rename(columns={'entity': 'mention', 'entry': 'entries'}, inplace=True)
self.train(train_dt, eval_dt)
def test_predict(self, to_predict):
self.args.model_name = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/di_retrieval_v2.1/"
self.args.update_from_dict(
{}
)
self.model = self.get_model()
res = self.query(to_predict, return_format='result')
print(res)
def test_eval(self):
self.args.model_name = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/di_retrieval_v2.1/"
self.model = self.get_model()
dev_df = pd.read_hdf(
"/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
'eval')
to_predict = dev_df['entity'].tolist()
labels = dev_df['entry'].tolist()
self.eval(to_predict, labels, batch_size=16, retrieval_top_k=100, at_least_top_k=10,
retrieval_score=-1)
# Test().test_predict(['肿瘤', 'cancer'])
Test().test_train()
# def get_samples(self, df):
# samples = []
# if self.args.loss=='MultipleNegativesRankingLoss':
#
# # entry , entity ,other_entry
#
#
# elif self.args.loss=='MultipleNegativesRankingLossHard':
#
# elif self.args.loss=='OnlineContrastiveLoss':
#
# elif self.args.loss ==
#
# if self.args.model_type == 'nli':
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# entries_length = len(sub_df['entries'])
# if entries_length > 1:
# label_id = 1 # 蕴含关系
# else:
# label_id = 2 # 等价关系
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=label_id))
# if e in candidate_entries:
# candidate_entries.remove(e)
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# elif self.args.model_type == 'sts':
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# entries_length = len(sub_df['entries'])
# if 'label' in sub_df.index:
# score = sub_df['label']
# else:
# score = round(1 / entries_length, 4)
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=score))
# samples.append(InputExample(texts=[e, sub_df['mention']], label=score))
# if e in candidate_entries:
# candidate_entries.remove(e)
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
# else:
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=1))
# samples.append(InputExample(texts=[e, sub_df['mention']], label=1))
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
# return samples
#
# def get_candidade_entries(self, query):
# candidate_entries = query
# return candidate_entries # type:list
#
# def get_evaluator(self, eval_samples):
# if self.args.model_type == 'nli':
# return CECorrelationEvaluator.from_input_examples(eval_samples, name='eval')
# elif self.args.model_type == 'two_classification':
# return CEBinaryClassificationEvaluator.from_input_examples(eval_samples, name='eval')
# else:
# return CESoftmaxAccuracyEvaluator.from_input_examples(eval_samples, name='eval')
#
# # class RerankerTrainer:
# # def __init__(self):
# # self.model_path = "distiluse-base-multilingual-cased-v1"
# # self.dimensions = 512
# # self.cuda_device = get_best_cuda_device(gpu_num=1)
# # self.max_seqence_length = 128
# # self.use_st_model = True
# # self.train_batch_size = 16
# # self.epoch = 5
# # self.learning_rate = 1e-5
# # self.all_scores = []
# # self.best_score = 0
# # self.label2int = {"contradiction": 0, "entailment": 1, "neutral": 1}
# # self.train_num_labels = len(set(self.label2int.values()))
# # pass
# #
# # def train(self, train_df, dev_df, save_model="./best_model/test/", loss_func='SoftmaxLoss',
# # evaluator_func='MyEvaluator2', top_k=30):
# #
# # self.save_model = save_model
# # model = self.get_model()
# #
# # train_dataloader, train_loss = self.get_train_objectives(train_df, model, loss_func=loss_func,
# # top_k=top_k)
# #
# # evaluator = self.get_evaluator(dev_df, evaluator_func=evaluator_func)
# #
# # warmup_steps = math.ceil(len(train_dataloader) * self.epoch * 0.1) # 10% of train data for warm-up
# # evaluation_steps = math.ceil(len(train_dataloader) * 0.1)
# #
# # print('start train...')
# # # Which loss function to use for training. If None, will use nn.BCEWithLogitsLoss() if self.config.num_labels == 1 else nn.CrossEntropyLoss()
# # model.fit(train_dataloader=train_dataloader, epochs=self.epoch, warmup_steps=warmup_steps,
# # evaluator=evaluator, save_best_model=True,
# # output_path=save_model,
# # evaluation_steps=evaluation_steps,
# # callback=self.call_back,
# # loss_fct=train_loss,
# # optimizer_params={'lr': self.learning_rate})
# #
# # df = pd.DataFrame(self.all_scores)
# # df.to_excel(save_model + 'my_score.xlsx')
# # RerankerTrainer.save_parameters(self, save_model=f'{save_model}parameters.json')
# #
# # def get_retrieval_model(self):
# # from sentence_transformers import SentenceTransformer
# # model = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/v2/"
# # model = SentenceTransformer(self.model_path, device=f'cuda:{self.cuda_device}')
# # return model
# #
# # def get_evaluator(self, dev_df, evaluator_func='MyEvaluator2', collection='t1'):
# # from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
# # from sklearn.utils import resample
# #
# # self.evaluator_func = evaluator_func
# # dev_df = resample(dev_df, replace=False)
# #
# # if evaluator_func == 'MyEvaluator':
# # from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator
# # from sentence_transformers import InputExample
# # dev_df = dev_df[dev_df['label'] != 0.0] # type:pd.DataFrame
# # dev_df = dev_df.groupby('entity').apply(lambda x: x['entry'].tolist())
# # scores = dev_df.index.tolist()
# # eval_examples = []
# # dev_samples = []
# # for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# # eval_examples.append(InputExample(texts=[t, r]))
# # evaluator = MyEvaluator.from_input_examples(eval_examples, name='sts-eval', collection=collection)
# #
# # elif evaluator_func == 'EmbeddingSimilarityEvaluator':
# # sentences_1 = []
# # sentences_2 = []
# # scores = []
# # dev_samples = []
# # for _, sub_df in dev_df.iterrows():
# # if sub_df['label'] != 0.0:
# # sentences_1.append(sub_df['entity'])
# # sentences_2.append(sub_df['entry'])
# # scores.append(sub_df['label'])
# #
# # evaluator = EmbeddingSimilarityEvaluator(sentences_1, sentences_2, scores)
# # else:
# # from sentence_transformers import InputExample
# # from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator2
# # dev_samples = []
# # for _, sub_df in dev_df.iterrows():
# # if sub_df['label'] == 1:
# # dev_samples.append(
# # InputExample(texts=[sub_df['entity'], sub_df['entry']], label=1))
# # elif sub_df['label'] > 0:
# # dev_samples.append(
# # InputExample(texts=[sub_df['entity'], sub_df['entry']], label=1))
# # else:
# # dev_samples.append(
# # InputExample(texts=[sub_df['entity'], sub_df['entry']], label=0))
# # evaluator = MyEvaluator2.from_input_examples(dev_samples, name='AllNLI-dev')
# #
# # print(f'dev_length:{len(dev_samples)}')
# # self.dev_length = len(dev_samples)
# # return evaluator
# #
# # @staticmethod
# # def save_parameters(para_obj, save_model='./test.json'):
# # """
# # 存储一个对象的参数,对象参数可以是模型参数或超参数
# # Args:
# # para_obj: 要存储的参数的对象
# # save_model: 保存路径
# #
# # Returns:
# #
# # """
# # para_list = para_obj.__dir__()
# # # save_para_list = ['best_score','device','max_seq_length','tokenizer']
# # para = {}
# # for p in para_list:
# # if not p.startswith('_'):
# # # if p in save_para_list:
# # r = getattr(para_obj, p)
# # if isinstance(r, int) or isinstance(r, str) or isinstance(r, float) or isinstance(r, list) \
# # or isinstance(r, bool):
# # para[p] = r
# #
# # with open(save_model, "w", encoding='utf-8') as f:
# # # indent 超级好用,格式化保存字典,默认为None,小于0为零个空格
# # # f.write(json.dumps(para,indent=4))
# # json.dump(para, f, indent=4) # 传入文件描述符,和dumps一样的结果
# #
# # para.pop("all_scores")
# # with open(log_file, "a", encoding='utf-8') as f:
# # json.dump(para, f, indent=4)
# # f.write('\n')
# #
# # def call_back(self, score, epoch, steps):
# # self.all_scores.append({str(epoch) + '-' + str(steps): score})
# # if score > self.best_score:
# # self.best_score = score
# # print(f'epoch:{epoch}: score:{score} ')
# #
# # class TrainerV1(RerankerTrainer):
# # def __init__(self):
# # super(TrainerV1, self).__init__()
# #
# # def run(self):
# # self.train_1011()
# #
# # def train_1011(self):
# # def deal_with_df(df, corpus):
# # df['entry'] = df['entry'].astype('str')
# # df['entity'] = df['entity'].astype('str')
# # m = self.get_retrieval_model()
# # qs = df['entity'].tolist()
# # res = RetrievalEvaluator.query_result(model=m, corpus=corpus, queries=qs, top_k=10)
# # li = []
# # for i, r in zip(qs, res):
# # for _ in r:
# # li.append({'entity': i, 'entry': _, 'label': 0})
# # df_ = pd.DataFrame(li)
# # print(len(df))
# # df = pd.concat([df, df_], ignore_index=True)
# # print(len(df))
# # df.drop_duplicates(subset=['entity', 'entry'], keep='first', inplace=True)
# # print(len(df))
# # return df
# #
# # self.train_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5"
# # train_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
# # 'train')
# # corpus = list(set(train_df['entry'].tolist()))
# # corpus = [str(c) for c in corpus]
# # train_df = deal_with_df(train_df, corpus=corpus)
# #
# # self.dev_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5"
# # dev_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
# # 'eval')
# # dev_df = deal_with_df(dev_df, corpus=corpus)
# #
# # self.model_path = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# # # self.model_path = "./best_model/di_reranker_v2.0/"
# #
# # # self.model_path = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/em9/"
# # # self.model_path = '/large_files/pretrained_pytorch/mt5_zh_en/'
# #
# # # self.model_path = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
# # # self.model_path = "./best_model/v2/v2.2.1/"
# #
# # # self.model_path = "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
# #
# # self.cuda_device = get_best_cuda_device(gpu_num=1)
# # self.dimensions = 768
# # self.max_seqence_length = 64
# # self.use_st_model = True
# # self.train_batch_size = 32
# # self.epoch = 3
# # self.learning_rate = 1e-5
# # self.train(train_df, dev_df, save_model="./best_model/di_reranker_2/",
# # loss_func='CrossEntropyLoss', # CrossEntropyLoss,BCEWithLogitsLoss,nli
# # evaluator_func="MyEvaluator2",
# # top_k=10)
# #
# # # def train_cross_model(self):
# # # self.train_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # # train_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # # 'train')
# # # m = self.get_retrieval_model()
# # # RetrievalEvaluator.query_result(model=model, corpus=corpus, queries=queries, top_k=1)
# # #
# # # self.dev_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # # dev_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # # 'eval')
# # #
# # # # self.train_file = "./data/v2/train_2.csv.gz"
# # # # train_df = pd.read_csv(self.train_file, compression='gzip', sep='|')
# # # # self.dev_file = "./data/v2/eval.csv.gz"
# # # # dev_df = pd.read_csv(self.dev_file, compression='gzip', sep='|')
# # #
# # #
# # # # self.model_path = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# # # self.model_path = "./best_model/di_reranker_v2.0/"
# # #
# # # # self.model_path = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/em9/"
# # # # self.model_path = '/large_files/pretrained_pytorch/mt5_zh_en/'
# # #
# # # # self.model_path = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
# # # # self.model_path = "./best_model/v2/v2.2.1/"
# # #
# # # # self.model_path = "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
# # #
# # #
# # #
# # # self.dimensions = 768
# # # self.max_seqence_length = 128
# # # self.use_st_model = True
# # # self.train_batch_size = 32
# # # self.epoch = 3
# # # self.learning_rate = 2e-5
# # # self.train(train_df, dev_df, save_model="./best_model/v2/v2.2.2/",
# # # loss_func='CrossEntropyLoss', # CrossEntropyLoss,BCEWithLogitsLoss,nli
# # # evaluator_func="MyEvaluator2",
# # # top_k=10)
#
# def call_back(self, score, epoch, steps):
# print(f'epoch:{epoch}----step:{steps}----score:{score} ')
# class RetrievalDT:
# def __init__(self):
# pass
#
# @staticmethod
# def convert_dt_for_MultipleNegativesRankingLoss(train_data: pd.DataFrame, neg_data=2, corpus=None,
# mode='sentence_pair'):
# train_data = v
# train_data.dropna(inplace=True)
# if mode == 'sentence_pair':
# return train_data
# else:
# new_train_data = []
# for _, sub_df in tqdm(train_data.iterrows()):
# count = 1
# while count <= neg_data / 2:
# neg_entity = random.choice(corpus)
# if train_data[
# (train_data['entry'] == neg_entity) & (train_data['entity'] == sub_df['entity'])].empty:
# new_train_data.append({
# 'entry': sub_df['entry'],
# 'pos_entity': sub_df['entity'],
# 'neg_entity': neg_entity,
# })
# new_train_data.append({
# 'entry': sub_df['entity'],
# 'pos_entity': sub_df['entry'],
# 'neg_entity': neg_entity,
# })
# count += 1
# return pd.DataFrame(new_train_data)
# class RetrievalBiEncoder:
# def __init__(self):
# self.pretrained_model = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# self.output_dimension = 768 # 输出向量维度
# self.cuda_device = get_best_cuda_device(gpu_num=1)
# self.max_seqence_length = 128 # 输入长度
# self.use_sbert_model = True # 是否改变模型结构
# self.train_batch_size = 16
# self.epoch = 5
# self.data_top_k = 3 # 负样本数
# self.learning_rate = 1e-5
#
# self.save_model = "./best_model/" # 模型保存路径
# self.model_version = 'test' # 版本号,最好模型路径
#
# self.logging_scores = []
# self.logging_best_score = 0
# self.log_file = './best_model/retrieval_logging.json'
#
# def train_model(self, train_df, dev_df, loss_func='CosineSimilarityLoss',
# evaluator_func='EmbeddingSimilarityEvaluator',
# eval_batch_size=128):
#
# model = self.get_model()
# train_samples = self.get_samples(train_dt)
#
# corpus = self.get_corpus()
# corpus = [str(c) for c in corpus]
# train_obj = self.get_train_objectives(train_df, model, loss_func=loss_func, corpus=corpus)
#
# self.train_size = 9999999999
# for t in train_obj:
# self.train_size = min(len(t[0]), self.train_size)
#
# print(f'train_size:{self.train_size}')
# evaluator = self.get_evaluator(dev_df, evaluator_func=evaluator_func, corpus=corpus,
# encode_batch_size=encode_batch_size)
#
# warmup_steps = math.ceil(self.train_size * 1 * 0.1) # 10% of train data for warm-up
# evaluation_steps = math.ceil(self.train_size * 0.1)
#
# print('start train...')
# print(f"save to :{self.save_model + self.model_version + '/'}")
# model.fit(train_objectives=train_obj, epochs=self.epoch, warmup_steps=warmup_steps,
# evaluator=evaluator,
# save_best_model=True,
# output_path=self.save_model + self.model_version + '/',
# evaluation_steps=evaluation_steps,
# callback=self.call_back,
# optimizer_params={'lr': self.learning_rate})
#
# df = pd.DataFrame(self.all_scores)
# df.to_excel(self.save_model + self.model_version + '/my_score.xlsx')
# TrainRetrieval.save_parameters(self,
# save_model=f"{self.save_model + self.model_version + '/'}parameters.json")
#
# def get_model(self):
# print(f'use_pretrained_model: {self.pretrained_model}')
# if self.use_sbert_model:
# model = SentenceTransformer(self.pretrained_model, device=f'cuda:{str(self.cuda_device)}')
# else:
# word_embedding_model = models.Transformer(self.pretrained_model, max_seq_length=self.max_seqence_length)
# # from sentence_transformers.models.T5 import T5
# # word_embedding_model = T5(self.model_path,max_seq_length=self.max_seqence_length)
# # dense_model = models.Dense(in_features=word_embedding_model.get_word_embedding_dimension(),
# # out_features=word_embedding_model.get_word_embedding_dimension(),
# # activation_function=nn.Tanh())
# pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
# pooling_mode_cls_token=False, pooling_mode_max_tokens=False,
# pooling_mode_mean_tokens=True, pooling_mode_mean_sqrt_len_tokens=False, )
# dense_layer = models.Dense(in_features=pooling_model.get_sentence_embedding_dimension(),
# out_features=self.output_dimension, activation_function=nn.Tanh())
# normalize_layer = models.Normalize()
# model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dense_layer, normalize_layer],
# device=f'cuda:{str(self.cuda_device)}')
# self.output_dimension = model.get_sentence_embedding_dimension()
# return model
#
# def get_samples(self, df):
# samples = []
# if self.args.model_type == 'nli':
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# entries_length = len(sub_df['entries'])
# if entries_length > 1:
# label_id = 1 # 蕴含关系
# else:
# label_id = 2 # 等价关系
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=label_id))
# if e in candidate_entries:
# candidate_entries.remove(e)
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# elif self.args.model_type == 'sts':
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# entries_length = len(sub_df['entries'])
# if 'label' in sub_df.index:
# score = sub_df['label']
# else:
# score = round(1 / entries_length, 4)
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=score))
# samples.append(InputExample(texts=[e, sub_df['mention']], label=score))
# if e in candidate_entries:
# candidate_entries.remove(e)
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
# else:
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=1))
# samples.append(InputExample(texts=[e, sub_df['mention']], label=1))
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
# return samples
#
#
# def get_train_objectives(self, train_data, model, loss_func='MultipleNegativesRankingLoss', corpus=None):
# """
#
# Args:
# train_data: ['entity','entry'],entity:要查询的文本,entry:匹配到的词条列表,可以多条
# model:
# loss_func:
# corpus: 输入的语料库用以构建负样本
#
# Returns:
# train_obj = [(train_dataloader, train_loss)]
# """
# train_samples = []
# self.loss_func = loss_func
# if loss_func == 'MultipleNegativesRankingLoss':
# train_data = RetrievalDT.convert_dt_for_MultipleNegativesRankingLoss(train_data, neg_data=2, corpus=corpus)
# # Special data loader that avoid duplicates within a batch
#
# train_dataloader = datasets.NoDuplicatesDataLoader(train_samples, batch_size=self.train_batch_size)
# train_loss = losses.MultipleNegativesRankingLoss(model=model)
# train_obj = [(train_dataloader, train_loss)]
# return train_obj
# elif loss_func == 'MultipleNegativesRankingLoss2':
# for _, sub_df in tqdm(train_data.iterrows()):
# if sub_df['label'] != 0:
# train_samples.append(InputExample(texts=[sub_df['entity'], sub_df['entry']]))
#
# print(len(train_samples))
# # Special data loader that avoid duplicates within a batch
# train_dataloader = datasets.NoDuplicatesDataLoader(train_samples, batch_size=self.train_batch_size)
# train_loss = losses.MultipleNegativesRankingLoss(model=model)
# train_obj = [(train_dataloader, train_loss)]
# return train_obj
# elif loss_func == 'OnlineContrastiveLoss':
# train_data = train_data[train_data['label'] != 0.0] # type:pd.DataFrame
#
# dev_df = train_data.groupby('entity').apply(lambda x: x['entry'].tolist())
#
# scores = dev_df.index.tolist()
# eval_examples = []
# for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# eval_examples.append(InputExample(texts=[t, r]))
#
# for _, sub_df in train_data.iterrows():
# if sub_df['label'] > 0:
# label = 1
# train_samples.append(InputExample(texts=[sub_df['entity'], sub_df['entry']], label=label))
# train_samples.append(InputExample(texts=[sub_df['entry'], sub_df['entity']], label=label))
# else:
# label = 0
# train_samples.append(InputExample(texts=[sub_df['entity'], sub_df['entry']], label=label))
#
# train_loss = losses.OnlineContrastiveLoss(model=model)
# elif loss_func == 'multi-task':
# train_samples_MultipleNegativesRankingLoss = []
# train_samples_ConstrativeLoss = []
#
# for _, sub_df in train_data.iterrows():
# if sub_df['label'] > 0:
# label = 1
# else:
# label = 0
# train_samples_ConstrativeLoss.append(
# InputExample(texts=[sub_df['entity'], sub_df['entry']], label=label))
# if str(label) == '1':
# for _ in range(int(self.data_top_k / 2)):
# train_samples_MultipleNegativesRankingLoss.append(
# InputExample(texts=[sub_df['entity'], sub_df['entry']], label=1))
# train_samples_MultipleNegativesRankingLoss.append(
# InputExample(texts=[sub_df['entry'], sub_df['entity']], label=1))
#
# # Create data loader and loss for MultipleNegativesRankingLoss
# train_dataset_MultipleNegativesRankingLoss = SentencesDataset(
# train_samples_MultipleNegativesRankingLoss,
# model=model)
# train_dataloader_MultipleNegativesRankingLoss = DataLoader(train_dataset_MultipleNegativesRankingLoss,
# shuffle=True,
# batch_size=self.train_batch_size)
# train_loss_MultipleNegativesRankingLoss = losses.MultipleNegativesRankingLoss(model)
#
# # Create data loader and loss for OnlineContrastiveLoss
# train_dataset_ConstrativeLoss = SentencesDataset(train_samples_ConstrativeLoss, model=model)
# train_dataloader_ConstrativeLoss = DataLoader(train_dataset_ConstrativeLoss, shuffle=True,
# batch_size=self.train_batch_size)
#
# # As distance metric, we use cosine distance (cosine_distance = 1-cosine_similarity)
# distance_metric = losses.SiameseDistanceMetric.COSINE_DISTANCE
# # Negative pairs should have a distance of at least 0.5
# margin = 0.5
# train_loss_ConstrativeLoss = losses.OnlineContrastiveLoss(model=model, distance_metric=distance_metric,
# margin=margin)
# train_object = [
# (train_dataloader_MultipleNegativesRankingLoss, train_loss_MultipleNegativesRankingLoss),
# (train_dataloader_ConstrativeLoss, train_loss_ConstrativeLoss)]
#
# return train_object
# elif loss_func == 'BatchHardSoftMarginTripletLoss':
# ### There are 4 triplet loss variants:
# ### - BatchHardTripletLoss
# ### - BatchHardSoftMarginTripletLoss
# ### - BatchSemiHardTripletLoss
# ### - BatchAllTripletLoss
#
# from sentence_transformers.datasets.SentenceLabelDataset import SentenceLabelDataset
#
# guid = 1
# self.label_map_file = "./data/v2/label_dict.xlsx"
# label_map = pd.read_excel(self.label_map_file)
# label_map = dict(zip(label_map['entry'].tolist(), label_map['label_num'].tolist()))
# train_samples = []
# for _, sub_df in train_data.iterrows():
# if sub_df['label'] != 0:
# train_samples.append(InputExample(guid=str(guid), texts=[sub_df['entity']],
# label=label_map.get(sub_df['entry'])))
# guid += 1
#
# print(f'train_length:{len(train_samples)}')
# self.train_length = len(train_samples)
#
# train_dataset = SentenceLabelDataset(train_samples)
# train_dataloader = DataLoader(train_dataset, batch_size=self.train_batch_size, drop_last=True)
# train_loss = losses.BatchHardSoftMarginTripletLoss(model=model)
# return train_dataloader, train_loss
# else:
# for _, sub_df in train_data.iterrows():
# train_samples.append(InputExample(texts=[sub_df['entity'], sub_df['entry']], label=sub_df['label']))
# train_loss = losses.CosineSimilarityLoss(model=model)
#
# train_dataset = SentencesDataset(train_samples, model)
# train_dataloader = DataLoader(dataset=train_dataset, shuffle=True, batch_size=self.train_batch_size)
# train_obj = [(train_dataloader, train_loss)]
# return train_obj
#
# #
# #
# # def get_evaluator(self, dev_df, evaluator_func='EmbeddingSimilarityEvaluator', collection='t1', corpus=None,
# # top_k=100, encode_batch_size=128):
# # self.evaluator_func = evaluator_func
# # dev_df = resample(dev_df, replace=False)
# #
# # if evaluator_func == 'MyEvaluator':
# # from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator
# # from sentence_transformers import InputExample
# # dev_df = dev_df[dev_df['label'] != 0.0] # type:pd.DataFrame
# # dev_df = dev_df.groupby('entity').apply(lambda x: x['entry'].tolist())
# # scores = dev_df.index.tolist()
# # eval_examples = []
# # for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# # eval_examples.append(InputExample(texts=[t, r]))
# # evaluator = MyEvaluator.from_input_examples(eval_examples, name='sts-eval', collection=collection,
# # top_k=top_k, encode_batch_size=encode_batch_size)
# #
# # # elif evaluator_func == 'InformationRetrievalEvaluator':
# # # ir_evaluator = InformationRetrievalEvaluator(dev_queries, corpus, dev_rel_docs,
# # # show_progress_bar=True,
# # # corpus_chunk_size=100000,
# # # precision_recall_at_k=[10, 100],
# # # name="msmarco dev")
# # elif evaluator_func == 'recall_evaluator':
# # from pharm_ai.panel.entry_match.retrieval_eval import RecallEvaluator
# # # dev_df = dev_df[dev_df['label'] != 0.0] # type:pd.DataFrame
# # from sentence_transformers import InputExample
# # dev_df = dev_df.groupby('entity').apply(lambda x: x['entry'].tolist())
# #
# # scores = dev_df.index.tolist()
# # eval_examples = []
# # for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# # eval_examples.append(InputExample(texts=[t, r]))
# #
# # evaluator = RecallEvaluator.from_input_examples(examples=eval_examples, corpus=corpus, name='sts-eval',
# # top_k=top_k, encode_batch_size=encode_batch_size)
# # return evaluator
# #
# # elif evaluator_func == 'seq_evaluator':
# # from sentence_transformers import evaluation
# # from sentence_transformers import InputExample
# # from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator
# # evaluators = []
# #
# # sentences_1 = []
# # sentences_2 = []
# # scores_ = []
# # for _, sub_df in dev_df.iterrows():
# #
# # sentences_1.append(sub_df['entity'])
# # sentences_2.append(sub_df['entry'])
# # if sub_df['label'] > 0:
# # scores_.append(1)
# # else:
# # scores_.append(0)
# #
# # binary_acc_evaluator = evaluation.BinaryClassificationEvaluator(sentences_1, sentences_2, scores_)
# # evaluators.append(binary_acc_evaluator)
# #
# # dev_df = dev_df[dev_df['label'] != 0.0] # type:pd.DataFrame
# # dev_df = dev_df.groupby('entity').apply(lambda x: x['entry'].tolist())
# # # scores = dev_df.index.tolist()
# # eval_examples = []
# # for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# # eval_examples.append(InputExample(texts=[t, r]))
# # my_evaluator = MyEvaluator.from_input_examples(eval_examples, name='sts-eval', collection=collection,
# # top_k=top_k, encode_batch_size=encode_batch_size)
# #
# # evaluators.append(my_evaluator)
# # seq_evaluator = evaluation.SequentialEvaluator(evaluators,
# # main_score_function=lambda scores: scores[-1])
# # return seq_evaluator
# #
# # elif evaluator_func == 'EmbeddingSimilarityEvaluator':
# # sentences_1 = []
# # sentences_2 = []
# # scores = []
# # for _, sub_df in dev_df.iterrows():
# # # if sub_df['label'] != 0.0:
# # sentences_1.append(sub_df['entity'])
# # sentences_2.append(sub_df['entry'])
# # scores.append(sub_df['label'])
# #
# # evaluator = EmbeddingSimilarityEvaluator(sentences_1, sentences_2, scores)
# # else:
# # sentences_1 = []
# # sentences_2 = []
# # scores = []
# # for _, sub_df in dev_df.iterrows():
# # if sub_df['label'] != 0.0:
# # sentences_1.append(sub_df['entity'])
# # sentences_2.append(sub_df['entry'])
# # scores.append(sub_df['label'])
# # evaluator = EmbeddingSimilarityEvaluator(sentences_1, sentences_2, scores)
# # print(f'dev_length:{len(scores)}')
# # self.dev_length = len(scores)
# # return evaluator
# #
# # @staticmethod
# # def save_parameters(para_obj, save_model='./test.json'):
# # """
# # 存储一个对象的参数,对象参数可以是模型参数或超参数
# # Args:
# # para_obj: 要存储的参数的对象
# # save_model: 保存路径
# #
# # Returns:
# #
# # """
# # para_list = para_obj.__dir__()
# # # save_para_list = ['best_score','device','max_seq_length','tokenizer']
# # para = {}
# # for p in para_list:
# # if not p.startswith('_'):
# # # if p in save_para_list:
# # r = getattr(para_obj, p)
# # if isinstance(r, int) or isinstance(r, str) or isinstance(r, float) or isinstance(r, list) \
# # or isinstance(r, bool):
# # para[p] = r
# #
# # with open(save_model, "w", encoding='utf-8') as f:
# # # indent 超级好用,格式化保存字典,默认为None,小于0为零个空格
# # # f.write(json.dumps(para,indent=4))
# # json.dump(para, f, indent=4) # 传入文件描述符,和dumps一样的结果
# #
# # para.pop("all_scores")
# # with open(log_file, "a", encoding='utf-8') as f:
# # json.dump(para, f, indent=4)
# # f.write('\n')
# #
# # def call_back(self, score, epoch, steps):
# # self.all_scores.append({str(epoch) + '-' + str(steps): score})
# # if score > self.best_score:
# # self.best_score = score
# # print(f'epoch:{epoch}: score:{score} ')
# #
# # def get_corpus(self):
# # self.corpus_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_v2_1217.xlsx"
# # corpus = pd.read_excel(self.corpus_file)
# # corpus = list(set(corpus['entry'].tolist()))
# # return corpus
# #
# # def run(self):
# # self.train_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # train_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # 'train')
# # self.dev_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # dev_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # 'eval')
# #
# # # self.model_path = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
# # # self.model_path = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# # self.model_path = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/disease_v2.0/"
# #
# # self.use_st_model = True
# # self.model_version = 'di_retrieval_v2.1'
# #
# # from zyl_utils import get_best_cuda_device
# # self.cuda_device = get_best_cuda_device(gpu_num=1)
# # self.max_seqence_length = 128
# # self.output_dimension = 1024
# # self.train_batch_size = 256
# # self.data_top_k = 3
# # self.epoch = 5
# # self.learning_rate = 1e-5
# #
# # self.train_model(train_df, dev_df,
# # loss_func='MultipleNegativesRankingLoss2', # multi-task
# # evaluator_func="recall_evaluator",
# # encode_batch_size=32)
#
# if __name__ == '__main__':
# # get_auto_device()
# # FineTurn().run()
# # Trainer().run()
#
# TrainRetrieval().run()
#
# pass
# if __name__ == '__main__':
# class Re
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/retrieval_bi_encoder.py
|
retrieval_bi_encoder.py
|
# encoding: utf-8
'''
@author: zyl
@file: my_DDPT5model.py
@time: 2021/11/11 11:00
@desc:
'''
import logging
import math
import os
import random
from dataclasses import asdict
import pandas as pd
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from simpletransformers.t5.t5_model import T5Model
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm, trange
from transformers.optimization import AdamW, Adafactor
from transformers.optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
class DDPT5Model(T5Model):
"""The DDP version of T5Model"""
def __init__(
self,
model_type,
model_name,
args=None,
tokenizer=None,
use_cuda=True,
cuda_device=-1,
**kwargs,
):
"""
Initializes a DDP T5Model model. Turn off multi-processing settings.
Args:
model_type: The type of model (t5, mt5)
model_name: The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
super().__init__(model_type, model_name, args, tokenizer, use_cuda, cuda_device, **kwargs)
self.args.use_multiprocessing = False
self.args.use_multiprocessing_for_evaluation = False
if self.args.n_gpu == 1:
raise ValueError("You are using DDP with single GPU.")
def train_model(
self,
train_data,
output_dir=None,
show_running_loss=True,
args=None,
eval_data=None,
verbose=True,
**kwargs,
):
"""
Trains the model using 'train_data'
Args:
train_data: Pandas DataFrame containing the 3 columns - `prefix`, `input_text`, `target_text`.
- `prefix`: A string indicating the task to perform. (E.g. `"question"`, `"stsb"`)
- `input_text`: The input text sequence. `prefix` is automatically prepended to form the full input. (<prefix>: <input_text>)
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
            verbose (optional): whether to print progress output to the console.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.
Returns:
""" # noqa: ignore flake8"
if args:
self.args.update_from_dict(args)
if self.args.evaluate_during_training and eval_data is None:
raise ValueError(
"evaluate_during_training is enabled but eval_data is not specified."
" Pass eval_data to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args.overwrite_output_dir = True to overcome.".format(output_dir)
)
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
os.environ['MASTER_ADDR'] = 'localhost'
port = random.randint(10000, 20000)
os.environ['MASTER_PORT'] = str(port)
mp.spawn(self.train_each_proc, nprocs=self.args.n_gpu,
args=(train_dataset, output_dir,
show_running_loss, eval_data, verbose, kwargs))
# self.save_model(model=self.model)
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args.model_name, output_dir))
def train_each_proc(self, process_index, train_dataset, *train_args):
"""
A wrapper function of train() for each process of DDP.
        :param process_index: the index (local rank) of the current DDP process.
        :param train_dataset: The training set.
        :param train_args: other positional arguments passed to train().
:return: The same as train().
"""
self._local_rank = process_index
self._world_size = self.args.n_gpu
self.train(train_dataset, *train_args[:-1], **train_args[-1])
def train(
self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
args = self.args
self.device = torch.device(f"cuda:{self._local_rank}")
self._move_model_to_device()
torch.distributed.init_process_group(
backend='nccl',
init_method='env://',
world_size=self._world_size,
rank=self._local_rank
)
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self._local_rank])
model = self.model
if self._local_rank == 0:
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
train_sampler = DistributedSampler(
train_dataset,
num_replicas=self._world_size,
rank=self._local_rank
)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size // self._world_size,
pin_memory=True
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop("params")
custom_parameter_names.update(params)
param_group = {**group}
param_group["params"] = [p for n, p in model.named_parameters() if n in params]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop("layer")
layer = f"layer.{layer_number}."
group_d = {**group}
group_nd = {**group}
group_nd["weight_decay"] = 0.0
params_d = []
params_nd = []
for n, p in model.named_parameters():
if n not in custom_parameter_names and layer in n:
if any(nd in n for nd in no_decay):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d["params"] = params_d
group_nd["params"] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if not self.args.train_custom_parameters_only:
optimizer_grouped_parameters.extend(
[
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
)
warmup_steps = math.ceil(t_total * args.warmup_ratio)
args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps
if 0 < args.save_after < 1:
args.save_after = math.ceil(t_total * args.save_after)
if args.optimizer == "AdamW":
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
elif args.optimizer == "Adafactor":
optimizer = Adafactor(
optimizer_grouped_parameters,
lr=args.learning_rate,
eps=args.adafactor_eps,
clip_threshold=args.adafactor_clip_threshold,
decay_rate=args.adafactor_decay_rate,
beta1=args.adafactor_beta1,
weight_decay=args.weight_decay,
scale_parameter=args.adafactor_scale_parameter,
relative_step=args.adafactor_relative_step,
warmup_init=args.adafactor_warmup_init,
)
if self._local_rank == 0:
print("Using Adafactor for T5")
else:
raise ValueError(
"{} is not a valid optimizer class. Please use one of ('AdamW', 'Adafactor') instead.".format(
args.optimizer
)
)
if args.scheduler == "constant_schedule":
scheduler = get_constant_schedule(optimizer)
elif args.scheduler == "constant_schedule_with_warmup":
scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps)
elif args.scheduler == "linear_schedule_with_warmup":
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
elif args.scheduler == "cosine_schedule_with_warmup":
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "cosine_with_hard_restarts_schedule_with_warmup":
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "polynomial_decay_schedule_with_warmup":
scheduler = get_polynomial_decay_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
lr_end=args.polynomial_decay_schedule_lr_end,
power=args.polynomial_decay_schedule_power,
)
else:
raise ValueError("{} is not a valid scheduler.".format(args.scheduler))
if (
args.model_name
and os.path.isfile(os.path.join(args.model_name, "optimizer.pt"))
and os.path.isfile(os.path.join(args.model_name, "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name, "scheduler.pt")))
if self._local_rank == 0:
logger.info(" Training started")
global_step = 0
training_progress_scores = None
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch",
disable=args.silent or self._local_rank != 0, mininterval=0)
epoch_number = 0
best_eval_metric = None
current_loss = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
stop_training = False
if args.model_name and os.path.exists(args.model_name):
try:
# set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name.split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args.wandb_project and self._local_rank == 0:
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
wandb.watch(self.model)
if args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
for epoch in train_iterator:
model.train()
train_sampler.set_epoch(epoch)
if epochs_trained > 0:
epochs_trained -= 1
continue
if self._local_rank == 0:
train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")
batch_iterator = tqdm(
train_dataloader,
desc=f"Running Epoch {epoch_number} of {args.num_train_epochs} on process {self._local_rank}",
disable=args.silent or self._local_rank != 0,
mininterval=0,
)
for step, batch in enumerate(batch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs = self._get_inputs_dict(batch)
if args.fp16:
with amp.autocast():
loss = self.compute_loss(model, args, inputs)
else:
loss = self.compute_loss(model, args, inputs)
loss_ = loss.clone()
torch.distributed.barrier()
torch.distributed.reduce(loss_, 0)
current_loss = loss_.item() / self._world_size
if show_running_loss and self._local_rank == 0:
batch_iterator.set_description(
f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
scaler.unscale_(optimizer)
if args.optimizer == "AdamW":
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0 and self._local_rank == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.wandb_project or self.is_sweeping:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_last_lr()[0]
},
step=global_step
)
if args.save_steps > 0 and global_step % args.save_steps == 0 and self._local_rank == 0:
# Save model checkpoint
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and (
args.evaluate_during_training_steps > 0
and global_step % args.evaluate_during_training_steps == 0
):
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent or self._local_rank != 0,
**kwargs,
)
if self._local_rank == 0:
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args.save_eval_checkpoints:
self.save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
stop_training, best_eval_metric, early_stopping_counter = self.logging_and_saving(
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter)
torch.distributed.barrier()
stop_training_tensor = torch.tensor([stop_training], device=self.device)
torch.distributed.broadcast(stop_training_tensor, src=0)
stop_training = bool(stop_training_tensor.cpu()[0])
if stop_training:
break
model.train()
if stop_training:
break
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if (args.save_model_every_epoch or args.evaluate_during_training) and self._local_rank == 0:
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch and self._local_rank == 0:
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and args.evaluate_each_epoch:
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent or self._local_rank != 0,
**kwargs,
)
if self._local_rank == 0:
if args.save_eval_checkpoints:
self.save_model(output_dir_current, optimizer, scheduler, results=results)
stop_training, best_eval_metric, early_stopping_counter = self.logging_and_saving(
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter)
torch.distributed.barrier()
stop_training_tensor = torch.tensor([stop_training], device=self.device)
torch.distributed.broadcast(stop_training_tensor, src=0)
stop_training = bool(stop_training_tensor.cpu()[0])
if stop_training:
break
# close tensorboard writer to avoid EOFError.
if self._local_rank == 0:
tb_writer.close()
wandb.finish()
def eval_model(
self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs
):
"""
Evaluates the model on eval_data. Saves results to output_dir.
Args:
eval_data: Pandas DataFrame containing the 3 columns - `prefix`, `input_text`, `target_text`.
- `prefix`: A string indicating the task to perform. (E.g. `"question"`, `"stsb"`)
- `input_text`: The input text sequence. `prefix` is automatically prepended to form the full input. (<prefix>: <input_text>)
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
results: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args.output_dir
eval_dataset = self.load_and_cache_examples(
eval_data, evaluate=True, verbose=verbose, silent=silent
)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(
eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs
)
self.results.update(result)
if self.args.evaluate_generated_text:
if self.args.preprocess_inputs:
to_predict = [
prefix + ": " + input_text
for prefix, input_text in zip(
eval_data["prefix"], eval_data["input_text"]
)
]
else:
to_predict = [
prefix + input_text
for prefix, input_text in zip(
eval_data["prefix"], eval_data["input_text"]
)
]
preds = self.predict(to_predict)
result = self.compute_metrics(
eval_data["target_text"].tolist(), preds, **kwargs
)
self.results.update(result)
if verbose:
logger.info(self.results)
return self.results
def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_dataset.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
eval_sampler = DistributedSampler(
eval_dataset,
num_replicas=self._world_size,
rank=self._local_rank
)
eval_dataloader = DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=args.eval_batch_size // self._world_size,
pin_memory=True
)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
if self.args.fp16:
from torch.cuda import amp
for batch in tqdm(
eval_dataloader,
disable=args.silent or silent,
desc="Running Evaluation"
):
inputs = self._get_inputs_dict(batch)
with torch.no_grad():
if self.args.fp16:
with amp.autocast():
outputs = model(**inputs)
loss = outputs[0]
else:
outputs = model(**inputs)
loss = outputs[0]
torch.distributed.barrier()
torch.distributed.reduce(loss, 0)
eval_loss += loss.item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps / self._world_size
if self._local_rank == 0:
print(eval_loss)
results["eval_loss"] = eval_loss
if self._local_rank == 0:
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
return results
def logging_and_saving(
self,
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter):
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args.output_dir, "training_progress_scores.csv"), index=False,
)
if args.wandb_project or self.is_sweeping:
wandb.log(self._get_last_metrics(training_progress_scores), step=global_step)
stop_training = False
if global_step > args.save_after:
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if args.early_stopping_metric_minimize:
if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
stop_training, early_stopping_counter = \
self.check_early_stopping(early_stopping_counter, args, train_iterator, verbose)
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
stop_training, early_stopping_counter = \
self.check_early_stopping(early_stopping_counter, args, train_iterator, verbose)
return stop_training, best_eval_metric, early_stopping_counter
def check_early_stopping(self, early_stopping_counter, args, train_iterator, verbose):
stop_training = False
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
stop_training = True
return stop_training, early_stopping_counter
def compute_loss(self, model, args, inputs):
outputs = model(**inputs)
if args.r_drop:
outputs_ = model(**inputs)
loss = self.compute_r_drop_loss(
outputs['loss'],
outputs_['loss'],
outputs['logits'],
outputs_['logits'],
inputs['attention_mask'],
args.r_drop_alpha
)
else:
loss = outputs[0]
return loss
def compute_kl_loss(self, p, q, pad_mask=None, reduction='mean'):
p_loss = F.kl_div(F.log_softmax(p, dim=-1), F.softmax(q, dim=-1), reduction='none')
q_loss = F.kl_div(F.log_softmax(q, dim=-1), F.softmax(p, dim=-1), reduction='none')
if pad_mask is not None:
p_loss.masked_fill_(pad_mask.to(bool).unsqueeze(-1), 0.)
q_loss.masked_fill_(pad_mask.to(bool).unsqueeze(-1), 0.)
if reduction == 'mean':
p_loss = p_loss.mean()
q_loss = q_loss.mean()
elif reduction == 'sum':
p_loss = p_loss.sum()
q_loss = q_loss.sum()
else:
raise ValueError('Only mean or sum reduction is supported in computing KL Divergence!')
loss = (p_loss + q_loss) / 2
return loss
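    # Note (added comment): compute_r_drop_loss below implements the R-Drop objective,
    # i.e. the average cross-entropy of two forward passes plus alpha times the symmetric
    # KL divergence between their output distributions (padding positions masked out).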
def compute_r_drop_loss(self, ce1, ce2, logit1, logit2, attention_mask, alpha, reduction='mean'):
kl_loss = self.compute_kl_loss(logit1, logit2, attention_mask, reduction=reduction)
ce_loss = 0.5 * (ce1 + ce2)
return ce_loss + alpha * kl_loss
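# Minimal usage sketch (assumptions, not part of the original module: a multi-GPU host,
# the Hugging Face "google/mt5-base" checkpoint, and the column layout documented in
# train_model above):
# import pandas as pd
# train_df = pd.DataFrame({
#     "prefix": ["question"],
#     "input_text": ["What does DDP stand for?"],
#     "target_text": ["Distributed Data Parallel."],
# })
# model = DDPT5Model("mt5", "google/mt5-base", args={"n_gpu": 2, "overwrite_output_dir": True})
# model.train_model(train_df, eval_data=train_df)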
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/DDPT5model.py
|
DDPT5model.py
|
# encoding: utf-8
"""
@author: zyl
@file: __init__.py.py
@time: 2021/11/29 9:34
@desc:
"""
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/__init__.py
|
__init__.py
|
# encoding: utf-8
"""
@author: zyl
@file: utils.py
@time: 2021/11/29 15:18
@desc:
"""
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.ner import NERModel
class ModelUtils:
def __init__(self):
pass
@staticmethod
def get_auto_cuda_device(gpu_num=1):
import pynvml
import numpy as np
pynvml.nvmlInit()
deviceCount = pynvml.nvmlDeviceGetCount()
deviceMemory = dict()
for i in range(deviceCount):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
deviceMemory.update({i:mem_info.free / 1024 / 1024}) # M
deviceMemory = sorted(deviceMemory.items(), key=lambda x: x[1], reverse=True)
deviceMemory = np.array(deviceMemory, dtype=np.int64).tolist()
deviceMemory_tuple = deviceMemory[0:gpu_num]
deviceMemory = ','.join([str(d[0]) for d in deviceMemory_tuple])
        logger.info(f'The least free memory among the selected gpu(s) is {deviceMemory_tuple[-1][-1]} MiB (gpu {deviceMemory_tuple[-1][0]})')
return deviceMemory
@staticmethod
def eval_decoration(eval_func):
        # #############################################################
        # examples: the instance should define: self.wandb_proj, self.model_version, self.model_args
        # >>> @eval_decoration
        # >>> def eval(eval_df, a, b):
        # >>>     eval_res = func... a, b
        # >>>     return eval_res
        # ############################################################
        def eval_method(self, eval_df, *args, **kwargs):
            eval_size = eval_df.shape[0]
            # wandb
            wandb.init(project=self.wandb_proj, config=self.model_args,
                       name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
                       tags=[self.model_version, 'eval'])
            try:
                start_time = time.time()
                logger.info(f'start eval: model_version---{self.model_version}, eval size---{eval_size}')
                eval_res = eval_func(self, eval_df, *args, **kwargs)  # type:dict
                logger.info('eval finished!!!')
                end_time = time.time()
                need_time = round((end_time - start_time) / eval_size, 5)
                eval_time = round(need_time * eval_size, 4)
                print(f'eval results: {eval_res}')
                logger.info(f'eval time: {need_time} s * {eval_size} = {eval_time} s')
                assert isinstance(eval_res, dict)
                eval_res.update({"eval_size": eval_size})
                wandb.log(eval_res)
            except Exception as error:
                logger.error(f'eval failed!!! ERROR:{error}')
                eval_res = dict()
            finally:
                wandb.finish()
            return eval_res
        return eval_method
if __name__ == '__main__':
ModelUtils.get_auto_cuda_device()
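    # Hypothetical follow-up (not in the original module): the returned comma-separated id
    # string could be exported before any CUDA context is created, e.g.
    # os.environ['CUDA_VISIBLE_DEVICES'] = ModelUtils.get_auto_cuda_device(gpu_num=2)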
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/model_utils.py
|
model_utils.py
|
def sunday_match(target, pattern) -> list:
"""
    find all start positions of `pattern` in `target`, i.e. every occurrence of a sub-sequence in a sequence
    Args:
        target: str or list, the original sequence to search in
        pattern: str or list, the sub-sequence to match
    Returns:
        starts: the start index of each match of `pattern` within `target`
"""
len_target = len(target)
len_pattern = len(pattern)
if len_pattern > len_target:
return list()
index = 0
starts = []
while index < len_target:
if pattern == target[index:index + len_pattern]:
starts.append(index)
index += 1
else:
if (index + len(pattern)) >= len_target:
return starts
else:
if target[index + len(pattern)] not in pattern:
index += (len_pattern + 1)
else:
index += 1
return starts
if __name__ == '__main__':
t = "this is an apple , apple app app is app not app"
# t = t.split()
p = "app"
# p = p.split()
print(t)
print(p)
print(sunday_match(target=t, pattern=p))
import pandas as pd
    train_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
                           'train')
print('1')
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/algorithms/sunday_match.py
|
sunday_match.py
|
# encoding: utf-8
"""
@author: zyl
@file: __init__.py.py
@time: 2021/11/26 9:09
@desc:
"""
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/algorithms/__init__.py
|
__init__.py
|
from typing import List, Set
import pandas as pd
def entity_recognition_metrics(
y_true: List[Set],
y_pred: List[Set],
pos_neg_ratio: str = None,
self_metric=False
) -> pd.DataFrame:
"""
the metric of entity_recognition, version-v2, reference: https://docs.qq.com/doc/DYXRYQU1YbkVvT3V2
Args:
y_true: list[set],the list of true target texts,each element is a set
y_pred: list[set],the list of pred target texts,each element is a set
        pos_neg_ratio: the relative importance of positive vs. negative samples; defaults to the ratio of the
            positive and negative sample sizes, but can be set explicitly, e.g. "7:3"
        self_metric: whether to also print a self-defined positive score that does not penalize redundant entities
Returns:
show report and res
"""
neg_data = 0
neg_correct_dt = 0
neg_wrong_dt = 0
neg_redundant_entities = 0
pos_data = 0
pos_correct_dt = 0
pos_wrong_dt = 0
pos_correct_entities = 0
pos_wrong_entities = 0
pos_omitted_entities = 0
pos_redundant_entities = 0
for i, j in zip(y_true, y_pred):
if i == set():
neg_data += 1
if j == set():
neg_correct_dt += 1
else:
neg_wrong_dt += 1
neg_redundant_entities += len(j)
else:
pos_data += 1
true_pred = len(i & j)
pos_correct_entities += true_pred
if i == j:
pos_correct_dt += 1
elif len(i) > len(j):
pos_wrong_dt += 1
pos_wrong_entities += (len(j) - true_pred)
pos_omitted_entities += (len(i) - len(j))
else:
pos_wrong_dt += 1
pos_redundant_entities += (len(j) - len(i))
pos_wrong_entities += (len(i) - true_pred)
all_pos_entities = pos_correct_entities + pos_wrong_entities + pos_omitted_entities + pos_redundant_entities
pred_neg = sum([1 for j in y_pred if len(j) == 0])
true_neg = sum([1 for i in y_true if len(i) == 0])
pred_pos = sum([len(j) for j in y_pred])
true_pos = sum([len(i) for i in y_true])
if neg_data == 0:
neg_metric = neg_precision = neg_recall = neg_f1 = 0
else:
neg_metric = neg_correct_dt / (neg_correct_dt + neg_redundant_entities)
neg_precision = neg_correct_dt / pred_neg if pred_neg else 0
neg_recall = neg_correct_dt / true_neg if true_neg else 0
neg_f1 = 2 * neg_precision * neg_recall / (neg_precision + neg_recall + 1e-10)
if pos_data == 0:
pos_metric = pos_precision = pos_recall = pos_f1 = 0
else:
pos_metric = pos_correct_entities / all_pos_entities
pos_precision = pos_correct_entities / pred_pos if pred_pos else 0
pos_recall = pos_correct_entities / true_pos if true_pos else 0
pos_f1 = 2 * pos_precision * pos_recall / (pos_precision + pos_recall + 1e-10)
sum_metric_micro = (pos_correct_entities + neg_correct_dt) / (
neg_correct_dt + neg_redundant_entities + all_pos_entities)
# sum_metric_macro = neg_metric * 0.5 + pos_metric * 0.5
precision = (neg_correct_dt + pos_correct_entities) / (pred_pos + pred_neg + 1e-10)
recall = (neg_correct_dt + pos_correct_entities) / (true_pos + true_neg + 1e-10)
f1 = 2 * precision * recall / (precision + recall + 1e-10)
if pos_neg_ratio:
pos_all = float(pos_neg_ratio.split(':')[0])
neg_all = float(pos_neg_ratio.split(':')[1])
pos_ratio = pos_all / (pos_all + neg_all)
neg_ratio = neg_all / (pos_all + neg_all)
else:
pos_ratio = pos_data / (pos_data + neg_data)
neg_ratio = neg_data / (pos_data + neg_data)
sum_metric_weighted = pos_ratio * pos_metric + neg_ratio * neg_metric
# pos_precision = pos_correct_dt / (neg_correct_dt + pos_correct_dt)
# recall = pos_correct_dt / pos_data
tp = pos_correct_dt
fn = pos_wrong_dt
fp = neg_wrong_dt
tn = neg_correct_dt
accuracy = (tp + tn) / (tp + fn + fp + tn)
# precision = tp / (tp + fp)
# recall = tp / (tp + fn)
# f1 = 2 / (1 / precision + 1 / recall)
r = {
'positive data': [str(pos_data), pos_correct_dt, pos_wrong_dt, pos_correct_entities,
pos_wrong_entities, pos_omitted_entities, pos_redundant_entities,
pos_precision, pos_recall, pos_f1, pos_metric],
'negative data': [neg_data, neg_correct_dt, neg_wrong_dt, '-', '-', '-', neg_redundant_entities,
neg_precision, neg_recall, neg_f1, neg_metric],
'all data ': [str(pos_data + neg_data), neg_correct_dt + pos_correct_dt, neg_wrong_dt + pos_wrong_dt,
pos_correct_entities, pos_wrong_entities, pos_omitted_entities,
pos_redundant_entities + neg_redundant_entities,
precision, recall, f1, sum_metric_micro],
'weighted score': ['', '', '', '', '', '', '', '', '', '', sum_metric_weighted],
}
index = ['| data_num', '| correct_data', '| wrong_data', '| correct_entities', '| wrong_entities',
'| omitted_entities', '| redundant_entities', '| precision', '| recall', '| f1', '| score']
res_df = pd.DataFrame(r, index=index).T
pd.set_option('precision', 4)
pd.set_option('display.width', None)
pd.set_option('display.max_columns', None)
pd.set_option("colheader_justify", "center")
print(res_df)
    print(
        f"positive-set score: {pos_correct_entities} / "
        f"({pos_correct_entities}+{pos_wrong_entities}+{pos_omitted_entities}+"
        f"{pos_redundant_entities}) = {round(pos_metric, 4)}, negative-set score: {neg_correct_dt} / ({neg_correct_dt} + "
        f"{neg_redundant_entities}) = {round(neg_metric, 4)},",
        f"overall score: ({pos_correct_entities} + {neg_correct_dt}) / "
        f"({all_pos_entities}+{neg_correct_dt + neg_redundant_entities}) = {round(sum_metric_micro, 4)}",
        # f"accuracy: {accuracy}",
    )
    print('\n')
    if self_metric:
        more_not_error_pos = (pos_correct_entities + pos_redundant_entities) / (
                pos_correct_entities + pos_wrong_entities + pos_omitted_entities + pos_redundant_entities)
        print(
            f"self-defined positive-set score: {pos_correct_entities + pos_redundant_entities} /"
            f" ({pos_correct_entities}+{pos_wrong_entities}+{pos_omitted_entities}+"
            f"{pos_redundant_entities}) = {round(more_not_error_pos, 4)}, negative-set score: {round(1, 4)},"
        )
        print('\n')
return res_df
if __name__ == '__main__':
y_true = [{'a','b'},{'j','d'},{'c','k'}]
y_true.extend([set()]*27)
y_pred = [{'a','b'},{'j','d'},{'c','f'}]
y_pred.extend([set()] * 27)
# y_true = [{'a','b','j','d','c','k'}]
# y_pred = [{'a','b','j','d','c','f'}]
r = entity_recognition_metrics(y_true,y_pred)
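    # Hedged usage note: pos_neg_ratio can weight the positive/negative sub-scores explicitly, e.g.
    # r = entity_recognition_metrics(y_true, y_pred, pos_neg_ratio='7:3')  # 70% positive, 30% negative weight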
# print(r.iloc[2,-3])
print('1')
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/metrics/ner_metric.py
|
ner_metric.py
|
# encoding: utf-8
"""
@author: zyl
@file: __init__.py.py
@time: 2021/11/29 15:08
@desc:
"""
|
zyl-utils
|
/zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/metrics/__init__.py
|
__init__.py
|
import sys
def print_lol(lines,indent=False,level=0,fn=sys.stdout):
for each_line in lines:
if isinstance(each_line,list):
print_lol(each_line,indent,level+1,fn)
else:
if indent:
for each_item in range(level):
print('\t',end='',file=fn)
print(each_line,file=fn)
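# Example usage (assumed, not part of the original module):
# print_lol(['a', ['b', ['c']]], indent=True)  # prints 'a', then 'b' with one tab and 'c' with two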
|
zyl_nester
|
/zyl_nester-1.0.0.tar.gz/zyl_nester-1.0.0/zyl_nester.py
|
zyl_nester.py
|
from distutils.core import setup
setup(
name='zyl_nester',
version='1.0.0',
py_modules=['zyl_nester'],
author='zyl',
author_email='[email protected]',
url='',
description='A simple',
)
|
zyl_nester
|
/zyl_nester-1.0.0.tar.gz/zyl_nester-1.0.0/setup.py
|
setup.py
|
## Zylo-Admin
Zylo-Admin is a battery startup for the newly updated Zylo v2.0.8.
## Available scripts
```bash
zylo-admin startproject -i {projectname}
```
```bash
zylo-admin manage engine
```
```bash
zylo-admin runserver {projectname}
```
- zylo-admin --> the main CLI module
- zylo-admin startproject --> creates a new WSGI project skeleton for Zylo
- zylo-admin startproject -i {projectname} --> -i specifies the name of the project to create
- zylo-admin manage engine --> manages all the static and templating files; `engine` denotes the default engine in settings.py
- zylo-admin runserver {projectname} --> runs the server (debug mode by default); just pass the name of the created WSGI project folder (see the workflow sketch below)
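A minimal end-to-end workflow sketch (the project name `myapp` is only an example):
```bash
# create the project skeleton (settings.py, app.py, modules.json)
zylo-admin startproject -i myapp
# generate the views/ and static/ folders and register the extra engine modules
# (acts on the most recently created project)
zylo-admin manage engine
# start the development server for the project
zylo-admin runserver myapp
```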
|
zylo-admin
|
/zylo-admin-1.0.3.tar.gz/zylo-admin-1.0.3/README.md
|
README.md
|
from setuptools import setup, find_packages
with open('README.md', 'r', encoding='utf-8') as f:
long_description = f.read()
setup(
name='zylo-admin',
version='1.0.3',
description='A battery startup for zylo web framework',
long_description=long_description,
long_description_content_type='text/markdown',
author='Pawan kumar',
author_email='[email protected]',
url='https://github.com/embracke/zyloadmin',
packages=find_packages(),
install_requires=['zylo'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
entry_points={
'console_scripts': [
'zylo-admin = zyloadmin.main:main',
],
},
)
|
zylo-admin
|
/zylo-admin-1.0.3.tar.gz/zylo-admin-1.0.3/setup.py
|
setup.py
|
import os
import sys
import pickle
import subprocess
import argparse
created_projects = []
def create_folder(project_name):
if not os.path.exists(project_name):
os.makedirs(project_name)
print(f"Folder '{project_name}' created.")
else:
print(f"Folder '{project_name}' already exists.")
def create_file(file_path, content):
with open(file_path, 'w') as f:
f.write(content)
print(f"File '{file_path}' created.")
def create_views_static(project_name):
views_folder = f"{project_name}/views"
static_folder = f"{project_name}/static"
os.makedirs(views_folder)
os.makedirs(static_folder)
print("Folders 'views' and 'static' created.")
index_html_code = '''<!DOCTYPE html>
<html>
<head>
<title>Welcome to Zylo Web Framework</title>
</head>
<body>
<div class="flex items-center justify-center h-screen bg-gray-100">
<h1 class="text-4xl font-bold text-indigo-700">Welcome to Zylo Web Framework</h1>
</div>
</body>
</html>
'''
create_file(f"{views_folder}/index.html", index_html_code)
create_file(f"{static_folder}/script.js", "")
create_file(f"{static_folder}/style.css", "")
def update_modules_json(project_name):
modules_json_file = f"{project_name}/modules.json"
import json
with open(modules_json_file, 'r') as f:
data = json.load(f)
data[0]["modules"].extend([
{"name": "viewengine", "url": "http://zylo.vvfin.in/jit/23116933/modules/viewengine?pypi=True&connected=True"},
{"name": "staticengine", "url": "http://zylo.vvfin.in/jit/23116933/modules/static?pypi=True&connected=True"},
{"name": "pubsec", "url": "http://zylo.vvfin.in/jit/23116933/modules/pubsec?pypi=True&connected=True"}
])
with open(modules_json_file, 'w') as f:
json.dump(data, f, indent=4)
print("modules.json updated.")
def load_created_projects():
if os.path.exists("project.pkl"):
with open("project.pkl", "rb") as f:
return pickle.load(f)
return []
def save_created_projects(projects):
with open("project.pkl", "wb") as f:
pickle.dump(projects, f)
def run_server(project_name):
app_file = f"{project_name}/app.py"
if os.path.exists(app_file):
print(f"Running server for project '{project_name}'...")
subprocess.run(["python", app_file])
else:
print(f"Error: 'app.py' file not found in project '{project_name}'.")
def main():
global created_projects # Declare the variable as global to modify it inside the function
parser = argparse.ArgumentParser(description="ZyloAdmin - A Python project management tool for Zylo Web Framework.")
subparsers = parser.add_subparsers(dest='command', help="Available commands")
# Subparser for the 'startproject' command
startproject_parser = subparsers.add_parser('startproject', help='Create a new project')
startproject_parser.add_argument('-i', '--projectname', required=True, help='Name of the project')
# Subparser for the 'runserver' command
runserver_parser = subparsers.add_parser('runserver', help='Run the server for a project')
runserver_parser.add_argument('projectname', help='Name of the project to run the server for')
# Subparser for the 'manage' command
manage_parser = subparsers.add_parser('manage', help='Manage startup engine')
manage_parser.add_argument('engine', choices=['engine'], help='Manage the startup engine')
args = parser.parse_args()
if args.command == 'startproject':
project_name = args.projectname
create_folder(project_name)
settings_code = '''HOST = 'localhost'
PORT = 8000
DEBUG = True
SECRET_KEY = "your_secret_key"
STATIC_FOLDER = "static"
TEMPLATES = [
{
'BACKEND': 'zylo.backends.ZyloTemplates',
'DIRS': ['views'],
}
]
DATABASES = {
'default': {
'ENGINE': 'zylo.db.backends.electrus',
'HOST': 'localhost',
'PORT': 37017,
'USER': 'root',
'PASSWORD': 'root'
}
}
MAILER = [
{
"SMTP": "VALUE",
"PORT": "VALUE",
"USERNAME": "VALUE",
"PASSWORD": "VALUE",
"SSL": True,
"DEFAULT_SENDER": "VALUE"
}
]
'''
create_file(f"{project_name}/settings.py", settings_code)
app_code = '''from zylo.core.branch import Zylo, Response
app = Zylo(__name__)
@app.route('/', methods=['GET', 'POST'])
def home(request):
return Response("Welcome to Zylo Web Framework")
if __name__ == "__main__":
app.runs()
'''
create_file(f"{project_name}/app.py", app_code)
modules_json_code = '''[
{
"config": [
{
"$host": "127.0.0.1",
"$port": 8000,
"$debug": "True",
"$http": "www.zylo.vvfin.in/conf/%connection%/devweb2?_uri=main&support=True&_ping=192.168.0.1"
}
],
"modules": [
{
"name": "zylo",
"url": "http://zylo.vvfin.in/jit/23116933/modules/zylo?pypi=True&connected=True"
},
{
"name": "mailer",
"url": "http://zylo.vvfin.in/jit/23116933/modules/mailer?pypi=True&connected=True"
},
{
"name": "JwT",
"url": "http://zylo.vvfin.in/jit/23116933/modules/JwT?pypi=True&connected=True"
},
{
"name": "blueprint",
"url": "http://zylo.vvfin.in/jit/23116933/modules/blueprint?pypi=True&connected=True"
},
{
"name": "chiper",
"url": "http://zylo.vvfin.in/jit/23116933/modules/chiper?pypi=True&connected=True"
},
{
"name": "session",
"url": "http://zylo.vvfin.in/jit/23116933/modules/session?pypi=True&connected=True"
},
{
"name": "limiter",
"url": "http://zylo.vvfin.in/jit/23116933/modules/limiter?pypi=True&connected=True"
},
{
"name": "BaseModals",
"url": "http://zylo.vvfin.in/jit/23116933/modules/BaseModals?pypi=True&connected=True"
}
],
"database": [
{"name": "Electrus", "$connection": "True"},
{"name": "MongoDB", "$connection": "False"}
],
"privilege": [
{
"role": "user",
"control": "+055 wbr++",
"$host": "127.0.0.1",
"$port": "8080"
}
]
}
]
'''
create_file(f"{project_name}/modules.json", modules_json_code)
created_projects.append(project_name)
save_created_projects(created_projects)
elif args.command == 'runserver':
project_name = args.projectname
run_server(project_name)
elif args.command == 'manage' and args.engine == 'engine':
created_projects = load_created_projects()
if len(created_projects) == 0:
print("No projects have been created yet.")
return
project_name = created_projects[-1]
create_views_static(project_name)
update_modules_json(project_name)
else:
parser.print_help()
if __name__ == "__main__":
main()
|
zylo-admin
|
/zylo-admin-1.0.3.tar.gz/zylo-admin-1.0.3/zyloadmin/main.py
|
main.py
|
# Zylo
Zylo is a lightweight web framework made with love.
## Features
- Simple and intuitive routing
- Template rendering using Jinja2
- Session management with the sessions library
- Static file serving
## Installation
You can install Zylo using pip:
```bash
pip install zylo
```
## Usage
```python
from zylo import Zylo
app = Zylo()
@app.route('/')
def home(request):
return 'Hello, World!'
if __name__ == '__main__':
app.run()
```
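A minimal template-rendering sketch (hedged: it assumes `render_template`, used in the examples below, resolves `index.html` from the project's `views` folder created by zylo-admin):
```python
from zylo import Zylo, render_template
app = Zylo(__name__)
@app.route('/')
def index(request):
    # render views/index.html (template location is an assumption)
    return render_template('index.html')
if __name__ == '__main__':
    app.run()
```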
## Changelogs
- Beta version 2.0.3
- Latest update of the beta
- Bugs fixed with update --> 2.0.3
- Updated Usage Guide 1.2.1
- Added more functions & bug fixes
- Bug fixes in Zylo
- Mailer updated to --> 1.0.3
```python
from zylo import Zylo
from zylo.limiter import Limiter, render_template
app = Zylo(__name__)
limiter = Limiter(app)
@app.route('/', methods=['GET', 'POST'])
@limiter.limit('10/minutes')
def home(request):
    return render_template('index.html')
if __name__ == '__main__':
app.run()
```
## Blueprint
```python
from zylo import Zylo, Response
from zylo.blueprint import Blueprint
app = Zylo(__name__)
blueprint = Blueprint('auth', __name__, url_prefix='/auth')
@blueprint.route('/')
def home(request):
return Response("Welcome to ZYLO blueprint route")
app.register_blueprint(blueprint)
if __name__ == "__main__":
app.run()
```
## Sessions
```python
from zylo import Zylo, Response, render_template, redirect
app = Zylo(__name__)
@app.route('/')
def home(request):
session = request.session
session['id'] = 123
return redirect('/dashboard')
@app.route('/dashboard')
def dashboard(request):
session = request.session
id = session.get('id')
return render_template('dashboard.html', id=id)
@app.route('/logout')
def logout(request):
request.session.clear()
return Response("You have been successfully logged out")
if __name__ == "__main__":
app.run()
```
## JwT
```python
from zylo.JwT import JwT, error_handler
jwt = JwT()
try:
payload = {'user_id': 123, 'role': 'admin'}
access_token = jwt.create_payload(payload, algorithm="HS256", time_limit_hours=1)
decoded_payload = jwt.verify_payload(access_token)
id = decoded_payload['user_id']
print(f"id: {id}")
except Exception as e:
error_message = error_handler(e)
print('Error:', error_message)
```
## Limiter
```python
from zylo import Zylo, Response
from zylo.limiter import Limiter
app = Zylo(__name__)
limiter = Limiter(app)
@app.route('/')
@limiter.limit(limit=5, period=60)
def home(request):
return Response("Limited route")
if __name__ == "__main__":
app.run()
```
## Mailer
```python
from zylo import Zylo, Response
from zylo.mailer import Mailer
mailer = Mailer()
app = Zylo(__name__)
# Mailer config
mailer.config['SMTP'] = 'SMTP'
mailer.config['SMTP_PORT'] = 'SMTP_PORT'
mailer.config['SENDER_EMAIL'] = 'SENDER_EMAIL'
mailer.config['DEFAULT_SENDER'] = 'DEFAULT_SENDER'
mailer.config['SENDER_PASSWORD'] = 'SENDER_PASSWORD'
mailer.config['SSL'] = True
mailer.config['SSL_SECURITY'] = True
@app.route('/')
def home(request):
email = "[email protected]"
subject = "Welcome to ZYLO"
body = "A user-friendly python web framework made with love"
mail = mailer.send_email(email, subject, body)
if mail:
return Response(f"Mail sent successfully to {email}")
return Response("Something went wrong while sending email")
if __name__ == "__main__":
app.run()
```
## Chiper
```python
# Input sanitization
from zylo.chiper import sanitize_input
name = "'name1'"
san_name = sanitize_input(name)
print(san_name)  # output --> name1
# Generate ID
from zylo.chiper import generate_id
print(generate_id(11))  # length defined 11, output --> y-909716817
# Secure password validation
from zylo.chiper import is_secure_password
password = "123"
sec_password = "secpassword@0000"
print(is_secure_password(password))  # output --> False
print(is_secure_password(sec_password))  # output --> True
# Email validation
from zylo.chiper import validate_email
print(validate_email("demo@1"))  # output -->
print(validate_email("[email protected]"))  # output --> True
# Hashing and verifying passwords
from zylo.chiper import hash_password, verify_password
pswd = "mypassword"
hashed_password = hash_password(pswd)
print(hashed_password)  # output --> $zylo.chiper@9e8b057a1f8e43c9e0d8d20769c8f516b5ba419998b5ed6fb877452db4c46049b2bd9560da6fef2c3afb047485cebfbab5cad85787b2be1de820ca5ee42ba3bcfb37c6395dcf4e27abf6a02d1926197a
print(verify_password(pswd, hashed_password))  # output --> True
```
|
zylo
|
/zylo-2.0.3.tar.gz/zylo-2.0.3/README.md
|
README.md
|
from setuptools import setup, find_packages
with open('README.md', 'r', encoding='utf-8') as f:
long_description = f.read()
setup(
name='zylo',
version='2.0.3',
description='A lightweight web framework made with love',
long_description=long_description,
long_description_content_type='text/markdown',
author='Pawan kumar',
author_email='[email protected]',
url='https://github.com/E491K7/zylo',
packages=find_packages(),
install_requires=['werkzeug', 'jinja2', 'cryptography', 'zylo-admin', 'itsdangerous'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
|
zylo
|
/zylo-2.0.3.tar.gz/zylo-2.0.3/setup.py
|
setup.py
|
# zylogger
A logger that can be used directly without any config.
## build
```
python setup.py sdist bdist_wheel
```
## upload to test_pypi
```
python -m twine upload --verbose --repository testpypi dist/*
```
## upload to pypi
```
python -m twine upload dist/*
```
## usage
### install
```
pip install zylogger
```
### usage
```
import logging
import zylogger
zylogger.init()
logging.info('hello zylogger')
```
|
zylogger
|
/zylogger-0.0.1.tar.gz/zylogger-0.0.1/README.md
|
README.md
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='zylogger',
version='0.0.1',
author='robin zhang',
author_email='[email protected]',
description='A logger can be used directly',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/nicolerobin/zylogger',
packages=setuptools.find_packages(),
classifiers=[
],
python_requires='>=2.7',
)
|
zylogger
|
/zylogger-0.0.1.tar.gz/zylogger-0.0.1/setup.py
|
setup.py
|
from distutils.core import setup
setup(
name='zylxd',
version='1.0.0',
py_modules=['zylxd'],
)
|
zylxd
|
/zylxd-1.0.0.tar.gz/zylxd-1.0.0/setup.py
|
setup.py
|
# Zymbit Connect #
Utility to connect to the Zymbit Cloud
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/README.md
|
README.md
|
#!/usr/bin/env python
import os
from distutils.core import setup
SCRIPT_DIR = os.path.dirname(__file__)
if not SCRIPT_DIR:
SCRIPT_DIR = os.getcwd()
# put together list of requirements to install
install_requires = []
with open(os.path.join(SCRIPT_DIR, 'requirements.txt')) as fh:
for line in fh.readlines():
if line.startswith('-'):
continue
install_requires.append(line.strip())
data_files = []
setup(name='zymbit-connect',
version='2.0.1rc1',
description='Zymbit Connect',
author='Roberto Aguilar',
author_email='[email protected]',
package_dir={'': 'src'},
packages=[
'zymbit',
'zymbit.connect',
'zymbit.upstream',
'zymbit.util',
],
scripts=[
'src/scripts/connect',
'src/scripts/write_auth_token',
],
data_files=data_files,
long_description=open('README.md').read(),
url='http://zymbit.com/',
license='LICENSE',
install_requires=install_requires,
)
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/setup.py
|
setup.py
|
import os
from conversion import convert_bool
API_URL = os.environ.get('API_URL', 'https://api.zymbit.com/zymbit/v2')
AUTH_ROOT = '/etc/zymbit/auth'
AUTH_TOKEN = os.environ.get('AUTH_TOKEN') or None
BOOTSTRAP_KEY = os.environ.get('BOOTSTRAP_KEY')
CHECK_HOSTNAME = convert_bool(os.environ.get('CHECK_HOSTNAME', 'true'))
CLIENT_ID_VERSION = os.environ.get('CLIENT_ID_VERSION')
PUBSUB_PING_INTERVAL = int(os.environ.get('PUBSUB_PING_INTERVAL', 300))
REGISTER_ENDPOINT = '/projects/register'
WEBSOCKET_ENDPOINT = '/websocket_url'
WEBSOCKET_SEND_CLIENT_INFO = convert_bool(os.environ.get('WEBSOCKET_SEND_CLIENT_INFO', 'true'))
ZYMBIT_RUN_PATH = os.environ.get('ZYMBIT_RUN_PATH', '/run/zymbit')
ZYMBIT_HOST_INFO_PATH = os.path.join(ZYMBIT_RUN_PATH, 'host_info')
CONNECT_PORT_9628_TCP_ADDR = os.environ.get('CONNECT_PORT_9628_TCP_ADDR', '0.0.0.0')
CONNECT_PORT_9628_TCP_PORT = int(os.environ.get('CONNECT_PORT_9628_TCP_PORT', 9628))
CONSOLE_MESSENGER_HOST = os.environ.get('CONSOLE_MESSENGER_HOST', CONNECT_PORT_9628_TCP_ADDR)
CONSOLE_MESSENGER_PORT = int(os.environ.get('CONSOLE_MESSENGER_PORT', CONNECT_PORT_9628_TCP_PORT))
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/settings.py
|
settings.py
|
class Disconnect(Exception):
pass
class NotConnected(Exception):
pass
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/exceptions.py
|
exceptions.py
|
"""
Connect to the pubsub engine
"""
import datetime
import logging
import math
from _ssl import SSLWantReadError
from zymbit import settings
from zymbit.exceptions import NotConnected, Disconnect
from zymbit.upstream import registration
from zymbit.upstream.ws import get_websocket
from zymbit.util.client import get_auth_token
from zymbit.util.envelope import get_envelope
from zymbit.util.statemachine import StateMachine, NO_SLEEP
from zymbit.util.time import now
NO_DELTA = datetime.timedelta(seconds=0)
class PubSubStateMachine(StateMachine):
"""
    State machine to keep connected to the pubsub engine.
    This state machine handles bootstrapping a system when it is not yet
    registered and, once registered, establishes a persistent connection
    to the pubsub engine.
"""
def __init__(self, raise_exceptions=True, message_handler=None, subscriptions=None):
super(PubSubStateMachine, self).__init__(raise_exceptions=raise_exceptions)
self.message_handler = message_handler
self.registration_retries = 0
self.next_registration_attempt = None
self.registration_retry_max_sleep = 3600 # sleep up to an hour
self.subscriptions = subscriptions or []
self.websocket = None
# set last_read to instantiation time so that ping pong is played after
# the connection has been established
self.last_read = now()
self.last_ping = self.last_read
# play ping pong after a minute of silence
self.ping_interval = datetime.timedelta(seconds=settings.PUBSUB_PING_INTERVAL)
@property
def logger(self):
return logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))
def send(self, envelope):
if self.websocket is None:
raise NotConnected()
self.websocket.send(envelope)
##
# State machine methods
##
def init(self):
if self.message_handler:
self.message_handler(get_envelope('proxy', dict(routing_key='proxy.init')))
def check_last_read(self):
_now = now()
next_ping_check = self.last_read + self.ping_interval
if (next_ping_check - _now) < NO_DELTA:
            # only send one ping per ping_interval
next_ping = self.last_ping + self.ping_interval
if (next_ping - _now) < NO_DELTA:
self.logger.debug('sending ping')
self.websocket.send(get_envelope('ping', {}))
self.last_ping = _now
# check if a re-connect is in order
disconnect_time = self.last_read + (self.ping_interval * 3)
if (disconnect_time - _now) < NO_DELTA:
raise Disconnect()
def connect(self):
"""
Connects to the pubsub engine
"""
self.websocket = get_websocket()
# set last_read here so that we are not immediately disconnected by check_last_read()
self.last_read = now()
def disconnect(self):
"""
Disconnects from the pubsub engine
"""
if self.message_handler:
self.message_handler(get_envelope('connection', dict(routing_key='connection.disconnected')))
if self.websocket is None:
return
ws, self.websocket = self.websocket, None
ws.close()
def handle_message(self, buf):
if self.message_handler:
self.message_handler(buf)
else:
self.logger.info(repr(buf))
def has_auth_token(self):
"""
Checks whether this device has an auth token
"""
return get_auth_token() not in ('', None)
def listen(self):
"""
Listens for upstream messages and sends up local messages
"""
try:
buf = self.websocket.recv()
except SSLWantReadError: # seems to be raised when there is no data
buf = None
if buf:
self.last_read = now()
self.handle_message(buf)
return NO_SLEEP
self.check_last_read()
def register(self):
"""
Registers the system with zymbit services
"""
# check to see if a registration attempt should be made
if self.next_registration_attempt:
_now = now()
# when there is a positive delta between now and the next registration attempt
# simply return
if (self.next_registration_attempt - _now) > NO_DELTA:
return False
self.next_registration_attempt = None
registration.register()
self.registration_retries = 0
def registration_error(self):
self.logger.exception(self.last_exception)
self.registration_retries += 1
sleep_time = min(math.pow(2, self.registration_retries), self.registration_retry_max_sleep)
self.next_registration_attempt = now() + datetime.timedelta(seconds=sleep_time)
self.logger.error('Registration error; next retry at {}'.format(self.next_registration_attempt))
def subscribe(self):
"""
Subscribes to desired streams
"""
for subscription in self.subscriptions:
if isinstance(subscription, dict):
params = subscription
else:
params = dict(routing_key=subscription)
envelope = get_envelope('subscribe', params=params)
self.websocket.send(envelope)
transitions = {
StateMachine.start: {
True: init,
},
init: {
None: has_auth_token,
},
has_auth_token: {
False: register,
True: connect,
},
register: {
None: connect,
Exception: registration_error,
},
registration_error: {
None: StateMachine.start,
},
connect: {
None: subscribe,
Exception: disconnect,
},
disconnect: {
None: StateMachine.start,
Exception: StateMachine.start,
},
subscribe: {
None: listen,
Exception: disconnect,
},
listen: {
Exception: disconnect,
},
}
if __name__ == '__main__':
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
PubSubStateMachine(raise_exceptions=False).run()
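    # Hedged variant (constructor arguments as defined in __init__ above): a message handler
    # and initial subscriptions could be supplied, e.g.
    # PubSubStateMachine(raise_exceptions=False,
    #                    message_handler=lambda buf: logging.info(repr(buf)),
    #                    subscriptions=['my.routing.key']).run()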
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/connect/pubsub.py
|
pubsub.py
|
"""
Client class for local Connect server
"""
import json
import logging
import random
import select
import socket
from zymbit import settings
from zymbit.exceptions import NotConnected
from zymbit.util.statemachine import StateMachine
from zymbit.util.time import interval
class LocalClient(StateMachine):
buffer_size = 4096
subscriptions = []
def __init__(self, raise_exceptions=False, loop_sleep_time=None, subscriptions=None):
super(LocalClient, self).__init__(raise_exceptions=raise_exceptions)
self.socket = None
self.loop_sleep_time = loop_sleep_time or self.loop_sleep_time
self.subscriptions = subscriptions or self.subscriptions
@property
def logger(self):
return logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))
def connect(self):
address = self.get_address()
self.logger.info('address={}'.format(address))
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect(address)
self.socket.setblocking(0)
self.logger.debug('connected to {}'.format(address))
def disconnect(self):
if self.socket is not None:
self.socket.close()
def get_address(self):
return (settings.CONSOLE_MESSENGER_HOST, settings.CONSOLE_MESSENGER_PORT)
def handle_buf(self, buf):
buf_utf8 = buf.decode('utf8')
try:
envelope = json.loads(buf_utf8)
if envelope.get('params', {}).get('routing_key') == 'connection.connected':
self.subscribe()
except ValueError:
pass
self.handle_message(buf_utf8)
def handle_message(self, buf):
self.logger.info(buf)
def listen(self):
r, _, _ = select.select([self.socket], [], [], 0.01)
if self.socket in r:
buf = self.socket.recv(self.buffer_size)
self.handle_buf(buf)
self.publish()
def publish(self):
pass
def send(self, buf):
if self.socket is None:
raise NotConnected()
if not buf.endswith('\n'):
buf = '{}\n'.format(buf)
        # sockets expect bytes; encode the string buffer before sending
        self.socket.send(buf.encode('utf8'))
def subscribe(self):
for subscription in self.subscriptions:
self.send('action=subscribe,routing_key={}'.format(subscription))
transitions = {
StateMachine.start: {
True: connect,
},
connect: {
None: listen,
Exception: disconnect,
},
disconnect: {
None: StateMachine.start,
Exception: StateMachine.start,
},
listen: {
socket.error: disconnect,
Exception: disconnect,
},
}
class ExampleClient(LocalClient):
subscriptions = [
'#',
]
@interval(30.0)
def publish(self):
value = int(5 * random.random())
data = 'key=foo,value={}'.format(value)
self.send(data)
if __name__ == '__main__':
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
client = ExampleClient()
client.run()
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/connect/local.py
|
local.py
|
import json
import logging
import time
from zymbit.connect.pubsub import PubSubStateMachine, NotConnected
from zymbit.connect.server import ConsoleMessengerServer
from zymbit.util.buffer import BufferIterator
from zymbit.util.envelope import parse_buf
from zymbit.util.statemachine import NO_SLEEP
from zymbit.util.time import get_sleep_time, now
class Proxy(object):
def __init__(self):
self.pubsub = PubSubStateMachine(raise_exceptions=False, message_handler=self.handle_pubsub_message)
self.messenger_server = ConsoleMessengerServer(self.handle_console_message)
        # when set, this message is sent to messenger server clients when they connect
self.initial_message = None
self._run = True
self.console_buffer = BufferIterator()
@property
def logger(self):
return logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))
def handle_console_message(self, client, buf):
self.console_buffer.write(buf)
for item in self.console_buffer:
if not item:
continue
self.handle_buf(client, item)
def handle_buf(self, client, buf):
try:
envelope = parse_buf(buf)
except:
self.logger.warning('unable to parse buf={!r}'.format(buf))
return
self.handle_console_connection(client, envelope)
# connection notifications are not sent upstream
data = json.loads(envelope)
if data.get('action') == 'connection':
return
try:
self.pubsub.send(envelope)
except NotConnected as exc:
self.logger.exception(exc)
self.logger.error('unable to send pubsub buf={!r}, envelope={}'.format(buf, envelope))
def handle_console_connection(self, client, envelope):
data = json.loads(envelope)
# nothing to do for disconnects
if data['params'].get('routing_key') != 'connection.connected':
return
# nothing to do when there is no initial message
if self.initial_message is None:
return
self.messenger_server.send(client, self.initial_message)
return True
def handle_pubsub_message(self, buf):
if not buf.endswith('\n'):
buf = '{}\n'.format(buf)
buffer_iterator = BufferIterator(buf=buf)
for t_buf in buffer_iterator:
data = json.loads(t_buf)
if data.get('params', {}).get('routing_key') == 'connection.connected':
self.initial_message = t_buf
elif data.get('params', {}).get('routing_key') == 'connection.disconnected':
self.initial_message = None
try:
self.messenger_server.broadcast(buf)
except Exception as exc:
self.logger.exception(exc)
self.logger.error('unable to send messenger_server buf={!r}'.format(buf))
def run(self):
while self._run:
start = now()
pubsub_result = self.pubsub.loop()
messenger_result = self.messenger_server.loop(select_timeout=0.01)
if NO_SLEEP in (pubsub_result, messenger_result):
continue
time.sleep(get_sleep_time(1.0, start))
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/connect/proxy.py
|
proxy.py
|
from .proxy import Proxy
from .local import LocalClient
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/connect/__init__.py
|
__init__.py
|
import logging
import socket
from select import select
from zymbit import settings
from zymbit.util.envelope import get_envelope
BUFSIZE = 4096
# backwards compat with python2
try:
BlockingIOError
except NameError:
BlockingIOError = None.__class__
try:
ConnectionResetError
except NameError:
ConnectionResetError = None.__class__
class BaseServer(object):
def __init__(self, host, port, message_handler=None):
self.addr = (host, port)
self._tcp_sock = None
self._udp_sock = None
self.connections = {}
self.message_handler = message_handler
self._run = True
@property
def logger(self):
logger_name = '{}.{}'.format(__name__, self.__class__.__name__)
return logging.getLogger(logger_name)
@property
def tcp_sock(self):
if self._tcp_sock:
return self._tcp_sock
try:
self._tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._tcp_sock.setblocking(0)
self._tcp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._tcp_sock.bind(self.addr)
self._tcp_sock.listen(128) # max 128 clients
except socket.error:
self.logger.warning('Unable to bind TCP socket at addr={}'.format(self.addr))
else:
self.logger.info("Listening on TCP addr={}".format(self.addr))
return self._tcp_sock
@property
def udp_sock(self):
if self._udp_sock:
return self._udp_sock
try:
self._udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._udp_sock.setblocking(0)
self._udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._udp_sock.bind(self.addr)
except socket.error:
self.logger.warning('Unable to bind UDP socket at addr={}'.format(self.addr))
else:
self.logger.info("Listening on UDP addr={}".format(self.addr))
return self._udp_sock
def broadcast(self, message):
for connection in self.connections:
self.send(connection, message)
def close_tcp(self):
self._tcp_sock = None
def close_udp(self):
self._udp_sock = None
def connect(self, info):
message = get_envelope('connection', dict(routing_key='connection.connected'))
conn, addr = info
self.logger.info('%s, %s %s' % (conn, addr, message))
self.connections[conn] = addr
self.handle_message(conn, message)
def disconnect(self, connection):
message = get_envelope('connection', dict(routing_key='connection.disconnected'))
addr = self.connections.pop(connection)
self.logger.info('%s, %s %s' % (connection, addr, message))
self.handle_message(connection, message)
def fileno(self):
return self.tcp_sock.fileno()
def handle_message(self, client, buf):
if self.message_handler:
self.message_handler(client, buf)
else:
self.logger.info('client={}, buf={}'.format(client, buf))
def loop(self, select_timeout=1.0):
handled = None
# check UDP
try:
buf, client = self.udp_sock.recvfrom(1024)
except socket.error as exc:
if isinstance(exc, (BlockingIOError,)):
error_number = exc.errno
else:
error_number = exc[0]
# (11, 'Resource temporarily unavailable')
# [Errno 35] Resource temporarily unavailable
if error_number not in (11, 35):
self.logger.exception(exc)
self.logger.warning('got socket error_number={}'.format(error_number))
self.close_udp()
else:
if buf:
self.handle_message(client, buf)
handled = True
try:
self.connect(self.tcp_sock.accept())
except socket.error as exc:
if isinstance(exc, (BlockingIOError,)):
error_number = exc.errno
else:
error_number = exc[0]
# (11, 'Resource temporarily unavailable')
# [Errno 35] Resource temporarily unavailable
if error_number not in (11, 35):
self.logger.exception(exc)
self.logger.warning('got socket error_number={}'.format(error_number))
self.close_tcp()
ready, _, _ = select(self.connections, [], [], select_timeout)
for client in ready:
try:
buf = client.recv(BUFSIZE)
except socket.error as exc:
if isinstance(exc, (ConnectionResetError,)):
error_number = exc.errno
else:
error_number = exc[0]
# [Errno 54] Connection reset by peer
# [Errno 104] Connection reset by peer -- raspbian
if error_number not in (54, 104):
self.logger.exception(exc)
self.logger.warning('got socket error_number={}'.format(error_number))
self.disconnect(client)
continue
else:
if not len(buf):
self.disconnect(client)
continue
self.handle_message(client, buf)
handled = True
return handled
def quit(self):
self.tcp_sock.close()
self.udp_sock.close()
# prevent getting exception where dictionary changes while looping
connections = list(self.connections.keys())
for connection in connections:
self.disconnect(connection)
def run(self):
while self._run:
self.loop()
def send(self, connection, buf):
try:
if not isinstance(buf, (bytes,)):
buf = buf.encode('utf8')
connection.send(buf)
except Exception as exc:
self.logger.exception(exc)
self.logger.error('error sending connection={}, buf={}'.format(connection, buf))
class ConsoleMessengerServer(BaseServer):
def __init__(self, message_handler):
super(ConsoleMessengerServer, self).__init__(
settings.CONSOLE_MESSENGER_HOST,
settings.CONSOLE_MESSENGER_PORT,
message_handler=message_handler
)
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/connect/server.py
|
server.py
|
import datetime
import dateutil.parser
import functools
import logging
import pytz
# it's impossible that "now" is less than this datetime
# we know we are out of sync with real time if we ever
# get a time value less than this
MIN_DT = datetime.datetime(2014, 7, 25, 17, 00, 00) # Zymbit est date, UTC
utc = pytz.utc
EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=utc)
LONG_TIME_AGO = utc.localize(datetime.datetime(1, 1, 1)) # a really long time ago
# keys follow the same convention as InfluxDB
SECOND_PRECISIONS = {
's': 1,
'ms': 1000,
'u': 1e6,
'n': 1e9,
}
def now():
return utc.localize(datetime.datetime.utcnow())
def timestamp(dt=None):
if dt is None:
dt = now()
return dt.isoformat('T')
def get_sleep_time(seconds, start):
"""
Wait at most the given number of seconds from the initial time given
:param seconds: float - number of seconds to wait
:param start: datetime - the start time
:return: float - time to wait
"""
_now = now()
delta = _now - start
diff = delta.seconds + (1.0 * delta.microseconds / 1e6)
wait = max(0, seconds - diff)
# print 'start={}, _now={}, delta={}, diff={}, wait={}'.format(start, _now, delta, diff, wait)
return wait
def interval(interval_delay, default_return=None):
"""
Call a function every given interval
:param interval_delay: float - number of seconds
:param default_return: when the interval has not passed, what to return (default: None)
"""
interval_delta = datetime.timedelta(seconds=interval_delay)
def wrapper(fn):
@functools.wraps(fn)
def interval_handler(*args, **kwargs):
t0 = now()
last_call = getattr(fn, 'last_call', LONG_TIME_AGO)
if (t0 - last_call) > interval_delta:
fn.last_call = t0
return fn(*args, **kwargs)
else:
return default_return
return interval_handler
return wrapper
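# illustrative use: a method decorated with @interval(30.0) runs its body at most
# once every 30 seconds; calls made sooner return default_return (None by default)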
class MillisDatetime(object):
def __init__(self, millis):
self.last_millis = None
self.initial = None
self.set_initial(millis)
@property
def logger(self):
return logging.getLogger(__name__)
def get_time(self, millis):
if millis < self.last_millis:
self.logger.info(
'time rolled over, last_millis={}, millis={}'.format(
self.last_millis, millis))
self.set_initial(millis)
delta = datetime.timedelta(milliseconds=millis)
return self.initial + delta
def set_initial(self, millis):
delta = datetime.timedelta(milliseconds=millis)
self.initial = now() - delta
self.last_millis = millis
def get_seconds(iso_timestamp, precision='s'):
"""
Returns the number of seconds since EPOCH for the given ISO 8601 timestamp
"""
dt = dateutil.parser.parse(iso_timestamp)
return get_seconds_dt(dt, precision=precision)
def get_seconds_dt(dt=None, precision='s'):
"""
Returns the number of seconds since EPOCH for the given datetime object
"""
dt = dt or now()
return (dt - EPOCH).total_seconds() * SECOND_PRECISIONS[precision]
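# e.g. get_seconds_dt(precision='ms') returns milliseconds since the Unix epoch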
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/util/time.py
|
time.py
|
import hashlib
import json
import os
import re
from zymbit.settings import AUTH_ROOT, AUTH_TOKEN, CLIENT_ID_VERSION, ZYMBIT_HOST_INFO_PATH
MAC_RE = re.compile(r'.*HWaddr (?P<hwaddr>[^ ]+)')
SDCARD_ATTRS_RE = re.compile(r'ATTRS{(?P<key>[^}]+)}=="(?P<value>[^"]+)"')
def get_auth_path():
client_id = get_client_id()
return os.path.join(AUTH_ROOT, client_id)
def get_auth_token():
auth_token = AUTH_TOKEN
if auth_token is not None:
return auth_token
auth_path = get_auth_path()
if os.path.exists(auth_path):
with open(auth_path, 'r') as fh:
auth_token = fh.read().strip()
return auth_token
def get_cpu_info():
"""
Returns CPU identification information
:return:
"""
info = {
'cpu_hardware': None,
'cpu_revision': None,
'cpu_serial': None,
}
with open(os.path.join(ZYMBIT_HOST_INFO_PATH, 'cpu')) as fh:
content = fh.read()
for line in content.splitlines():
line = line.strip()
if line == '':
continue
line_split = line.split(':', 1)
key = 'cpu_{}'.format(line_split[0].strip().replace(' ', '_').lower())
if key not in list(info.keys()):
continue
info[key] = line_split[1].strip()
return info
def get_eth0_info():
"""
Returns eth0 identification information
:return:
"""
info = {
'eth0_hwaddr': None
}
with open(os.path.join(ZYMBIT_HOST_INFO_PATH, 'eth0')) as fh:
content = fh.read()
for line in content.splitlines():
matches = MAC_RE.match(line)
if not matches:
continue
info['eth0_hwaddr'] = matches.group('hwaddr')
return info
def get_sdcard_info():
"""
Returns sdcard identification information
:return dict: sdcard information
"""
info = {
'sdcard_cid': None,
}
with open(os.path.join(ZYMBIT_HOST_INFO_PATH, 'sdcard')) as fh:
content = fh.read()
for line in content.splitlines():
matches = SDCARD_ATTRS_RE.match(line.strip())
if not matches:
continue
key = 'sdcard_{}'.format(matches.group('key'))
if key not in list(info.keys()):
continue
info[key] = matches.group('value')
return info
def get_client_id():
if CLIENT_ID_VERSION is None:
return get_client_id_latest()
return globals()['get_client_id_v{}'.format(CLIENT_ID_VERSION)]()
def get_client_id_v0():
info = get_eth0_info()
return info['eth0_hwaddr']
def get_client_id_v1():
info = get_client_info()
# the client_id is the hash of a JSON representation of an array of (key, value) 2-tuples
data = json.dumps(sorted(list(info.items()), key=lambda a: a[0])).encode('utf8')
sha = hashlib.sha1(data)
return sha.hexdigest()
# alias the default get_client_id to v1
get_client_id_latest = get_client_id_v1
def get_client_info():
info = {}
info.update(get_cpu_info())
info.update(get_eth0_info())
info.update(get_sdcard_info())
return info
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/util/client.py
|
client.py
|
import json
import uuid
from zymbit.util.time import timestamp
# different ways a data stream buffer is parsed in order to ship it upstream
# NOTE: first one to send back an envelope wins, so order matters!
ENVELOPE_PARSERS = []
def get_parsed_envelope(params):
action = 'data'
if isinstance(params, dict):
_action = params.pop('action', None)
if _action is not None:
action = _action
# this looks like an envelope already, jsonify and return
if 'params' in params:
params['action'] = action
return jsonify(params)
if action == 'data' and 'key' not in params:
params['key'] = 'sensor'
return get_envelope(action, params)
def parse_json_envelope(buf):
try:
params = json.loads(buf)
except ValueError:
return None
else:
if isinstance(params, int):
params = {
'value': params,
}
return params
ENVELOPE_PARSERS.append(parse_json_envelope)
def parse_comma_equals(buf):
"""
Parse a string of comma-delimited strings, that are each equal-delimited key/value pairs
:param buf: string - buffer to be parsed
    :return: None - when no equal sign is found, dict of parsed key/value pairs - when data is parsed
"""
if '=' not in buf:
return None
parsed = {}
unparsed = []
# split at commas
for token in buf.split(','):
# get rid of outer spaces
token = token.strip()
if '=' not in token:
unparsed.append(token)
continue
key, value = token.split('=')
key = key.strip()
if ' ' in key:
_unparsed, key = key.rsplit(' ', 1)
unparsed.append(_unparsed)
for conversion in (int, float):
try:
value = conversion(value)
except ValueError:
pass
else:
break
parsed[key] = value
if unparsed:
parsed['zb.unparsed'] = json.dumps(unparsed)
parsed['zb.unparsed.line'] = buf
return parsed
ENVELOPE_PARSERS.append(parse_comma_equals)
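# illustrative parse: parse_comma_equals('key=temperature,value=21.5')
# returns {'key': 'temperature', 'value': 21.5}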
# NOTE: this is the "if all else fails" parser; should be appended last!
def parse_log_envelope(buf):
params = {
'action': 'log',
'line': buf,
}
return params
ENVELOPE_PARSERS.append(parse_log_envelope)
def get_envelope(action, params, request_message_id=None, client_id=None, as_json=True):
data = {
'message_id': str(uuid.uuid4()),
'timestamp': timestamp(),
'action': action,
'params': params,
}
if request_message_id:
data.update({
'request_message_id': request_message_id,
})
if client_id:
data.update({
'client_id': client_id,
})
if as_json:
return jsonify(data)
else:
return data
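# e.g. get_envelope('data', {'key': 'sensor', 'value': 1}) returns a JSON string
# (CRLF terminated) carrying a fresh message_id, an ISO 8601 timestamp, the
# action and the given params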
def jsonify(data):
return '{}\r\n'.format(json.dumps(data))
def parse_buf(buf):
"""
parse the given buffer into an envelope
:param buf: string, may be in a parseable format
:return: envelope
"""
for parser in ENVELOPE_PARSERS:
params = parser(buf)
if params:
return get_parsed_envelope(params)
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/util/envelope.py
|
envelope.py
|
from __future__ import absolute_import
import datetime
import inspect
import logging
import time
from .time import LONG_TIME_AGO, now, get_sleep_time
NO_SLEEP = '-- NO SLEEP --'
class StateMachine(object):
transitions = {}
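    # maps a state method to {result or exception class: next state method};
    # see the concrete transition tables in pubsub.py and local.py for examples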
def __init__(self, raise_exceptions=True):
self._run = True
self._state = self.start
self.raise_exceptions = raise_exceptions
self.loop_sleep_time = 1.0
self.last_exception = None
self._setup_transitions()
self.logger.debug('transitions={}'.format(self.transitions))
self.check_start = False
self.last_start = LONG_TIME_AGO
self.next_start = LONG_TIME_AGO
self.start_fail_count = 0
self.start_success_delta = datetime.timedelta(seconds=10)
def _setup_transitions(self):
# convert the transition functions into bound methods
_transitions = {}
for k, v in list(self.transitions.items()):
bound_method = getattr(self, k.__name__)
t_transitions = dict([(kk, getattr(self, vv.__name__)) for kk, vv in list(v.items())])
_transitions[bound_method] = t_transitions
self.transitions = _transitions
@property
def logger(self):
return logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))
def loop(self):
result = None
try:
result = self._state()
except Exception as exc: # global exception catcher here to use for state transitions
self.last_exception = exc
result = exc
if not inspect.isclass(exc):
result = exc.__class__
if self.raise_exceptions:
raise
else:
self.logger.exception(exc)
else:
self.last_exception = None
finally:
transitions = self.transitions.get(self._state, {})
for _result, _state in list(transitions.items()):
if _result == result or inspect.isclass(_result) and inspect.isclass(result) and issubclass(result, _result):
self._state = _state
return result
def quit(self):
self._run = False
def run(self):
while self._run:
start = now()
current_state = self._state
result = self.loop()
# only sleep when there is no state transition
if current_state == self._state and result != NO_SLEEP:
sleep_time = get_sleep_time(self.loop_sleep_time, start)
# self.logger.debug('loop_sleep_time={}, sleep_time={}'.format(self.loop_sleep_time, sleep_time))
time.sleep(sleep_time)
def start(self):
_now = now()
if self.check_start:
self.check_start = False
if _now > self.last_start + self.start_success_delta:
# the current time is greater than the last start time + the
# success delta; reset the fail count
self.start_fail_count = 0
else:
# otherwise, increment the fail count and calculate an exponential
# backoff
self.start_fail_count += 1
seconds = min(300, 2 ** self.start_fail_count)
backoff = datetime.timedelta(seconds=seconds)
self.next_start = _now + backoff
self.logger.info('next start at {}'.format(self.next_start))
if _now < self.next_start:
# the current time is before the next start, hold off
return False
self.check_start = True
self.last_start = _now
return True
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/util/statemachine.py
|
statemachine.py
|
import os
from email.parser import Parser
PKG_INFO_FILENAME = 'PKG-INFO'
SOURCES_FILENAME = 'SOURCES.txt'
def get_egg_info(path):
for item in os.listdir(path):
if item.endswith('.egg-info'):
yield os.path.join(path, item)
def get_sources(path):
with open(os.path.join(path, SOURCES_FILENAME), 'r') as fh:
return fh.read().strip().splitlines()
def find_package():
sep = os.path.sep
source_path = sep.join(__file__.split(sep)[-3:]).replace('.pyc', '.py')
dirname = os.path.dirname(__file__)
if dirname == '':
dirname = os.getcwd()
dirname = os.path.abspath(dirname)
while dirname != '/':
for item in get_egg_info(dirname):
if source_path in get_sources(item):
return item
dirname = os.path.dirname(dirname)
def get_version():
package = find_package()
if package is None:
return 'dev'
pkg_info_path = os.path.join(package, PKG_INFO_FILENAME)
with open(pkg_info_path, 'r') as fh:
parsed = Parser().parse(fh)
return parsed.get('Version')
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/util/version.py
|
version.py
|
import os
import sys
from collections import OrderedDict
from sh import uname
def find_cpuinfo(markers):
# no support for non-linux systems
if os.path.exists('/proc/cpuinfo'):
content = open('/proc/cpuinfo', 'r').read()
for marker, return_value in list(markers.items()):
if marker in content:
return return_value
def get_device_meta():
return {
'distro': get_distro(),
'model': get_model(),
'system': get_system(),
}
def get_distro():
"""
Returns the device distribution
:return string: device distribution
"""
distro = None
if os.path.exists('/etc/linino'):
return 'linino'
result = uname('-a')
if 'Darwin Kernel Version' in result:
return 'osx'
issue_path = '/etc/issue'
if os.path.exists(issue_path):
with open(issue_path, 'r') as fh:
content = fh.read().lower()
for _distro in ('raspbian', 'arch'):
if _distro in content:
distro = _distro
break
return distro
def get_model():
systems = OrderedDict((
('Arduino Yun', 'yun'),
('BCM2708', '1'),
('BCM2709', '2'),
))
cpuinfo = find_cpuinfo(systems)
if cpuinfo:
return cpuinfo
if sys.platform == 'darwin':
return sys.platform
def get_system():
systems = OrderedDict((
('Arduino Yun', 'arduino'),
('BCM270', 'raspberrypi'), # note this will match BCM2708 (rpi) and BCM2709 (rpi2)
))
cpuinfo = find_cpuinfo(systems)
if cpuinfo:
return cpuinfo
if sys.platform == 'darwin':
return sys.platform
if 'linux' in sys.platform:
return 'linux'
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/util/__init__.py
|
__init__.py
|
import collections.abc
class BufferIterator(collections.abc.Iterable):
def __init__(self, buf=None, split_at='\n'):
self.buf = ''
if buf:
self.write(buf)
self.split_at = split_at
def __iter__(self):
return self
def __next__(self):
try:
idx = self.buf.index(self.split_at)
except ValueError:
raise StopIteration()
buf = self.buf[:idx+1]
self.buf = self.buf[idx+1:]
return buf
def write(self, buf):
try:
buf = buf.decode('utf8')
except AttributeError:
pass
self.buf += buf
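# illustrative behavior: BufferIterator(buf='a\nb\nc') yields 'a\n' then 'b\n';
# the trailing 'c' stays buffered until more data containing a newline is written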
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/util/buffer.py
|
buffer.py
|
import os
from zymbit import settings
from zymbit.upstream.api import ZymbitApi
from zymbit.util import get_device_meta
from zymbit.util.client import get_auth_path, get_client_info
def register():
"""
Makes the bootstrap request upstream
"""
post_data = {
'bootstrap_key': settings.BOOTSTRAP_KEY,
}
post_data.update(get_device_meta())
post_data.update(get_client_info())
api = ZymbitApi()
response = api.post(settings.REGISTER_ENDPOINT, data=post_data)
response.raise_for_status()
write_auth(response.json())
def write_auth(data):
auth_token = data['auth_token']
write_auth_token(auth_token)
def write_auth_token(auth_token):
auth_path = get_auth_path()
auth_root = os.path.dirname(auth_path)
if not os.path.exists(auth_root):
os.makedirs(auth_root)
with open(auth_path, 'w') as fh:
fh.write(auth_token)
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/upstream/registration.py
|
registration.py
|
import websocket
from zymbit.settings import CHECK_HOSTNAME, WEBSOCKET_ENDPOINT, WEBSOCKET_SEND_CLIENT_INFO
from zymbit.upstream.api import ZymbitApi
from zymbit.util.client import get_client_info
def get_websocket():
sslopt = {"check_hostname": CHECK_HOSTNAME}
url = get_websocket_url()
ws = websocket.create_connection(url, sslopt=sslopt)
ws.settimeout(0)
return ws
def get_websocket_url():
params = {}
if WEBSOCKET_SEND_CLIENT_INFO:
params.update(get_client_info())
api = ZymbitApi()
response = api.get(WEBSOCKET_ENDPOINT, params=params)
return response.json()
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/upstream/ws.py
|
ws.py
|
import functools
import logging
import requests
from zymbit import settings
from zymbit.util.client import get_auth_token
from zymbit.util.version import get_version
class ZymbitApi(object):
ConnectionError = requests.ConnectionError
HTTPError = requests.HTTPError
def __init__(self, auth_token=None, api_url=None):
self.logger = logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))
self._auth_token = auth_token
self.api_url = api_url or settings.API_URL
self.session = requests.session()
self.response = None
def __getattribute__(self, item):
if item in ('delete', 'get', 'patch', 'post', 'put'):
request = super(ZymbitApi, self).__getattribute__('request')
return functools.partial(request, item)
return super(ZymbitApi, self).__getattribute__(item)
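    # e.g. api.get('/endpoint', params={...}) dispatches to
    # api.request('get', '/endpoint', params={...}) via the partial above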
@property
def auth_token(self):
if self._auth_token:
return self._auth_token
self._auth_token = get_auth_token()
return self._auth_token
def request(self, method, endpoint, **kwargs):
headers = kwargs.pop('headers', {})
headers['User-Agent'] = 'Zymbit Connect {}'.format(get_version())
self.logger.debug('auth_token: {}'.format(self.auth_token))
headers['apikey'] = self.auth_token or 'anonymous'
kwargs['headers'] = headers
if 'verify' not in kwargs:
kwargs['verify'] = settings.CHECK_HOSTNAME
url = '{}{}'.format(self.api_url, endpoint)
self.logger.debug('request url={}, kwargs={}'.format(url, kwargs))
method_fn = getattr(self.session, method)
try:
self.response = method_fn(url, **kwargs)
except requests.ConnectionError as exc:
raise requests.ConnectionError('Unable to connect to url={}'.format(url))
else:
self.response.raise_for_status()
return self.response
|
zymbit-connect
|
/zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/upstream/api.py
|
api.py
|
trequests
=========
.. image:: https://travis-ci.org/1stvamp/trequests.png?branch=master
A Tornado async HTTP/HTTPS client adapter for python-requests.
The problem
-----------
You enjoy using `Tornado <http://www.tornadoweb.org/>`_ to build fast non-blocking web applications, and you want to use a library from PyPI that makes a few HTTP requests. Pretty much every dev and their dog uses `Requests <http://python-requests.org/>`_ for that (rightly so, because it's *awesome*), but Requests has no knowledge of the event loop and cannot yield when a socket blocks, which means any library built on it will block your request handling and cause grud-knows what other worlds of pain.
The solution
------------
Luckily there are solutions, one such is to use the `greenlet <http://greenlet.readthedocs.org/>`_ module to wrap blocking operations and swap Tornado coroutines at the right time, there is even the handy `tornalet <https://github.com/Gawen/tornalet>`_ module which handles this for you.
To make life even easier, you lucky lucky people, I've created ``trequests``, an async Requests adapter that uses greenlets (via tornalet) and the built-in non-blocking HTTP client methods in Tornado to make any call to a library (utilizing Requests) non-blocking.
Installation
------------
.. code-block:: bash
$ pip install trequests
Usage
-----
.. code-block:: python
# Assume bobs_big_data uses python-requests for HTTP requests
import bobs_big_data
from tornado.web import RequestHandler
from trequests import setup_session
from tornalet import tornalet
    # Tell requests to use our AsyncHTTPAdapter for the default
    # session instance; you can also pass your own in
setup_session()
class WebHandler(RequestHandler):
@tornalet
def get(self):
data = {'foo': 'bar'}
# This will now unblock the current coroutine, like magic
response = bobs_big_data.BigData(data).post()
return self.write(response)
Tests
-----
To run the basic test suite, hit up ``python setup.py test``.
Caveats
-------
``trequests`` has been used in production in a large-scale metrics application, and is a very small and quite simple module.
**However**, I've released it as ``0.9.x`` mainly because it's missing 100% compatibility with the Requests adapter API, most notably *cookie jar* and *session* support, which I will improve (or please send me a pull request if you fancy adding support) and release as a ``1.x`` branch when I have the time.
Also, at the moment the ``setup_session`` utility actually monkey patches the ``session`` utility functions in Requests, as this was the only way I could see to override the mounts on "default" session instances (e.g. those created for every call when a session isn't provided). I'm hoping to change this in the future.
|
zymbit-trequests
|
/zymbit-trequests-0.9.5.tar.gz/zymbit-trequests-0.9.5/README.rst
|
README.rst
|
"""Installer for trequests
"""
from os import path
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
cwd = path.dirname(__file__)
__version__ = open(path.join(cwd, 'trequests/trequests_version.txt'),
'r').read().strip()
setup(
name='zymbit-trequests',
description='A Tornado async HTTP/HTTPS client '
'adaptor for python-requests',
long_description=open('README.rst').read(),
version=__version__,
author='Wes Mason',
author_email='[email protected]',
url='https://github.com/1stvamp/trequests',
packages=find_packages(exclude=['ez_setup']),
install_requires=open('requirements.txt').readlines(),
package_data={'': ['trequests_version.txt']},
include_package_data=True,
test_suite="trequests_tests",
license='BSD'
)
|
zymbit-trequests
|
/zymbit-trequests-0.9.5.tar.gz/zymbit-trequests-0.9.5/setup.py
|
setup.py
|
import requests
from os import path
from tornalet import asyncify
from tornado.httpclient import AsyncHTTPClient
def get_version_string():
return open(path.join(path.dirname(__file__),
'trequests_version.txt'), 'r').read().strip()
def get_version():
return get_version_string().split('.')
__version__ = get_version_string()
# Don't know how to handle this yet, so just mock it out for now
requests.adapters.extract_cookies_to_jar = lambda a, b, c: None
class AsyncHTTPAdapter(requests.adapters.HTTPAdapter):
"""A python-requests HTTP/HTTPS adapter that uses the Tornado
AsyncHTTPClient and greenlets (via the tornalet library) to perform a
non-blocking call inside the Tornado IOLoop whenever a
requests.[get/post/put/delete/request]() call is made. It then wraps the
tornado.httpclient.HTTPResponse as a requests.models.Response instance and
returns so that any library calling requests gets what it expects (mostly).
"""
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
http_client = AsyncHTTPClient()
        # This is where the magic happens: tornalet.asyncify wraps the parent
        # call in a greenlet that can be swapped out the same as any
        # async tornado IO handler call.
resp = asyncify(http_client.fetch)(request=request.url,
method=request.method,
body=request.body,
headers=request.headers,
validate_cert=verify)
# We probably don't get this from any of the tornado adaptors, so
# we stub it out as Unknown
resp.reason = 'Unknown'
resp.content = resp.body
r = self.build_response(request, resp)
# Reset the code and content as they're not parsed by build_response
r.status_code = resp.code
r._content = resp.content
return r
def setup_session(session=None, mounts=None):
"""Mount the AsyncHTTPAdapter for a given session instance,
or for the default instance in python-requests, for a given set of mounts
or just for the default HTTP/HTTPS protocols.
"""
    default_session = session is None
    if default_session:
        session = requests.session()
    if mounts is None:
        mounts = ('http://', 'https://')
    def _session():
        for mount in mounts:
            session.mount(mount, AsyncHTTPAdapter())
        return session
    if default_session:
        # no session was passed in, so monkey patch the default session
        # factories in requests so they hand back the adapted session
        requests.session = requests.sessions.session = _session
    else:
        _session()
|
zymbit-trequests
|
/zymbit-trequests-0.9.5.tar.gz/zymbit-trequests-0.9.5/trequests/__init__.py
|
__init__.py
|
zymbit
======
Python library to communicate with the Zymbit cloud
|
zymbit
|
/zymbit-0.5.17.tar.gz/zymbit-0.5.17/README
|
README
|
#!/usr/bin/env python
import os
from distutils.core import setup
SCRIPT_DIR = os.path.dirname(__file__)
if not SCRIPT_DIR:
SCRIPT_DIR = os.getcwd()
# put together list of requirements to install
install_requires = []
with open(os.path.join(SCRIPT_DIR, 'requirements.txt')) as fh:
for line in fh.readlines():
if line.startswith('-'):
continue
install_requires.append(line.strip())
data_files = [(dirpath, [os.path.join(dirpath, x) for x in filenames]) for dirpath, dirnames, filenames in os.walk('files') if filenames]
setup(name='zymbit',
version='0.5.17',
description='Zymbit cloud library',
author='Roberto Aguilar',
author_email='[email protected]',
packages=[
'zymbit', 'zymbit.arduino', 'zymbit.commands',
'zymbit.common', 'zymbit.compat', 'zymbit.darwin',
'zymbit.linux', 'zymbit.messenger', 'zymbit.raspberrypi'
],
scripts=['scripts/zymbit'],
data_files=data_files,
long_description=open('README').read(),
url='http://zymbit.com/',
license='LICENSE',
install_requires=install_requires,
)
|
zymbit
|
/zymbit-0.5.17.tar.gz/zymbit-0.5.17/setup.py
|
setup.py
|
Copyright 2023 Zymbit
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
zymbitwalletsdk
|
/zymbitwalletsdk-1.0.0.tar.gz/zymbitwalletsdk-1.0.0/LICENSE.md
|
LICENSE.md
|
# Zymbit Wallet Python SDK
## Overview
Ethereum accounts, signatures, and transactions have an additional layer of complexity over traditional cryptographic keys and signatures. The Zymbit Wallet SDK aims to abstract away this complexity, enabling you to create and manage multiple blockchain wallets and seamlessly integrate with various blockchains without having to deal with their technical intricacies.
The first iteration of the SDK encapsulates all wallet creation, management, and use (sending transactions and interacting with dApps) capabilities for Ethereum and EVM compatible chains.
If you are a developer interested in creating your own custom implementations of Accounts and/or Keyrings to work with ZymbitKeyringManager, you should further explore this repository. By extending the Account and [Keyring Abstract Base Classes (ABCs)](https://docs.python.org/3/library/abc.html), you can implement the required methods and any additional functionality as needed. The elliptic curves we support (secp256k1, secp256r1, and ed25519) are used by many major blockchains, including Bitcoin, Ethereum, Cardano, Solana, and Polkadot. Developing your own keyrings can be incredibly beneficial for a wide range of applications, such as key management or on-chain interactions like sending transactions or interacting with smart contracts.
**NOTE:** Only compatible with [HSM6](https://www.zymbit.com/hsm6/), [SCM](https://www.zymbit.com/scm/), and [SEN](https://www.zymbit.com/secure-compute-node/)
## Installation
```
pip install zymbitwalletsdk
```
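A minimal usage sketch, distilled from the unit tests bundled with this package (the wallet name and account count are illustrative, and a Zymbit module such as HSM6 must be attached):
```python
from zymbitwalletsdk import ZymbitEthKeyring, ZymbitKeyringManager

# Create a keyring manager and a new Ethereum keyring backed by the Zymbit module.
manager = ZymbitKeyringManager()
master_slot, mnemonic = manager.create_keyring(ZymbitEthKeyring, "demo_wallet")
print("master slot:", master_slot)
print("BIP-39 recovery phrase:", mnemonic)

# Derive a few accounts and inspect their BIP-44 paths and Ethereum addresses.
keyring = manager.get_keyring("demo_wallet")
for account in keyring.add_accounts(3):
    print(account.path, account.address)

# Remove the keyring (and its master seed) once it is no longer needed.
manager.remove_keyring("demo_wallet", remove_master=True)
```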
## Documentation:
[Zymbit Wallet Python SDK Documentation](https://docs.zymbit.com/zymbit-wallet-sdk/zymbit-wallet-python-sdk/)
|
zymbitwalletsdk
|
/zymbitwalletsdk-1.0.0.tar.gz/zymbitwalletsdk-1.0.0/README.md
|
README.md
|
#!/bin/bash
# Navigate to the module directory
cd ../
# Remove __pycache__ folders
find . -type d -name "__pycache__" -exec rm -rf {} +
# Reinstall the module
pip install --upgrade --force-reinstall .
# Navigate to the tests directory and run the test script
cd tests
python3 -m unittest
|
zymbitwalletsdk
|
/zymbitwalletsdk-1.0.0.tar.gz/zymbitwalletsdk-1.0.0/tests/test.sh
|
test.sh
|
import unittest
from typing import Type
import zymkey
import sys
from zymbitwalletsdk import Keyring, ZymbitEthKeyring, ZymbitKeyringManager
class ZymbitEthKeyringTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# create a zymbit keyring manager instance
cls.keyring_manager = ZymbitKeyringManager()
def test_create_and_remove_keyring(self):
# test that a keyring can be created with valid inputs
wallet_name_1 = "test_wallet_1"
wallet_name_2 = "test_wallet_2"
master_gen_key = bytearray([0x03] * 32)
master_slot, mnemonic = self.keyring_manager.create_keyring(ZymbitEthKeyring, wallet_name_1, master_gen_key)
master_slot1, mnemonic1 = self.keyring_manager.create_keyring(ZymbitEthKeyring, wallet_name_2)
self.assertIsInstance(master_slot, int)
self.assertIsInstance(mnemonic, str)
self.assertEqual(len(mnemonic.split()), 24)
self.assertIsInstance(master_slot1, int)
self.assertIsInstance(mnemonic1, str)
self.assertEqual(len(mnemonic1.split()), 24)
keyrings = self.keyring_manager.get_keyrings()
self.assertEqual(len(keyrings), 2)
        # test that creating a keyring with an invalid keyring class raises an AttributeError or ValueError
        with self.assertRaises((AttributeError, ValueError)):
self.keyring_manager.create_keyring(Keyring, wallet_name_1, master_gen_key)
# test that creating a keyring with an invalid wallet name raises a ValueError
with self.assertRaises(ValueError):
self.keyring_manager.create_keyring(ZymbitEthKeyring, "", master_gen_key)
# test that creating a keyring with an invalid master_gen_key raises a TypeError
with self.assertRaises(TypeError):
self.keyring_manager.create_keyring(ZymbitEthKeyring, wallet_name_1, "invalid_key")
self.keyring_manager.remove_keyring(wallet_name_1, remove_master=True)
self.keyring_manager.remove_keyring(wallet_name_2, remove_master=True)
def test_add_and_remove_keyring(self):
# create a new keyring and add it to the manager
wallet_name = "test_wallet_3"
use_bip39_recovery = zymkey.RecoveryStrategyBIP39()
zymkey.client.gen_wallet_master_seed(key_type=ZymbitEthKeyring.CURVE.get_curve_type(), master_gen_key=bytearray(), wallet_name=wallet_name, recovery_strategy=use_bip39_recovery)
keyring = ZymbitEthKeyring(wallet_name=wallet_name)
self.keyring_manager.add_keyring(keyring)
# test that the keyring was added successfully
added_keyring = self.keyring_manager.get_keyring(wallet_name)
self.assertIsInstance(added_keyring, ZymbitEthKeyring)
self.assertEqual(added_keyring.wallet_name, wallet_name)
# remove the keyring and test that it was removed successfully
self.keyring_manager.remove_keyring(wallet_name, remove_master=True)
with self.assertRaises(ValueError):
self.keyring_manager.get_keyring(wallet_name)
|
zymbitwalletsdk
|
/zymbitwalletsdk-1.0.0.tar.gz/zymbitwalletsdk-1.0.0/tests/test_zymbit_keyring_manager.py
|
test_zymbit_keyring_manager.py
|
import unittest
from unittest.mock import Mock
import sys
import zymkey
import time
from zymbitwalletsdk import EthConnect, EthTransaction, SignedEthTransaction, ZymbitEthKeyring
from Crypto.Hash import keccak, SHA256
class TestEthConnect(unittest.TestCase):
@classmethod
def setUpClass(cls):
slots: list[int] = zymkey.client.get_slot_alloc_list()[0]
cls.slots = list(filter(lambda slot: slot > 15, slots))
cls.wallet_name = "test_wallet"
use_bip39_recovery = zymkey.RecoveryStrategyBIP39()
(master_slot, mnemonic) = zymkey.client.gen_wallet_master_seed(key_type=ZymbitEthKeyring.CURVE.get_curve_type(), master_gen_key=bytearray(), wallet_name=cls.wallet_name, recovery_strategy=use_bip39_recovery)
cls.master_slot = master_slot
cls.keyring = ZymbitEthKeyring(wallet_name=cls.wallet_name)
cls.keyring.add_accounts(5)
@classmethod
def tearDownClass(cls):
slots: list[int] = zymkey.client.get_slot_alloc_list()[0]
slots = list(filter(lambda slot: slot > 15, slots))
diff = set(slots) - set(cls.slots)
for slot in list(diff):
zymkey.client.remove_key(slot)
def test_create_transaction(self):
transaction = EthConnect.create_transaction(to=self.keyring.accounts[0].address)
self.assertIsInstance(transaction, EthTransaction)
def test_create_deploy_contract_transaction(self):
transaction = EthConnect.create_deploy_contract_transaction(chain_id=11155111, contract_bytecode_path="./bytecode.txt", contract_abi_path="./ABI.json", constructor_args=['0x'+('0'*64), self.keyring.accounts[0].address])
self.assertIsInstance(transaction, EthTransaction)
def test_create_execute_contract_transaction(self):
transaction = EthConnect.create_execute_contract_transaction(chain_id=11155111, contract_address="0x6FCc62196FD8C0f1a92817312c109D438cC0acC9", contract_abi_path="./ABI.json", function_name="postData", args=["OMRON", "HR_MONITOR", int(time.time()), "0x" + ("0"*64), '0x' + ('0'*130)])
self.assertIsInstance(transaction, EthTransaction)
def test_sign_transaction(self):
transaction = EthConnect.create_transaction(to=self.keyring.accounts[1].address)
signed_transaction = EthConnect.sign_transaction(transaction, self.keyring, address=self.keyring.accounts[2].address)
self.assertIsInstance(signed_transaction, SignedEthTransaction)
def test_rlp_serialize_deserialize_transaction(self):
transaction = EthConnect.create_transaction(to=self.keyring.accounts[3].address)
encoded_transaction = EthConnect.rlp_serialize_transaction(transaction)
decoded_transaction = EthConnect.rlp_deserialize_transaction(encoded_transaction)
self.assertIsInstance(decoded_transaction, EthTransaction)
def test_create_sign_message_and_concat_sig(self):
message, message_bytes = EthConnect.create_message("Hello, World!")
hash_message = EthConnect.keccak256(bytes_data=message_bytes)
v, r, s = EthConnect.sign_message(hash_message, self.keyring, address=self.keyring.accounts[3].address)
self.assertTrue(isinstance(v, int) and isinstance(r, int) and isinstance(s, int))
signature = EthConnect.concatenate_sig(v,r,s)
self.assertIsInstance(signature, str)
def test_keccak256(self):
keccak_hash = EthConnect.keccak256(str_data="Hello, World!")
self.assertIsInstance(keccak_hash, keccak.Keccak_Hash)
def test_sha256(self):
sha256_hash = EthConnect.sha256(str_data="Hello, World!")
self.assertIsInstance(sha256_hash, SHA256.SHA256Hash)
def test_eth_to_wei(self):
wei = EthConnect.eth_to_wei(ether_amount=1)
self.assertIsInstance(wei, int)
self.assertEqual(wei, 1000000000000000000)
|
zymbitwalletsdk
|
/zymbitwalletsdk-1.0.0.tar.gz/zymbitwalletsdk-1.0.0/tests/test_eth_connect.py
|
test_eth_connect.py
|
import unittest
from unittest.mock import MagicMock
import sys
from zymbitwalletsdk import Account, EthAccount
import zymkey
import binascii
from web3 import Web3
class TestEthAccount(unittest.TestCase):
def test_init(self):
account = EthAccount("m/44'/60'/0'/0/0", "0x742d35Cc6634C0532925a3b844Bc454e4438f44e", 32)
self.assertIsInstance(account, Account)
self.assertEqual(account.path, "m/44'/60'/0'/0/0")
self.assertEqual(account.address, "0x742d35Cc6634C0532925a3b844Bc454e4438f44e")
self.assertEqual(account.slot, 32)
def test_serialize(self):
account = EthAccount("m/44'/60'/0'/0/0", "0x742d35Cc6634C0532925a3b844Bc454e4438f44e", 32)
serialized = account.serialize()
self.assertEqual(serialized["path"], "m/44'/60'/0'/0/0")
self.assertEqual(serialized["address"], "0x742d35Cc6634C0532925a3b844Bc454e4438f44e")
self.assertEqual(serialized["slot"], 32)
def test_is_valid_account(self):
account = EthAccount("m/44'/60'/0'/0/0", "0x742d35Cc6634C0532925a3b844Bc454e4438f44e", 32)
self.assertTrue(account.is_valid_account())
with self.assertRaises(ValueError):
invalid_address_account = EthAccount("m/44'/60'/0'/0/0", "0x742d35Cc6634C0532925a3b844Bc454e4438f44", 32)
with self.assertRaises(ValueError):
invalid_slot_account = EthAccount("m/44'/60'/0'/0/0", "0x742d35Cc6634C0532925a3b844Bc454e4438f44e", 513)
with self.assertRaises(ValueError):
invalid_path_account = EthAccount("m/44'/60'/0'/0", "0x742d35Cc6634C0532925a3b844Bc454e4438f44e", 32)
with self.assertRaises(ValueError):
invalid_path_account_2 = EthAccount("m/44'/60'/0'/0/0/0", "0x742d35Cc6634C0532925a3b844Bc454e4438f44e", 32)
with self.assertRaises(ValueError):
invalid_path_account_3 = EthAccount("m/44'/60'/0'/0/0'", "0x742d35Cc6634C0532925a3b844Bc454e4438f44e", 32)
|
zymbitwalletsdk
|
/zymbitwalletsdk-1.0.0.tar.gz/zymbitwalletsdk-1.0.0/tests/test_eth_account.py
|
test_eth_account.py
|
import unittest
from unittest.mock import patch
from Crypto.Hash import SHA256, keccak
from typing import List
import sys
import zymkey
from zymbitwalletsdk import Keyring, EthAccount, EllipticCurve, EthTransaction, SignedEthTransaction, ZymbitEthKeyring
class TestZymbitEthKeyring(unittest.TestCase):
@classmethod
def setUpClass(cls):
slots: list[int] = zymkey.client.get_slot_alloc_list()[0]
cls.slots = list(filter(lambda slot: slot > 15, slots))
cls.wallet_name = "test_wallet"
use_bip39_recovery = zymkey.RecoveryStrategyBIP39()
(master_slot, mnemonic) = zymkey.client.gen_wallet_master_seed(key_type=ZymbitEthKeyring.CURVE.get_curve_type(), master_gen_key=bytearray(), wallet_name=cls.wallet_name, recovery_strategy=use_bip39_recovery)
cls.master_slot = master_slot
cls.keyring = ZymbitEthKeyring(master_slot=master_slot)
@classmethod
def tearDownClass(cls):
slots: list[int] = zymkey.client.get_slot_alloc_list()[0]
slots = list(filter(lambda slot: slot > 15, slots))
diff = set(slots) - set(cls.slots)
for slot in list(diff):
zymkey.client.remove_key(slot)
def test_serialize_deserialize(self):
serialized = self.keyring.serialize()
keyring = ZymbitEthKeyring(wallet_name=serialized['wallet_name'])
self.assertEqual(self.keyring.TYPE, keyring.TYPE)
self.assertEqual(self.keyring.BASE_PATH, keyring.BASE_PATH)
self.assertEqual(self.keyring.wallet_name, keyring.wallet_name)
self.assertEqual(self.keyring.master_slot, keyring.master_slot)
self.assertEqual(self.keyring.base_slot, keyring.base_slot)
self.assertEqual(len(self.keyring.accounts), len(keyring.accounts))
def test_add_account(self):
self.assertEqual(len(self.keyring.accounts), 0)
new_account = self.keyring.add_account()
self.assertEqual(len(self.keyring.accounts), 1)
self.assertIsInstance(new_account, EthAccount)
self.assertEqual(new_account.path, "m/44'/60'/0'/0/0")
def test_add_accounts(self):
self.assertEqual(len(self.keyring.accounts), 1)
new_accounts = self.keyring.add_accounts(3)
self.assertEqual(len(self.keyring.accounts), 4)
self.assertIsInstance(new_accounts, list)
self.assertIsInstance(new_accounts[0], EthAccount)
self.assertEqual(new_accounts[0].path, "m/44'/60'/0'/0/1")
self.assertEqual(new_accounts[1].path, "m/44'/60'/0'/0/2")
self.assertEqual(new_accounts[2].path, "m/44'/60'/0'/0/3")
def test_add_accounts_list(self):
self.assertEqual(len(self.keyring.accounts), 4)
new_accounts = self.keyring.add_accounts_list([4, 20, 7])
self.assertEqual(len(self.keyring.accounts), 7)
self.assertIsInstance(new_accounts, list)
self.assertIsInstance(new_accounts[0], EthAccount)
self.assertEqual(new_accounts[0].path, "m/44'/60'/0'/0/4")
self.assertEqual(new_accounts[1].path, "m/44'/60'/0'/0/20")
self.assertEqual(new_accounts[2].path, "m/44'/60'/0'/0/7")
def test_get_accounts(self):
self.assertEqual(len(self.keyring.accounts), 7)
new_account = self.keyring.add_account(index=35)
accounts = self.keyring.get_accounts()
self.assertEqual(len(accounts), 8)
self.assertIsInstance(accounts, list)
self.assertIsInstance(accounts[0], EthAccount)
self.assertEqual(accounts[-1].path,"m/44'/60'/0'/0/35")
def test_remove_account(self):
account = self.keyring.get_accounts()[0]
self.assertEqual(len(self.keyring.accounts), 8)
self.assertTrue(self.keyring.remove_account(address=account.address))
self.assertEqual(len(self.keyring.accounts), 7)
def test_get_public_key(self):
account = self.keyring.get_accounts()[0]
public_key = self.keyring.get_public_key(address=account.address)
self.assertIsInstance(public_key, str)
self.assertRegex(public_key, r'^0x[a-fA-F0-9]{128}$')
|
zymbitwalletsdk
|
/zymbitwalletsdk-1.0.0.tar.gz/zymbitwalletsdk-1.0.0/tests/test_zymbit_eth_keyring.py
|
test_zymbit_eth_keyring.py
|
# zyme
> Short blurb about what your product does.
[![PyPI][pypi-image]][pypi-url]
[![Downloads][downloads-image]][downloads-url]
[![Status][status-image]][pypi-url]
[![Python Version][python-version-image]][pypi-url]
[![Format][format-image]][pypi-url]
[![Requirements][requirements-status-image]][requirements-status-url]
[![tests][tests-image]][tests-url]
[![Codecov][codecov-image]][codecov-url]
[![CodeFactor][codefactor-image]][codefactor-url]
[![Codeclimate][codeclimate-image]][codeclimate-url]
[![Lgtm alerts][lgtm-alerts-image]][lgtm-alerts-url]
[![Lgtm quality][lgtm-quality-image]][lgtm-quality-url]
[![CodeQl][codeql-image]][codeql-url]
[![readthedocs][readthedocs-image]][readthedocs-url]
[![pre-commit][pre-commit-image]][pre-commit-url]
[![pre-commit.ci status][pre-commit.ci-image]][pre-commit.ci-url]
[![Imports: isort][isort-image]][isort-url]
[![Code style: black][black-image]][black-url]
[![Checked with mypy][mypy-image]][mypy-url]
[![security: bandit][bandit-image]][bandit-url]
[![Commitizen friendly][commitizen-image]][commitizen-url]
[![Conventional Commits][conventional-commits-image]][conventional-commits-url]
[![DeepSource][deepsource-image]][deepsource-url]
[![license][license-image]][license-url]
One to two paragraph statement about your product and what it does.

## Installation
OS X & Linux:
```sh
pip3 install zyme
```
Windows:
```sh
pip install zyme
```
## Usage example
A few motivating and useful examples of how your product can be used. Spice this up with code blocks and potentially more screenshots.
_For more examples and usage, please refer to the [Wiki][wiki]._
## Development setup
Describe how to install all development dependencies and how to run an automated test-suite of some kind. Potentially do this for multiple platforms.
```sh
pip install --editable zyme
```
## Documentation
### - [**Read the Docs**](https://zyme.readthedocs.io/en/latest/)
### - [**Wiki**](https://github.com/Stephen-RA-King/zyme/wiki)
## Meta
[](https://linkedin.com/in/stephen-k-3a4644210)
[](https://github.com/Stephen-RA-King)
[](https://pypi.org/project/zyme/)
[](https://www.justpython.tech)
[](mailto:[email protected])
[](https://www.justpython.tech/cv)
Stephen R A King : [email protected]
Distributed under the MIT license. See [license][license-url] for more information.
[https://github.com/Stephen-RA-King/zyme](https://github.com/Stephen-RA-King/zyme)
Created with Cookiecutter template: [**cc_template**][cc_template-url] version 1.1.1
<!-- Markdown link & img dfn's -->
[bandit-image]: https://img.shields.io/badge/security-bandit-yellow.svg
[bandit-url]: https://github.com/PyCQA/bandit
[black-image]: https://img.shields.io/badge/code%20style-black-000000.svg
[black-url]: https://github.com/psf/black
[cc_template-url]: https://github.com/Stephen-RA-King/cc_template
[codeclimate-image]: https://api.codeclimate.com/v1/badges/7fc352185512a1dab75d/maintainability
[codeclimate-url]: https://codeclimate.com/github/Stephen-RA-King/zyme/maintainability
[codecov-image]: https://codecov.io/gh/Stephen-RA-King/zyme/branch/main/graph/badge.svg
[codecov-url]: https://app.codecov.io/gh/Stephen-RA-King/zyme
[codefactor-image]: https://www.codefactor.io/repository/github/Stephen-RA-King/zyme/badge
[codefactor-url]: https://www.codefactor.io/repository/github/Stephen-RA-King/zyme
[codeql-image]: https://github.com/Stephen-RA-King/zyme/actions/workflows/codeql-analysis.yml/badge.svg
[codeql-url]: https://github.com/Stephen-RA-King/zyme/actions/workflows/codeql-analysis.yml
[commitizen-image]: https://img.shields.io/badge/commitizen-friendly-brightgreen.svg
[commitizen-url]: http://commitizen.github.io/cz-cli/
[conventional-commits-image]: https://img.shields.io/badge/Conventional%20Commits-1.0.0-yellow.svg?style=flat-square
[conventional-commits-url]: https://conventionalcommits.org
[deepsource-image]: https://static.deepsource.io/deepsource-badge-light-mini.svg
[deepsource-url]: https://deepsource.io/gh/Stephen-RA-King/zyme/?ref=repository-badge
[downloads-image]: https://static.pepy.tech/personalized-badge/zyme?period=total&units=international_system&left_color=black&right_color=orange&left_text=Downloads
[downloads-url]: https://pepy.tech/project/zyme
[format-image]: https://img.shields.io/pypi/format/zyme
[isort-image]: https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336
[isort-url]: https://github.com/pycqa/isort/
[lgtm-alerts-image]: https://img.shields.io/lgtm/alerts/g/Stephen-RA-King/zyme.svg?logo=lgtm&logoWidth=18
[lgtm-alerts-url]: https://lgtm.com/projects/g/Stephen-RA-King/zyme/alerts/
[lgtm-quality-image]: https://img.shields.io/lgtm/grade/python/g/Stephen-RA-King/zyme.svg?logo=lgtm&logoWidth=18
[lgtm-quality-url]: https://lgtm.com/projects/g/Stephen-RA-King/zyme/context:python
[license-image]: https://img.shields.io/pypi/l/zyme
[license-url]: https://github.com/Stephen-RA-King/zyme/blob/main/license
[mypy-image]: http://www.mypy-lang.org/static/mypy_badge.svg
[mypy-url]: http://mypy-lang.org/
[pre-commit-image]: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white
[pre-commit-url]: https://github.com/pre-commit/pre-commit
[pre-commit.ci-image]: https://results.pre-commit.ci/badge/github/Stephen-RA-King/gitwatch/main.svg
[pre-commit.ci-url]: https://results.pre-commit.ci/latest/github/Stephen-RA-King/gitwatch/main
[pypi-url]: https://pypi.org/project/zyme/
[pypi-image]: https://img.shields.io/pypi/v/zyme.svg
[python-version-image]: https://img.shields.io/pypi/pyversions/zyme
[readthedocs-image]: https://readthedocs.org/projects/zyme/badge/?version=latest
[readthedocs-url]: https://zyme.readthedocs.io/en/latest/?badge=latest
[requirements-status-image]: https://requires.io/github/Stephen-RA-King/zyme/requirements.svg?branch=main
[requirements-status-url]: https://requires.io/github/Stephen-RA-King/zyme/requirements/?branch=main
[status-image]: https://img.shields.io/pypi/status/zyme.svg
[tests-image]: https://github.com/Stephen-RA-King/zyme/actions/workflows/tests.yml/badge.svg
[tests-url]: https://github.com/Stephen-RA-King/zyme/actions/workflows/tests.yml
[wiki]: https://github.com/Stephen-RA-King/zyme/wiki
|
zyme
|
/zyme-0.1.1.tar.gz/zyme-0.1.1/README.md
|
README.md
|
# Credits
## Development Lead
- Stephen R A King \<[email protected]\>
## Maintainer
- Stephen R A King \<[email protected]\>
## Contributors
|
zyme
|
/zyme-0.1.1.tar.gz/zyme-0.1.1/AUTHORS.md
|
AUTHORS.md
|
#!/usr/bin/env python
"""The setup script."""
# Third party modules
from setuptools import setup
setup()
|
zyme
|
/zyme-0.1.1.tar.gz/zyme-0.1.1/setup.py
|
setup.py
|
# Changelog
<!--next-version-placeholder-->
## v0.1.1 (2022-07-22)
## 0.1.0 (2022-07-22)
### First Release of 'zyme'
<!-- Markdown link & img dfn's -->
[github](https://github.com/Stephen-RA-King/zyme)
|
zyme
|
/zyme-0.1.1.tar.gz/zyme-0.1.1/CHANGELOG.md
|
CHANGELOG.md
|
#!/usr/bin/env python
import os
from distutils.core import setup
from setuptools.command.install import install
SCRIPT_DIR = os.path.dirname(__file__)
if not SCRIPT_DIR:
SCRIPT_DIR = os.getcwd()
# put together list of requirements to install
install_requires = ['cmdline>=0.1.8', 'sh>=1.11']
REQUIREMENTS = os.path.join(SCRIPT_DIR, 'requirements.txt')
if os.path.exists(REQUIREMENTS):
with open(REQUIREMENTS) as fh:
for line in fh.readlines():
if line.startswith('-'):
continue
install_requires.append(line.strip())
long_description = ''
README = os.path.join(SCRIPT_DIR, 'README.md')
if os.path.exists(README):
long_description = open(README, 'r').read()
def get_data_files(base):
for dirpath, _, filenames in os.walk(base):
for filename in filenames:
yield os.path.join(dirpath, filename)
# http://stackoverflow.com/a/36902139/703144
class PostInstallCommand(install):
"""Post-installation for installation mode."""
def run(self):
os.system("systemctl daemon-reload")
install.run(self)
data_files = [
('config', list(get_data_files('config'))),
('/etc/systemd/system', ['etc/systemd/system/[email protected]']),
('/usr/local/lib', ['lib/libzk_app_utils.so']),
('share/zymkey/examples', get_data_files('examples')),
]
setup(name='zymkey',
version='0.1.3',
description='Zymkey utilities',
author='Zymbit, Inc.',
author_email='[email protected]',
packages=[
'zymkey',
'zymkey.commands',
],
cmdclass={
'install': PostInstallCommand,
},
entry_points={
'console_scripts': [
'zymkey = zymkey.entrypoints:main',
],
},
data_files=data_files,
long_description=long_description,
url='https://zymbit.com/',
license='LICENSE',
install_requires=install_requires,
)
|
zymkey
|
/zymkey-0.1.3.tar.gz/zymkey-0.1.3/setup.py
|
setup.py
|
import zymkey
from textwrap import fill
print('Testing data lock...')
src = bytearray(b'\x01\x02\x03\x04')
dst = zymkey.client.lock(src)
print('Original Data')
s = fill(' '.join('{:02X}'.format(c) for c in src), 49)
print(s)
print('Encrypted Data')
s = fill(' '.join('{:02X}'.format(c) for c in dst), 49)
print(s)
print('Testing data unlock...')
new_src = dst
new_dst = zymkey.client.unlock(new_src)
print('Decrypted Data')
s = fill(' '.join('{:02X}'.format(c) for c in new_dst), 49)
print(s)
print('Turning LED on...')
zymkey.client.led_on()
print('Testing get_random() with 512 bytes...')
num = 512
random_bytes = zymkey.client.get_random(num)
s = fill(' '.join('{:02X}'.format(c) for c in random_bytes), 49)
print(s)
print('Turning LED off...')
zymkey.client.led_off()
print('Flashing LED, 500ms on, 100ms off...')
zymkey.client.led_flash(500, 100)
print('Testing zkCreateRandDataFile with 1MB...')
num = 1024 * 1024
file_path = '/tmp/r.bin'
zymkey.client.create_random_file(file_path, num)
print('Turning LED off...')
zymkey.client.led_off()
print('Testing get_ecdsa_public_key()...')
pk = zymkey.client.get_ecdsa_public_key()
s = fill(' '.join('{:02X}'.format(c) for c in pk), 49)
print(s)
print('Testing create_ecdsa_public_key_file()...')
zymkey.client.create_ecdsa_public_key_file('/tmp/pk.pem')
|
zymkey
|
/zymkey-0.1.3.tar.gz/zymkey-0.1.3/examples/zk_app_utils_test.py
|
zk_app_utils_test.py
|
from __future__ import print_function
import zymkey
from zymkey.exceptions import VerificationError
secret_message = 'Hello, Bob. --Alice'
print('Signing data...', end='')
signature = zymkey.client.sign(secret_message)
print('OK')
print('Verifying data...', end='')
zymkey.client.verify(secret_message, signature)
print('OK')
print('Verifying tainted data...', end='')
try:
zymkey.client.verify(secret_message.replace('Alice', 'Eve'), signature)
except VerificationError:
print('FAIL, yay!')
else:
raise Exception('verification should have failed, but passed')
# Flash the LED to indicate the operation is underway
zymkey.client.led_flash(500, 100)
# Generate random blocks of 512 bytes to fill a 128 KiB buffer (512 * 256 bytes)
bs = 512
num_blocks = 256
print('Generating random block ({!r} bytes)...'.format(bs * num_blocks))
random_bytes = []
for x in range(num_blocks):
random_bytes += zymkey.client.get_random(bs)
# Encrypt the random data
print('Encrypting random block...')
encrypted = zymkey.client.lock(random_bytes)
# Decrypt the random data
print('Decrypting encrypted block...')
decrypted = zymkey.client.unlock(encrypted)
decrypted_list = list(decrypted)
random_list = list(random_bytes)
if decrypted_list == random_list:
print('PASS: Decrypted data matches original random data')
else:
    print('FAIL: Decrypted data does not match original random data')
# Turn off the LED
zymkey.client.led_off()
print('Done!')
|
zymkey
|
/zymkey-0.1.3.tar.gz/zymkey-0.1.3/examples/zk_crypto_test.py
|
zk_crypto_test.py
|
from zymod.event.zy_event_level_enum import ZyEventLevel
from zymod.event.zy_internal_event import ZyInternalEvent
__all__ = [
'ZyEventLevel',
'ZyInternalEvent',
]
|
zymod
|
/event/__init__.py
|
__init__.py
|
from happy_python import HappyLog
from zymod.event import ZyEventLevel
class ZyInternalEvent(Exception):
def __init__(self, level: ZyEventLevel, summary: str, description: str, trigger: str):
        super().__init__('%s: %s' % (summary, description))
self.level = level
self.summary = summary
self.description = description
self.trigger = trigger
self.hlog = HappyLog.get_instance()
self.hlog.debug('ZyInternalEvent->%s' % self.asdict())
def asdict(self) -> dict:
return {
"level": self.level.value,
"summary": self.summary,
"description": self.description,
"trigger": self.trigger
}
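# Illustrative usage sketch (hypothetical caller; `report` below is a placeholder for the caller's own handling):
#     try:
#         raise ZyInternalEvent(ZyEventLevel.Alert, 'disk full', 'no space left on /data', 'monitor')
#     except ZyInternalEvent as event:
#         report(event.asdict())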
|
zymod
|
/event/zy_internal_event.py
|
zy_internal_event.py
|
from enum import unique, IntEnum
@unique
class ZyEventLevel(IntEnum):
Notice = 0
Warning = 1
Alert = 2
|
zymod
|
/event/zy_event_level_enum.py
|
zy_event_level_enum.py
|
# Convert a number from base n to base m (m and n must both be integers greater than 1).
# n --> 10
# 10 --> m
def bs(ten, m):
    '''
    Convert a base-10 integer to base m.
    :param ten: base-10 integer
    :param m: target base
    :return: generator yielding the base-m digits as characters, least significant first
    '''
    digits = '0123456789abcdefghijklmnopqrstuvwxyz'
    while ten:
        yield digits[ten % m]
        ten = ten // m
def start(num, n, m):
    '''
    :param num: input number, made of digits and lowercase letters
    :param n: base of the input number (integer greater than 1)
    :param m: base of the output number (integer greater than 1)
    :return: the converted number as a string
    '''
    ten = int(str(num), n)  # n --> 10
    bs_num = bs(ten, m)
    return ''.join(bs_num)[::-1]
if __name__ == '__main__':
print(start(1000111, 2, 16))
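    # Worked example (illustrative): 1000111 read in base 2 is 71 in base 10,
    # and 71 = 4 * 16 + 7, so the call above prints '47'.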
|
zymouse
|
/bs/Bs.py
|
Bs.py
|
from .Bs import start
|
zymouse
|
/bs/__init__.py
|
__init__.py
|
Zymp
====
Zymp is a Python library to design "restriction site arrays", which are
compact sequences with many restriction sites.
For instance here is a 159-nucleotide sequence made with Zymp, with 49 enzyme
recognition sites (out of 52 provided). That's a frequency of around
3 nucleotides per site:
.. image:: https://raw.githubusercontent.com/Edinburgh-Genome-Foundry/zymp/master/docs/_static/images/example_array.png
:width: 800
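A minimal usage sketch, mirroring the example in the project README (the enzyme
names below are purely illustrative):
.. code:: python
    from zymp import stacked_sites_array
    seq, sites_in_seq, leftover = stacked_sites_array(
        ['EcoRI', 'BamHI', 'HindIII'],        # sites we want packed into the array
        forbidden_enzymes=['BsmBI', 'BsaI'],  # sites that must not appear
        unique_sites=True, tries=100)
    print("Length:", len(seq), "- sites found:", sites_in_seq, "- left out:", leftover)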
Infos
-----
**PIP installation:**
.. code:: bash
pip install zymp
**Github Page:**
`<https://github.com/Edinburgh-Genome-Foundry/zymp>`_
**License:** MIT, Copyright Edinburgh Genome Foundry
More biology software
---------------------
.. image:: https://raw.githubusercontent.com/Edinburgh-Genome-Foundry/Edinburgh-Genome-Foundry.github.io/master/static/imgs/logos/egf-codon-horizontal.png
:target: https://edinburgh-genome-foundry.github.io/
Zymp is part of the `EGF Codons <https://edinburgh-genome-foundry.github.io/>`_
synthetic biology software suite for DNA design, manufacturing and validation.
|
zymp
|
/zymp-0.1.3.tar.gz/zymp-0.1.3/pypi-readme.rst
|
pypi-readme.rst
|
.. raw:: html
<p align="center">
<img alt="Zymp" title="Zymp" src="https://raw.githubusercontent.com/Edinburgh-Genome-Foundry/zymp/master/docs/_static/images/title.png" width="300">
<br />
</p>
.. image:: https://github.com/Edinburgh-Genome-Foundry/zymp/actions/workflows/build.yml/badge.svg
:target: https://github.com/Edinburgh-Genome-Foundry/zymp/actions/workflows/build.yml
:alt: GitHub CI build status
.. image:: https://coveralls.io/repos/github/Edinburgh-Genome-Foundry/zymp/badge.svg?branch=master
:target: https://coveralls.io/github/Edinburgh-Genome-Foundry/zymp?branch=master
**Zymp** is a Python library to produce small sequences of DNA packed with enzyme
restriction sites. You specify the enzymes you want, the ones you don't want,
whether you want the sites to be unique, or any other condition, and Zymp will
attempt to find a compact sequence satisfying all of these constraints (it really
focuses on sequence shortness).
**Warning:** Zymp is implemented with a "whatever works well enough"
philosophy. It has a lot of "whatever" but it generally works "well enough".
The algorithm is greedy with many simplifications so don't expect perfect solutions.
Examples
--------
Here is how you design a sequence:
.. code:: python
from zymp import (stacked_sites_array, plot_sequence_sites,
annotate_enzymes_sites, write_record)
enzymes_names = [
'AccI', 'AclI', 'AflII', 'AflIII', 'AgeI', 'ApaLI', 'AseI',
'AvaI', 'BamHI', 'BanII', 'BlnI', 'BmtI', 'BsmI', 'BssHII',
'DdeI', 'DraI', 'Eco47III', 'EcoRI', 'EcoRV', 'HindII',
'HindIII', 'HinfI', 'HpaI', 'KpnI', 'MfeI', 'MluI',
'MspA1I', 'MunI', 'NaeI', 'NcoI', 'NdeI', 'NheI', 'NotI',
'NsiI', 'NspI', 'PstI', 'PvuI', 'PvuII', 'SacI', 'SacII',
'SalI', 'ScaI', 'SfaNI', 'SnaBI', 'SpeI', 'SphI', 'SspI',
'StyI', 'VspI', 'XhoI', 'XmaI', 'ZraI'
]
forbidden_enzymes=['BsmBI', 'BsaI']
# DESIGN AN OPTIMIZED SEQUENCE WITH ZYMP
seq, sites_in_seq, leftover = stacked_sites_array(
enzymes_names, forbidden_enzymes=forbidden_enzymes,
unique_sites=True, tries=100)
print ("Sequence length:", len(seq),
"\nRestriction sites:", len(sites_in_seq),
"\nSites not included: ", leftover)
# PLOT A SUMMARY
ax = plot_sequence_sites(seq, enzymes_names)
ax.figure.savefig("stacked_array.pdf", bbox_inches='tight')
# WRITE THE SEQUENCE AND SITE ANNOTATIONS AS A RECORD
record = annotate_enzymes_sites(
seq, enzymes_names, forbidden_enzymes=forbidden_enzymes)
write_record(record, 'stacked_site_array.gb')
**Plot output:**
.. raw:: html
<p align="center">
<img alt="stacked array" title="stacked array" src="https://raw.githubusercontent.com/Edinburgh-Genome-Foundry/zymp/master/docs/_static/images/example_array.png" width="800">
<br />
</p>
**Console output:**
.. code:: bash
Sequence length: 159
Restriction sites: 49
Sites not included: {'NcoI', 'HpaI', 'SacII'}
Zymp has created a 159-nucleotide sequence with 49 of the 52 restriction sites
we specified; that's only ~3 nucleotides per site! And the sequence is free
of BsmBI and BsaI sites, so it is compatible with Golden Gate assembly.
If NcoI and HpaI are your favorite enzymes, you may be disappointed that they
are not in the final sequence. Zymp allows you to add validity conditions
for the result:
.. code:: python
from zymp import stacked_sites_array
def success_condition(seq, sites_in_seq, leftover):
return {'NcoI', 'HpaI'}.issubset(sites_in_seq)
seq, sites_in_seq, leftover = stacked_sites_array(
enzymes_names, forbidden_enzymes=forbidden_enzymes,
tries=100, success_condition=success_condition)
print ("Sequence length:", len(seq),
"\nRestriction sites:", len(sites_in_seq),
"\nSites not included: ", leftover)
**New console output:**
.. code:: bash
Sequence length: 158
Restriction sites: 47
Sites not included: {'SacII', 'SacI', 'XhoI', 'BlnI', 'XmaI'}
Installation
------------
You can install zymp through PIP:
.. code::
pip install zymp
Alternatively, you can unzip the sources in a folder and type:
.. code::
python setup.py install
License = MIT
-------------
Zymp is an open-source software originally written at the
`Edinburgh Genome Foundry <http://genomefoundry.org>`_ by
`Zulko <https://github.com/Zulko>`_ and
`released on Github <https://github.com/Edinburgh-Genome-Foundry/zymp>`_
under the MIT licence (Copyright 2018 Edinburgh Genome Foundry).
Everyone is welcome to contribute!
More biology software
---------------------
.. image:: https://raw.githubusercontent.com/Edinburgh-Genome-Foundry/Edinburgh-Genome-Foundry.github.io/master/static/imgs/logos/egf-codon-horizontal.png
:target: https://edinburgh-genome-foundry.github.io/
Zymp is part of the `EGF Codons <https://edinburgh-genome-foundry.github.io/>`_ synthetic biology software suite for DNA design, manufacturing and validation.
|
zymp
|
/zymp-0.1.3.tar.gz/zymp-0.1.3/README.rst
|
README.rst
|
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "0.9.6"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(tarball, install_args=()):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)
sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15):
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
was_imported = 'pkg_resources' in sys.modules or \
'setuptools' in sys.modules
try:
import pkg_resources
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("setuptools>=" + version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
"The required version of setuptools (>=%s) is not available,\n"
"and can't be installed while this script is running. Please\n"
"install a more recent version first, using\n"
"'easy_install -U setuptools'."
"\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return _do_download(version, download_base, to_dir,
download_delay)
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tgz_name = "setuptools-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
log.warn("Downloading %s", url)
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(saveto, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
return os.path.realpath(saveto)
def _extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
options, args = parser.parse_args()
# positional arguments are ignored
return options
def main(version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
options = _parse_args()
tarball = download_setuptools(download_base=options.download_base)
return _install(tarball, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main())
|
zymp
|
/zymp-0.1.3.tar.gz/zymp-0.1.3/ez_setup.py
|
ez_setup.py
|
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
exec(open("zymp/version.py").read()) # loads __version__
setup(
name="zymp",
version=__version__,
author="Zulko",
url="https://github.com/Edinburgh-Genome-Foundry/zymp",
description="Design compact sequences with many enzyme restriction sites.",
long_description=open("pypi-readme.rst").read(),
license="MIT",
keywords="DNA sequence design restriction site array",
packages=find_packages(exclude="docs"),
install_requires=[
"numpy",
"dnachisel",
"dna_features_viewer",
"biopython",
"proglog",
],
)
|
zymp
|
/zymp-0.1.3.tar.gz/zymp-0.1.3/setup.py
|
setup.py
|
from zymp import (
stacked_sites_array,
plot_sequence_sites,
annotate_enzymes_sites,
write_record,
)
enzymes_names = [
"AccI",
"AclI",
"AflII",
"AflIII",
"AgeI",
"ApaLI",
"AseI",
"AvaI",
"BamHI",
"BanII",
"BlnI",
"BmtI",
"BsmI",
"BssHII",
"DdeI",
"DraI",
"Eco47III",
"EcoRI",
"EcoRV",
"HindII",
"HindIII",
"HinfI",
"HpaI",
"KpnI",
"MfeI",
"MluI",
"MspA1I",
"MunI",
"NaeI",
"NcoI",
"NdeI",
"NheI",
"NotI",
"NsiI",
"NspI",
"PstI",
"PvuI",
"PvuII",
"SacI",
"SacII",
"SalI",
"ScaI",
"SfaNI",
"SnaBI",
"SpeI",
"SphI",
"SspI",
"StyI",
"VspI",
"XhoI",
"XmaI",
"ZraI",
]
forbidden_enzymes = ["BsmBI", "BsaI"]
sequence, enzymes_in_sequence, enzymes_not_in_sequence = stacked_sites_array(
enzymes_names, forbidden_enzymes=forbidden_enzymes, tries=100
)
print(
"Sequence length:",
len(sequence),
"\nRestriction sites:",
len(enzymes_in_sequence),
"\nSites not included: ",
enzymes_not_in_sequence,
)
# PLOT A SUMMARY
ax = plot_sequence_sites(sequence, enzymes_names)
ax.figure.savefig("stacked_array.pdf", bbox_inches="tight")
# WRITE THE SEQUENCE AND SITE ANNOTATIONS AS A RECORD
record = annotate_enzymes_sites(
sequence, enzymes_names, forbidden_enzymes=forbidden_enzymes
)
write_record(record, "stacked_site_array.gb")
|
zymp
|
/zymp-0.1.3.tar.gz/zymp-0.1.3/examples/basic_example.py
|
basic_example.py
|
MIT License
Copyright (c) 2018 The Python Packaging Authority
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
zymptest
|
/zymptest-0.2.2.tar.gz/zymptest-0.2.2/README.rst
|
README.rst
|
#!/usr/bin/env python
from __future__ import print_function
from setuptools import setup
import setuptools
setup(name="zymptest",
version="0.2.2",
author="Yimin Zhang",
author_email="[email protected]",
description="test",
long_description=open("README.rst").read(),
license="MIT",
url="",
packages=['zymptest'],
install_requires=[],
classifiers=[ "Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: Microsoft :: Windows",
"Topic :: Text Processing :: Indexing",
"Topic :: Utilities",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
],
)
|
zymptest
|
/zymptest-0.2.2.tar.gz/zymptest-0.2.2/setup.py
|
setup.py
|
# -*- coding: UTF-8 -*-
# from oyospider.common.db_operate import MySQLdbHelper
import random
import sys
import time
sys.path.append(r'../../')
import requests
from threadpool import ThreadPool, makeRequests
from oyospider.common.db_operate import MySQLdbHelper
class ProxyIPHelper(object):
def __init__(self):
self.proxy_ip_table = "dm_proxy_ip_t"
self.mydb = MySQLdbHelper()
def get_usable_proxy_ip(self):
sql = "select * from dm_proxy_ip_t"
records = self.mydb.executeSql(sql)
for record in records:
print("get_usable_proxy_ip=" + record[1])
return records
def get_usable_anon_proxy_ip(self):
"""获取可用的高匿 代理IP
"""
sql = "SELECT * FROM dm_proxy_ip_t p WHERE p.anon LIKE '%高匿%' AND DATE_FORMAT( succTime, '%Y-%m-%d' ) = ( SELECT DATE_FORMAT( max( succTime ), '%Y-%m-%d' ) FROM dm_proxy_ip_t )"
records = self.mydb.executeSql(sql)
# for record in records:
# print record[1]
return records
def get_usable_anon_proxy_ip_str(self):
records = self.get_usable_anon_proxy_ip()
ip_port = []
for t in records:
ip_port.append("http://" + t[1] + ":" + t[2])
return ip_port
def find_all_proxy_ip(self):
"""
        Query all proxy IPs from the database
"""
db_helper = MySQLdbHelper()
# proxy_ip_list = db_helper.select("proxy_ip", fields=["protocol", "ip", "port"])
# proxy_ip_list = db_helper.executeSql("select protocol,ip,port from proxy_ip where 1=1 limit 1")
proxy_ip_list = db_helper.executeSql(
"SELECT protocol,ip,port,source FROM proxy_ip as t order by t.id DESC limit 2000;")
return proxy_ip_list
def find_china_proxy_ip(self, limit):
"""
        Query proxy IPs located in China, used as fallback data
"""
db_helper = MySQLdbHelper()
# proxy_ip_list = db_helper.select("proxy_ip", fields=["protocol", "ip", "port"])
sql = "select protocol,ip,`port`,source from proxy_ip t where 1=1 and ( t.area like '%山东%' or t.area like '%江苏%' " \
"or t.area like '%上海%' or t.area like '%浙江%' or t.area like '%安徽%' or t.area like '%福建%' or t.area like '%江西%' " \
"or t.area like '%广东%' or t.area like '%广西%' or t.area like '%海南%' or t.area like '%河南%' or t.area like '%湖南%' " \
"or t.area like '%湖北%' or t.area like '%北京%' or t.area like '%天津%' or t.area like '%河北%' or t.area like '%山西%' " \
"or t.area like '%内蒙%' or t.area like '%宁夏%' or t.area like '%青海%' or t.area like '%陕西%' or t.area like '%甘肃%' " \
"or t.area like '%新疆%' or t.area like '%四川%' or t.area like '%贵州%' or t.area like '%云南%' or t.area like '%重庆%' " \
"or t.area like '%西藏%' or t.area like '%辽宁%' or t.area like '%吉林%' or t.area like '%黑龙%' or t.area like '%香港%' " \
"or t.area like '%澳门%' or t.area like '%台湾%') order by t.create_time desc limit " + str(limit)
proxy_ip_list = db_helper.executeSql(sql)
return proxy_ip_list
def callback_test(self, request, result):
print("callback_test")
def get_all_proxy_ip_useable(self, target_site, target_url, put_proxy_to_redis):
"""
        Test each proxy's validity against the target URL
"""
proxy_ip_list = self.find_all_proxy_ip()
# useable_ip_list = []
batchno = int(round(time.time() * 1000))
# timestamp = int(round(time.time()))
par_list = []
for proxy_ip in proxy_ip_list:
paras = []
paras.append(proxy_ip[0])
paras.append(proxy_ip[1])
paras.append(proxy_ip[2])
paras.append(proxy_ip[3])
paras.append(target_site)
paras.append(target_url)
paras.append(batchno)
paras.append(put_proxy_to_redis)
par_list.append((paras, None))
# print paras
print(par_list)
pool = ThreadPool(50)
requests = makeRequests(self.test_proxy_ip_useable1, par_list, self.callback_test)
for req in requests:
pool.putRequest(req)
pool.wait()
# for proxy_ip in proxy_ip_list:
# # protocol = proxy_ip[0]
# # ip = proxy_ip[1]
# # port = proxy_ip[2]
#
# test_proxy_id = self.test_proxy_ip_useable(proxy_ip[0], proxy_ip[1], proxy_ip[2], target_url)
# print "proxy_ip = " + str(test_proxy_id)
# if test_proxy_id:
# put_proxy_to_redis(proxy_ip[0], proxy_ip[1], proxy_ip[2])
# useable_ip_list.append(test_proxy_id)
# # redis_helper
# return useable_ip_list
# redis_helper
def test_proxy_ip_useable(self, protocol, ip, port, target_url):
proxy = ""
if protocol:
proxy = protocol + "://" + ip + ":" + port
else:
proxy = "http://" + ip + ":" + port
# proxy ="18017115578:194620chao@"+ ip + port
# user_agent_list = RotateUserAgentMiddleware()
user_agent_list = [ \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1" \
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
headers = {
"User-Agent": random.choice(user_agent_list)
}
        proxy_obj = requests.utils.urlparse(proxy)
        test_proxies = None  # stays None for schemes other than HTTP/HTTPS
        if proxy_obj.scheme.upper() == 'HTTP':
test_url = target_url
test_proxies = {
"http": proxy_obj.netloc
}
elif proxy_obj.scheme.upper() == 'HTTPS':
test_url = target_url
test_proxies = {
"https": proxy_obj.netloc
}
if test_proxies:
            # Test the proxy's validity
try:
print("proxy:'%s',test_url:'%s'" % (proxy, test_url))
response = requests.head(test_url, headers=headers, proxies=test_proxies, timeout=8)
print("proxy:'%s',test_url:'%s',status_code:'%s'" % (proxy, test_url, response.status_code))
if response.status_code == 200:
# return proxy_ip
return protocol, ip, port
except Exception as e:
print(e)
else:
return None
def test_proxy_ip_useable1(self, protocol, ip, port, source, target_site, target_url, batchno, put_proxy_to_redis):
proxy = ""
if protocol:
proxy = protocol + "://" + ip + ":" + port
else:
proxy = "http://" + ip + ":" + port
# user_agent_list = RotateUserAgentMiddleware()
user_agent_list = [ \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1" \
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
headers = {
"User-Agent": random.choice(user_agent_list)
}
        proxy_obj = requests.utils.urlparse(proxy)
        test_proxies = None  # stays None for schemes other than HTTP/HTTPS
        if proxy_obj.scheme.upper() == 'HTTP':
test_url = target_url
test_proxies = {
"http": proxy_obj.netloc
}
elif proxy_obj.scheme.upper() == 'HTTPS':
test_url = target_url
test_proxies = {
"https": proxy_obj.netloc
}
if test_proxies:
            # Test the proxy's validity
try:
print("proxy:'%s',test_url:'%s',source:'%s'" % (proxy, test_url, source))
response = requests.head(test_url, headers=headers, proxies=test_proxies, timeout=8)
print("proxy:'%s',test_url:'%s',source:'%s',status_code:'%s'" % (
proxy, test_url, source, response.status_code))
if response.status_code == 200:
# return proxy_ip
if put_proxy_to_redis:
print("put_proxy_to_redis:%s,%s,%s" % (protocol, ip, port))
put_proxy_to_redis(protocol, ip, port, source, target_site, batchno, 60 * 15)
return protocol, ip, port
except Exception as e:
print(e)
else:
return None
|
zymtest2
|
/zymtest2-0.1.1-py3-none-any.whl/pytest/proxy_ip_oper.py
|
proxy_ip_oper.py
|
# -*- coding: UTF-8 -*-
import datetime
import json
import sys
import time
import urllib2
sys.path.append(r'../../')
from oyospider.common.db_operate import MySQLdbHelper
reload(sys)
sys.setdefaultencoding('utf-8')
def send_monitor_info():
db_helper = MySQLdbHelper()
sql = """
SELECT
ht.ota_name,
ht.ota_hotel_count,
tp.hotel_crawl_count,
room_price_count,
DATE_FORMAT(begin_time,'%%Y-%%m-%%d %%H:%%i') begin_time,
DATE_FORMAT(end_time,'%%Y-%%m-%%d %%T') end_time,
DATE_FORMAT(checkin_date,'%%Y-%%m-%%d') checkin_date,
batch_no
FROM
(
SELECT
h.ota_name,
count( 1 ) ota_hotel_count
FROM
dm_hotel_monitor_ota_map_t h
WHERE
h.ota_hotel_url <> ''
AND h.ota_hotel_url <> '/'
GROUP BY
h.ota_name
) ht
INNER JOIN (
SELECT
t.ota_name,
count( DISTINCT t.ota_hotel_id ) hotel_crawl_count,
count( 1 ) room_price_count,
min( create_time ) begin_time,
max( create_time ) end_time,
t.checkin_date,
t.batch_no batch_no
FROM
hotel_room_price_monitor t
WHERE
t.create_time >= '%s'
AND t.create_time < '%s'
GROUP BY
t.ota_name,
t.checkin_date,
DATE_FORMAT( t.create_time, '%%Y-%%m-%%d %%H' )
ORDER BY
t.ota_name
) tp
WHERE
ht.ota_name = tp.ota_name and ht.ota_name = '%s'
order by ota_name ,batch_no desc
"""
end_time = datetime.datetime.strptime(time.strftime('%Y-%m-%d %H', time.localtime(time.time())) + ":59:59",
"%Y-%m-%d %H:%M:%S")
end_time_str = datetime.datetime.strftime(end_time, "%Y-%m-%d %H:%M:%S")
begin_time_str = datetime.datetime.strftime(end_time + datetime.timedelta(hours=-3), "%Y-%m-%d %H:%M:%S")
send_url = "https://oapi.dingtalk.com/robot/send?access_token=3b0cb4f0d390d8b3d12d76c198d733c780ebc0532f876d9e7801c6ff011f3da1"
for ota_name in ["ctrip", "meituan"]:
record = db_helper.executeSql(sql % (begin_time_str, end_time_str, ota_name))
msg_body = []
hotel_count = 0
for r in record:
hotel_count = r[1]
msg_body.append(
" > ###### 爬取时间:%s \n\n > ###### 入住日期:%s \n\n> ###### 酒店总数:%s \n\n > ###### 房价总数:%s \n\n ###### \n\n" % (
r[4], r[6], r[2], r[3]))
head_msg = " #### 全网最低价项目 #### \n\n %s 最近三次爬取统计:\n\n ##### 映射酒店总数:%s \n\n ———————————————— \n\n " % (
ota_name, hotel_count)
head_msg = head_msg + "\n\n ———————————————— \n\n".join(msg_body)
        # Send the message
post_data = {'msgtype': 'markdown',
'markdown': {'title': '全网最低价',
'text': head_msg}
}
headers = {'Content-Type': 'application/json; charset=utf-8'}
req = urllib2.Request(url=send_url, headers=headers, data=json.dumps(post_data))
res_data = urllib2.urlopen(req)
res = res_data.read()
print res
def send_scrapy_log_info():
print "test"
if __name__ == '__main__':
send_monitor_info()
|
zymtest2
|
/zymtest2-0.1.1-py3-none-any.whl/pytest/ding_talk_warn.py
|
ding_talk_warn.py
|
# -*- coding: UTF-8 -*-
import sys
import threading
import time
import schedule
sys.path.append(r'../../')
from oyospider.common.get_meituan_token import MeiTuanTokenHelper
from oyospider.common.proxy_ip_pull_redis import RedisIPHelper
from oyospider.common.redis_operate import RedisHelper
def get_all_proxy_to_db_and_redis_job():
redis_helper = RedisHelper()
ctrip_thread = threading.Thread(target=redis_helper.load_usable_proxy_ip_to_redis,
args=("ctrip", "https://hotels.ctrip.com/hotel/428365.html",))
ctrip_thread.start()
meituan_thread = threading.Thread(target=redis_helper.load_usable_proxy_ip_to_redis,
args=("meituan", "https://www.meituan.com/jiudian/157349277/",))
meituan_thread.start()
ip_thread = threading.Thread(target=redis_helper.get_database_proxy_ip)
ip_thread.start()
def get_dailiyun_proxy_to_redis_job():
redis_helper = RedisIPHelper()
ctrip_thread = threading.Thread(target=redis_helper.load_usable_proxy_ip_to_redis,
args=("ctrip", "https://hotels.ctrip.com/hotel/428365.html",))
ctrip_thread.start()
meituan_thread = threading.Thread(target=redis_helper.load_usable_proxy_ip_to_redis,
args=("meituan", "https://www.meituan.com/jiudian/157349277/",))
meituan_thread.start()
def get_meituan_token():
meituan_helper = MeiTuanTokenHelper()
meituan_token_thread = threading.Thread(target=meituan_helper.start_requests)
meituan_token_thread.start()
if __name__ == '__main__':
try:
get_all_proxy_to_db_and_redis_job()
get_dailiyun_proxy_to_redis_job()
get_meituan_token()
#
schedule.every(10).minutes.do(get_all_proxy_to_db_and_redis_job)
schedule.every(2).minutes.do(get_dailiyun_proxy_to_redis_job)
schedule.every(20).seconds.do(get_meituan_token)
except Exception as e:
print(e)
#
while True:
try:
schedule.run_pending()
time.sleep(1)
except Exception as e:
print(e)
# num = [1, 3, 6, 4, 2, ]
# for i in range(3):
# print i, num[i]
|
zymtest2
|
/zymtest2-0.1.1-py3-none-any.whl/pytest/schedule_task.py
|
schedule_task.py
|
# -*- coding: UTF-8 -*-
import re
import sys
import time
sys.path.append(r'../../')
import requests
from oyospider.common.db_operate import MySQLdbHelper
class ProxyIpExtractHelper(object):
"""
    Helper class that fetches proxy IPs from the various provider sites
"""
def get_from_xiguan(self, fetch_num):
"""
        Fetch proxies from the Xigua proxy API and store them in the database
        API docs: http://www.xiguadaili.com/api
"""
for protocol in ["http", "https"]:
if not fetch_num:
fetch_num = "100"
# protocol = "http"
api_url = "http://api3.xiguadaili.com/ip/?tid=556077616504319&category=2&show_area=true&show_operator=true&num=%s&protocol=%s" % (
fetch_num, protocol)
# api_url = "http://dly.134t.com/query.txt?key=NPBF565B9C&word=&count=%s"%(fetch_num)
# api_url = "http://svip.kdlapi.com/api/getproxy/?orderid=963803204081436&num=%s&b_pcchrome=1&b_pcie=1&b_pcff=1&protocol=2&method=2&an_an=1&an_ha=1&sep=1"%(fetch_num)
print("get_from_xiguan url = " + api_url)
proxy_ips = []
response = requests.get(api_url)
res = response.text
# print res
if res:
ip_list = res.split("\r\n")
field = ["ip", "port", "operator", "area", "protocol", "anon", "delay", "source", "type", "create_time"]
values = []
for ip_str in ip_list:
# print type(ip_str)
# print re.findall(r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}", ip_str)[0]
# print ip_str
ip = re.findall(r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}", ip_str)[0]
port = re.findall(r":(\d+).*", ip_str)[0]
area = ""
if re.findall(r"@(.*)#", ip_str):
area = re.findall(r"@(.*)#", ip_str)[0]
operator = ""
if re.findall(r"#(.*)", ip_str):
operator = re.findall(r"#(.*)", ip_str)[0]
# proxy_ip = ({"ip": ip, "port": port, "area": area, "operator": operator, "protocol": protocol})
value = []
value.append(ip)
value.append(port)
value.append(operator)
value.append(area)
value.append(protocol)
value.append("2")
value.append("")
value.append("xiguadaili") # 代理IP来源
value.append("1") # 收费
value.append(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
values.append(value)
# print value
# print proxy_ip
# proxy_ips.append(proxy_ip)
db_helper = MySQLdbHelper()
                # Insert into the staging table
                db_helper.insertMany("proxy_ip_swap", field, values)
                # Insert into the main table, skipping rows that already exist (deduplication)
insert_sql = "insert into proxy_ip(ip,port,operator,area,protocol,anon,delay,source,type,create_time) select ip,port,operator,area,protocol,anon,delay,source,type,create_time from proxy_ip_swap s where not exists (select null from proxy_ip p where p.ip = s.ip and p.port = s.port and p.protocol = s.protocol)"
db_helper.executeCommit(insert_sql)
return proxy_ips
def get_from_dailiyun(self):
"""
        Fetch proxies from the Dailiyun API and push them straight to Redis
        API docs: https://www.showdoc.cc/bjt5521?page_id=157160154849769
"""
# api_url = "http://dly.134t.com/query.txt?key=NPBF565B9C&word=&count=1000"
api_url = "http://dly.134t.com/query.txt?key=NPBF565B9C&word=&count=100&detail=true"
print("get_from_dailiyun url = " + api_url)
response = requests.get(api_url)
res = response.text
if res:
ip_list = res.split("\r\n")
return ip_list
def get_all_proxy_site(self):
"""
        Collect proxy IPs from all provider sites and APIs
"""
print("get_all_proxy_site")
db_helper = MySQLdbHelper()
        # 1. Xigua proxy provider
        self.get_from_xiguan(1000)
        # Empty the staging table
        truncate_sql = "truncate table proxy_ip_swap"
db_helper.executeCommit(truncate_sql)
# print proxy_ip["ip"] + "," + proxy_ip["port"] + "," + proxy_ip["area"] + "," + proxy_ip[
# "operator"] + "," + proxy_ip["protocol"]
# for ip_str in range(5):
# print proxy_ip["ip"] + "," + proxy_ip["port"] + "," + proxy_ip["area"] + "," + proxy_ip[
# "operator"] + "," + proxy_ip["protocol"]
if __name__ == '__main__':
# str = "61.222.87.87:38157@台湾省#电信"
# print re.findall(r":(\d+).*", str)[0]
# print re.findall(r"@(.*)#", str)[0]
# print re.findall(r"#(.*)", str)[0]
#
# print re.findall(r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}", str)[0]
extract_helper = ProxyIpExtractHelper()
extract_helper.get_all_proxy_site()
# adapter.get_all_proxy_site()
# adapter.test_proxy_ip_useable("hotel.meituan.com/shanghai/")
# adapter.load_usable_proxy_ip_to_redis("meiTuan")
|
zymtest2
|
/zymtest2-0.1.1-py3-none-any.whl/pytest/proxy_ip_pull.py
|
proxy_ip_pull.py
|
# -*- coding: utf-8 -*-
import time
def zip_default():
    # Format the current timestamp into the standard form (used as the zip file name)
nowday_time = time.strftime('%Y%m%d_default.zip', time.localtime(time.time()))
# print(nowday_time)
zip_ml = "zip -r %s ./default.log" % (nowday_time)
print(zip_ml)
return zip_ml
if __name__ == '__main__':
zip_default()
|
zymtest2
|
/zymtest2-0.1.1-py3-none-any.whl/pytest/zip_default.py
|
zip_default.py
|
# -*- coding: UTF-8 -*-
import random
import sys
import threading
sys.path.append(r'../../')
import redis
from redis import ConnectionError
from scrapy.utils.project import get_project_settings
from oyospider.common.proxy_ip_oper import ProxyIPHelper
from oyospider.common.proxy_ip_pull import ProxyIpExtractHelper
import gevent.monkey
gevent.monkey.patch_all()
class RedisHelper(object):
def __init__(self):
settings = get_project_settings()
host = settings.get('REDIS_HOST', '')
port = settings.get('REDIS_PORT')
password = settings.get('REDIS_PASSWORD')
self.dailiyun_username = settings.get('DAILIYUN_USERNAME')
self.dailiyun_password = settings.get('DAILIYUN_PASSWORD')
# self.pool = Pool(1)
# password = settings.get("REDIS_PARAMS").get('password')
try:
self.redis_con = redis.StrictRedis(host=host, port=port, password=password)
# ping = self.ping()
        except NameError:
            raise Exception('cannot import redis library')
        except ConnectionError as e:
            raise Exception('redis connection error: %s' % str(e))
def get_redis_conn(self):
return self.redis_con
def put_proxy_to_redis_pool(self, protocol, ip, port, source, target_site, batchno, expire_time):
"""
        Put a usable proxy IP into the Redis pool
:param protocol:
:param ip:
:param port:
:param source:
:param target_site:
:param batchno:
:param expire_time
:return:
"""
key = "proxy_ip_pool:%s:%s|%s|%s|%s" % (target_site, source, protocol, ip, port)
self.redis_con.set(key, "")
self.redis_con.expire(key, expire_time)
def put_proxy_ip_to_redis_queue(self, protocol, ip, port, source, target_site, batchno, expire_time):
"""
        Put a usable proxy IP into the Redis queue
:param protocol:
:param ip:
:param port:
:param source:
:param target_site:
:param batchno:
:param expire_time
:return:
"""
key = "proxy_ip_queue:%s:%s|%s|%s|%s" % (target_site, source, protocol, ip, port)
self.redis_con.set(key, "")
self.redis_con.expire(key, expire_time)
def put_proxy_ip_to_redis_queue(self, targer_site, proxy_ip_str):
"""
        Push a usable proxy IP string onto the Redis queue
        (note: this definition replaces the put_proxy_ip_to_redis_queue defined above, which takes different arguments)
:param targer_site:
:param proxy_ip_str:
:return:
"""
key = "proxy_ip_queue:%s" % targer_site
self.redis_con.rpush(key, proxy_ip_str)
self.redis_con.expire(key, 60 * 10)
def load_repeat_proxy_ip_ctrip(self):
name = "ctrip_ip"
proxy = self.redis_con.lpop(name)
return proxy
def load_repeat_proxy_ip_meituan(self):
name = "meituan_ip"
proxy = self.redis_con.lpop(name)
return proxy
def load_usable_proxy_ip_to_redis(self, target_site, target_url):
"""
        Load usable proxy IPs into Redis
:param target_site:
:param target_url:
:return:
"""
        # Load into Redis
# print"============ load_usable_proxy_ip_to_redis init============="
proxy_ip_helper = ProxyIPHelper()
proxy_ip_helper.get_all_proxy_ip_useable(target_site, target_url,
self.put_proxy_to_redis_pool)
def get_usable_proxy_ip(self, site):
"""
        Get a usable proxy IP; if Redis has none, fall back to recent proxies from the database while reloading usable proxies into Redis in the background
:param site:
:return:
"""
        # Check whether Redis already holds any proxies
# print "len = %s" % len(self.redis_con.sscan_iter(site + "_Ips"))
site_keys = []
print "get ip from redis "
for key in self.redis_con.keys(site + "Ips*"):
site_keys.append(key)
print "redis keys = " + str(site_keys)
if site_keys:
site_ips = self.redis_con.srandmember(max(site_keys))
if site_ips:
return site_ips.split("|")
# print site_ips(0)
# print random.choice(site_ips)
proxy_ip_helper = ProxyIPHelper()
china_proxy_ips = proxy_ip_helper.find_china_proxy_ip(100)
if china_proxy_ips:
            # Asynchronously load proxies into Redis
# self.pool.apply_async(self.load_usable_proxy_ip_to_redis, args=(site,))
thread = threading.Thread(target=self.load_usable_proxy_ip_to_redis, args=(site,))
# thread.setDaemon(True)
thread.start()
thread.join()
            # Return a random IP from the table to the caller right away
return random.choice(china_proxy_ips)
else:
return None
def get_database_proxy_ip(self):
p_ip = ProxyIpExtractHelper()
p_ip.get_all_proxy_site()
def get_usable_proxy_ip_from_redis_queue(self, target_site):
"""
        Pop a proxy IP from the Redis queue
        :param target_site:
        :return: string in the format source|protocol|ip|port
"""
key = "proxy_ip_queue:%s" % target_site
proxy_ip_queue = self.redis_con.lpop(key)
print "get_usable_proxy_ip_from_redis_queue,proxy_ip = %s" % proxy_ip_queue
return proxy_ip_queue
def get_usable_proxy_ip_from_redis_pool(self, target_site):
"""
        Pick a random proxy IP from the Redis pool
        :param target_site:
        :return: string in the format source|protocol|ip|port
"""
        # Dailiyun supplies far fewer proxies than Xigua, so bias the random choice towards Dailiyun keys
        # by picking one of these key patterns at random
random_key = ["dailiyun|*", "xiguadaili|*", "*"]
sub_key = random.choice(random_key)
match_key = "proxy_ip_pool:%s:%s" % (target_site, sub_key)
print "match_key = %s" % match_key
# print "get_usable_proxy_ip_from_redis_pool = %s" % match_key
site_keys = []
for key in self.redis_con.keys(match_key):
site_keys.append(key)
# print "get_usable_proxy_ip_from_redis_pool size :%s " % len(site_keys)
proxy_ip_pool = None
if len(site_keys) > 0:
proxy_ip_key = random.choice(site_keys)
proxy_ip_pool = proxy_ip_key.split(":")[2]
print "get_usable_proxy_ip_from_redis_pool,proxy_ip = %s" % proxy_ip_pool
return proxy_ip_pool
def get_usable_proxy_ip_from_db(self):
"""
        Pick a random proxy IP from the database
        :return: string in the format source|protocol|ip|port
"""
proxy_ip_helper = ProxyIPHelper()
china_proxy_ips = proxy_ip_helper.find_all_proxy_ip()
proxy_ip_recrod = random.choice(china_proxy_ips)
proxy_ip_db = None
if proxy_ip_recrod:
proxy_ip_db = "%s|%s|%s|%s" % (
proxy_ip_recrod[3], proxy_ip_recrod[0], proxy_ip_recrod[1], proxy_ip_recrod[2])
print "get_usable_proxy_ip_from_db,proxy_ip = %s" % proxy_ip_db
return proxy_ip_db
def get_usable_proxy_ip_v2(self, target_site):
"""
        Get a usable proxy IP following the priority order: Redis queue, then Redis pool, then database
        :return: string in the format source|protocol|ip|port
"""
        # 1. Take an IP from the Redis queue
proxy_ip_str = self.get_usable_proxy_ip_from_redis_queue(target_site)
if not proxy_ip_str:
            # The queue had no proxy IP, so
            # 2. take an IP from the Redis pool
proxy_ip_str = self.get_usable_proxy_ip_from_redis_pool(target_site)
if not proxy_ip_str:
                # 3. Take an IP from the database
proxy_ip_str = self.get_usable_proxy_ip_from_db()
return proxy_ip_str
def get_usable_request_proxy_ip(self, target_site):
"""
        Get a proxy IP that can be plugged straight into a request
        :return: proxy URL in the standard Scrapy request format, usable as-is; other formats need extra handling
"""
proxy_ip_str = self.get_usable_proxy_ip_v2(target_site)
proxy_ip_req = None
if proxy_ip_str:
            # Build the correct proxy string according to the proxy source
proxy_ip_info = proxy_ip_str.split("|")
proxy_source = proxy_ip_info[0]
if proxy_source == "dailiyun":
user_name = self.dailiyun_username
password = self.dailiyun_password
proxy_ip_req = "%s://%s:%s@%s:%s" % (
proxy_ip_info[1], user_name, password, proxy_ip_info[2], proxy_ip_info[3])
elif proxy_source == "xiguadaili":
proxy_ip_req = "%s://%s:%s" % (proxy_ip_info[1], proxy_ip_info[2], proxy_ip_info[3])
else:
print "unkown proxy_source:" + target_site
return proxy_ip_req, proxy_ip_str
if __name__ == '__main__':
redis_helper = RedisHelper()
ctrip_thread = threading.Thread(target=redis_helper.load_usable_proxy_ip_to_redis,
args=("ctrip", "https://hotels.ctrip.com/hotel/428365.html",))
ctrip_thread.start()
meituan_thread = threading.Thread(target=redis_helper.load_usable_proxy_ip_to_redis,
args=("meituan", "https://www.meituan.com/jiudian/157349277/",))
meituan_thread.start()
ip_thread = threading.Thread(target=redis_helper.get_database_proxy_ip)
ip_thread.start()
|
zymtest2
|
/zymtest2-0.1.1-py3-none-any.whl/pytest/redis_operate.py
|
redis_operate.py
|
# -*- coding: UTF-8 -*-
import re
import MySQLdb
from scrapy.utils.project import get_project_settings
class MySQLdbHelper(object):
"""操作mysql数据库,基本方法
"""
def __init__(self):
settings = get_project_settings()
self.DB_CONF = settings.get('DB_CONF')
db_conf = self.DB_CONF
self.host = db_conf['host']
self.username = db_conf['user']
self.password = db_conf['passwd']
self.database = db_conf['db']
self.port = db_conf['port']
self.charset = db_conf['charset']
self.con = None
self.cur = None
try:
self.con = MySQLdb.connect(host=self.host, user=self.username, passwd=self.password,
db=self.database, port=self.port, charset=self.charset)
# print self.host
            # All queries run on a cursor created from the connection con
self.cur = self.con.cursor()
except:
raise Exception("DataBase connect error,please check the db config.")
def close(self):
"""关闭数据库连接
"""
        if self.con:
            self.con.close()
        else:
            raise Exception("Database doesn't connect, close connection error; please check the db config.")
def getVersion(self):
"""获取数据库的版本号
"""
self.cur.execute("SELECT VERSION()")
return self.getOneData()
def getOneData(self):
        # Fetch a single row from the previous query
data = self.cur.fetchone()
return data
def creatTable(self, tablename, attrdict, constraint):
"""创建数据库表
args:
tablename :表名字
attrdict :属性键值对,{'book_name':'varchar(200) NOT NULL'...}
constraint :主外键约束,PRIMARY KEY(`id`)
"""
if self.isExistTable(tablename):
return
sql = ''
sql_mid = '`id` bigint(11) NOT NULL AUTO_INCREMENT,'
for attr, value in attrdict.items():
sql_mid = sql_mid + '`' + attr + '`' + ' ' + value + ','
sql = sql + 'CREATE TABLE IF NOT EXISTS %s (' % tablename
sql = sql + sql_mid
sql = sql + constraint
sql = sql + ') ENGINE=InnoDB DEFAULT CHARSET=utf8'
print 'creatTable:' + sql
self.executeCommit(sql)
def executeSql(self, sql=''):
"""执行sql语句,针对读操作返回结果集
args:
sql :sql语句
"""
try:
self.cur.execute(sql)
records = self.cur.fetchall()
return records
except MySQLdb.Error, e:
error = 'MySQL execute failed! ERROR (%s): %s' % (e.args[0], e.args[1])
print error
def executeCommit(self, sql=''):
"""执行数据库sql语句,针对更新,删除,事务等操作失败时回滚
"""
try:
self.cur.execute(sql)
self.con.commit()
except MySQLdb.Error, e:
self.con.rollback()
error = 'MySQL execute failed! ERROR (%s): %s' % (e.args[0], e.args[1])
print "error:", error
return error
def insert(self, tablename, params):
"""创建数据库表
args:
tablename :表名字
key :属性键
value :属性值
"""
key = []
value = []
for tmpkey, tmpvalue in params.items():
key.append(tmpkey)
if isinstance(tmpvalue, str):
value.append("\'" + tmpvalue + "\'")
else:
value.append(tmpvalue)
attrs_sql = '(' + ','.join(key) + ')'
values_sql = ' values(' + ','.join(value) + ')'
sql = 'insert into %s' % tablename
sql = sql + attrs_sql + values_sql
print '_insert:' + sql
self.executeCommit(sql)
def select(self, tablename, cond_dict='', order='', fields='*'):
"""查询数据
args:
tablename :表名字
cond_dict :查询条件
order :排序条件
example:
print mydb.select(table)
print mydb.select(table, fields=["name"])
print mydb.select(table, fields=["name", "age"])
print mydb.select(table, fields=["age", "name"])
"""
consql = ' '
if cond_dict != '':
for k, v in cond_dict.items():
consql = consql + k + '=' + v + ' and'
consql = consql + ' 1=1 '
if fields == "*":
sql = 'select * from %s where ' % tablename
else:
if isinstance(fields, list):
fields = ",".join(fields)
sql = 'select %s from %s where ' % (fields, tablename)
else:
raise Exception("fields input error, please input list fields.")
sql = sql + consql + order
print 'select:' + sql
return self.executeSql(sql)
def insertMany(self, table, attrs, values):
"""插入多条数据
args:
tablename :表名字
attrs :属性键
values :属性值
example:
table='test_MySQLdb'
key = ["id" ,"name", "age"]
value = [[101, "liuqiao", "25"], [102,"liuqiao1", "26"], [103 ,"liuqiao2", "27"], [104 ,"liuqiao3", "28"]]
mydb.insertMany(table, key, value)
"""
values_sql = ['%s' for v in attrs]
attrs_sql = '(' + ','.join(attrs) + ')'
values_sql = ' values(' + ','.join(values_sql) + ')'
sql = 'insert into %s' % table
sql = sql + attrs_sql + values_sql
print 'insertMany:' + sql
try:
print sql
for i in range(0, len(values), 20000):
self.cur.executemany(sql, values[i:i + 20000])
self.con.commit()
except MySQLdb.Error, e:
self.con.rollback()
error = 'insertMany executemany failed! ERROR (%s): %s' % (e.args[0], e.args[1])
print error
def delete(self, tablename, cond_dict):
"""删除数据
args:
tablename :表名字
cond_dict :删除条件字典
example:
params = {"name" : "caixinglong", "age" : "38"}
mydb.delete(table, params)
"""
consql = ' '
if cond_dict != '':
for k, v in cond_dict.items():
if isinstance(v, str):
v = "\'" + v + "\'"
consql = consql + tablename + "." + k + '=' + v + ' and '
consql = consql + ' 1=1 '
sql = "DELETE FROM %s where%s" % (tablename, consql)
print sql
return self.executeCommit(sql)
def update(self, tablename, attrs_dict, cond_dict):
"""更新数据
args:
tablename :表名字
attrs_dict :更新属性键值对字典
cond_dict :更新条件字典
example:
params = {"name" : "caixinglong", "age" : "38"}
cond_dict = {"name" : "liuqiao", "age" : "18"}
mydb.update(table, params, cond_dict)
"""
attrs_list = []
consql = ' '
for tmpkey, tmpvalue in attrs_dict.items():
attrs_list.append("`" + tmpkey + "`" + "=" + "\'" + tmpvalue + "\'")
attrs_sql = ",".join(attrs_list)
print "attrs_sql:", attrs_sql
if cond_dict != '':
for k, v in cond_dict.items():
if isinstance(v, str):
v = "\'" + v + "\'"
consql = consql + "`" + tablename + "`." + "`" + k + "`" + '=' + v + ' and '
consql = consql + ' 1=1 '
sql = "UPDATE %s SET %s where%s" % (tablename, attrs_sql, consql)
print sql
return self.executeCommit(sql)
def dropTable(self, tablename):
"""删除数据库表
args:
tablename :表名字
"""
sql = "DROP TABLE %s" % tablename
self.executeCommit(sql)
def deleteTable(self, tablename):
"""清空数据库表
args:
tablename :表名字
"""
sql = "DELETE FROM %s" % tablename
self.executeCommit(sql)
def isExistTable(self, tablename):
"""判断数据表是否存在
args:
tablename :表名字
Return:
存在返回True,不存在返回False
"""
sql = "select * from %s" % tablename
result = self.executeCommit(sql)
if result is None:
return True
else:
if re.search("doesn't exist", result):
return False
else:
return True
if __name__ == "__main__":
mydb = MySQLdbHelper()
print mydb.getVersion()
table = 'test_MySQLdb'
attrs = {'name': 'varchar(200) DEFAULT NULL', 'age': 'int(11) DEFAULT NULL'}
constraint = 'PRIMARY KEY(`id`)'
print mydb.creatTable(table, attrs, constraint)
params = {"name": "caixinglong", "age": "38"}
mydb.insert('test_MySQLdb', params)
print mydb.select(table)
print mydb.select(table, fields=["name", "age"])
print mydb.select(table, fields=["age", "name"])
key = ["id", "name", "age"]
value = [[101, "liuqiao", "25"], [102, "liuqiao1", "26"], [103, "liuqiao2", "27"], [104, "liuqiao3", "28"]]
mydb.insertMany(table, key, value)
mydb.delete(table, params)
cond_dict = {"name": "liuqiao", "age": "18"}
mydb.update(table, params, cond_dict)
# mydb.deleteTable(table)
# mydb.dropTable(table)
print mydb.select(table + "1")
print mydb.isExistTable(table + "1")
|
zymtest2
|
/zymtest2-0.1.1-py3-none-any.whl/pytest/db_operate.py
|
db_operate.py
|