code | package | path | filename |
---|---|---|---|
from zxcvbn.scoring import START_UPPER, ALL_UPPER
from gettext import gettext as _
def get_feedback(score, sequence):
if len(sequence) == 0:
return {
'warning': '',
'suggestions': [
_("Use a few words, avoid common phrases."),
_("No need for symbols, digits, or uppercase letters.")
]
}
if score > 2:
return {
'warning': '',
'suggestions': [],
}
longest_match = sequence[0]
for match in sequence[1:]:
if len(match['token']) > len(longest_match['token']):
longest_match = match
feedback = get_match_feedback(longest_match, len(sequence) == 1)
extra_feedback = _('Add another word or two. Uncommon words are better.')
if feedback:
feedback['suggestions'].insert(0, extra_feedback)
if not feedback['warning']:
feedback['warning'] = ''
else:
feedback = {
'warning': '',
'suggestions': [extra_feedback]
}
return feedback
def get_match_feedback(match, is_sole_match):
if match['pattern'] == 'dictionary':
return get_dictionary_match_feedback(match, is_sole_match)
elif match['pattern'] == 'spatial':
if match['turns'] == 1:
warning = _('Straight rows of keys are easy to guess.')
else:
warning = _('Short keyboard patterns are easy to guess.')
return {
'warning': warning,
'suggestions': [
_('Use a longer keyboard pattern with more turns.')
]
}
elif match['pattern'] == 'repeat':
if len(match['base_token']) == 1:
warning = _('Repeats like "aaa" are easy to guess.')
else:
warning = _('Repeats like "abcabcabc" are only slightly harder to ' \
'guess than "abc".')
return {
'warning': warning,
'suggestions': [
_('Avoid repeated words and characters.')
]
}
elif match['pattern'] == 'sequence':
return {
'warning': _('Sequences like "abc" or "6543" are easy to guess.'),
'suggestions': [
_('Avoid sequences.')
]
}
elif match['pattern'] == 'regex':
if match['regex_name'] == 'recent_year':
return {
'warning': _("Recent years are easy to guess."),
'suggestions': [
_('Avoid recent years.'),
_('Avoid years that are associated with you.'),
]
}
elif match['pattern'] == 'date':
return {
'warning': _("Dates are often easy to guess."),
'suggestions': [
_('Avoid dates and years that are associated with you.'),
],
}
def get_dictionary_match_feedback(match, is_sole_match):
warning = ''
if match['dictionary_name'] == 'passwords':
if is_sole_match and not match.get('l33t', False) and not \
match['reversed']:
if match['rank'] <= 10:
warning = _('This is a top-10 common password.')
elif match['rank'] <= 100:
warning = _('This is a top-100 common password.')
else:
warning = _('This is a very common password.')
elif match['guesses_log10'] <= 4:
warning = _('This is similar to a commonly used password.')
elif match['dictionary_name'] == 'english':
if is_sole_match:
warning = _('A word by itself is easy to guess.')
elif match['dictionary_name'] in ['surnames', 'male_names',
'female_names', ]:
if is_sole_match:
warning = _('Names and surnames by themselves are easy to guess.')
else:
warning = _('Common names and surnames are easy to guess.')
else:
warning = ''
suggestions = []
word = match['token']
if START_UPPER.search(word):
suggestions.append(_("Capitalization doesn't help very much."))
elif ALL_UPPER.search(word) and word.lower() != word:
suggestions.append(_("All-uppercase is almost as easy to guess as "
"all-lowercase."))
if match['reversed'] and len(match['token']) >= 4:
suggestions.append(_("Reversed words aren't much harder to guess."))
if match.get('l33t', False):
suggestions.append(_("Predictable substitutions like '@' instead of 'a' "
"don't help very much."))
return {
'warning': warning,
'suggestions': suggestions,
} | zxcvbn-python | /zxcvbn-python-4.4.24.tar.gz/zxcvbn-python-4.4.24/zxcvbn/feedback.py | feedback.py |
from math import log, factorial
import re
from .adjacency_graphs import ADJACENCY_GRAPHS
def calc_average_degree(graph):
average = 0
for key, neighbors in graph.items():
average += len([n for n in neighbors if n])
average /= float(len(graph.items()))
return average
BRUTEFORCE_CARDINALITY = 10
MIN_GUESSES_BEFORE_GROWING_SEQUENCE = 10000
MIN_SUBMATCH_GUESSES_SINGLE_CHAR = 10
MIN_SUBMATCH_GUESSES_MULTI_CHAR = 50
MIN_YEAR_SPACE = 20
REFERENCE_YEAR = 2017
def nCk(n, k):
"""http://blog.plover.com/math/choose.html"""
if k > n:
return 0
if k == 0:
return 1
r = 1
for d in range(1, k + 1):
r *= n
r /= d
n -= 1
return r
# ------------------------------------------------------------------------------
# search --- most guessable match sequence -------------------------------------
# ------------------------------------------------------------------------------
#
# takes a sequence of overlapping matches, returns the non-overlapping sequence with
# minimum guesses. the following is an O(l_max * (n + m)) dynamic programming algorithm
# for a length-n password with m candidate matches. l_max is the maximum optimal
# sequence length spanning each prefix of the password. In practice it rarely exceeds 5 and the
# search terminates rapidly.
#
# the optimal "minimum guesses" sequence is here defined to be the sequence that
# minimizes the following function:
#
# g = l! * Product(m.guesses for m in sequence) + D^(l - 1)
#
# where l is the length of the sequence.
#
# the factorial term is the number of ways to order l patterns.
#
# the D^(l-1) term is another length penalty, roughly capturing the idea that an
# attacker will try lower-length sequences first before trying length-l sequences.
#
# for example, consider a sequence that is date-repeat-dictionary.
# - an attacker would need to try other date-repeat-dictionary combinations,
# hence the product term.
# - an attacker would need to try repeat-date-dictionary, dictionary-repeat-date,
# ..., hence the factorial term.
# - an attacker would also likely try length-1 (dictionary) and length-2 (dictionary-date)
# sequences before length-3. assuming at minimum D guesses per pattern type,
# D^(l-1) approximates Sum(D^i for i in [1..l-1])
#
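#
# worked example (illustrative numbers only, not from any real password): for a
# length-3 sequence whose matches have 100, 50 and 1000 guesses respectively,
# with D = MIN_GUESSES_BEFORE_GROWING_SEQUENCE = 10000:
# g = 3! * (100 * 50 * 1000) + 10000^(3-1)
#   = 6 * 5,000,000 + 100,000,000
#   = 130,000,000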
# ------------------------------------------------------------------------------
def most_guessable_match_sequence(password, matches, _exclude_additive=False):
n = len(password)
# partition matches into sublists according to ending index j
matches_by_j = [[] for _ in range(n)]
try:
for m in matches:
matches_by_j[m['j']].append(m)
except TypeError:
pass
# small detail: for deterministic output, sort each sublist by i.
for lst in matches_by_j:
lst.sort(key=lambda m1: m1['i'])
optimal = {
# optimal.m[k][l] holds final match in the best length-l match sequence
# covering the password prefix up to k, inclusive.
# if there is no length-l sequence that scores better (fewer guesses)
# than a shorter match sequence spanning the same prefix,
# optimal.m[k][l] is undefined.
'm': [{} for _ in range(n)],
# same structure as optimal.m -- holds the product term Prod(m.guesses
# for m in sequence). optimal.pi allows for fast (non-looping) updates
# to the minimization function.
'pi': [{} for _ in range(n)],
# same structure as optimal.m -- holds the overall metric.
'g': [{} for _ in range(n)],
}
# helper: considers whether a length-l sequence ending at match m is better
# (fewer guesses) than previously encountered sequences, updating state if
# so.
def update(m, l):
k = m['j']
pi = estimate_guesses(m, password)
if l > 1:
# we're considering a length-l sequence ending with match m:
# obtain the product term in the minimization function by
# multiplying m's guesses by the product of the length-(l-1)
# sequence ending just before m, at m.i - 1.
pi *= optimal['pi'][m['i'] - 1][l - 1]
# calculate the minimization func
g = factorial(l) * pi
if not _exclude_additive:
g += MIN_GUESSES_BEFORE_GROWING_SEQUENCE ** (l - 1)
# update state if new best.
# first see if any competing sequences covering this prefix, with l or
# fewer matches, fare better than this sequence. if so, skip it and
# return.
for competing_l, competing_g in optimal['g'][k].items():
if competing_l > l:
continue
if competing_g <= g:
return
# this sequence might be part of the final optimal sequence.
optimal['g'][k][l] = g
optimal['m'][k][l] = m
optimal['pi'][k][l] = pi
# helper: evaluate bruteforce matches ending at k.
def bruteforce_update(k):
# see if a single bruteforce match spanning the k-prefix is optimal.
m = make_bruteforce_match(0, k)
update(m, 1)
for i in range(1, k + 1):
# generate k bruteforce matches, spanning from (i=1, j=k) up to
# (i=k, j=k). see if adding these new matches to any of the
# sequences in optimal[i-1] leads to new bests.
m = make_bruteforce_match(i, k)
for l, last_m in optimal['m'][i - 1].items():
l = int(l)
# corner: an optimal sequence will never have two adjacent
# bruteforce matches. it is strictly better to have a single
# bruteforce match spanning the same region: same contribution
# to the guess product with a lower length.
# --> safe to skip those cases.
if last_m.get('pattern', False) == 'bruteforce':
continue
# try adding m to this length-l sequence.
update(m, l + 1)
# helper: make bruteforce match objects spanning i to j, inclusive.
def make_bruteforce_match(i, j):
return {
'pattern': 'bruteforce',
'token': password[i:j + 1],
'i': i,
'j': j,
}
# helper: step backwards through optimal.m starting at the end,
# constructing the final optimal match sequence.
def unwind(n):
optimal_match_sequence = []
k = n - 1
# find the final best sequence length and score
l = None
g = float('inf')
for candidate_l, candidate_g in optimal['g'][k].items():
if candidate_g < g:
l = candidate_l
g = candidate_g
while k >= 0:
m = optimal['m'][k][l]
optimal_match_sequence.insert(0, m)
k = m['i'] - 1
l -= 1
return optimal_match_sequence
for k in range(n):
for m in matches_by_j[k]:
if m['i'] > 0:
for l in optimal['m'][m['i'] - 1]:
l = int(l)
update(m, l + 1)
else:
update(m, 1)
bruteforce_update(k)
optimal_match_sequence = unwind(n)
optimal_l = len(optimal_match_sequence)
# corner: empty password
if len(password) == 0:
guesses = 1
else:
guesses = optimal['g'][n - 1][optimal_l]
# final result object
return {
'password': password,
'guesses': guesses,
'guesses_log10': log(guesses, 10),
'sequence': optimal_match_sequence,
}
def estimate_guesses(match, password):
if match.get('guesses', False):
return match['guesses']
min_guesses = 1
if len(match['token']) < len(password):
if len(match['token']) == 1:
min_guesses = MIN_SUBMATCH_GUESSES_SINGLE_CHAR
else:
min_guesses = MIN_SUBMATCH_GUESSES_MULTI_CHAR
estimation_functions = {
'bruteforce': bruteforce_guesses,
'dictionary': dictionary_guesses,
'spatial': spatial_guesses,
'repeat': repeat_guesses,
'sequence': sequence_guesses,
'regex': regex_guesses,
'date': date_guesses,
}
guesses = estimation_functions[match['pattern']](match)
match['guesses'] = max(guesses, min_guesses)
match['guesses_log10'] = log(match['guesses'], 10)
return match['guesses']
def bruteforce_guesses(match):
guesses = BRUTEFORCE_CARDINALITY ** len(match['token'])
# small detail: make bruteforce matches at minimum one guess bigger than
# smallest allowed submatch guesses, such that non-bruteforce submatches
# over the same [i..j] take precedence.
if len(match['token']) == 1:
min_guesses = MIN_SUBMATCH_GUESSES_SINGLE_CHAR + 1
else:
min_guesses = MIN_SUBMATCH_GUESSES_MULTI_CHAR + 1
return max(guesses, min_guesses)
def dictionary_guesses(match):
# keep these as properties for display purposes
match['base_guesses'] = match['rank']
match['uppercase_variations'] = uppercase_variations(match)
match['l33t_variations'] = l33t_variations(match)
reversed_variations = match.get('reversed', False) and 2 or 1
return match['base_guesses'] * match['uppercase_variations'] * \
match['l33t_variations'] * reversed_variations
def repeat_guesses(match):
return match['base_guesses'] * match['repeat_count']
def sequence_guesses(match):
first_chr = match['token'][:1]
# lower guesses for obvious starting points
if first_chr in ['a', 'A', 'z', 'Z', '0', '1', '9']:
base_guesses = 4
else:
if re.compile(r'\d').match(first_chr):
base_guesses = 10 # digits
else:
# could give a higher base for uppercase,
# assigning 26 to both upper and lower sequences is more
# conservative.
base_guesses = 26
if not match['ascending']:
base_guesses *= 2
return base_guesses * len(match['token'])
def regex_guesses(match):
char_class_bases = {
'alpha_lower': 26,
'alpha_upper': 26,
'alpha': 52,
'alphanumeric': 62,
'digits': 10,
'symbols': 33,
}
if match['regex_name'] in char_class_bases:
return char_class_bases[match['regex_name']] ** len(match['token'])
elif match['regex_name'] == 'recent_year':
# conservative estimate of year space: num years from REFERENCE_YEAR.
# if year is close to REFERENCE_YEAR, estimate a year space of
# MIN_YEAR_SPACE.
year_space = abs(int(match['regex_match'].group(0)) - REFERENCE_YEAR)
year_space = max(year_space, MIN_YEAR_SPACE)
return year_space
def date_guesses(match):
year_space = max(abs(match['year'] - REFERENCE_YEAR), MIN_YEAR_SPACE)
guesses = year_space * 365
if match.get('separator', False):
guesses *= 4
return guesses
KEYBOARD_AVERAGE_DEGREE = calc_average_degree(ADJACENCY_GRAPHS['qwerty'])
# slightly different for keypad/mac keypad, but close enough
KEYPAD_AVERAGE_DEGREE = calc_average_degree(ADJACENCY_GRAPHS['keypad'])
KEYBOARD_STARTING_POSITIONS = len(ADJACENCY_GRAPHS['qwerty'].keys())
KEYPAD_STARTING_POSITIONS = len(ADJACENCY_GRAPHS['keypad'].keys())
def spatial_guesses(match):
if match['graph'] in ['qwerty', 'dvorak']:
s = KEYBOARD_STARTING_POSITIONS
d = KEYBOARD_AVERAGE_DEGREE
else:
s = KEYPAD_STARTING_POSITIONS
d = KEYPAD_AVERAGE_DEGREE
guesses = 0
L = len(match['token'])
t = match['turns']
# estimate the number of possible patterns w/ length L or less with t turns
# or less.
for i in range(2, L + 1):
possible_turns = min(t, i - 1) + 1
for j in range(1, possible_turns):
guesses += nCk(i - 1, j - 1) * s * pow(d, j)
# add extra guesses for shifted keys. (% instead of 5, A instead of a.)
# math is similar to extra guesses of l33t substitutions in dictionary
# matches.
if match['shifted_count']:
S = match['shifted_count']
U = len(match['token']) - match['shifted_count'] # unshifted count
if S == 0 or U == 0:
guesses *= 2
else:
shifted_variations = 0
for i in range(1, min(S, U) + 1):
shifted_variations += nCk(S + U, i)
guesses *= shifted_variations
return guesses
START_UPPER = re.compile(r'^[A-Z][^A-Z]+$')
END_UPPER = re.compile(r'^[^A-Z]+[A-Z]$')
ALL_UPPER = re.compile(r'^[^a-z]+$')
ALL_LOWER = re.compile(r'^[^A-Z]+$')
def uppercase_variations(match):
word = match['token']
if ALL_LOWER.match(word) or word.lower() == word:
return 1
for regex in [START_UPPER, END_UPPER, ALL_UPPER]:
if regex.match(word):
return 2
U = sum(1 for c in word if c.isupper())
L = sum(1 for c in word if c.islower())
variations = 0
for i in range(1, min(U, L) + 1):
variations += nCk(U + L, i)
return variations
def l33t_variations(match):
if not match.get('l33t', False):
return 1
variations = 1
for subbed, unsubbed in match['sub'].items():
# lower-case match.token before calculating: capitalization shouldn't
# affect l33t calc.
chrs = list(match['token'].lower())
S = sum(1 for chr in chrs if chr == subbed)
U = sum(1 for chr in chrs if chr == unsubbed)
if S == 0 or U == 0:
# for this sub, password is either fully subbed (444) or fully
# unsubbed (aaa) treat that as doubling the space (attacker needs
# to try fully subbed chars in addition to unsubbed.)
variations *= 2
else:
# this case is similar to capitalization:
# with aa44a, U = 3, S = 2, attacker needs to try unsubbed + one
# sub + two subs
p = min(U, S)
possibilities = 0
for i in range(1, p + 1):
possibilities += nCk(U + S, i)
variations *= possibilities
return variations | zxcvbn-python | /zxcvbn-python-4.4.24.tar.gz/zxcvbn-python-4.4.24/zxcvbn/scoring.py | scoring.py |
|Build Status|
zxcvbn
======
A realistic password strength estimator.
This is a Python implementation of the library created by the team at Dropbox.
The original library, written for JavaScript, can be found
`here <https://github.com/dropbox/zxcvbn>`__.
While there may be other Python ports available, this one is the most up
to date and is recommended by the original developers of zxcvbn at this
time.
Features
--------
- **Tested in Python versions 2.7, 3.3-3.6**
- Accepts user data to be added to the dictionaries that are tested against (name, birthdate, etc)
- Gives a score to the password, from 0 (terrible) to 4 (great)
- Provides feedback on the password and ways to improve it
- Returns time estimates on how long it would take to guess the password in different situations
Installation
------------
Install the package using pip: ``pip install zxcvbn``
Usage
-----
Pass a password as the first parameter, and a list of user-provided
inputs as the ``user_inputs`` parameter (optional).
.. code:: python
from zxcvbn import zxcvbn
results = zxcvbn('JohnSmith123', user_inputs=['John', 'Smith'])
print(results)
Output:
::
{
'password': 'JohnSmith123',
'score': 2,
'guesses': 2567800,
'guesses_log10': 6.409561194521849,
'calc_time': datetime.timedelta(0, 0, 5204),
'feedback': {
'warning': '',
'suggestions': [
'Add another word or two. Uncommon words are better.',
"Capitalization doesn't help very much"
]
},
'crack_times_display': {
'offline_fast_hashing_1e10_per_second': 'less than a second',
'offline_slow_hashing_1e4_per_second': '4 minutes',
'online_no_throttling_10_per_second': '3 days',
'online_throttling_100_per_hour': '3 years',
},
'crack_times_seconds': {
'offline_fast_hashing_1e10_per_second': 0.00025678,
'offline_slow_hashing_1e4_per_second': 256.78,
'online_no_throttling_10_per_second': 256780.0,
'online_throttling_100_per_hour': 92440800.0,
},
'sequence': [{
'matched_word': 'john',
'rank': 2,
'pattern': 'dictionary',
'reversed': False,
'token': 'John',
'l33t': False,
'uppercase_variations': 2,
'i': 0,
'guesses': 50,
'l33t_variations': 1,
'dictionary_name': 'male_names',
'base_guesses': 2,
'guesses_log10': 1.6989700043360185,
'j': 3
}, {
'matched_word': 'smith123',
'rank': 12789,
'pattern': 'dictionary',
'reversed': False,
'token': 'Smith123',
'l33t': False,
'uppercase_variations': 2,
'i': 4,
'guesses': 25578,
'l33t_variations': 1,
'dictionary_name': 'passwords',
'base_guesses': 12789,
'guesses_log10': 4.407866583030775,
'j': 11
}],
}
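The fields in this result can be used directly in application code. For example, a minimal sketch (the minimum score of 3 is an arbitrary threshold chosen for illustration, not a library default) that rejects weak passwords and surfaces the suggestions shown above:
.. code:: python
from zxcvbn import zxcvbn
results = zxcvbn('JohnSmith123', user_inputs=['John', 'Smith'])
if results['score'] < 3:  # scores range from 0 (terrible) to 4 (great)
    print('Too weak:', '; '.join(results['feedback']['suggestions']))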
Custom Ranked Dictionaries
--------------------------
In order to support more languages or just add password dictionaries of your own, there is a helper function you may use.
.. code:: python
from zxcvbn.matching import add_frequency_lists
add_frequency_lists({
'my_list': ['foo', 'bar'],
'another_list': ['baz']
})
These lists will be added to the current ones, but you can also overwrite the current ones if you wish.
The lists you add should be ordered by how commonly each word is used, with the most common words appearing first, as in the sketch below.
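For instance, a sketch with a made-up list (the name ``star_wars`` and its words are purely illustrative):
.. code:: python
from zxcvbn.matching import add_frequency_lists
add_frequency_lists({
    'star_wars': ['jedi', 'yoda', 'skywalker'],  # most common word first
})
Reusing the name of an existing list should replace that list, which is one way to overwrite the current dictionaries mentioned above.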
CLI
~~~
You can also use zxcvbn from the command line::
echo 'password' | zxcvbn --user-input <user-input> | jq
You can also execute the zxcvbn module::
echo 'password' | python -m zxcvbn --user-input <user-input> | jq
Contribute
----------
- Report an Issue: https://github.com/dwolfhub/zxcvbn-python/issues
- Submit a Pull Request: https://github.com/dwolfhub/zxcvbn-python/pulls
License
-------
The project is licensed under the MIT license.
.. |Build Status| image:: https://travis-ci.org/dwolfhub/zxcvbn-python.svg?branch=master
:target: https://travis-ci.org/dwolfhub/zxcvbn-python
| zxcvbn | /zxcvbn-4.4.28.tar.gz/zxcvbn-4.4.28/README.rst | README.rst |
import io
import os
import shutil
import subprocess
import sys
from cffi import FFI
ffi = FFI()
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
src_dir = os.path.join(root_dir, "native-src")
# first make sure {_frequency_lists,adjacency_graphs}.{hpp,cpp} are built
module_src_dir = os.path.join(src_dir, "zxcvbn")
for fnout in ["_frequency_lists.hpp", "_frequency_lists.cpp"]:
subprocess.check_call([sys.executable, os.path.join(root_dir, "data-scripts", "build_frequency_lists.py"),
os.path.join(root_dir, "data"), os.path.join(module_src_dir, fnout)])
for fnout in ["adjacency_graphs.hpp", "adjacency_graphs.cpp"]:
subprocess.check_call([sys.executable, os.path.join(root_dir, "data-scripts", "build_keyboard_adjacency_graphs.py"),
os.path.join(module_src_dir, fnout)])
# now produce amalgamation *shudders*
amalg = io.BytesIO()
for source_filename in ["zxcvbn.cpp",
"matching.cpp",
"scoring.cpp",
"frequency_lists.cpp",
"util.cpp",
"adjacency_graphs.cpp",
"_frequency_lists.cpp",
]:
with open(os.path.join(module_src_dir, source_filename), "rb") as f:
shutil.copyfileobj(f, amalg)
amalg.write(b"\n")
if sys.version_info[0] >= 3:
PyMODINIT_FUNC = 'extern "C" __attribute__ ((visibility ("default"))) PyObject *'
else:
PyMODINIT_FUNC = 'extern "C" __attribute__ ((visibility ("default"))) void'
EXTRA_COMPILE_ARGS = ["-std=c++14", "-fvisibility=hidden", "-Os", "-flto"]
EXTRA_LINK_ARGS = ["-fvisibility=hidden", "-Os", "-flto"]
ffi.set_source("zxcvbncpp._zxcvbncpp", amalg.getvalue().decode('utf-8'),
include_dirs=[src_dir],
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
define_macros=[("PyMODINIT_FUNC", PyMODINIT_FUNC)],
source_extension=".cpp")
ffi.cdef("""
typedef double zxcvbn_guesses_t;
struct zxcvbn_match_sequence;
typedef struct zxcvbn_match_sequence *zxcvbn_match_sequence_t;
int zxcvbn_password_strength(const char *pass, const char *const *user_inputs,
zxcvbn_guesses_t *guesses,
zxcvbn_match_sequence_t *mseq
);
void zxcvbn_match_sequence_destroy(zxcvbn_match_sequence_t);
""") | zxcvbncpp | /zxcvbncpp-1.0.1.tar.gz/zxcvbncpp-1.0.1/python-src/build_zxcvbn.py | build_zxcvbn.py |
import os
import sys
import json
def usage():
return '''
constructs adjacency_graphs.coffee from QWERTY and DVORAK keyboard layouts
usage:
%s adjacency_graphs.coffee
''' % sys.argv[0]
qwerty = r'''
`~ 1! 2@ 3# 4$ 5% 6^ 7& 8* 9( 0) -_ =+
qQ wW eE rR tT yY uU iI oO pP [{ ]} \|
aA sS dD fF gG hH jJ kK lL ;: '"
zZ xX cC vV bB nN mM ,< .> /?
'''
dvorak = r'''
`~ 1! 2@ 3# 4$ 5% 6^ 7& 8* 9( 0) [{ ]}
'" ,< .> pP yY fF gG cC rR lL /? =+ \|
aA oO eE uU iI dD hH tT nN sS -_
;: qQ jJ kK xX bB mM wW vV zZ
'''
keypad = r'''
/ * -
7 8 9 +
4 5 6
1 2 3
0 .
'''
mac_keypad = r'''
= / *
7 8 9 -
4 5 6 +
1 2 3
0 .
'''
def get_slanted_adjacent_coords(x, y):
'''
returns the six adjacent coordinates on a standard keyboard, where each row is slanted to the
right from the last. adjacencies are clockwise, starting with key to the left, then two keys
above, then right key, then two keys below. (that is, only near-diagonal keys are adjacent,
so g's coordinate is adjacent to those of t,y,b,v, but not those of r,u,n,c.)
'''
return [(x-1, y), (x, y-1), (x+1, y-1), (x+1, y), (x, y+1), (x-1, y+1)]
def get_aligned_adjacent_coords(x, y):
'''
returns the eight clockwise adjacent coordinates on a keypad, where each row is vertically aligned.
'''
return [(x-1, y), (x-1, y-1), (x, y-1), (x+1, y-1), (x+1, y), (x+1, y+1), (x, y+1), (x-1, y+1)]
def build_graph(layout_str, slanted):
'''
builds an adjacency graph as a dictionary: {character: [adjacent_characters]}.
adjacent characters occur in a clockwise order.
for example:
* on qwerty layout, 'g' maps to ['fF', 'tT', 'yY', 'hH', 'bB', 'vV']
* on keypad layout, '7' maps to [None, None, None, '=', '8', '5', '4', None]
'''
position_table = {} # maps from tuple (x,y) -> characters at that position.
tokens = layout_str.split()
token_size = len(tokens[0])
x_unit = token_size + 1 # x position unit len is token len plus 1 for the following whitespace.
adjacency_func = get_slanted_adjacent_coords if slanted else get_aligned_adjacent_coords
assert all(len(token) == token_size for token in tokens), 'token len mismatch:\n ' + layout_str
for y, line in enumerate(layout_str.split('\n')):
# the way I illustrated keys above, each qwerty row is indented one space in from the last
slant = y - 1 if slanted else 0
for token in line.split():
x, remainder = divmod(line.index(token) - slant, x_unit)
assert remainder == 0, 'unexpected x offset for %s in:\n%s' % (token, layout_str)
position_table[(x,y)] = token
adjacency_graph = {}
for (x,y), chars in position_table.items():
for char in chars:
adjacency_graph[char] = []
for coord in adjacency_func(x, y):
# position in the list indicates direction
# (for qwerty, 0 is left, 1 is top, 2 is top right, ...)
# for edge chars like 1 or m, insert None as a placeholder when needed
# so that each character in the graph has a same-length adjacency list.
adjacency_graph[char].append(position_table.get(coord, None))
return adjacency_graph
GRAPHS = [('qwerty', (qwerty, True)),
('dvorak', (dvorak, True)),
('keypad', (keypad, False)),
('mac_keypad', (mac_keypad, False))]
def output_coffee(path):
with open(path, 'w') as f:
f.write('# generated by scripts/build_keyboard_adjacency_graphs.py\n')
f.write('adjacency_graphs = \n ')
lines = []
for graph_name, args in GRAPHS:
graph = build_graph(*args)
lines.append('%s: %s' % (graph_name, json.dumps(graph, sort_keys=True)))
f.write('\n '.join(lines))
f.write('\n\n')
f.write('module.exports = adjacency_graphs\n')
def escape(x):
return x.replace("\\", "\\\\").replace("\"", "\\\"")
def output_hpp(hpp_file):
with open(hpp_file, 'w') as f:
f.write('// generated by scripts/build_keyboard_adjacency_graphs.py\n')
tags = ',\n '.join(k.upper() for (k, _) in GRAPHS)
f.write("""#ifndef __ZXCVBN__ADJACENCY_GRAPHS_HPP
#define __ZXCVBN__ADJACENCY_GRAPHS_HPP
#include <zxcvbn/optional.hpp>
#include <array>
#include <initializer_list>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
namespace zxcvbn {
enum class GraphTag {
%s
};
}
namespace std {
template<>
struct hash<zxcvbn::GraphTag> {
std::size_t operator()(const zxcvbn::GraphTag & v) const {
return static_cast<std::size_t>(v);
}
};
}
namespace zxcvbn {
using Graph = std::unordered_map<std::string, std::vector<optional::optional<std::string>>>;
using Graphs = std::unordered_map<GraphTag, Graph>;
const Graphs & graphs();
using degree_t = double;
extern const degree_t KEYBOARD_AVERAGE_DEGREE;
extern const degree_t KEYPAD_AVERAGE_DEGREE;
extern const std::size_t KEYBOARD_STARTING_POSITIONS;
extern const std::size_t KEYPAD_STARTING_POSITIONS;
}
#endif
""" % (tags,))
def output_cpp(cpp_file):
with open(cpp_file, 'w') as f:
f.write('// generated by scripts/build_keyboard_adjacency_graphs.py\n')
f.write("#include <zxcvbn/adjacency_graphs.hpp>\n\n")
f.write("#include <zxcvbn/optional.hpp>\n\n")
f.write("#include <array>\n")
f.write("#include <initializer_list>\n")
f.write("#include <utility>\n\n")
# find out largest adjacency_list
largest = max(len(adj)
for (_, args2) in GRAPHS
for adj in build_graph(*args2).values())
f.write("""namespace zxcvbn {
static
optional::optional<std::string> M(const char *s) {
return optional::make_optional(std::string(s));
}
const auto no = optional::nullopt;
""")
f.write("const Graphs _graphs = {\n")
for (name, args2) in GRAPHS:
graph = build_graph(*args2)
f.write(" {GraphTag::%s, {\n" % (name.upper(),));
for key, adj in sorted(graph.items()):
f.write(' {"%s", {%s}},\n' %
(escape(key), ', '.join('M("' + escape(a) + '")'
if a else
'no'
for a in adj)))
f.write(" }},\n")
f.write("""};
// on qwerty, 'g' has degree 6, being adjacent to 'ftyhbv'. '\' has degree 1.
// this calculates the average over all keys.
static
degree_t calc_average_degree(const Graph & graph) {
degree_t average = 0;
for (const auto & item : graph) {
for (const auto & neighbor : item.second) {
average += neighbor ? 1 : 0;
}
}
average /= graph.size();
return average;
}
extern const degree_t KEYBOARD_AVERAGE_DEGREE = calc_average_degree(_graphs.at(GraphTag::QWERTY));
// slightly different for keypad/mac keypad, but close enough
extern const degree_t KEYPAD_AVERAGE_DEGREE = calc_average_degree(_graphs.at(GraphTag::KEYPAD));
extern const std::size_t KEYBOARD_STARTING_POSITIONS = _graphs.at(GraphTag::QWERTY).size();
extern const std::size_t KEYPAD_STARTING_POSITIONS = _graphs.at(GraphTag::KEYPAD).size();
const Graphs & graphs() {
return _graphs;
}
""")
f.write("}\n")
if __name__ == '__main__':
if len(sys.argv) != 2:
print(usage())
sys.exit(0)
output_file = sys.argv[1]
_, ext = os.path.splitext(output_file.lower())
if ext == ".cpp":
output_fn = output_cpp
elif ext == ".hpp":
output_fn = output_hpp
else:
output_fn = output_coffee
output_fn(output_file)
sys.exit(0) | zxcvbncpp | /zxcvbncpp-1.0.1.tar.gz/zxcvbncpp-1.0.1/data-scripts/build_keyboard_adjacency_graphs.py | build_keyboard_adjacency_graphs.py |
import random
from urllib import urlopen
import sys
reload(sys)
sys.setdefaultencoding('gbk')
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
"class %%%(%%%):":
u"写一个名为“%%%”的类,其父类是“%%%”。",
"class %%%(object):\n\tdef ***(self, @@@)":
u"写一个名为“%%%”的类,有一个构造函数“_int_”,以自身和“***”为参数。",
"class %%%(object):\n\tdef ***(self, @@@)":
u"写一个名为“%%%”的类,有一个名为“***”的方法,以自身和“@@@”为参数。",
"*** = %%%()":
u"构造一个对象“***”,是“%%%”类型的。",
"***.***(@@@)":
u"从“***”中调用“***”方法,以自身和“@@@”为参数。",
"***.*** = '***'":
u"从“***”中调用“***”属性,并赋值为“***”"
}
PHRASE_FIRST = False
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASE_FIRST = True
for word in urlopen(WORD_URL).readlines():
WORDS.append(word.strip())
def convert(snippet, phrase):
class_names = [w.capitalize() for w in
random.sample(WORDS, snippet.count("%%%"))]
other_names = random.sample(WORDS, snippet.count("***"))
results = []
param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1,3)
param_names.append(', '.join(random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
# fake class names
for word in class_names:
result = result.replace("%%%", word, 1)
# fake other names
for word in other_names:
result = result.replace("***", word, 1)
# fake parameter lists
for word in param_names:
result = result.replace("@@@", word, 1)
results.append(result)
return results
def go():
try:
while True:
snippets = PHRASES.keys()
random.shuffle(snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASE_FIRST == False:
question, answer = answer, question
print question
raw_input("> ")
print "ANSWER: %s\n\n" % answer
except EOFError:
print "\nBye" | zxdnester | /zxdnester-1.9.zip/zxdnester-1.9/zxdnester.py | zxdnester.py |
# zxext
*A supplementary library for Zhixuewang*
This package collects some of the less commonly used functions related to the zhixuewang library.
**Each zxext release corresponds to exactly one version of the zhixuewang library!**
### Version mapping
| zxext version | corresponding zhixuewang version |
| :---------: | :----------------: |
| 0.1.0-0.1.1 | 1.1.12 |
## Supported features
1. Workbook support (`workbook`)
## Issues and suggestions
If you run into any problems while using this package, feel free to ask in an [Issue](https://github.com/anwenhu/zhixuewang/issues).
You can also join the QQ group 862767072 to discuss (join note: Zhixuewang extension).
## Examples
```python
from zxext.workbook import Workbook # import the workbook module
from zhixuewang import * # import the core library
teacher = login("114514", "1919810")
wb = Workbook(teacher.get_session())
print(wb.search_press("人教"))
# >> 272
```
## Contributing
Contributions are best submitted to the `zhixuewang` library itself, unless the feature is one of the less commonly used ones.
zxgraphs: A Python implementation of ZX graphs for quantum computing.
=====================================================================
.. image:: https://img.shields.io/badge/python-3.9+-green.svg
:target: https://docs.python.org/3.9/
:alt: Python versions
.. image:: https://img.shields.io/pypi/v/zxgraphs.svg
:target: https://pypi.python.org/pypi/zxgraphs/
:alt: PyPI version
.. image:: https://img.shields.io/pypi/status/zxgraphs.svg
:target: https://pypi.python.org/pypi/zxgraphs/
:alt: PyPI status
.. image:: http://www.mypy-lang.org/static/mypy_badge.svg
:target: https://github.com/python/mypy
:alt: Checked with Mypy
.. image:: https://readthedocs.org/projects/zxgraphs/badge/?version=latest
:target: https://zxgraphs.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://github.com/hashberg-io/zxgraphs/actions/workflows/python-pytest.yml/badge.svg
:target: https://github.com/hashberg-io/zxgraphs/actions/workflows/python-pytest.yml
:alt: Python package status
.. image:: https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square
:target: https://github.com/RichardLitt/standard-readme
:alt: standard-readme compliant
ZX graphs are a graph-theoretic tool to represent quantum circuits and computations in terms of computational basis (Z basis) and Fourier basis (X basis) tensors (known to us as "spiders") for finite Abelian group algebras.
They are closely related to the original qubit ZX calculus [1]_ [2]_ and recent developments in higher-dimensional variants [3]_ [4]_.
.. [1] Bob Coecke and Ross Duncan, *"Interacting Quantum Observables: Categorical Algebra and Diagrammatics"*, 2009. `arXiv:0906.4725 <https://arxiv.org/abs/0906.4725>`_
.. [2] Aleks Kissinger and John van de Wetering, *"PyZX: Large Scale Automated Diagrammatic Reasoning"*, 2020. `arXiv:1904.04735 <https://arxiv.org/abs/1904.04735>`_, `pyzx on GitHub <https://github.com/Quantomatic/pyzx>`_
.. [3] John van de Wetering and Lia Yeh, *"Phase gadget compilation for diagonal qutrit gates"*, 2022. `arXiv:2204.13681 <https://arxiv.org/abs/2204.13681>`_
.. [4] Robert I. Booth and Titouan Carette, *"Complete ZX-calculi for the stabiliser fragment in odd prime dimensions"*, 2022. `arXiv:2204.12531 <https://arxiv.org/abs/2204.12531>`_
.. contents::
Install
-------
ZX graphs are currently in pre-alpha development.
Once development is complete, you will be able to install the latest release from `PyPI <https://pypi.org/project/zxgraphs/>`_ as follows:
.. code-block:: console
$ pip install --upgrade zxgraphs
Usage
-----
ZX graphs are currently in pre-alpha development.
API
---
ZX graphs are currently in pre-alpha development. Once development is complete, the full API documentation will be available at https://zxgraphs.readthedocs.io/
Contributing
------------
Please see `<CONTRIBUTING.md>`_.
License
-------
`LGPL (c) Hashberg Ltd. <LICENSE>`_
| zxgraphs | /zxgraphs-0.0.post5.tar.gz/zxgraphs-0.0.post5/README.rst | README.rst |
# Security Policy
We take security of this package very seriously.
## Supported Versions
We will issue security updates for [PyPI releases](https://pypi.org/project/zxgraphs/) with the latest minor version number (regardless of micro version), by releasing a new minor version.
If you find a vulnerability which is not in any of the PyPI releases with the latest minor version, you should instead report it as a bug by [filing an issue](https://github.com/hashberg-io/zxgraphs/issues).
## Reporting a Vulnerability
To report a vulnerability, please send an email to [email protected] with the following information:
- how we can contact you privately
- how you wish to be publicly identified for the purpose of credit when we disclose the vulnerability
- which package releases are affected
- the Python version (including OS, if relevant) and the versions of all dependencies that you used when confirming the vulnerability
- detailed description of the vulnerability, including how we can reproduce it
We will come back to you within 24 hours to acknowledge your report and we will provide a detailed response within 48 hours, including an initial assessment of how we intend to address the vulnerability you disclosed. If the fix requires a prolonged amount of time (> 1 week), we will send you weekly updates on our progress.
## Disclosure Process
1. Upon initial acknowledgment, we will assign a Unique ID `UID` to your security report, which we will reference in all our communications using the header `[security report #UID]`.
2. Fixes are prepared and held locally in a new branch, without pushing to the public repository.
3. When all fixes are ready to be pushed, an issue announcing the existence of a vulnerability is opened on GitHub: this includes package versions affected, security report UID and embargo date (typically 72 hours from the issue being opened), but no further information.
4. On the embargo date, the fix branch is pushed and merged into the main branch, closing the issue, and a new minor version is released on both PyPI and GitHub. The release notes on GitHub provide a detailed description of the vulnerability, including credit to the initial discloser(s), as well as a summary of how the vulnerability was patched.
| zxgraphs | /zxgraphs-0.0.post5.tar.gz/zxgraphs-0.0.post5/SECURITY.md | SECURITY.md |
# Contributing
All contributions, big and small, are very appreciated!
[File an issue](#file-an-issue) for bug reports, suggestions and questions, or [make a pull request](#make-a-pull-request) to actively contribute to the code or documentation.
However you decide to help, please refer to our [code of conduct](CODE_OF_CONDUCT.md) for what we expect from our community.
## File an Issue
Issues can be filed at https://github.com/hashberg-io/zxgraphs/issues. You can file an issue to:
- report a bug (using the `bug` label)
- suggest a new feature (using the `enhancement` label)
- suggest improvements to our documentation (using the `documentation` label)
- ask for information and start a discussion thread (using the `question` label)
If you are reporting a bug, please include the following information:
- project version (PyPI version number or commit number)
- Python version
- version of installed dependencies
- how the bug manifests (e.g. what you expect to happen vs what actually happens)
- how others can reproduce the bug
Please try to be concise in your description, providing a minimal reproducible example whenever possible.
If you're proposing a new feature, please describe it in detail, with a few examples of it might be implemented and of its intended usage.
## Make a Pull Request
You can [make a pull request](https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests) to:
- fix a bug which is reported and discussed in an open issue
- implement a new feature which is suggested and discussed in an open issue
- improve our documentation in ways suggested and discussed in an open issue
You should [link your pull request to the issue(s)](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) that it addresses. Once your pull request passes all continuous integration checks, we will review it and either:
- approve the pull request and merge it
- start a discussion on how to improve the pull request before it can be approved
- reject the pull request, with an explanation as to why we don't consider it viable for improvement
### Continuous Integration
You can perform continuous integration checks on all supported versions by running [tox](https://tox.readthedocs.io/en/latest/) in the main project folder:
```
tox
```
Continuous integration involves the following individual checks:
1. testing with [pytest](https://docs.pytest.org/):
```
pytest test
```
2. static type-checking with [mypy](http://mypy-lang.org/):
```
mypy zxgraphs
```
3. linting with [pylint](https://www.pylint.org/):
```
pylint zxgraphs
```
Whenever relevant, please consider contributing some additional tests pertaining to your implementation.
### Documentation
The API documentation for this project is generated by [Sphinx](https://www.sphinx-doc.org/): please document any code changes and additions using [reST](https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html) docstrings. The documentation is generated by running the following commands in the [docs/](docs/) folder:
```
docs>make api
docs>make clean
docs>make html
```
The script `make-api-clean-html.bat` automates the procedure on Windows. If you edit the [readme page](README.rst), please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification and run `readme_renderer` to check that it renders without issues:
```
python -m readme_renderer README.rst -o README-PROOF.html
```
| zxgraphs | /zxgraphs-0.0.post5.tar.gz/zxgraphs-0.0.post5/CONTRIBUTING.md | CONTRIBUTING.md |
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[email protected].
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| zxgraphs | /zxgraphs-0.0.post5.tar.gz/zxgraphs-0.0.post5/CODE_OF_CONDUCT.md | CODE_OF_CONDUCT.md |
Getting Started
===============
ZX graphs are a graph-theoretic tool to represent quantum circuits and computations in terms of computational basis (Z basis) and Fourier basis (X basis) tensors (known to us as "spiders") for finite Abelian group algebras.
ZX graphs are currently in pre-alpha development.
Once development is complete, you will be able to install the latest release from `PyPI <https://pypi.org/project/zxgraphs/>`_ as follows:
.. code-block:: console
$ pip install --upgrade zxgraphs
GitHub repo: https://github.com/hashberg-io/zxgraphs
| zxgraphs | /zxgraphs-0.0.post5.tar.gz/zxgraphs-0.0.post5/docs/getting-started.rst | getting-started.rst |
zxgraphs: A Python implementation of ZX graphs for quantum computing.
=====================================================================
ZX graphs are a graph-theoretic tool to represent quantum circuits and computations in terms of computational basis (Z basis) and Fourier basis (X basis) tensors (known to us as "spiders") for finite Abelian group algebras.
GitHub repo: https://github.com/hashberg-io/zxgraphs
.. toctree::
:maxdepth: 3
:caption: Contents
getting-started
.. include:: api-toc.rst
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| zxgraphs | /zxgraphs-0.0.post5.tar.gz/zxgraphs-0.0.post5/docs/index.rst | index.rst |
import glob
import importlib
import inspect
import json
import os
import pkgutil
from typing import Dict, List, Optional, Tuple
import sys
from typing_validation import validate
def _list_package_contents(pkg_name: str) -> List[str]:
modules = [pkg_name]
for submod in pkgutil.iter_modules([pkg_name.replace(".", "/")]):
submod_fullname = pkg_name+"."+submod.name
if submod.ispkg:
for subsubmod_name in _list_package_contents(submod_fullname):
modules.append(subsubmod_name)
else:
modules.append(submod_fullname)
return modules
def make_apidocs() -> None:
"""
A script to generate .rst files for API documentation.
"""
err_msg = """Expected a 'make-api.json' file, with the following structure:
{
"pkg_name": str,
"apidocs_folder": str,
"pkg_path": str,
"toc_filename": Optional[str],
"include_members": Dict[str, List[str]],
"exclude_members": Dict[str, List[str]],
"exclude_modules": List[str],
"member_fullnames": Dict[str, Dict[str, str]],
"special_class_members": Dict[str, List[str]],
}
Set "toc_filename" to null to avoid generating a table of contents file.
"""
try:
with open("make-api.json", "r") as f:
config = json.load(f)
pkg_name = config.get("pkg_name", None)
validate(pkg_name, str)
pkg_path = config.get("pkg_path", None)
validate(pkg_path, str)
apidocs_folder = config.get("apidocs_folder", None)
validate(apidocs_folder, str)
toc_filename = config.get("toc_filename", None)
validate(toc_filename, Optional[str])
include_members = config.get("include_members", {})
validate(include_members, Dict[str, List[str]])
exclude_members = config.get("exclude_members", {})
validate(exclude_members, Dict[str, List[str]])
include_modules = config.get("include_modules", [])
validate(include_modules, List[str])
exclude_modules = config.get("exclude_modules", [])
validate(exclude_modules, List[str])
member_fullnames = config.get("member_fullnames", {})
validate(member_fullnames, Dict[str, Dict[str, str]])
special_class_members = config.get("special_class_members", {})
validate(special_class_members, Dict[str, List[str]])
except FileNotFoundError:
print(err_msg)
sys.exit(1)
except TypeError:
print(err_msg)
sys.exit(1)
cwd = os.getcwd()
os.chdir(pkg_path)
sys.path = [os.getcwd()]+sys.path
modules = _list_package_contents(pkg_name)
modules_dict = {
mod_name: importlib.import_module(mod_name)
for mod_name in modules
}
for mod_name in include_modules:
if mod_name not in modules_dict:
modules_dict[mod_name] = importlib.import_module(mod_name)
os.chdir(cwd)
print(f"Removing all docfiles from {apidocs_folder}/")
for apidoc_file in glob.glob(f"{apidocs_folder}/*.rst"):
print(f" {apidoc_file}")
os.remove(apidoc_file)
print()
for mod_name, mod in modules_dict.items():
if mod_name in exclude_modules:
continue
filename = f"{apidocs_folder}/{mod_name}.rst"
print(f"Writing API docfile {filename}")
lines: List[str] = [
mod_name,
"="*len(mod_name),
"",
f".. automodule:: {mod_name}",
""
]
mod__all__ = getattr(mod, "__all__", [])
reexported_members: List[Tuple[str, str]] = []
for member_name in sorted(name for name in dir(mod) if not name.startswith("_")):
if mod_name in exclude_members and member_name in exclude_members[mod_name]:
continue
member = getattr(mod, member_name)
member_module = inspect.getmodule(member)
member_module_name = member_module.__name__ if member_module is not None else None
imported_member = member_module is not None and member_module != mod
if mod_name in include_members and member_name in include_members[mod_name]:
imported_member = False
if mod_name in member_fullnames and member_name in member_fullnames[mod_name]:
member_fullname = member_fullnames[mod_name][member_name]
elif imported_member:
if inspect.ismodule(member):
member_fullname = member_module_name or ""
else:
member_fullname = f"{member_module_name}.{member_name}"
else:
member_fullname = f"{mod_name}.{member_name}"
member_kind = "data"
if inspect.isclass(member):
member_kind = "class"
elif inspect.isfunction(member):
member_kind = "function"
elif inspect.ismodule(member):
member_kind = "module"
if not imported_member:
member_lines: List[str] = []
member_lines = [
member_name,
"-"*len(member_name),
"",
f".. auto{member_kind}:: {member_fullname}",
]
if member_kind == "class":
member_lines.append(" :show-inheritance:")
member_lines.append(" :members:")
if member_fullname in special_class_members and special_class_members[member_fullname]:
member_lines.append(f" :special-members: {', '.join(special_class_members[member_fullname])}")
member_lines.append("")
print(f" {member_kind} {member_name}")
lines.extend(member_lines)
elif member_name in mod__all__:
reexported_members.append((member_fullname, member_kind))
if reexported_members:
reexported_members_header = f"{mod_name}.__all__"
print(f" {reexported_members_header}:")
lines.extend([
reexported_members_header,
"-"*len(reexported_members_header),
"",
"The following members were explicitly reexported using ``__all__``:",
"",
])
refkinds = {
"data": "obj",
"function": "func",
"class": "class",
"module": "mod"
}
for member_fullname, member_kind in reexported_members:
refkind = f":py:{refkinds[member_kind]}:"
lines.append(f" - {refkind}`{member_fullname}`")
print(f" {member_kind} {member_fullname}")
lines.append("")
with open(filename, "w") as f:
f.write("\n".join(lines))
print("")
toctable_lines = [
".. toctree::",
" :maxdepth: 2",
" :caption: API Documentation",
""
]
print(f"Writing TOC for API docfiles at {toc_filename}")
for mod_name in modules_dict:
if mod_name in exclude_modules:
continue
line = f" {apidocs_folder}/{mod_name}"
toctable_lines.append(line)
print(line)
toctable_lines.append("")
print()
with open(toc_filename, "w") as f:
f.write("\n".join(toctable_lines))
if __name__ == "__main__":
make_apidocs() | zxgraphs | /zxgraphs-0.0.post5.tar.gz/zxgraphs-0.0.post5/docs/make-api.py | make-api.py |
# Python bindings for zxing-cpp
[](https://github.com/zxing-cpp/zxing-cpp/actions/workflows/python-build.yml)
[](https://pypi.org/project/zxing-cpp/)
## Installation
```bash
pip install zxing-cpp
```
or
```bash
python setup.py install
```
Note: To install via `setup.py`, you need a suitable [build environment](https://github.com/zxing-cpp/zxing-cpp#build-instructions), including a C++ compiler.
## Usage
```python
import cv2
import zxingcpp
img = cv2.imread('myimage.png')
results = zxingcpp.read_barcodes(img)
for result in results:
print("Found barcode:\n Text: '{}'\n Format: {}\n Position: {}"
.format(result.text, result.format, result.position))
if len(results) == 0:
print("Could not find any barcode.")
```
| zxing-cpp | /zxing-cpp-2.0.0.tar.gz/zxing-cpp-2.0.0/README.md | README.md |
# python-zxing
[](https://pypi.python.org/pypi/zxing)
[](https://github.com/dlenski/python-zxing/actions/workflows/test_and_release.yml)
[](https://www.gnu.org/licenses/lgpl-3.0)
This is a wrapper for the [ZXing barcode library](https://github.com/zxing/zxing).
It will allow you to read and decode barcode images from Python.
It was originally a "slightly less quick-and-dirty" fork of [oostendo/python-zxing](https://github.com/oostendo/python-zxing), but has since
evolved considerably beyond that ancestral package.
## Dependencies and installation
Use the Python 3 version of pip (usually invoked via `pip3`) to install: `pip3 install zxing`
* You'll need to have a recent `java` binary somewhere in your path. (Tested with OpenJDK v7, v8, v11.)
* pip will automatically download the relevant [JAR](https://en.wikipedia.org/wiki/JAR_(file_format)) files for the Java ZXing libraries (currently v3.5.1)
## Usage
The `BarCodeReader` class is used to decode images:
```python
>>> import zxing
>>> reader = zxing.BarCodeReader()
>>> print(reader.zxing_version, reader.zxing_version_info)
3.5.1 (3, 5, 1)
>>> barcode = reader.decode("test/barcodes/QR_CODE-easy.png")
>>> print(barcode)
BarCode(raw='This should be QR_CODE', parsed='This should be QR_CODE', path='test/barcodes/QR_CODE-easy.png', format='QR_CODE', type='TEXT', points=[(15.0, 87.0), (15.0, 15.0), (87.0, 15.0), (75.0, 75.0)])
```
The attributes of the decoded `BarCode` object are `raw`, `parsed`, `path`, `format`, `type`, and `points`. The list of formats which ZXing can decode is
[here](https://zxing.github.io/zxing/apidocs/com/google/zxing/BarcodeFormat.html).
The `decode()` method accepts an image path or [PIL Image object](https://pillow.readthedocs.io/en/stable/reference/Image.html) (or list thereof)
and takes optional parameters `try_harder` (boolean), `possible_formats` (list of formats to consider), and `pure_barcode` (boolean).
If no barcode is found, it returns a `False`-y `BarCode` object with all fields except `path` set to `None`.
If it encounters any other recognizable error from the Java ZXing library, it raises `BarCodeReaderException`.
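Putting these pieces together, a minimal sketch (the file name `myimage.png` and the chosen `possible_formats` are placeholders, not defaults):
```python
import zxing
reader = zxing.BarCodeReader()
barcode = reader.decode('myimage.png', try_harder=True, possible_formats=['QR_CODE', 'CODE_128'])
if barcode:
    print('Found a', barcode.format, 'barcode:', barcode.parsed)
else:
    print('No barcode found in', barcode.path)
```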
## Command-line interface
The command-line interface can decode images into barcodes and output in either a human-readable or CSV format:
```
usage: zxing [-h] [-c] [--try-harder] [-V] image [image ...]
```
Human-readable:
```sh
$ zxing /tmp/barcode.png
/tmp/barcode.png
================
Decoded TEXT barcode in QR_CODE format.
Raw text: 'Testing 123'
Parsed text: 'Testing 123'
```
CSV output (can be opened by LibreOffice or Excel):
```sh
$ zxing /tmp/barcode1.png /tmp/barcode2.png /tmp/barcode3.png
Filename,Format,Type,Raw,Parsed
/tmp/barcode1.png,CODE_128,TEXT,Testing 123,Testing 123
/tmp/barcode2.png,QR_CODE,URI,http://zxing.org,http://zxing.org
/tmp/barcode3.png,QR_CODE,TEXT,"This text, ""Has stuff in it!"" Wow⏎Yes it does!","This text, ""Has stuff in it!"" Wow⏎Yes it does!"
```
## License
LGPLv3
| zxing | /zxing-1.0.tar.gz/zxing-1.0/README.md | README.md |
ZXingLight
==========
|build-status| |docs-status| |pypi-package|
A simple wrapper for `ZXing C++`_ inspired by zbarlight_.
Documentation is available at <https://zxinglight.readthedocs.io/en/latest/>.
.. _ZXing C++: https://github.com/glassechidna/zxing-cpp
.. _zbarlight: https://github.com/Polyconseil/zbarlight
.. |build-status| image:: https://travis-ci.org/lubo/zxinglight.svg?branch=master
:alt: Build status
:target: https://travis-ci.org/lubo/zxinglight
.. |docs-status| image:: https://readthedocs.org/projects/zxinglight/badge/?version=latest
:alt: Documentation status
:target: https://zxinglight.readthedocs.io/en/latest/
.. |pypi-package| image:: https://badge.fury.io/py/zxinglight.svg
:alt: PyPI package
:target: https://badge.fury.io/py/zxinglight
| zxinglight | /zxinglight-1.1.0.tar.gz/zxinglight-1.1.0/README.rst | README.rst |
# python-zxing
[](https://pypi.python.org/pypi/zxing)
[](https://travis-ci.com/dlenski/python-zxing)
[](https://www.gnu.org/licenses/lgpl-3.0)
This is a wrapper for the [ZXing barcode library](https://github.com/zxing/zxing). (It's a "slightly less quick-and-dirty" fork of [oostendo/python-zxing](https://github.com/oostendo/python-zxing).)
It will allow you to read and decode barcode images from Python.
## Dependencies and installation
Use the Python 3 version of pip (usually invoked via `pip3`) to install: `pip3 install zxing`
* You'll need to have a recent `java` binary somewhere in your path. (Tested with OpenJDK.)
* pip will automatically download the relevant [JAR](https://en.wikipedia.org/wiki/JAR_(file_format)) files for the Java ZXing libraries (currently v3.4.1)
## Usage
The `BarCodeReader` class is used to decode images:
```python
>>> import zxing
>>> reader = zxing.BarCodeReader()
>>> print(reader.zxing_version, reader.zxing_version_info)
3.4.1 (3, 4, 1)
>>> barcode = reader.decode("test/barcodes/QR_CODE-easy.png")
>>> print(barcode)
BarCode(raw='This should be QR_CODE', parsed='This should be QR_CODE', format='QR_CODE', type='TEXT', points=[(15.0, 87.0), (15.0, 15.0), (87.0, 15.0), (75.0, 75.0)])
```
The attributes of the decoded `BarCode` object are `raw`, `parsed`, `format`, `type`, and `points`. The list of formats which ZXing can decode is
[here](https://zxing.github.io/zxing/apidocs/com/google/zxing/BarcodeFormat.html).
The `decode()` method accepts an image path (or list of paths) and takes optional parameters `try_harder` (boolean), `possible_formats` (list of formats to consider), and `pure_barcode` (boolean).
If no barcode is found, it returns `None`, and if it encounters any other recognizable error from the Java ZXing library, it raises `BarCodeReaderException`.
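For example, a minimal sketch of decoding several images at once (the file names are hypothetical; with a list argument, `decode()` returns a list of results in the same order, with `None` where no barcode was found):
```python
>>> paths = ["/tmp/barcode1.png", "/tmp/barcode2.png"]
>>> results = reader.decode(paths, try_harder=True)
>>> for path, bc in zip(paths, results):
...     print(path, "->", bc.format if bc else "no barcode found")
```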
## Command-line interface
The command-line interface can decode images into barcodes and output in either a human-readable or CSV format:
```
usage: zxing [-h] [-c] [--try-harder] image [image ...]
```
Human-readable:
```sh
$ zxing /tmp/barcode.png
/tmp/barcode.png
================
Decoded TEXT barcode in QR_CODE format.
Raw text: 'Testing 123'
Parsed text: 'Testing 123'
```
CSV output (can be opened by LibreOffice or Excel):
```sh
$ zxing /tmp/barcode1.png /tmp/barcode2.png /tmp/barcode3.png
Filename,Format,Type,Raw,Parsed
/tmp/barcode1.png,CODE_128,TEXT,Testing 123,Testing 123
/tmp/barcode2.png,QR_CODE,URI,http://zxing.org,http://zxing.org
/tmp/barcode3.png,QR_CODE,TEXT,"This text, ""Has stuff in it!"" Wow⏎Yes it does!","This text, ""Has stuff in it!"" Wow⏎Yes it does!"
```
## License
LGPLv3
| zxingmod | /zxingmod-0.19.tar.gz/zxingmod-0.19/README.md | README.md |
from __future__ import print_function
from urllib.parse import quote
from enum import Enum
import pathlib
import zipfile
from .version import __version__
import subprocess as sp, re, os
class BarCodeReaderException(Exception):
def __init__(self, message, filename=None, underlying=None):
self.message, self.filename, self.underlying = message, filename, underlying
super().__init__(message, filename, underlying)
class BarCodeReader(object):
cls = "com.google.zxing.client.j2se.CommandLineRunner"
def __init__(self, classpath=None, java=None):
self.java = java or 'java'
self.zxing_version = self.zxing_version_info = None
if classpath:
self.classpath = classpath if isinstance(classpath, str) else ':'.join(classpath)
elif "ZXING_CLASSPATH" in os.environ:
self.classpath = os.environ.get("ZXING_CLASSPATH","")
else:
self.classpath = os.path.join(os.path.dirname(__file__), 'java', '*')
with zipfile.ZipFile(os.path.join(os.path.dirname(__file__), 'java', 'core.jar')) as c:
for line in c.open('META-INF/MANIFEST.MF'):
if line.startswith(b'Bundle-Version: '):
self.zxing_version = line.split(b' ', 1)[1].strip().decode()
self.zxing_version_info = tuple(int(n) for n in self.zxing_version.split('.'))
break
def decode(self, filenames, try_harder=False, possible_formats=None, pure_barcode=False, products_only=False):
possible_formats = (possible_formats,) if isinstance(possible_formats, str) else possible_formats
if isinstance(filenames, str):
one_file = True
filenames = filenames,
else:
one_file = False
file_uris = [ pathlib.Path(f).absolute().as_uri() for f in filenames ]
cmd = [self.java, '-cp', self.classpath, self.cls] + file_uris
if try_harder:
cmd.append('--try_harder')
if pure_barcode:
cmd.append('--pure_barcode')
if products_only:
cmd.append('--products_only')
if possible_formats:
for pf in possible_formats:
cmd += ['--possible_formats', pf ]
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=False)
except FileNotFoundError as e:
raise BarCodeReaderException("Java binary specified (%s) does not exist" % self.java, self.java, e)
except PermissionError as e:
raise BarCodeReaderException("Java binary specified (%s) is not executable" % self.java, self.java, e)
stdout, stderr = p.communicate()
if stdout.startswith((b'Error: Could not find or load main class com.google.zxing.client.j2se.CommandLineRunner',
b'Exception in thread "main" java.lang.NoClassDefFoundError:')):
raise BarCodeReaderException("Java JARs not found in classpath (%s)" % self.classpath, self.classpath)
elif stdout.startswith((b'''Exception in thread "main" javax.imageio.IIOException: Can't get input stream from URL!''',
b'''Exception in thread "main" java.util.concurrent.ExecutionException: javax.imageio.IIOException: Can't get input stream from URL!''')):
raise BarCodeReaderException("Could not find image path: %s" % filenames, filenames)
elif stdout.startswith(b'''Exception in thread "main" java.io.IOException: Could not load '''):
raise BarCodeReaderException("Java library could not read image; is it in a supported format?", filenames)
elif stdout.startswith(b'''Exception '''):
raise BarCodeReaderException("Unknown Java exception: %s" % stdout)
elif p.returncode:
raise BarCodeReaderException("Unexpected Java subprocess return code %d" % p.returncode, self.java)
if p.returncode:
codes = [ None for fn in filenames ]
else:
file_results = []
for line in stdout.splitlines(True):
if line.startswith((b'file:///',b'Exception')):
file_results.append(line)
else:
file_results[-1] += line
codes = [ BarCode.parse(result) for result in file_results ]
if one_file:
return codes[0]
else:
# zxing (insanely) randomly reorders the output blocks, so we have to put them back in the
# expected order, based on their URIs
d = {c.uri: c for c in codes if c is not None} # there can be None in codes
return [d[f] if f in d else None for f in file_uris]
class CLROutputBlock(Enum):
UNKNOWN = 0
RAW = 1
PARSED = 2
POINTS = 3
class BarCode(object):
@classmethod
def parse(cls, zxing_output):
block = CLROutputBlock.UNKNOWN
uri = format = type = None
raw = parsed = b''
points = []
for l in zxing_output.splitlines(True):
if block==CLROutputBlock.UNKNOWN:
if l.endswith(b': No barcode found\n'):
return None
m = re.match(rb"(\S+) \(format:\s*([^,]+),\s*type:\s*([^)]+)\)", l)
if m:
uri, format, type = m.group(1).decode(), m.group(2).decode(), m.group(3).decode()
elif l.startswith(b"Raw result:"):
block = CLROutputBlock.RAW
elif block==CLROutputBlock.RAW:
if l.startswith(b"Parsed result:"):
block = CLROutputBlock.PARSED
else:
raw += l
elif block==CLROutputBlock.PARSED:
if re.match(rb"Found\s+\d+\s+result\s+points?", l):
block = CLROutputBlock.POINTS
else:
parsed += l
elif block==CLROutputBlock.POINTS:
m = re.match(rb"\s*Point\s*\d+:\s*\(([\d.]+),([\d.]+)\)", l)
if m:
points.append((float(m.group(1)), float(m.group(2))))
raw = raw[:-1].decode()
parsed = parsed[:-1].decode()
return cls(uri, format, type, raw, parsed, points)
def __init__(self, uri, format, type, raw, parsed, points):
self.raw = raw
self.parsed = parsed
self.uri = uri
self.format = format
self.type = type
self.points = points
def __repr__(self):
return '{}(raw={!r}, parsed={!r}, uri={!r}, format={!r}, type={!r}, points={!r})'.format(
self.__class__.__name__, self.raw, self.parsed, self.uri, self.format, self.type, self.points) | zxingmod | /zxingmod-0.19.tar.gz/zxingmod-0.19/zxing/__init__.py | __init__.py |
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from ._jsii import *
import aws_cdk as _aws_cdk_ceddda9d
import aws_cdk.aws_ec2 as _aws_cdk_aws_ec2_ceddda9d
import aws_cdk.aws_iam as _aws_cdk_aws_iam_ceddda9d
import constructs as _constructs_77d1e7e8
@jsii.data_type(
jsii_type="cdk-construct-simple-nat.RouteProps",
jsii_struct_bases=[],
name_mapping={"exclude_i_pv6": "excludeIPv6"},
)
class RouteProps:
def __init__(self, *, exclude_i_pv6: typing.Optional[builtins.bool] = None) -> None:
'''Properties for how IPs are added to the route.
:param exclude_i_pv6: If excluding IPv6 when creating route. Default: - false
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__4d34548d22997ef5a79c5e9d6f568bc469b4cd6258c1f2ceb04fc1b6982a08d8)
check_type(argname="argument exclude_i_pv6", value=exclude_i_pv6, expected_type=type_hints["exclude_i_pv6"])
self._values: typing.Dict[builtins.str, typing.Any] = {}
if exclude_i_pv6 is not None:
self._values["exclude_i_pv6"] = exclude_i_pv6
@builtins.property
def exclude_i_pv6(self) -> typing.Optional[builtins.bool]:
'''If excluding IPv6 when creating route.
:default: - false
'''
result = self._values.get("exclude_i_pv6")
return typing.cast(typing.Optional[builtins.bool], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RouteProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class SimpleNAT(
_aws_cdk_ceddda9d.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="cdk-construct-simple-nat.SimpleNAT",
):
'''Simple NAT instances construct.'''
def __init__(
self,
scope: _constructs_77d1e7e8.Construct,
id: builtins.str,
*,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
custom_scripts: typing.Optional[builtins.str] = None,
instance_type: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType] = None,
key_name: typing.Optional[builtins.str] = None,
machine_image: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage] = None,
nat_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
private_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
role: typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole] = None,
) -> None:
'''
:param scope: -
:param id: -
:param vpc: The VPC the NAT instances will reside in.
:param custom_scripts: The custom script when provisioning the NAT instances. Default: - no custom script.
:param instance_type: The instance type of NAT instances. Default: - t3.MICRO.
:param key_name: The key name of ssh key of NAT instances. Default: - No SSH access will be possible.
:param machine_image: The AMI of NAT instances. Default: - Amazon Linux 2 for x86_64.
:param nat_subnets_selection: The subnet selection for NAT instances, one NAT instance will be placed in the selected subnets. NOTE: must select the public subnet Default: - subnetType is SubnetType.PUBLIC and onePerAZ is true.
:param private_subnets_selection: The subnet selection for updating route tables for selected subnets. Default: - subnetType is SubnetType.PRIVATE_WITH_NAT.
:param role: The IAM role attached to NAT instances. Default: - an IAM role is created.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__197af24f4f5730f96fa183c445a232b5186626045f427ebb5867ad1d8c7e09da)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
props = SimpleNATProps(
vpc=vpc,
custom_scripts=custom_scripts,
instance_type=instance_type,
key_name=key_name,
machine_image=machine_image,
nat_subnets_selection=nat_subnets_selection,
private_subnets_selection=private_subnets_selection,
role=role,
)
jsii.create(self.__class__, self, [scope, id, props])
@jsii.member(jsii_name="addV4Route")
def add_v4_route(self, v4_cidr: builtins.str) -> "SimpleNAT":
'''
:param v4_cidr: -
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__c1c06f12fb3f5aa03c07a8dcbe1253103ac4995ab0bfa1a628dabf2fd78d682e)
check_type(argname="argument v4_cidr", value=v4_cidr, expected_type=type_hints["v4_cidr"])
return typing.cast("SimpleNAT", jsii.invoke(self, "addV4Route", [v4_cidr]))
@jsii.member(jsii_name="addV6Route")
def add_v6_route(self, v6_cidr: builtins.str) -> "SimpleNAT":
'''
:param v6_cidr: -
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__93261506b573bb0ec3744c03025737a256877686f97d9d1c0ce75e2181794949)
check_type(argname="argument v6_cidr", value=v6_cidr, expected_type=type_hints["v6_cidr"])
return typing.cast("SimpleNAT", jsii.invoke(self, "addV6Route", [v6_cidr]))
@jsii.member(jsii_name="withCloudflareRoute")
def with_cloudflare_route(
self,
*,
exclude_i_pv6: typing.Optional[builtins.bool] = None,
) -> "SimpleNAT":
'''Add Cloudflare IPs to route table.
See https://www.cloudflare.com/ips/ for details
:param exclude_i_pv6: If excluding IPv6 when creating route. Default: - false
'''
props = RouteProps(exclude_i_pv6=exclude_i_pv6)
return typing.cast("SimpleNAT", jsii.invoke(self, "withCloudflareRoute", [props]))
@jsii.member(jsii_name="withGithubRoute")
def with_github_route(
self,
*,
exclude_i_pv6: typing.Optional[builtins.bool] = None,
) -> "SimpleNAT":
'''Add Github IPs to route table.
:param exclude_i_pv6: If excluding IPv6 when creating route. Default: - false
'''
props = RouteProps(exclude_i_pv6=exclude_i_pv6)
return typing.cast("SimpleNAT", jsii.invoke(self, "withGithubRoute", [props]))
@jsii.member(jsii_name="withGoogleRoute")
def with_google_route(
self,
*,
exclude_i_pv6: typing.Optional[builtins.bool] = None,
) -> "SimpleNAT":
'''Add Google IPs to route table.
:param exclude_i_pv6: If excluding IPv6 when creating route. Default: - false
'''
props = RouteProps(exclude_i_pv6=exclude_i_pv6)
return typing.cast("SimpleNAT", jsii.invoke(self, "withGoogleRoute", [props]))
@jsii.python.classproperty
@jsii.member(jsii_name="Ipv6Regex")
def IPV6_REGEX(cls) -> builtins.str:
return typing.cast(builtins.str, jsii.sget(cls, "Ipv6Regex"))
@jsii.data_type(
jsii_type="cdk-construct-simple-nat.SimpleNATProps",
jsii_struct_bases=[],
name_mapping={
"vpc": "vpc",
"custom_scripts": "customScripts",
"instance_type": "instanceType",
"key_name": "keyName",
"machine_image": "machineImage",
"nat_subnets_selection": "natSubnetsSelection",
"private_subnets_selection": "privateSubnetsSelection",
"role": "role",
},
)
class SimpleNATProps:
def __init__(
self,
*,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
custom_scripts: typing.Optional[builtins.str] = None,
instance_type: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType] = None,
key_name: typing.Optional[builtins.str] = None,
machine_image: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage] = None,
nat_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
private_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
role: typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole] = None,
) -> None:
'''Properties for NAT instances.
:param vpc: The VPC the NAT instances will reside in.
:param custom_scripts: The custom script when provisioning the NAT instances. Default: - no custom script.
:param instance_type: The instance type of NAT instances. Default: - t3.MICRO.
:param key_name: The key name of ssh key of NAT instances. Default: - No SSH access will be possible.
:param machine_image: The AMI of NAT instances. Default: - Amazon Linux 2 for x86_64.
:param nat_subnets_selection: The subnet selection for NAT instances, one NAT instance will be placed in the selected subnets. NOTE: must select the public subnet Default: - subnetType is SubnetType.PUBLIC and onePerAZ is true.
:param private_subnets_selection: The subnet selection for updating route tables for selected subnets. Default: - subnetType is SubnetType.PRIVATE_WITH_NAT.
:param role: The IAM role attached to NAT instances. Default: - an IAM role is created.
'''
if isinstance(nat_subnets_selection, dict):
nat_subnets_selection = _aws_cdk_aws_ec2_ceddda9d.SubnetSelection(**nat_subnets_selection)
if isinstance(private_subnets_selection, dict):
private_subnets_selection = _aws_cdk_aws_ec2_ceddda9d.SubnetSelection(**private_subnets_selection)
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__cb36a69875376bdd591a68534a6ce401cf1725004ca106be48dca435ff5b574c)
check_type(argname="argument vpc", value=vpc, expected_type=type_hints["vpc"])
check_type(argname="argument custom_scripts", value=custom_scripts, expected_type=type_hints["custom_scripts"])
check_type(argname="argument instance_type", value=instance_type, expected_type=type_hints["instance_type"])
check_type(argname="argument key_name", value=key_name, expected_type=type_hints["key_name"])
check_type(argname="argument machine_image", value=machine_image, expected_type=type_hints["machine_image"])
check_type(argname="argument nat_subnets_selection", value=nat_subnets_selection, expected_type=type_hints["nat_subnets_selection"])
check_type(argname="argument private_subnets_selection", value=private_subnets_selection, expected_type=type_hints["private_subnets_selection"])
check_type(argname="argument role", value=role, expected_type=type_hints["role"])
self._values: typing.Dict[builtins.str, typing.Any] = {
"vpc": vpc,
}
if custom_scripts is not None:
self._values["custom_scripts"] = custom_scripts
if instance_type is not None:
self._values["instance_type"] = instance_type
if key_name is not None:
self._values["key_name"] = key_name
if machine_image is not None:
self._values["machine_image"] = machine_image
if nat_subnets_selection is not None:
self._values["nat_subnets_selection"] = nat_subnets_selection
if private_subnets_selection is not None:
self._values["private_subnets_selection"] = private_subnets_selection
if role is not None:
self._values["role"] = role
@builtins.property
def vpc(self) -> _aws_cdk_aws_ec2_ceddda9d.IVpc:
'''The VPC the NAT instances will reside in.'''
result = self._values.get("vpc")
assert result is not None, "Required property 'vpc' is missing"
return typing.cast(_aws_cdk_aws_ec2_ceddda9d.IVpc, result)
@builtins.property
def custom_scripts(self) -> typing.Optional[builtins.str]:
'''The custom script when provisioning the NAT instances.
:default: - no custom script.
'''
result = self._values.get("custom_scripts")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def instance_type(self) -> typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType]:
'''The instance type of NAT instances.
:default: - t3.MICRO.
'''
result = self._values.get("instance_type")
return typing.cast(typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType], result)
@builtins.property
def key_name(self) -> typing.Optional[builtins.str]:
'''The key name of ssh key of NAT instances.
:default: - No SSH access will be possible.
'''
result = self._values.get("key_name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def machine_image(self) -> typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage]:
'''The AMI of NAT instances.
:default: - Amazon Linux 2 for x86_64.
'''
result = self._values.get("machine_image")
return typing.cast(typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage], result)
@builtins.property
def nat_subnets_selection(
self,
) -> typing.Optional[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection]:
'''The subnet selection for NAT instances, one NAT instance will be placed in the selected subnets.
NOTE: must select the public subnet
:default: - subnetType is SubnetType.PUBLIC and onePerAZ is true.
'''
result = self._values.get("nat_subnets_selection")
return typing.cast(typing.Optional[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection], result)
@builtins.property
def private_subnets_selection(
self,
) -> typing.Optional[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection]:
'''The subnet selection for updating route tables for selected subnets.
:default: - subnetType is SubnetType.PRIVATE_WITH_NAT.
'''
result = self._values.get("private_subnets_selection")
return typing.cast(typing.Optional[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection], result)
@builtins.property
def role(self) -> typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole]:
'''The IAM role attached to NAT instances.
:default: - an IAM role is created.
'''
result = self._values.get("role")
return typing.cast(typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "SimpleNATProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"RouteProps",
"SimpleNAT",
"SimpleNATProps",
]
publication.publish()
def _typecheckingstub__4d34548d22997ef5a79c5e9d6f568bc469b4cd6258c1f2ceb04fc1b6982a08d8(
*,
exclude_i_pv6: typing.Optional[builtins.bool] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__197af24f4f5730f96fa183c445a232b5186626045f427ebb5867ad1d8c7e09da(
scope: _constructs_77d1e7e8.Construct,
id: builtins.str,
*,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
custom_scripts: typing.Optional[builtins.str] = None,
instance_type: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType] = None,
key_name: typing.Optional[builtins.str] = None,
machine_image: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage] = None,
nat_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
private_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
role: typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__c1c06f12fb3f5aa03c07a8dcbe1253103ac4995ab0bfa1a628dabf2fd78d682e(
v4_cidr: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__93261506b573bb0ec3744c03025737a256877686f97d9d1c0ce75e2181794949(
v6_cidr: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__cb36a69875376bdd591a68534a6ce401cf1725004ca106be48dca435ff5b574c(
*,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
custom_scripts: typing.Optional[builtins.str] = None,
instance_type: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType] = None,
key_name: typing.Optional[builtins.str] = None,
machine_image: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage] = None,
nat_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
private_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
role: typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole] = None,
) -> None:
"""Type checking stubs"""
pass | zxkane.cdk-construct-simple-nat | /zxkane.cdk_construct_simple_nat-0.2.387-py3-none-any.whl/zxkane/cdk_construct_simple_nat/__init__.py | __init__.py |
__author__ = 'Zhang Fan'
from lxml import etree
class _base_library:
@staticmethod
def get_once(result, default=None):
if result:
return result[0]
return default
# region xpath原始查询代码
@staticmethod
def xpath_once(node, code, default=None):
return _base_library.get_once(node.xpath('{}[1]'.format(code)), default=default)
@staticmethod
def xpath_all(node, code):
return node.xpath(code)
# endregion
# region 比较判断
@staticmethod
def is_element(obj):
return isinstance(obj, etree._Element) or \
isinstance(obj, etree._ElementUnicodeResult) or \
isinstance(obj, etree._Comment)
@staticmethod
def is_node_element(obj):
# 判断对象是否为元素节点
return isinstance(obj, etree._Element)
@staticmethod
def is_text_element(obj):
# 判断对象是否为文本节点
return isinstance(obj, etree._ElementUnicodeResult)
@staticmethod
def is_comment(obj):
return isinstance(obj, etree._Comment)
# endregion
# region 转换获取
@staticmethod
def to_etree(text):
return etree.HTML(text)
@staticmethod
def to_string(node, default=None, del_none=True):
if isinstance(node, list):
result = []
for s in node:
s = _base_library.to_string(s, default)
if s or not del_none:
result.append(s)
return result
else:
return node.xpath('string(.)')
@staticmethod
def get_text(node, default=None, del_none=True):
if isinstance(node, list):
result = []
for s in node:
s = _base_library.get_text(s, default)
if s or not del_none:
result.append(s)
return result
else:
return _base_library.get_once(node.xpath('./text()'), default)
@staticmethod
def get_attr(node, attr, default=None):
return _base_library.get_once(node.xpath('./@' + attr), default)
@staticmethod
def get_html(node, encoding=None):
bhtml = etree.tostring(node, encoding=encoding)
if encoding:
return bhtml.decode(encoding)
return bhtml.decode()
# endregion
# region 高级查询
@staticmethod
def _parser_attr(**attrs):
if len(attrs) == 0:
return ''
fmt = '[{}]'
attr_fmt_all = '@{}'
attr_fmt = '@{}="{}"'
not_fmt = 'not({})'
text_fmt = 'text()="{}"'
search_attrs = []  # attributes to match
not_attrs = []  # attributes to exclude
for key, value in attrs.items():
if value is None:  # skip invalid (None) attribute values
continue
# determine whether this is an exclusion attribute
_not = False
if value is False:
_not = True
# strip the leading underscore and mark as an exclusion
if key[0] == '_':
_not = True
key = key[1:]
# strip the trailing underscore from class_
if key == 'class_':
key = 'class'
# convert key:value into an xpath predicate
if value is True or value is False:
attr_text = 'text()' if key == 'text' else attr_fmt_all.format(key)
else:
attr_text = text_fmt.format(value) if key == 'text' else attr_fmt.format(key, value)
search_attrs.append(attr_text) if not _not else not_attrs.append(attr_text)
# handle the exclusion attributes
if not_attrs:
not_attrs = ' or '.join(not_attrs)
not_attrs = not_fmt.format(not_attrs)
search_attrs.append(not_attrs)
# join the predicates
search_attrs = ' and '.join(search_attrs)
if search_attrs:
return fmt.format(search_attrs)
return ''
@staticmethod
def find(node, name=None, class_=None, text=None, deep=True, **attrs):
'''
Find a single node.
:param node: the source node
:param name: element name; if it is not a str, all element names are matched
:param class_: the class attribute
:param text: the text value
:param deep: if True, descendant (grandchild) nodes are searched recursively as well
:param attrs: prefixing an attribute name with an underscore excludes it, e.g. _id=True becomes not(@id) in xpath;
an attribute value of True means the attribute may have any value
:return: an etree._Element node on success, None on failure
'''
result = _base_library._find(node, once=True, name=name, class_=class_, text=text, deep=deep, **attrs)
return _base_library.get_once(result)
@staticmethod
def find_all(node, name=None, class_=None, text=None, deep=True, **attrs):
'''Find multiple nodes; same usage as find. Returns a list, or an empty list if nothing matches.'''
return _base_library._find(node, once=False, name=name, class_=class_, text=text, deep=deep, **attrs)
@staticmethod
def _find(node, once=False, name=None, class_=None, text=None, deep=True, **attrs):
fmt = '{deep}{name}{attr_text}'
deep = './/' if deep else './'
name = name if isinstance(name, str) else '*'
attr_text = _base_library._parser_attr(class_=class_, text=text, **attrs)
code = fmt.format(deep=deep, name=name, attr_text=attr_text)
if once:
code = '{}[1]'.format(code)
return node.xpath(code)
# endregion
# region 节点树
@staticmethod
def find_pre(node):
# return all preceding sibling element nodes of the current node
return node.xpath('preceding-sibling::*')
@staticmethod
def find_pre_text(node):
# return all preceding sibling text nodes of the current node
return node.xpath('preceding-sibling::text()')
@staticmethod
def find_pre_all(node):
# return all preceding sibling nodes of the current node
return node.xpath('preceding-sibling::node()')
@staticmethod
def find_pre_one(node):
return _base_library.get_once(node.xpath('preceding-sibling::node()[1]'))
@staticmethod
def find_next(node):
# return all following sibling element nodes of the current node
return node.xpath('following-sibling::*')
@staticmethod
def find_next_text(node):
# return all following sibling text nodes of the current node
return node.xpath('following-sibling::text()')
@staticmethod
def find_next_all(node):
# return all following sibling nodes of the current node
return node.xpath('following-sibling::node()')
@staticmethod
def find_next_one(node):
return _base_library.get_once(node.xpath('following-sibling::node()[1]'))
@staticmethod
def find_child(node):
# return all child element nodes of the current node
return node.xpath('child::*')
@staticmethod
def find_child_text(node):
# return all child text nodes of the current node
return node.xpath('child::text()')
@staticmethod
def find_child_all(node):
# return all child nodes of the current node
return node.xpath('child::node()')
@staticmethod
def find_parent(node):
return _base_library.get_once(node.xpath('parent::*'))
@staticmethod
def find_ancestor(node):
return node.xpath('ancestor::*')
# endregion
class _Element_List(list):
@property
def empty(self):
return len(self) == 0
def is_empty(self):
return len(self) == 0
@property
def string(self):
return self.get_string()
@property
def text(self):
return self.get_text()
@property
def string_list(self):
return self.get_string()
@property
def text_list(self):
return self.get_text()
def get_string(self, join_str='\t', strip=True):
return join_str.join(self.get_string_list(strip))
def get_text(self, join_str='\t', strip=True):
return join_str.join(self.get_text_list(strip))
def get_string_list(self, strip=True):
if not strip:
return [node.string for node in self if node.string]
values = []
for node in self:
text = node.string.strip()
if text:
values.append(text)
return values
def get_text_list(self, strip=True):
if not strip:
return [node.text for node in self if node.text]
values = []
for node in self:
text = node.text.strip()
if text:
values.append(text)
return values
class _Element():
def __init__(self, src):
self.name = 'comment' if _base_library.is_comment(src) else src.tag.lower()
self.base = src
self._string = None
self._text = None
self._attrs = None
# region 原始xpath代码查询
def xpath_once(self, code):
result = _base_library.xpath_once(self.base, code=code)
return self._build_Element(result)
def xpath_all(self, code):
result = _base_library.xpath_all(self.base, code=code)
return self._build_Element(result)
# endregion
# region 查询函数
def find(self, name=None, class_=None, text=None, deep=True, **attrs):
result = _base_library.find(self.base, name=name, class_=class_, text=text, deep=deep,
**attrs)
return self._build_Element(result)
def find_all(self, name=None, class_=None, text=None, deep=True, **attrs):
result = _base_library.find_all(self.base, name=name, class_=class_, text=text, deep=deep,
**attrs)
return self._build_Element(result)
# endregion
# region 判断
@property
def is_element(self):
return True
@property
def is_node_element(self):
return True
@property
def is_text_element(self):
return False
@property
def is_comment(self):
return _base_library.is_comment(self.base)
# endregion
# region 转换-获取函数
@property
def string(self):
# return the combined text of everything under this node
return self.get_string()
@property
def text(self):
# return the text directly under this node
return self.get_text()
@property
def html(self):
return self.get_html()
def get_string(self):
if self._string is None:
result = _base_library.to_string(self.base)
self._string = self._build_Element(result)
return self._string
def get_text(self):
if self._text is None:
result = _base_library.get_text(self.base, '')
self._text = self._build_Element(result)
return self._text
def get_html(self, encoding='utf8'):
return _base_library.get_html(self.base, encoding)
def get_attr(self, attr, default=None):
# result = simple_xpath.get_attr(self.base, attr, default)
# return self._build_Element(result)
return self.attrs.get(attr, default)
@property
def attrs(self):
if self._attrs is None:
self._attrs = dict(self.base.attrib)
return self._attrs
# endregion
def remove_self(self):
_base_library.find_parent(self.base).remove(self.base)
self._string = None
self._text = None
def remove(self, element):
assert isinstance(element, _Element), 'only an _Element object can be removed'
self.base.remove(element.base)
self._string = None
self._text = None
# region 节点树
@property
def previous_siblings(self):
result = _base_library.find_pre(self.base)
return self._build_Element(result)
@property
def previous_siblings_all(self):
result = _base_library.find_pre_all(self.base)
return self._build_Element(result)
@property
def previous_siblings_text(self):
result = _base_library.find_pre_text(self.base)
return self._build_Element(result)
@property
def previous_siblings_one(self):
result = _base_library.find_pre_one(self.base)
return self._build_Element(result)
@property
def next_siblings(self):
result = _base_library.find_next(self.base)
return self._build_Element(result)
@property
def next_siblings_all(self):
result = _base_library.find_next_all(self.base)
return self._build_Element(result)
@property
def next_siblings_text(self):
result = _base_library.find_next_text(self.base)
return self._build_Element(result)
@property
def next_siblings_one(self):
result = _base_library.find_next_one(self.base)
return self._build_Element(result)
@property
def childs(self):
result = _base_library.find_child(self.base)
return self._build_Element(result)
@property
def childs_all(self):
result = _base_library.find_child_all(self.base)
return self._build_Element(result)
@property
def childs_text(self):
result = _base_library.find_child_text(self.base)
return self._build_Element(result)
@property
def parent(self):
result = _base_library.find_parent(self.base)
return self._build_Element(result)
@property
def ancestor(self):
result = _base_library.find_ancestor(self.base)
return self._build_Element(result)
# endregion
def __call__(self, *args, **kwargs):
return self.find_all(*args, **kwargs)
def _build_Element(self, node):
if isinstance(node, list):
return _Element_List([self._build_Element(n) for n in node])
if not node is None:
if isinstance(node, str):
return _TextElement(node)
return _Element(node)
def __getattr__(self, name):
# allow obj.xxx to fetch an attribute or search for a child node
if name not in self.__dict__:
result = self.get_attr(name, default=None)
if result is None:
result = self.find(name, deep=True)
self.__dict__[name] = result
return self.__dict__[name]
def __getitem__(self, name):
# allow obj['xxx'] to fetch an attribute
return self.attrs[name]
class _TextElement(str):
def __init__(self, value=''):
self.base = value
self.name = 'text'
super().__init__()
@property
def string(self):
return self
@property
def text(self):
return self
@property
def is_element(self):
return _base_library.is_element(self.base)
@property
def is_node_element(self):
return False
@property
def is_text_element(self):
return _base_library.is_text_element(self.base)
@property
def is_comment(self):
return False
def get_string(self):
return self
def get_text(self):
return self
def __getattr__(self, name):
return None
def __deepcopy__(self, memodict=None):
return self
class Element(_Element):
def __init__(self, src):
if not _base_library.is_element(src):
assert isinstance(src, str) and src, 'only an etree object or an HTML str can be passed in, got {}'.format(type(src))
src = _base_library.to_etree(src)
super().__init__(src)
def load(src):
return Element(src) | zxpath | /zxpath-1.0.3.tar.gz/zxpath-1.0.3/zxpath.py | zxpath.py |
# A more convenient xpath
### Used much like Beautiful Soup 4, but faster
```
import zxpath
zx = zxpath.load('an etree object or html source') # load
find() # find a single node, returns None on failure
zx.find('div', class_='content') # equivalent to .//div[@class="content"][1]
zx.find('div', class_=False) # equivalent to .//div[not(@class)][1]
zx.find('div', _class_='content') # equivalent to .//div[not(@class="content")][1]
zx.find('div', class_=True, deep=False) # equivalent to ./div[@class][1]; deep controls whether descendant (grandchild) nodes are searched
find_all() # find multiple nodes, same parameters as find, returns a list (empty list on failure)
zx(*attr, **kw) # same as find_all
```
> #_Element objects
> node = zx.find('div')
>
> node.id # get the id attribute
> node.text # get the text
> node.string # get all the text of the whole div
> node.a # get the first a element found under this node
> node.html # get this node's html source
> node.find
> node.find_all
> node(*attr, **kw) # same as find_all
> node.xpath_once # find a single node with raw xpath
> node.xpath_all # find multiple nodes with raw xpath
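A complete, minimal end-to-end sketch (the HTML snippet below is made up purely for illustration):
```python
import zxpath
zx = zxpath.load('<div class="content"><a href="/index">hello</a></div>')
node = zx.find('div', class_='content')  # first matching div element
print(node.a.text)    # hello  (first <a> under the div)
print(node.a.href)    # /index (attribute access via obj.xxx)
print(node.string)    # hello  (all text under the div)
```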
Changelog:
> 1.0.3
> string and text data are reset after a node is removed
> the find parameter sun_node was renamed to deep
> 1.0.2
> Added:
> _Element_List: multi-result queries on Element objects now return an _Element_List
> 1.0.1
> Fixed some bugs which previously caused:
> sibling text nodes being skipped when looking up the previous sibling node
> sibling text nodes being skipped when looking up the next sibling node
- - -
This project is provided for learning and discussion only; commercial use is prohibited.
| zxpath | /zxpath-1.0.3.tar.gz/zxpath-1.0.3/README.md | README.md |
from __future__ import annotations
import argparse
import ast
import code
import codecs
import contextlib
import inspect
import pipes
import re
import shlex
import subprocess
import sys
import traceback
from typing import Any, Generator, IO, Optional
UTF8Decoder = codecs.getincrementaldecoder("utf8")
class ZxpyArgs(argparse.Namespace):
interactive: Optional[bool]
filename: str
def cli() -> None:
"""
Simple CLI interface.
To run script(s):
zxpy script.py
To start a REPL:
zxpy
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-i',
'--interactive',
action='store_true',
help='Run in interactive mode',
)
parser.add_argument('filename', help='Name of file to run', nargs='?')
# Everything passed after a `--` is arguments to be used by the script itself.
script_args = ['/bin/sh']
try:
separator_index = sys.argv.index('--')
script_args.extend(sys.argv[separator_index + 1 :])
# Remove everything after `--` so that argparse passes
sys.argv = sys.argv[:separator_index]
except ValueError:
# `--` not present in command, so no extra script args
pass
args = parser.parse_args(namespace=ZxpyArgs())
# Once arg parsing is done, replace argv with script args
sys.argv = script_args
if args.filename is None:
setup_zxpy_repl()
return
with open(args.filename) as file:
module = ast.parse(file.read())
globals_dict: dict[str, Any] = {}
try:
run_zxpy(args.filename, module, globals_dict)
except Exception:
# Only catch the exception in interactive mode
if not args.interactive:
raise
traceback.print_exc()
if args.interactive:
globals().update(globals_dict)
install()
def is_inside_single_quotes(string: str, index: int) -> bool:
"""Returns True if the given index is inside single quotes in a shell command."""
quote_index = string.find("'")
if quote_index == -1:
# No single quotes
return False
if index < quote_index:
# We're before the start of the single quotes
return False
double_quote_index = string.find('"')
if double_quote_index >= 0 and double_quote_index < quote_index:
next_double_quote = string.find('"', double_quote_index + 1)
if next_double_quote == -1:
# Double quote opened but never closed
return False
# Single quotes didn't start and we passed the index
if next_double_quote >= index:
return False
# Ignore all single quotes inside double quotes.
index -= next_double_quote + 1
rest = string[next_double_quote + 1 :]
return is_inside_single_quotes(rest, index)
next_quote = string.find("'", quote_index + 1)
if next_quote >= index:
# We're inside single quotes
return True
index -= next_quote + 1
rest = string[next_quote + 1 :]
return is_inside_single_quotes(rest, index)
@contextlib.contextmanager
def create_shell_process(command: str) -> Generator[IO[bytes], None, None]:
"""Creates a shell process, yielding its stdout to read data from."""
# shell argument support, i.e. $0, $1 etc.
dollar_indices = [index for index, char in enumerate(command) if char == '$']
for dollar_index in reversed(dollar_indices):
if (
dollar_index >= 0
and dollar_index + 1 < len(command)
and command[dollar_index + 1].isdigit()
and not is_inside_single_quotes(command, dollar_index)
):
end_index = dollar_index + 1
while end_index + 1 < len(command) and command[end_index + 1].isdigit():
end_index += 1
number = int(command[dollar_index + 1 : end_index + 1])
# Get argument number from sys.argv
if number < len(sys.argv):
replacement = sys.argv[number]
else:
replacement = ""
command = command[:dollar_index] + replacement + command[end_index + 1 :]
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
)
assert process.stdout is not None
yield process.stdout
process.wait()
process.stdout.close()
if process.returncode != 0:
raise ChildProcessError(process.returncode)
def run_shell(command: str) -> str:
"""This is indirectly run when doing ~'...'"""
with create_shell_process(command) as stdout:
output = stdout.read().decode()
return output
def run_shell_print(command: str) -> None:
"""Version of `run_shell` that prints out the response instead of returning a string."""
with create_shell_process(command) as stdout:
decoder = UTF8Decoder()
with open(stdout.fileno(), 'rb', closefd=False) as buff:
for text in iter(buff.read1, b""):
print(decoder.decode(text), end="")
print(decoder.decode(b"", final=True), end="")
def run_shell_alternate(command: str) -> tuple[str, str, int]:
"""Like run_shell but returns 3 values: stdout, stderr and return code"""
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
stdout_text, stderr_text = process.communicate()
assert process.stdout is not None
assert process.stderr is not None
assert process.returncode is not None
return (
stdout_text.decode(),
stderr_text.decode(),
process.returncode,
)
def run_zxpy(
filename: str,
module: ast.Module,
globals_dict: dict[str, Any] | None = None,
) -> None:
"""Runs zxpy on a given file"""
patch_shell_commands(module)
code = compile(module, filename, mode="exec")
if globals_dict is None:
globals_dict = {}
globals_dict.update(
{
"__name__": "__main__",
"$run_shell": run_shell,
"$run_shell_alternate": run_shell_alternate,
"$run_shell_print": run_shell_print,
"$shlex_quote": shlex.quote,
}
)
exec(code, globals_dict)
def patch_shell_commands(module: ast.Module | ast.Interactive) -> None:
"""Patches the ast module to add zxpy functionality"""
shell_runner = ShellRunner()
shell_runner.visit(module)
ast.fix_missing_locations(module)
def quote_fstring_args(fstring: ast.JoinedStr) -> None:
for index, node in enumerate(fstring.values):
if isinstance(node, ast.FormattedValue):
# If it's marked as a raw shell string, then don't escape
if (
isinstance(node.format_spec, ast.JoinedStr)
and len(node.format_spec.values) == 1
and (
isinstance(node.format_spec.values[0], ast.Str)
and node.format_spec.values[0].s == "raw"
or isinstance(node.format_spec.values[0], ast.Constant)
and node.format_spec.values[0].value == "raw"
)
):
node.format_spec = None
continue
fstring.values[index] = ast.Call(
func=ast.Name(id="$shlex_quote", ctx=ast.Load()),
args=[node],
keywords=[],
)
class ShellRunner(ast.NodeTransformer):
"""Replaces the ~'...' syntax with run_shell(...)"""
@staticmethod
def modify_expr(
expr: ast.expr,
return_stderr_and_returncode: bool = False,
print_it: bool = False,
) -> ast.expr:
if (
isinstance(expr, ast.UnaryOp)
and isinstance(expr.op, ast.Invert)
and isinstance(expr.operand, (ast.Str, ast.JoinedStr))
):
if isinstance(expr.operand, ast.JoinedStr):
quote_fstring_args(expr.operand)
function_name = (
"$run_shell_alternate"
if return_stderr_and_returncode
else "$run_shell_print"
if print_it
else "$run_shell"
)
return ast.Call(
func=ast.Name(id=function_name, ctx=ast.Load()),
args=[expr.operand],
keywords=[],
)
return expr
def visit_Expr(self, expr: ast.Expr) -> ast.Expr:
expr.value = self.modify_expr(expr.value, print_it=True)
super().generic_visit(expr)
return expr
def visit_Assign(self, assign: ast.Assign) -> ast.Assign:
# If there's more than one target on the left, assume 3-tuple
multiple_targets = isinstance(assign.targets[0], (ast.List, ast.Tuple))
assign.value = self.modify_expr(
assign.value,
return_stderr_and_returncode=multiple_targets,
)
super().generic_visit(assign)
return assign
def visit_Call(self, call: ast.Call) -> ast.Call:
for index, arg in enumerate(call.args):
call.args[index] = self.modify_expr(arg)
super().generic_visit(call)
return call
def visit_Attribute(self, attr: ast.Attribute) -> ast.Attribute:
attr.value = self.modify_expr(attr.value)
super().generic_visit(attr)
return attr
def setup_zxpy_repl() -> None:
"""Sets up a zxpy interactive session"""
print("zxpy shell")
print("Python", sys.version)
print()
install()
sys.exit()
class ZxpyConsole(code.InteractiveConsole):
"""Runs zxpy over"""
def runsource(
self,
source: str,
filename: str = "<console>",
symbol: str = "single",
) -> bool:
# First, check if it could be incomplete input, return True if it is.
# This will allow it to keep taking input
with contextlib.suppress(SyntaxError, OverflowError):
if code.compile_command(source) is None:
return True
try:
ast_obj = ast.parse(source, filename, mode=symbol)
assert isinstance(ast_obj, ast.Interactive)
patch_shell_commands(ast_obj)
code_obj = compile(ast_obj, filename, mode=symbol)
except (ValueError, SyntaxError):
# Let the original implementation take care of incomplete input / errors
return super().runsource(source, filename, symbol)
self.runcode(code_obj)
return False
def install() -> None:
"""
Starts an interactive Python shell with zxpy features.
Useful for setting up a zxpy session in an already running REPL.
Simply do:
>>> import zx; zx.install()
and zxpy should be enabled in the REPL.
"""
# Get locals from parent frame
frames = inspect.getouterframes(inspect.currentframe())
if len(frames) > 1:
parent_frame = frames[1]
parent_locals = parent_frame.frame.f_locals
else:
parent_locals = {}
# For tab completion and arrow key support
if sys.platform != "win32":
import readline
readline.parse_and_bind("tab: complete")
zxpy_locals = {
**parent_locals,
"$run_shell": run_shell,
"$run_shell_alternate": run_shell_alternate,
"$run_shell_print": run_shell_print,
"$shlex_quote": shlex.quote,
}
ZxpyConsole(locals=zxpy_locals).interact(banner="", exitmsg="")
if __name__ == "__main__":
cli() | zxpy | /zxpy-1.6.3.tar.gz/zxpy-1.6.3/zx.py | zx.py |
# zxpy
[](https://pepy.tech/project/zxpy)
[](https://github.com/psf/black)
[](https://github.com/tusharsadhwani/zxpy/actions/workflows/tox.yml)
Shell scripts made simple 🐚
zxpy lets you seamlessly write shell commands inside Python code, to create readable and maintainable shell scripts.
Inspired by Google's [zx](https://github.com/google/zx), but made much simpler and more accessible using Python.
## Rationale
Bash is cool, and it's extremely powerful when paired with linux coreutils and pipes. But apart from that, it's a whole another language to learn, and has a (comparatively) unintuitive syntax for things like conditionals and loops.
`zxpy` aims to supercharge bash by allowing you to write scripts in Python, but with native support for bash commands and pipes.
Let's use it to find all `TODO`s in one of my other projects, and format them into a table:
```python
#! /usr/bin/env zxpy
todo_comments = ~"git grep -n TODO"
for todo in todo_comments.splitlines():
filename, lineno, code = todo.split(':', 2)
*_, comment = code.partition('TODO')
print(f"{filename:40} on line {lineno:4}: {comment.lstrip(': ')}")
```
Running this, we get:
```console
$ ./todo_check.py
README.md on line 154 : move this content somewhere more sensible.
instachat/lib/models/message.dart on line 7 : rename to uuid
instachat/lib/models/update.dart on line 13 : make int
instachat/lib/services/chat_service.dart on line 211 : error handling
server/api/api.go on line 94 : move these to /chat/@:address
server/api/user.go on line 80 : check for errors instead of relying on zero value
```
Writing something like this purely in bash or in Python would be much harder than this. Being able to use linux utilities seamlessly with a readable, general purpose language is what makes this a really powerful tool.
### A larger, practical example
You can find a comparison between a practical-ish script written in bash and
zxpy in [EXAMPLE.md](./EXAMPLE.md)
## Installation <a href="https://pypi.org/project/zxpy"><img src="https://img.shields.io/badge/pypi-zxpy-blue?style=flat"></a>
```console
pip install zxpy
```
### pipx
If you have `pipx` installed, you can try out zxpy without installing it, by running:
```console
pipx run zxpy
```
## Basic Examples
Make a file `script.py` (The name and extension can be anything):
```python
#! /usr/bin/env zxpy
~'echo Hello world!'
file_count = ~'ls -1 | wc -l'
print("file count is:", file_count)
```
And then run it:
```console
$ chmod +x ./script.py
$ ./script.py
Hello world!
file count is: 3
```
> Run `>>> help('zx')` in Python REPL to find out more ways to use zxpy.
A slightly more involved example: [run_all_tests.py](./examples/run_all_tests.py)
```python
#! /usr/bin/env zxpy
test_files = (~"find -name '*_test\.py'").splitlines()
for filename in test_files:
try:
print(f'Running {filename:.<50}', end='')
output = ~f'python {filename}' # variables in your shell commands :D
assert output == ''
print('Test passed!')
except:
print(f'Test failed.')
```
Output:
```bash
$ ./run_all_tests.py
Running ./tests/python_version_test.py....................Test failed.
Running ./tests/platform_test.py..........................Test passed!
Running ./tests/imports_test.py...........................Test passed!
```
More examples are in [EXAMPLE.md](./EXAMPLE.md), and in the [examples folder](./examples).
## `stderr` and return codes
To get `stderr` and return code information out of the shell command, there is an
alternative way of invoking the shell.
To use it, just use **3 variables** on the
left side of your `~'...'` shell string:
```python
stdout, stderr, return_code = ~'echo hi'
print(stdout) # hi
print(return_code) # 0
```
More examples are in the [examples folder](./examples).
## CLI Arguments
When writing a shell script, you often want to pass CLI arguments to it.
Like so:
```console
$ cat ./foo.sh
echo arg is: $1
$ ./foo.sh 123
arg is: 123
```
To do the same in `zxpy`, pass the script arguments after a `--` in the `zxpy` CLI command.
```python
#!/usr/bin/env zxpy
import sys
print("Argv is:", sys.argv)
~"echo output: $1 $2 $3"
```
```console
$ ./test.py
Argv is: ['/bin/sh']
output:
$ ./test.py -- abc def
Argv is: ['/bin/sh', 'abc', 'def']
output: abc def
```
Both `$1` and `sys.argv[1]` will do the same thing.
## Quoting
Take this shell command:
```console
$ uname -a
Linux pop-os 5.11.0 [...] x86_64 GNU/Linux
```
Now take this piece of code:
```pycon
>>> cmd = 'uname -a'
>>> ~f'{cmd}'
/bin/sh: 1: uname -a: not found
```
Why does this not work?
This is because `uname -a` was **quoted** into `'uname -a'`. All values passed
inside f-strings are automatically quoted to avoid [shell injection][1].
To prevent quoting, the `:raw` format_spec can be used:
```pycon
>>> cmd = 'uname -a'
>>> ~f'{cmd:raw}'
Linux pop-os 5.11.0 [...] x86_64 GNU/Linux
```
This _disables_ quoting, and the command is run as-is as provided in the string.
> Note that this shouldn't be used with external data, or this _will_ expose you
> to [shell injection][1].
## Interactive mode
```pycon
$ zxpy
zxpy shell
Python 3.8.5 (default, Jan 27 2021, 15:41:15)
[GCC 9.3.0]
>>> ~"ls | grep '\.py'"
__main__.py
setup.py
zx.py
>>>
```
> Also works with `path/to/python -m zx`
It can also be used to start a zxpy session in an already running REPL.
Simply do:
```pycon
>>> import zx; zx.install()
```
and zxpy should be enabled in the existing session.
## Development/Testing
To install from source, clone the repo, and do the following:
```console
$ source ./venv/bin/activate # Always use a virtualenv!
$ pip install -r requirements-dev.txt
Processing ./zxpy
[...]
Successfully installed zxpy-1.X.X
$ pytest # runs tests
```
[1]: https://owasp.org/www-community/attacks/Command_Injection
| zxpy | /zxpy-1.6.3.tar.gz/zxpy-1.6.3/README.md | README.md |
# zxtools
#### Introduction
A collection of zx's tools, mostly written in Python.
#### Installation
1. python -m pip install .
2. python -m pip install zxt
3. python -m pip install --upgrade zxt
#### Uploading
1. Create the .pypirc file
type NUL > %UserProfile%\.pypirc
2. The pypirc specification (a minimal sketch follows this list)
https://packaging.python.org/specifications/pypirc/
3. Upgrade the build tools
python -m pip install --upgrade build
python -m pip install --upgrade twine
4. Generating distribution archives
https://packaging.python.org/en/latest/tutorials/packaging-projects/
Change to the directory containing pyproject.toml; it is usually best to delete the dist directory first (RMDIR /S .\dist\ /Q)
python -m build
5. Uploading the distribution archives
https://packaging.python.org/en/latest/tutorials/packaging-projects/
python -m twine upload --repository zxt dist/*
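A minimal .pypirc sketch matching the `--repository zxt` upload above (the repository URL and token are placeholders, not values taken from this project):
[distutils]
index-servers = zxt
[zxt]
repository = https://upload.pypi.org/legacy/
username = __token__
password = pypi-<your-api-token>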
#### Debugging
1. Uninstall the zxt package
python -m pip uninstall zxt
2. Locate the directory containing pth.py in the zxt source and run the following command there:
python ./pth.py --dflt_opt=C
3. The source is now linked into the Python environment, so you can write code that imports the zxt package and debug it.
| zxt | /zxt-0.20230811.1830.tar.gz/zxt-0.20230811.1830/README.md | README.md |
# zxtaputils - Utilities for handling TAP files on the ZX Spectrum (Next)
## Description
This is a collection of small utilities for working with TAP files.
This suite consists of the following tools:
- bas2tap: turns BASIC code into a TAP file containing tokenized code
- tap2bas: view/save BASIC code contained in a TAP file
- tapextract: extract and save data from a TAP block
- tapify: store any files inside a TAP file as a container
- tapinfo: view information about a TAP file
- tapsplit: save a TAP file's blocks as individual files
| zxtaputils | /zxtaputils-1.0.0.tar.gz/zxtaputils-1.0.0/README.md | README.md |
=====================================
Tools to manipulate ZX Spectrum files
=====================================
.. image:: https://img.shields.io/travis/codeatcpp/zxtools/master.svg?style=flat
:target: https://travis-ci.org/codeatcpp/zxtools
.. image:: https://codecov.io/gh/codeatcpp/zxtools/branch/master/graph/badge.svg
:target: https://codecov.io/gh/codeatcpp/zxtools
.. image:: https://img.shields.io/github/release/codeatcpp/zxtools.svg?style=flat
:target: https://github.com/codeatcpp/zxtools/releases
.. image:: https://img.shields.io/pypi/v/zxtools.svg?style=flat
:target: https://pypi.python.org/pypi/zxtools
.. image:: http://img.shields.io/pypi/dm/zxtools.svg?style=flat
:target: https://pypi.python.org/pypi/zxtools
Here's a set of utils to manipulate files that were copied from a TR-DOS diskette or from a tape.
Originally the tools were written to simplify the following workflow:
1. Grab diskette image using `Hobeta <http://speccy.info/Hobeta>`_ tool.
2. Strip the file header and save the result to a new file.
3. Convert resulting `Zeus Z80 assembler <https://en.wikipedia.org/wiki/Zeus_Assembler>`_ file to the plain text format.
TODO: I have future plans to implement some more tools I need to restore my old ZX Spectrum projects.
But you can use them in whatever way you need. And it's very easy to use: download the package, run ``setup.py`` (or install via ``pip install zxtools``), and invoke it in the following way::
$ python3 -m zxtools.hobeta strip input.hobeta result.zeus
$ python3 -m zxtools.zeus2txt result.zeus listing.asm --include-code
.. image:: https://raw.githubusercontent.com/codeatcpp/zxtools/master/zeus2txt.jpg
NOTE: Python 3 is required to use this package. Python 2 is not supported, but you are welcome to fix that.
To view the resulting files with syntax colorization you can use special `Visual Studio Code plugin <https://marketplace.visualstudio.com/items?itemName=jia3ep.zeus-z80-asm>`_:
.. image:: https://raw.githubusercontent.com/codeatcpp/vscode-language-z80-asm/master/vscode.png
:target: https://marketplace.visualstudio.com/items?itemName=jia3ep.zeus-z80-asm
| zxtools | /zxtools-1.0.22.tar.gz/zxtools-1.0.22/README.rst | README.rst |
import curses
# NOTE: bug? mypy dont raises [attr-defined] erro
from curses.textpad import rectangle # type: ignore
from queue import SimpleQueue
from textwrap import wrap
from typing import List, Optional
from .datatypes import Margin, Pair, PairF
class Screen:
def __init__(self, screen):
self.scr = screen
self.vscreen = VScreen(self.scr)
def get_position(self) -> Pair:
data = self.scr.getbegyx()
return Pair(data[1], data[0])
def get_size(self) -> Pair:
data = self.scr.getmaxyx()
return Pair(data[1], data[0])
def draw(self) -> None:
self.vscreen.draw()
def recalculate(self) -> None:
self.vscreen.recalculate(
self.get_size() - Pair(2, 0), self.get_position(), Pair(0, 0), Pair(0, 1)
)
class VScreenTextbox:
def __init__(self, screen: Screen, buffer_size: int = 30):
self._screen = screen
self._rawdata: List[str] = []
self._buffer_size = buffer_size
self.data: List[str] = []
self.position = Pair(0, 0)
self.size = Pair(0, 0)
def add_text(self, text: str) -> None:
new_rawdata = [v.strip() for v in text.split("\n")]
# update formatted data
for ln in new_rawdata:
self.data += wrap(ln, self.size.x)
if len(self.data) > self.size.y:
self.data = self.data[len(self.data) - self.size.y :]
# update rawdata
self._rawdata += new_rawdata
if len(self._rawdata) > self._buffer_size:
self._rawdata = self._rawdata[len(self._rawdata) - self._buffer_size :]
def draw(self) -> None:
for i, v in enumerate(self.data):
self._screen.scr.addstr(
self.position.y + i, self.position.x, v, curses.A_NORMAL # type: ignore
)
def recalculate(self, position: Pair, size: Pair) -> None:
self.position = position
# recalculate formatted data
if self.size != size:
self.data = []
for ln in reversed(self._rawdata):
for i, v in enumerate(wrap(ln, self.size.x)):
self.data.insert(i, v)
if len(self.data) > self.size.y:
break
if len(self.data) > self.size.y:
self.data = self.data[len(self.data) - self.size.y :]
self.size = size
class VScreenLogbox(VScreenTextbox):
def __init__(
self, screen: Screen, data_source: Optional[SimpleQueue] = None, buffer_size: int = 30
):
super().__init__(screen, buffer_size)
self._data_source = data_source
def recalculate(self, position: Pair, size: Pair) -> None:
while self._data_source and (not self._data_source.empty()):
self.add_text(self._data_source.get())
super().recalculate(position, size)
class VScreen:
def __init__(
self,
screen: Screen,
sizep: PairF = PairF(1.0, 1.0),
subscreens: List["VScreen"] = [],
margin: Margin = Margin(0, 0, 0, 0),
data: Optional[VScreenTextbox] = None,
draw_borders: bool = False,
):
self.screen = screen
self.subscreens = subscreens
self.position = Pair(0, 0)
self.size = Pair(0, 0)
self._data = data
self.sizep = sizep
self.margin = margin
self.draw_borders = draw_borders
self.border_margin = Margin(1, 1, 1, 1) if draw_borders else Margin(0, 0, 0, 0)
def draw(self):
if self.draw_borders:
# draw bounds for subscreen
try:
rectangle(
self.screen.scr,
self.position.y,
self.position.x,
self.position.y + self.size.y - 1,
self.position.x + self.size.x - 1,
)
            # TODO[PP]: an exception is thrown when drawing in the bottom-right character cell;
            # proper handling and the correct exception type should be added
except Exception as e:
print(e)
                _ = None  # TODO[PP]: crude workaround for the bandit check [B110:try_except_pass]
if self.subscreens:
# draw subscreens
for sscreen in self.subscreens:
sscreen.draw()
else:
# draw data
if self._data:
self._data.draw()
def _get_data_position(self) -> Pair:
return Pair(
self.position.x + self.margin.left + self.border_margin.left,
self.position.y + self.margin.top + self.border_margin.top,
)
def _get_data_size(self) -> Pair:
return Pair(
self.size.x
- self.margin.left
- self.margin.right
- self.border_margin.left
- self.border_margin.right,
self.size.y
- self.margin.top
- self.margin.bottom
- self.border_margin.top
- self.border_margin.bottom,
)
def recalculate(
self,
parent_size: Pair,
parent_position: Pair,
position_shift: Pair,
shift_direct: Pair,
):
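        """Recalculate this screen's absolute position and size from its parent, then lay out
        any subscreens along ``shift_direct``, nudging a subscreen by one cell whenever the
        rounded proportional sizes drift from the expected running total."""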
self.position = parent_position + position_shift
self.size = (self.sizep * parent_size).round()
if self._data:
self._data.recalculate(self._get_data_position(), self._get_data_size())
if self.subscreens:
if shift_direct == Pair(0, 1):
subscreen_shift_direct = Pair(1, 0)
elif shift_direct == Pair(1, 0):
subscreen_shift_direct = Pair(0, 1)
else:
raise ValueError(f"Unsupported shift_direct value '{shift_direct}'")
pshift = Pair(0, 0)
sizep_sum = PairF(0.0, 0.0)
size_sum_test = Pair(0, 0)
size_sum = Pair(0, 0)
directed_one = Pair(1, 1) * shift_direct
for sscreen in self.subscreens:
sscreen.recalculate(
self.size,
self.position,
pshift * shift_direct,
subscreen_shift_direct,
)
sizep_sum += sscreen.sizep
size_sum += sscreen.size
size_sum_test = (sizep_sum * self.size).round()
if size_sum == size_sum_test:
pass
else:
if (size_sum.x < size_sum_test.x) or (size_sum.y < size_sum_test.y):
sscreen.size += directed_one
if self._data:
self._data.recalculate(
self._get_data_position(), self._get_data_size()
)
size_sum += directed_one
elif (size_sum.x > size_sum_test.x) or (size_sum.y > size_sum_test.y):
sscreen.size -= directed_one
if self._data:
self._data.recalculate(
self._get_data_position(), self._get_data_size()
)
size_sum -= directed_one
pshift = pshift + sscreen.size - self.position | zxvcv.util-cli | /zxvcv.util_cli-0.2.6-py311-none-any.whl/zxvcv/util_cli/vscreens.py | vscreens.py |
# Python Project Template
## Development
* Create and activate the virtualenv with `$ virtualenv venv --python=python3 && source venv/bin/activate`;
* Install the project dependencies with `$ make install_dev` or `$ pip install -e .[dev]`
* Coding
## Static Checks
The project is already configured to run static checks with `flake8`; just execute `$ make lint`.
## Testing
Unit tests live in the `tests/` directory and can be run with `$ make test`. See [pytest](https://github.com/pytest-dev/pytest) for how to write unit tests.
By default the unit tests run under Python 2.7/3.7; if you need to support more Python versions, edit `tox.ini`.
## Conventions
### Code Style
* The code must stay compatible with Python 2.7 and Python 3.7+;
* The code style follows [PEP8](https://www.python.org/dev/peps/pep-0008/), except that the maximum line width is relaxed to 120.
### Versioning
* Versions must follow [Semver](https://semver.org/lang/zh-CN/);
* After publishing a release branch, create a tag such as `v0.0.1`;
* For every release, update CHANGELOG.md following the [changelog conventions](https://keepachangelog.com/zh-CN/1.0.0/).
## Tips
### Makefile Guide
The Makefile gives developers quick access to common tasks; the currently supported targets are:
| Target | Purpose |
| :--------------: | :----------------------------------------------------------: |
| make | Runs install_dev, isort, isort_check, lint, test in order (**modifies your code!**) |
| make check | Runs install_dev, isort_check, lint, test in order |
| make install_dev | Installs the test dependencies (DEV_REQUIRES in setup.py) |
| make isort | Runs isort to normalize the import order (**modifies your code!**) |
| make isort_check | Checks that the import order is correct |
| make lint | Runs flake8 to check code style |
| make test | Runs tox to verify the unit tests |
| make clean | Removes test and check artifacts |
It is recommended to run make or make check before every release to keep the code clean and robust.
### Python 2/3 Compatibility
Add the following code at the top of **every** Python file (keep this import before any other import whenever possible):
```python
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
```
This keeps the code behaving as consistently as possible under Python 2 and Python 3.
For code that cannot be written compatibly for both Python 2 and Python 3, prefer the [six](https://pythonhosted.org/six/) module. For example, `range` behaves differently under Python 2 and Python 3, so using `six.moves.range` everywhere keeps the code compatible, as sketched below.
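Below is a minimal sketch of the `six.moves.range` idiom described above (the function name `sum_of_squares` is just an illustrative placeholder):
```python
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals

from six.moves import range  # behaves like Python 3's lazy range on both interpreters


def sum_of_squares(n):
    # no intermediate list is built, even on Python 2
    return sum(i * i for i in range(n))


print(sum_of_squares(10))  # 285
```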
### Type Annotations
Python [PEP484](https://www.python.org/dev/peps/pep-0484/) added optional type annotations; combined with [mypy](http://mypy-lang.org/), they enable static type checking of the code.
During development, write a matching `.pyi` stub for each `.py` file containing the signatures of the exported functions. This project is already configured with the relevant rules, so once the package is published its users can rely on the stubs for static type checking and code completion.
If you write docstrings for exported functions, besides following PEP8 you can also annotate and describe the input and output types. Use the format described in PyCharm's [documentation on docstring types](https://www.jetbrains.com/help/pycharm/using-docstrings-to-specify-types.html); PyCharm, Jedi (the engine behind vim / emacs / vscode completion), and other completion and static-analysis tools understand this format, and Sphinx-generated API docs recognize it as well.
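As a minimal sketch of this workflow (the module `mylib` and the function `parse_age` are hypothetical examples, not part of this template), the stub file simply mirrors the exported signatures:
```python
# mylib.py -- implementation stays Python 2/3 compatible and unannotated
def parse_age(raw, default=0):
    try:
        return int(raw)
    except (TypeError, ValueError):
        return default
```
```python
# mylib.pyi -- distributed next to mylib.py; only signatures, no bodies
from typing import Optional

def parse_age(raw: Optional[str], default: int = ...) -> int: ...
```
With the stub in place, `mypy` and IDE completion can type-check calls such as `parse_age("42")` without importing the implementation.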
### editorconfig
Install the [editorconfig](https://editorconfig.org/) editor plugin to keep the code style consistent. This project already ships a default configuration.
| zxy-Test | /zxy_Test-0.3.tar.gz/zxy_Test-0.0.3/README.md | README.md |
import base64
import hashlib
import hmac
import time
import uuid
from urllib.request import quote
# Part3 Packages
import requests
# Project Packages
from . import parse_response
"""
==========================================================================================
整理于 2019-04-16
修改于 2019-09-16(by ruijie.qiao) 添加API STS请求验证功能
请求参数案例1(默认AK传空值为STS Token验证方式,RoleName为空的默认值为ZhuyunFullReadOnlyAccess):
'AccessKeyId': None,
'AccessKeySecret': None,
'RoleName': None,
请求参数案例2(AK值不为空的时候,为普通的AK验证方式,这时候如果RoleName为非空,STS Token验证方式也不生效):
'AccessKeyId': XXXXXXXXXXXXXX,
'AccessKeySecret': XXXXXXXXXXXXXX,
'RoleName': None,
请求参数案例3(默认AK传空值为STS Token验证方式,RoleName不为空,RoleName为设置的值):
'AccessKeyId': None,
'AccessKeySecret': None,
'RoleName': XXXXXXXXXXXXXX,
==========================================================================================
"""
ROLE_URL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"
PRODUCT_API_CONFIG_MAP = {
'ecs': {
'domain': 'ecs.aliyuncs.com',
'version': '2014-05-26',
'port': 443,
'protocol': 'https'
},
'rds': {
'domain': 'rds.aliyuncs.com',
'version': '2014-08-15',
'port': 443,
'protocol': 'https'
},
'drds': {
'domain': 'drds.aliyuncs.com',
'version': '2015-04-13',
'port': 443,
'protocol': 'https'
},
'slb': {
'domain': 'slb.aliyuncs.com',
'version': '2014-05-15',
'port': 443,
'protocol': 'https'
},
'ess': {
'domain': 'ess.aliyuncs.com',
'version': '2014-08-28',
'port': 443,
'protocol': 'https'
},
'mts': {
'domain': 'mts.aliyuncs.com',
'version': '2014-06-18',
'port': 443,
'protocol': 'https'
},
'yundun': {
'domain': 'yundun.aliyuncs.com',
'version': '2014-09-24',
'port': 443,
'protocol': 'https'
},
'cdn': {
'domain': 'cdn.aliyuncs.com',
'version': '2018-05-10',
'port': 443,
'protocol': 'https'
},
'ram': {
'domain': 'ram.aliyuncs.com',
'version': '2015-05-01',
'port': 443,
'protocol': 'https'
},
'sts': {
'domain': 'sts.aliyuncs.com',
'version': '2015-04-01',
'port': 443,
'protocol': 'https'
},
'dysms': {
'domain': 'dysmsapi.aliyuncs.com',
'version': '2017-05-25',
'port': 443,
'protocol': 'https'
},
'dyvms': {
'domain': 'dyvmsapi.aliyuncs.com',
'version': '2017-05-25',
'port': 443,
'protocol': 'https'
},
'dybase': {
'domain': 'dybaseapi.aliyuncs.com',
'version': '2017-05-25',
'port': 443,
'protocol': 'https'
},
'redis': {
'domain': 'r-kvstore.aliyuncs.com',
'version': '2015-01-01',
'port': 443,
'protocol': 'https'
},
'mongodb': {
'domain': 'mongodb.aliyuncs.com',
'version': '2015-12-01',
'port': 443,
'protocol': 'https'
},
'dts': {
'domain': 'dts.aliyuncs.com',
'version': '2020-01-01',
'port': 443,
'protocol': 'https'
},
'vpc': {
'domain': 'vpc.aliyuncs.com',
'version': '2016-04-28',
'port': 443,
'protocol': 'https'
},
'cms': {
'domain': 'metrics.aliyuncs.com',
'version': '2019-01-01',
'port': 443,
'protocol': 'https',
},
'waf': {
'domain': 'wafopenapi.cn-hangzhou.aliyuncs.com',
'version': '2018-01-17',
'port': 443,
'protocol': 'https',
},
'domain': {
'domain': 'domain.aliyuncs.com',
'version': '2018-01-29',
'port': 443,
'protocol': 'https',
},
'business': {
'domain': 'business.aliyuncs.com',
'version': '2017-12-14',
'port': 443,
'protocol': 'https',
},
'ddospro': {
'domain': 'ddospro.cn-hangzhou.aliyuncs.com',
'version': '2017-07-25',
'port': 443,
'protocol': 'https',
},
'ddoscoo': {
'domain': 'ddoscoo.cn-hangzhou.aliyuncs.com',
'version': '2017-12-28',
'port': 443,
'protocol': 'https',
},
'avds': {
'domain': 'avds.aliyuncs.com',
'version': '2017-11-29',
'port': 443,
'protocol': 'https',
},
'cbn': {
'domain': 'cbn.aliyuncs.com',
'version': '2017-09-12',
'port': 443,
'protocol': 'https',
},
'smartag': {
'domain': 'smartag.cn-shanghai.aliyuncs.com',
'version': '2018-03-13',
'port': 443,
'protocol': 'https',
},
'polardb': {
'domain': 'polardb.aliyuncs.com',
'version': '2017-08-01',
'port': 443,
'protocol': 'https',
},
'arms': {
'domain': 'arms.[RegionId].aliyuncs.com',
'version': '2019-08-08',
'port': 443,
'protocol': 'https',
},
'edas': {
'domain': 'edas.[RegionId].aliyuncs.com',
'version': '2017-08-01',
'port': 443,
'protocol': 'https',
},
}
def percent_encode(string):
if string is None:
raise Exception('params is None')
    if not isinstance(string, (str, bytes, int)):
        raise TypeError(str(string) + ' params TypeError')
    if isinstance(string, bytes):
        string = string.decode('utf-8')
    elif isinstance(string, int):
        string = str(string)
    else:
        string.encode('utf-8').decode('utf-8')  # validate that the text round-trips as UTF-8
string = quote(string, '')
string = string.replace('+', '%20')
string = string.replace('*', '%2A')
string = string.replace('%7E', '~')
return string
class AliyunCommon(object):
"""Aliyun common HTTP API"""
def __init__(self, access_key_id=None, access_key_secret=None, role_name=None,
*args, **kwargs):
self.access_key_id = access_key_id
self.access_key_secret = access_key_secret
if role_name is None or role_name == "":
self.role_name = "ZhuyunFullReadOnlyAccess"
else:
self.role_name = role_name
self.security_token = None
def sign(self, params_to_sign):
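        # Aliyun POP-style signing: sort the parameters, percent-encode them into a canonical
        # query string, wrap it as 'POST&%2F&<encoded query>' and sign the result with
        # HMAC-SHA1 using '<access_key_secret>&' as the key; the signature is base64-encoded.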
canonicalized_query_string = ''
sorted_params = sorted(params_to_sign.items(),
key=lambda kv_pair: kv_pair[0])
for k, v in sorted_params:
canonicalized_query_string += percent_encode(k) + '=' + percent_encode(
v) + '&'
canonicalized_query_string = canonicalized_query_string[:-1]
string_to_sign = 'POST&%2F&' + percent_encode(canonicalized_query_string)
h = hmac.new(bytes(self.access_key_secret + "&", 'utf-8'),
bytes(string_to_sign, 'utf-8'), hashlib.sha1)
signature = base64.encodebytes(h.digest()).strip()
return signature
def verify(self):
status_code, _ = self.ecs(Action='DescribeRegions')
return (status_code == 200)
def call(self, domain, version, port=80, protocol='http', timeout=3,
**biz_params):
api_params = {
'Format': 'json',
'Version': version,
'AccessKeyId': self.access_key_id,
'SignatureVersion': '1.0',
'SignatureMethod': 'HMAC-SHA1',
'SignatureNonce': str(uuid.uuid4()),
'Timestamp': time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
'partner_id': '1.0',
}
if self.access_key_id is None or self.access_key_secret is None or self.access_key_secret == "" or self.access_key_id == "":
resp_role = requests.get(ROLE_URL + self.role_name)
if resp_role.status_code == 200:
parsed_resp = parse_response(resp_role)
self.access_key_id = parsed_resp.get('AccessKeyId')
self.access_key_secret = parsed_resp.get('AccessKeySecret')
self.security_token = parsed_resp.get('SecurityToken')
api_params['AccessKeyId'] = self.access_key_id
if self.security_token:
api_params['SecurityToken'] = self.security_token
api_params.update(biz_params)
api_params['Signature'] = self.sign(api_params)
url = '{}://{}:{}/'.format(protocol, domain, port)
resp = requests.post(url, data=api_params, timeout=timeout)
parsed_resp = parse_response(resp)
return resp.status_code, parsed_resp
def __getattr__(self, product):
api_config = PRODUCT_API_CONFIG_MAP.get(product)
if not api_config:
raise Exception(
                'Unknown Aliyun product API config.'
' Please use `call()` with full API configs.')
domain = api_config.get('domain')
version = api_config.get('version')
port = api_config.get('port')
protocol = api_config.get('protocol')
def f(timeout=3, **biz_params):
nonlocal domain
if '[RegionId]' in domain:
_RegionId = biz_params.get('RegionId')
if not _RegionId:
                    raise TypeError('Missing RegionId: '
                                    'this API config requires a RegionId.')
biz_params.pop('RegionId')
domain = domain.replace('[RegionId]', _RegionId)
return self.call(domain=domain, version=version, port=port,
protocol=protocol, timeout=timeout,
**biz_params)
return f | zy-aliyun-python-sdk | /zy_aliyun_python_sdk-0.1.6-py3-none-any.whl/aliyun_sdk/common.py | common.py |
import base64
import datetime
import hashlib
import hmac
# Part3 Packages
import requests
# Project Modules
from . import parse_response
GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
OSS_API_CONFIG = {
'top_domain': 'aliyuncs.com',
'version': '',
'port': 443,
'protocol': 'https'
}
class AliyunOSS(object):
'''
Aliyun OSS HTTP API
'''
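    # A minimal usage sketch (the bucket and object names below are hypothetical):
    #
    #     oss = AliyunOSS(access_key_id='XXXX', access_key_secret='XXXX')
    #     status, listing = oss.GET(RegionId='oss-cn-hangzhou', BucketName='my-bucket')
    #     status, _ = oss.PUT(RegionId='oss-cn-hangzhou', BucketName='my-bucket',
    #                         ObjectName='hello.txt', Body=b'hello world')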
def __init__(self, access_key_id=None, access_key_secret=None, *args, **kwargs):
self.access_key_id = access_key_id
self.access_key_secret = access_key_secret
self._requests_session = requests.Session()
def get_query_string(self, query=None):
if not query:
return ''
query_string = ''
for k, v in sorted(query.items(), key=lambda kv_pair: kv_pair[0]):
if v is None:
query_string += '&' + k
else:
query_string += '&' + k + '=' + v
return '?' + query_string[1:]
def get_canonicalized_header_string(self, headers=None):
canonicalized_header_string = ''
if headers:
oss_headers = [(k.lower(), v) for k, v in headers.items() if k.lower().startswith('x-oss-')]
sorted_oss_headers = sorted(oss_headers, key=lambda kv_pair: kv_pair[0])
if sorted_oss_headers:
canonicalized_header_string = '\n'.join(k + ':' + v for k, v in sorted_oss_headers) + '\n'
return canonicalized_header_string
def get_canonicalized_resource_string(self, bucket_name=None, object_name=None, query=None):
canonicalized_resource_string = '/'
if bucket_name:
canonicalized_resource_string += bucket_name + '/'
if object_name:
canonicalized_resource_string += object_name
if query:
query_string = self.get_query_string(query)
canonicalized_resource_string += query_string
return canonicalized_resource_string
def sign(self, req, canonicalized_header_string, canonicalized_resource_string):
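        # OSS signing: the string-to-sign is VERB, Content-MD5, Content-Type, Date plus the
        # canonicalized OSS headers and resource, joined by newlines, then HMAC-SHA1-signed
        # with the secret key and base64-encoded.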
string_to_sign = '\n'.join([
req.method.upper(),
req.headers.get('content-md5', ''),
req.headers.get('content-type', ''),
req.headers.get('date', ''),
canonicalized_header_string + canonicalized_resource_string
])
h = hmac.new(bytes(self.access_key_secret, 'utf-8'), bytes(string_to_sign, 'utf-8'), hashlib.sha1)
signature = base64.encodebytes(h.digest()).strip()
return signature.decode('utf-8')
def verify(self):
status_code, _ = self.call('GET', 'oss-cn-hangzhou')
return (status_code == 200)
def call(self, method, region_id=None, bucket_name=None, object_name=None, query=None, body=None, headers=None,
timeout=3):
method = method.upper()
region_id = region_id or 'oss-cn-hangzhou'
if object_name and object_name.startswith('/'):
object_name = object_name[1:]
headers = headers or {}
headers['date'] = datetime.datetime.utcnow().strftime(GMT_FORMAT)
h = hashlib.md5()
if body is not None:
h.update(body)
headers['content-md5'] = base64.encodebytes(h.digest()).strip().decode()
canonicalized_header_string = self.get_canonicalized_header_string(headers)
canonicalized_resource_string = self.get_canonicalized_resource_string(bucket_name, object_name, query)
domain = '{}.{}'.format(region_id, OSS_API_CONFIG['top_domain'])
if bucket_name:
domain = bucket_name + '.' + domain
url = '{}://{}/{}'.format(OSS_API_CONFIG['protocol'], domain, object_name or '')
if query:
query_string = self.get_query_string(query)
url += query_string
req = requests.Request(method, url, data=body, headers=headers)
prepared_req = self._requests_session.prepare_request(req)
signature = self.sign(prepared_req, canonicalized_header_string, canonicalized_resource_string)
prepared_req.headers['authorization'] = 'OSS {}:{}'.format(self.access_key_id, signature)
resp = self._requests_session.send(prepared_req, timeout=timeout)
parsed_resp = parse_response(resp)
return resp.status_code, parsed_resp
def __getattr__(self, method):
method = method.upper()
def f(timeout=3, **biz_params):
kwargs = {
'region_id': biz_params.get('RegionId'),
'bucket_name': biz_params.get('BucketName'),
'object_name': biz_params.get('ObjectName'),
'query': biz_params.get('Query'),
'body': biz_params.get('Body'),
'headers': biz_params.get('Headers'),
}
return self.call(method, **kwargs)
return f | zy-aliyun-python-sdk | /zy_aliyun_python_sdk-0.1.6-py3-none-any.whl/aliyun_sdk/oss.py | oss.py |
import time
import pymongo
import bson
import os
import shutil
import sys
import traceback
from datetime import datetime
import requests
from pymongo import MongoClient
"""
模板作用:
1、main_function 这个方法,提供对指定《数据库》中的指定《表》进行循环查询,将查询出来的 document,传递到 core_logic 中进行处理
2、core_logic 这个方法由调用 data_fix_main 的用户自己完成,必须有三个参数;
3、本模板,可以通过 pip install zy-tools 直接调用,不必拷贝过来拷贝过去
"""
# Just pass in a file name; a log file prefixed with the current script name is automatically created under the relative path.
def data_fix_logger(msg, fl_name='', mode="a", need_time=True, need_log=True):
if not need_log:
return
time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logger_file_name = os.path.basename(sys.argv[0])[:-3] + fl_name + ".log"
with open(logger_file_name, mode) as f:
if need_time:
f.write(time_str + ' => ')
f.write(msg)
f.write("\n")
# Get the last valid _id from the file, used to resume from a checkpoint
def get_last_id(file_name):
with open(file_name, 'r') as f:
index = -1
while True:
_id = f.readlines()[index].strip()
if len(_id) == 24:
return _id
data_fix_logger(f"Get the {-index} th _id error; Current _id: {_id}")
index -= 1
def get_q_size(service):
res = requests.get(service).json()
if res and res.get('data'):
return res.get('data').get('queueSize')
def find_documents(conn, data_base, collection, query, projection, sort_key="_id", sort_value=pymongo.ASCENDING, limits=0):
    # Sort by _id in ascending order by default and do not limit the number of returned results
_docs = conn[data_base][collection].find(query, projection).sort(sort_key, sort_value).limit(limits)
    # Put the result set into a list so it is easy to count
docs = [item for item in _docs]
return docs
def core_logic_example(conn, document, is_sandbox):
"""
    Example of the core processing logic; the caller implements their own version.
    [NOTE] It must take exactly these three parameters.
:param conn:
:param document:
:param is_sandbox:
:return:
"""
pass
def data_fix_main(uri, db, collection, more_filter, projections, core_logic, starts='', end_id='', quit_limit=0, limits=500, is_sandbox=True):
"""
    :param uri: MongoDB connection info
    :param db: MongoDB database name
    :param collection: MongoDB collection name
    :param more_filter: query filter
    :param projections: query projection
    :param core_logic: core logic; the caller implements a function that takes exactly three parameters:
                       the MongoDB connection object, the queried document, and is_sandbox
    :param starts: starting _id, optional
    :param end_id: ending _id, optional
    :param quit_limit: limit on the number of processed documents; the program exits once it is reached.
                       Useful for bounded test runs; defaults to 0 (no limit)
    :param limits: limit on documents returned per MongoDB query, defaults to 500
    :param is_sandbox: whether this is a test run, defaults to True
:return:
"""
start_id = ''
current_id = ''
exception_count = 0
has_query_count = 0
has_read_id_count = 0
conn = MongoClient(uri)
current_file_name = os.path.basename(sys.argv[0])[:-3]
    # If a checkpoint file exists, copy and back it up first, then keep using it, so that an error cannot prevent resuming from the previous checkpoint
old_logger_file = f"{current_file_name}_has_read_{collection}_ids.log"
if os.path.exists(old_logger_file):
try:
log_date = str(datetime.now().date())
log_time = str(datetime.now().time())[:-7]
bak_up_file_time = (log_date + '_' + log_time).replace('-', '_').replace(':', '_')
shutil.copy(old_logger_file, f"{current_file_name}_has_read_{collection}_ids_{bak_up_file_time}.log")
start_id = get_last_id(old_logger_file)
except Exception as e:
msg = str(e) + ",trace:" + traceback.format_exc()
data_fix_logger(f"Failed to get last id, exit! Error Msg {msg}.")
sys.exit()
    # Read the first _id from the database
if not start_id:
one_doc = conn[db][collection].find(more_filter, projection={"_id": 1}).sort("_id", pymongo.ASCENDING)
start_id = one_doc[0]["_id"]
data_fix_logger(str(start_id), fl_name=f'_has_read_{collection}_ids', mode='w', need_time=False)
query = {"_id": {"$gte": bson.ObjectId(start_id)}}
    # If a starting point is passed in, use that ObjectId; otherwise use the first _id queried from the database or the one read from the local file.
if starts:
query = {"_id": {"$gte": bson.ObjectId(starts)}}
if more_filter:
query.update(more_filter)
    # Exit the check after catching 20 exceptions
while exception_count < 20:
has_query_count += 1
docs = find_documents(conn, db, collection, query, projections, "_id", pymongo.ASCENDING, limits)
fl = "_query_db_counts"
log_msg = f"****** Has queried {collection}: {has_query_count}*{limits}={has_query_count * limits} documents. *******"
data_fix_logger(log_msg, fl_name=fl, mode="w")
try:
            # The query result is empty; exit immediately
if not docs:
data_fix_logger(f"Empty doc, exit! Last _id is: {current_id}.")
return
for doc in docs:
has_read_id_count += 1
current_id = _id = doc.get("_id")
query["_id"] = {"$gt": current_id}
data_fix_logger(str(current_id), fl_name=f'_has_read_{collection}_ids', mode='w', need_time=False)
                # If a quit limit is given, exit the program once the limit is reached;
                # when no value is passed for quit_limit, this check is skipped
if quit_limit and has_read_id_count > quit_limit:
data_fix_logger(f"Get end point, and mission is over! Last _id is: {current_id}.")
sys.exit()
                # Exit condition 2
if end_id and (current_id > end_id):
data_fix_logger(f"Get end point, and mission is over! Last _id is: {current_id}.")
                    sys.exit()  # exit the program
                # Core processing logic
core_logic(conn, doc, is_sandbox)
except Exception as e:
query["_id"] = {"$gt": current_id} # 新的query
data_fix_logger(f'Get error, exception msg is {str(e) + ",trace:" + traceback.format_exc()}, current _id is: {current_id}.')
exception_count += 1
data_fix_logger(f"Catch exception 20 times, mission is over. Last _id is: {current_id}.")
def generate_gem_ts(uri, db, collection, more_filter, projections, core_logic, start_id='', end_id='', service_address='', q_size_limit=0, limits=500, need_stream_process=False):
"""
    :param uri: MongoDB address (URI string or an existing MongoClient)
    :param db: MongoDB database name
    :param collection: MongoDB collection name
    :param more_filter: additional query conditions
    :param projections: projection
    :param core_logic: custom core processing logic
    :param start_id: custom query starting point, must be a MongoDB ObjectId
    :param end_id: custom query end point, must be a MongoDB ObjectId
    :param service_address: optional queue service address, used to query the queue size
    :param q_size_limit: optional queue size limit; querying is paused while the queue is larger than this
    :param limits: limit for each MongoDB query
    :param need_stream_process: whether to use stream processing; if True, the core logic handles all
                                records returned by each query at once; defaults to False (process one by one)
:return:
"""
query = {}
current_id = ''
exception_count = 0
has_query_count = 0
has_read_id_count = 0
if isinstance(uri, str):
conn = MongoClient(uri)
elif isinstance(uri, pymongo.mongo_client.MongoClient):
conn = uri
else:
        data_fix_logger('uri has an unsupported type, exiting.')
        sys.exit('uri has an unsupported type')
    # If a starting point is passed in, use that ObjectId; otherwise use the first _id queried from the database or the one read from the local file.
if start_id:
query = {"_id": {"$gte": bson.ObjectId(start_id)}}
if more_filter:
query.update(more_filter)
    # Exit the check after catching 20 exceptions
while exception_count < 20:
        # If we need to throttle based on the queue backlog
if service_address and q_size_limit:
while 1:
q_size = int(get_q_size(service_address))
if q_size > q_size_limit:
                    # one round every ten minutes
data_fix_logger(f'queue size is greater than {q_size_limit}, sleep ten minus, queue size: {q_size}')
for i in range(30):
time.sleep(20)
                        # query every 20 seconds to keep the connection alive
conn.enterprise.Counters.find_one({'seq': 736564644})
else:
break
has_query_count += 1
docs = find_documents(conn, db, collection, query, projections, "_id", pymongo.ASCENDING, limits)
fl = "_query_db_counts"
log_msg = f"****** Has queried {collection} with {query}: {has_query_count}*{limits}={has_query_count * limits} documents. *******"
data_fix_logger(log_msg, fl_name=fl, mode="w")
try:
            # The query result is empty; exit immediately
if not docs:
data_fix_logger(f"Empty doc, exit! Last _id is: {current_id}.")
return
            # All docs need to be processed together
if need_stream_process:
current_id = _id = docs[-1].get("_id")
query["_id"] = {"$gt": current_id}
                # If the process is killed before this round finishes, this prevents losing data on the next run
data_fix_logger(str(docs[0].get("_id")), fl_name=f'_has_read_{collection}_ids', mode='w', need_time=False)
core_logic(conn, docs)
                # Exit condition
if end_id:
real_end_id = None
if isinstance(end_id, str):
real_end_id = bson.ObjectId(end_id)
elif isinstance(end_id, bson.ObjectId):
real_end_id = end_id
if current_id > real_end_id:
data_fix_logger(f"Get end point, and mission is over! Last _id is: {current_id}.")
sys.exit()
continue
for doc in docs:
has_read_id_count += 1
current_id = _id = doc.get("_id")
query["_id"] = {"$gt": current_id}
data_fix_logger(str(current_id), fl_name=f'_has_read_{collection}_ids', mode='w', need_time=False)
                # Exit condition
if end_id:
real_end_id = None
if isinstance(end_id, str):
real_end_id = bson.ObjectId(end_id)
elif isinstance(end_id, bson.ObjectId):
real_end_id = end_id
if current_id > real_end_id:
data_fix_logger(f"Get end point, and mission is over! Last _id is: {current_id}.")
sys.exit()
                # Core processing logic
core_logic(conn, doc)
except Exception as e:
query["_id"] = {"$gt": current_id} # 新的query
data_fix_logger(f'Get error, exception msg is {str(e) + ",trace:" + traceback.format_exc()}, current _id is: {current_id}.')
exception_count += 1
data_fix_logger(f"Catch exception 20 times, mission is over. Last _id is: {current_id}.") | zy-tools | /zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/zy_tools/data_fix.py | data_fix.py |
import random
from math import sin, cos, pi, log
from tkinter import *
import ctypes
user32 = ctypes.windll.user32
CANVAS_WIDTH = user32.GetSystemMetrics(0)  # canvas width
CANVAS_HEIGHT = user32.GetSystemMetrics(1)  # canvas height
CANVAS_CENTER_X = CANVAS_WIDTH / 2  # X coordinate of the canvas center
CANVAS_CENTER_Y = CANVAS_HEIGHT / 2  # Y coordinate of the canvas center
IMAGE_ENLARGE = 11  # enlargement ratio
HEART_COLOR = "#ff2121"  # heart color; this is Chinese red
def heart_function(t, shrink_ratio: float = IMAGE_ENLARGE):
"""
“爱心函数生成器”
:param shrink_ratio: 放大比例
:param t: 参数
:return: 坐标
"""
# 基础函数
x = 16 * (sin(t) ** 3)
y = -(13 * cos(t) - 5 * cos(2 * t) - 2 * cos(3 * t) - cos(4 * t))
    # enlarge
x *= shrink_ratio
y *= shrink_ratio
    # move to the center of the canvas
x += CANVAS_CENTER_X
y += CANVAS_CENTER_Y
return int(x), int(y)
def scatter_inside(x, y, beta=0.15):
"""
随机内部扩散
:param x: 原x
:param y: 原y
:param beta: 强度
:return: 新坐标
"""
ratio_x = - beta * log(random.random())
ratio_y = - beta * log(random.random())
dx = ratio_x * (x - CANVAS_CENTER_X)
dy = ratio_y * (y - CANVAS_CENTER_Y)
return x - dx, y - dy
def shrink(x, y, ratio):
"""
抖动
:param x: 原x
:param y: 原y
:param ratio: 比例
:return: 新坐标
"""
force = -1 / (((x - CANVAS_CENTER_X) ** 2 + (y - CANVAS_CENTER_Y) ** 2) ** 0.6) # 这个参数...
dx = ratio * force * (x - CANVAS_CENTER_X)
dy = ratio * force * (y - CANVAS_CENTER_Y)
return x - dx, y - dy
def curve(p):
"""
自定义曲线函数,调整跳动周期
:param p: 参数
:return: 正弦
"""
# 可以尝试换其他的动态函数,达到更有力量的效果(贝塞尔?)
return 2 * (2 * sin(4 * p)) / (2 * pi)
class Heart:
"""
爱心类
"""
def __init__(self, generate_frame=20):
        self._points = set()  # original heart coordinates
        self._edge_diffusion_points = set()  # edge diffusion points
        self._center_diffusion_points = set()  # center diffusion points
        self.all_points = {}  # dynamic points for every frame
self.build(2000)
self.random_halo = 1000
self.generate_frame = generate_frame
for frame in range(generate_frame):
self.calc(frame)
def build(self, number):
        # heart outline
for _ in range(number):
            t = random.uniform(0, 2 * pi)  # spots that are never sampled leave small gaps in the heart
x, y = heart_function(t)
self._points.add((x, y))
        # diffusion inside the heart
for _x, _y in list(self._points):
for _ in range(3):
x, y = scatter_inside(_x, _y, 0.05)
self._edge_diffusion_points.add((x, y))
        # a second round of diffusion inside the heart
point_list = list(self._points)
for _ in range(4000):
x, y = random.choice(point_list)
x, y = scatter_inside(x, y, 0.17)
self._center_diffusion_points.add((x, y))
@staticmethod
def calc_position(x, y, ratio):
        # adjust the scaling ratio
        force = 1 / (((x - CANVAS_CENTER_X) ** 2 + (y - CANVAS_CENTER_Y) ** 2) ** 0.520)  # magic parameter
dx = ratio * force * (x - CANVAS_CENTER_X) + random.randint(-1, 1)
dy = ratio * force * (y - CANVAS_CENTER_Y) + random.randint(-1, 1)
return x - dx, y - dy
def calc(self, generate_frame):
        ratio = 10 * curve(generate_frame / 10 * pi)  # smooth periodic scaling ratio
halo_radius = int(4 + 6 * (1 + curve(generate_frame / 10 * pi)))
halo_number = int(3000 + 4000 * abs(curve(generate_frame / 10 * pi) ** 2))
all_points = []
        # halo
        heart_halo_point = set()  # coordinates of the halo points
for _ in range(halo_number):
            t = random.uniform(0, 2 * pi)  # spots that are never sampled leave small gaps in the heart
            x, y = heart_function(t, shrink_ratio=11.6)  # magic parameter
x, y = shrink(x, y, halo_radius)
if (x, y) not in heart_halo_point:
                # handle the new point
heart_halo_point.add((x, y))
x += random.randint(-14, 14)
y += random.randint(-14, 14)
size = random.choice((1, 2, 2))
all_points.append((x, y, size))
        # outline
for x, y in self._points:
x, y = self.calc_position(x, y, ratio)
size = random.randint(1, 3)
all_points.append((x, y, size))
        # interior
for x, y in self._edge_diffusion_points:
x, y = self.calc_position(x, y, ratio)
size = random.randint(1, 2)
all_points.append((x, y, size))
for x, y in self._center_diffusion_points:
x, y = self.calc_position(x, y, ratio)
size = random.randint(1, 2)
all_points.append((x, y, size))
self.all_points[generate_frame] = all_points
def render(self, render_canvas, render_frame):
for x, y, size in self.all_points[render_frame % self.generate_frame]:
render_canvas.create_rectangle(x, y, x + size, y + size, width=0, fill=HEART_COLOR)
def draw(main: Tk, render_canvas: Canvas, render_heart: Heart, render_frame=0):
render_canvas.delete('all')
render_heart.render(render_canvas, render_frame)
main.after(160, draw, main, render_canvas, render_heart, render_frame + 1)
def run():
    root = Tk()  # a Tk window
    root.attributes('-fullscreen', True)  # full screen
    root.attributes('-alpha', 0.9)  # transparency
    canvas = Canvas(root, bg='black', height=CANVAS_HEIGHT, width=CANVAS_WIDTH)
    canvas.pack()
    heart = Heart()  # the heart
    draw(root, canvas, heart)  # start drawing~
root.mainloop() | zyc-love | /zyc_love-1.0.1.tar.gz/zyc_love-1.0.1/love_code/love.py | love.py |
=======
History
=======
1.0.0 (2021-09-16)
------------------
* Disabled creation of SKiDL logging and ERC files.
* Decided this tool was mature to the point it could be called 1.0.0.
0.4.0 (2020-09-20)
------------------
* Fixed infinite recursion caused by search that returns an entire list of invalid footprints (issue #2).
0.3.0 (2020-06-07)
------------------
* Fixed copy-paste part/footprint errors caused by clipboard already being open.
0.2.0 (2020-04-28)
------------------
* Replaced sorting function deleted from SKiDL.
* Updated some Grid definitions for wxpython 4.0.
0.1.0 (2019-12-17)
------------------
* Extracted zyc utility from SKiDL repo and released separately on PyPi.
| zyc | /zyc-1.0.0.tar.gz/zyc-1.0.0/HISTORY.rst | HISTORY.rst |
===
zyc
===
.. image:: https://img.shields.io/pypi/v/zyc.svg
:target: https://pypi.python.org/pypi/zyc
A GUI utility for searching and selecting parts and footprints for use with `SKiDL <https://pypi.org/project/skidl/>`_.
* Free software: MIT license
* Documentation: https://devbisme.github.io/zyc .
Features
--------
* Keyword search and selection of parts in KiCad libraries.
* Keyword search and selection of footprints in KiCad libraries.
* Copy-and-paste part instances into SKiDL code.
Credits
-------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
| zyc | /zyc-1.0.0.tar.gz/zyc-1.0.0/README.rst | README.rst |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/devbisme/zyc/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
zyc could always use more documentation, whether as part of the
official zyc docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/devbisme/zyc/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zyc` for local development.
1. Fork the `zyc` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zyc.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zyc
$ cd zyc
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
6. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
2. The pull request should work for Python 2.7 and >=3.5.
| zyc | /zyc-1.0.0.tar.gz/zyc-1.0.0/CONTRIBUTING.rst | CONTRIBUTING.rst |
Usage
============
Just enter `zyc` at the command line to bring up the GUI.

`zyc` has two cooperating panes: a part is searched for and selected in the top pane, and then the
same is done for a footprint in the bottom pane. Then you can paste a SKiDL `Part(...)` instantiation
with the part and footprint into a code editing window.
To begin, type some search terms describing the part you want
into the upper text-entry box (1) and hit `RETURN` or click the `Search` button (2).

`zyc` will scan through all the available parts looking for your search terms in the library names and
the part names, aliases, descriptions, and keywords. (This initial search takes 20-30 seconds, but
any further searches are quick because the part data gets cached.)
The search terms can contain one or more text strings and/or regular expressions (REs).
Any matching parts are displayed in the Library/Part table (1).
Clicking on a row in the table (2) displays the part's description and keywords (3), datasheet hyperlink (4), and
I/O pin table (5) on the right-side of the pane.

Selecting a part also loads the footprint search text-entry box (6) with an initial set of search terms formed from
the part's list of recommended footprints plus the number of part pins.
You can edit these search terms and add more to modify the footprint search.
(Which is handy because, in this case, the number of pins is wrong since the pin list only contains
six entries but the actual number of pins is eight. So I'll just remove it.)
Then press the `Search` button (7) to scan through all the footprints for matching terms.
(As with parts, the initial footprint search takes 20-30 seconds but further searches are fast
because of caching.)
The footprints that match the search terms appear in the Library/Footprint table (1).
In this case, there is only one.
Selecting it makes the footprint description appear (2) as well as a drawing of the footprint (3).
(If a hyperlink for the package datasheet was available, that would show up as well.)

Once a part and footprint are selected, you can click on the `Copy` button in the upper panel (1)
to place a `Part(...)` instantiation on the clipboard.

Then you can go anywhere (such as your code editor) and paste the clipboard contents to get the part
instantiation:
```python
uc = Part(lib='MCU_Microchip_PIC10.lib', name='PIC10F202-IMC',
footprint='Package_DFN_QFN:DFN-8-1EP_2x3mm_P0.5mm_EP0.61x2.2mm')
```
If you check the `Tmplt` box (2) and then click the `Copy` button, a part template (instead of an instance)
gets placed on the clipboard and appears as follows:
```python
uc = Part(lib='MCU_Microchip_PIC10.lib', name='PIC10F202-IMC', dest=TEMPLATE,
footprint='Package_DFN_QFN:DFN-8-1EP_2x3mm_P0.5mm_EP0.61x2.2mm')
```
To make it easier to connect the pins, `zyc` lets you select the names from the pin list column (1).
Right-clicking on the table copies whatever is selected to the clipboard.

Then the pin names can be pasted into the code editor:
```python
uc = Part(lib='MCU_Microchip_PIC10.lib', name='PIC10F202-IMC',
footprint='Package_DFN_QFN:DFN-8-1EP_2x3mm_P0.5mm_EP0.61x2.2mm')
"VDD",
"GP2",
"GP1",
"GP0",
"VSS",
"GP3"
```
Now you can use the strings to connect the microcontroller pins to various nets:
```python
uc = Part(lib='MCU_Microchip_PIC10.lib', name='PIC10F202-IMC',
footprint='Package_DFN_QFN:DFN-8-1EP_2x3mm_P0.5mm_EP0.61x2.2mm')
uc["VDD", "VSS"] += Net('VCC'), Net('GND')
uc["GP0", "GP1", "GP2", "GP3"] += Bus('GP', 4)
```
`zyc` lets you double-click on any table column header to re-sort the table
based on the contents of that column.
This can be useful in grouping pins by their names, functions, or part units
before selecting and pasting them.
Using an FPGA as an example, double-clicking the `Unit` column (1)
orders the table so you can select all the I/O pins in one of its banks (2).

Then the pins can be pasted:
```python
"IO_0_14",
"IO_0_15",
"IO_25_14",
"IO_25_15",
"IO_L1N_T0_AD0N_15",
"IO_L1N_T0_D01_DIN_14",
"IO_L1P_T0_AD0P_15",
"IO_L1P_T0_D00_MOSI_14",
"IO_L2N_T0_AD8N_15",
"IO_L2N_T0_D03_14",
"IO_L2P_T0_AD8P_15",
"IO_L2P_T0_D02_14",
"IO_L3N_T0_DQS_AD1N_15",
"IO_L3N_T0_DQS_EMCCLK_14",
"IO_L3P_T0_DQS_AD1P_15",
"IO_L3P_T0_DQS_PUDC_B_14",
"IO_L4N_T0_15",
"IO_L4N_T0_D05_14",
"IO_L4P_T0_15",
"IO_L4P_T0_D04_14",
"IO_L5N_T0_AD9N_15",
"IO_L5N_T0_D07_14",
"IO_L5P_T0_AD9P_15",
"IO_L5P_T0_D06_14",
"IO_L6N_T0_D08_VREF_14",
"IO_L6N_T0_VREF_15",
"IO_L6P_T0_15",
"IO_L6P_T0_FCS_B_14",
"IO_L7N_T1_AD2N_15",
"IO_L7N_T1_D10_14",
"IO_L7P_T1_AD2P_15",
"IO_L7P_T1_D09_14",
"IO_L8N_T1_AD10N_15",
"IO_L8N_T1_D12_14",
"IO_L8P_T1_AD10P_15",
"IO_L8P_T1_D11_14",
"IO_L9N_T1_DQS_AD3N_15",
"IO_L9N_T1_DQS_D13_14",
"IO_L9P_T1_DQS_14",
"IO_L9P_T1_DQS_AD3P_15",
"IO_L10N_T1_AD11N_15",
"IO_L10N_T1_D15_14",
"IO_L10P_T1_AD11P_15",
"IO_L10P_T1_D14_14",
"IO_L11N_T1_SRCC_14",
"IO_L11N_T1_SRCC_15",
"IO_L11P_T1_SRCC_14",
"IO_L11P_T1_SRCC_15",
"IO_L12N_T1_MRCC_14",
"IO_L12N_T1_MRCC_15",
"IO_L12P_T1_MRCC_14",
"IO_L12P_T1_MRCC_15",
"IO_L13N_T2_MRCC_14",
"IO_L13N_T2_MRCC_15",
"IO_L13P_T2_MRCC_14",
"IO_L13P_T2_MRCC_15",
"IO_L14N_T2_SRCC_14",
"IO_L14N_T2_SRCC_15",
"IO_L14P_T2_SRCC_14",
"IO_L14P_T2_SRCC_15",
"IO_L15N_T2_DQS_ADV_B_15",
"IO_L15N_T2_DQS_DOUT_CSO_B_14",
"IO_L15P_T2_DQS_15",
"IO_L15P_T2_DQS_RDWR_B_14",
"IO_L16N_T2_A15_D31_14",
"IO_L16N_T2_A27_15",
"IO_L16P_T2_A28_15",
"IO_L16P_T2_CSI_B_14",
"IO_L17N_T2_A13_D29_14",
"IO_L17N_T2_A25_15",
"IO_L17P_T2_A14_D30_14",
"IO_L17P_T2_A26_15",
"IO_L18N_T2_A11_D27_14",
"IO_L18N_T2_A23_15",
"IO_L18P_T2_A12_D28_14",
"IO_L18P_T2_A24_15",
"IO_L19N_T3_A09_D25_VREF_14",
"IO_L19N_T3_A21_VREF_15",
"IO_L19P_T3_A10_D26_14",
"IO_L19P_T3_A22_15",
"IO_L20N_T3_A07_D23_14",
"IO_L20N_T3_A19_15",
"IO_L20P_T3_A08_D24_14",
"IO_L20P_T3_A20_15",
"IO_L21N_T3_DQS_A06_D22_14",
"IO_L21N_T3_DQS_A18_15",
"IO_L21P_T3_DQS_14",
"IO_L21P_T3_DQS_15",
"IO_L22N_T3_A04_D20_14",
"IO_L22N_T3_A16_15",
"IO_L22P_T3_A05_D21_14",
"IO_L22P_T3_A17_15",
"IO_L23N_T3_A02_D18_14",
"IO_L23N_T3_FWE_B_15",
"IO_L23P_T3_A03_D19_14",
"IO_L23P_T3_FOE_B_15",
"IO_L24N_T3_A00_D16_14",
"IO_L24N_T3_RS0_15",
"IO_L24P_T3_A01_D17_14",
"IO_L24P_T3_RS1_15"
```
This is definitely something that would be difficult to type manually without making a mistake!
`zyc` requires minimal setup.
By default, it will use the `KICAD_SYMBOL_DIR` environment variable to look for part libraries,
and it will look for the global footprint library table (`fp_lib_table`) in the default location
where KiCad installs it on various OSes.
You can also add or change the directories that are searched for part libraries (1) or for
`fp_lib_table` files (2) using the menu items below:

It may happen that you change some part libraries or add more footprint files while `zyc` is
running. If so, you'll want to refresh the part and footprint caches (3).
| zyc | /zyc-1.0.0.tar.gz/zyc-1.0.0/docs/usage.md | usage.md |
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install zyc, run this command in your terminal:
.. code-block:: console
$ pip install zyc
This is the preferred method to install zyc, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for zyc can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/devbisme/zyc
Or download the `tarball`_:
.. code-block:: console
$ curl -OL https://github.com/devbisme/zyc/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/devbisme/zyc
.. _tarball: https://github.com/devbisme/zyc/tarball/master
| zyc | /zyc-1.0.0.tar.gz/zyc-1.0.0/docs/installation.rst | installation.rst |
* select a different prefix for underscore
*/
$u = _.noConflict();
/**
* make the code below compatible with browsers without
* an installed firebug like debugger
if (!window.console || !console.firebug) {
var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
"dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
"profile", "profileEnd"];
window.console = {};
for (var i = 0; i < names.length; ++i)
window.console[names[i]] = function() {};
}
*/
/**
* small helper function to urldecode strings
*
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
*/
jQuery.urldecode = function(x) {
if (!x) {
return x
}
return decodeURIComponent(x.replace(/\+/g, ' '));
};
/**
* small helper function to urlencode strings
*/
jQuery.urlencode = encodeURIComponent;
/**
* This function returns the parsed url parameters of the
* current request. Multiple values per key are supported,
* it will always return arrays of strings for the value parts.
*/
jQuery.getQueryParameters = function(s) {
if (typeof s === 'undefined')
s = document.location.search;
var parts = s.substr(s.indexOf('?') + 1).split('&');
var result = {};
for (var i = 0; i < parts.length; i++) {
var tmp = parts[i].split('=', 2);
var key = jQuery.urldecode(tmp[0]);
var value = jQuery.urldecode(tmp[1]);
if (key in result)
result[key].push(value);
else
result[key] = [value];
}
return result;
};
/**
* highlight a given string on a jquery object by wrapping it in
* span elements with the given class name.
*/
jQuery.fn.highlightText = function(text, className) {
function highlight(node, addItems) {
if (node.nodeType === 3) {
var val = node.nodeValue;
var pos = val.toLowerCase().indexOf(text);
if (pos >= 0 &&
!jQuery(node.parentNode).hasClass(className) &&
!jQuery(node.parentNode).hasClass("nohighlight")) {
var span;
var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
if (isInSVG) {
span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
} else {
span = document.createElement("span");
span.className = className;
}
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
node.parentNode.insertBefore(span, node.parentNode.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling));
node.nodeValue = val.substr(0, pos);
if (isInSVG) {
var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
var bbox = node.parentElement.getBBox();
rect.x.baseVal.value = bbox.x;
rect.y.baseVal.value = bbox.y;
rect.width.baseVal.value = bbox.width;
rect.height.baseVal.value = bbox.height;
rect.setAttribute('class', className);
addItems.push({
"parent": node.parentNode,
"target": rect});
}
}
}
else if (!jQuery(node).is("button, select, textarea")) {
jQuery.each(node.childNodes, function() {
highlight(this, addItems);
});
}
}
var addItems = [];
var result = this.each(function() {
highlight(this, addItems);
});
for (var i = 0; i < addItems.length; ++i) {
jQuery(addItems[i].parent).before(addItems[i].target);
}
return result;
};
/*
* backward compatibility for jQuery.browser
* This will be supported until firefox bug is fixed.
*/
if (!jQuery.browser) {
jQuery.uaMatch = function(ua) {
ua = ua.toLowerCase();
var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
/(msie) ([\w.]+)/.exec(ua) ||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
[];
return {
browser: match[ 1 ] || "",
version: match[ 2 ] || "0"
};
};
jQuery.browser = {};
jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
}
/**
* Small JavaScript module for the documentation.
*/
var Documentation = {
init : function() {
this.fixFirefoxAnchorBug();
this.highlightSearchWords();
this.initIndexTable();
if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) {
this.initOnKeyListeners();
}
},
/**
* i18n support
*/
TRANSLATIONS : {},
PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
LOCALE : 'unknown',
// gettext and ngettext don't access this so that the functions
// can safely bound to a different name (_ = Documentation.gettext)
gettext : function(string) {
var translated = Documentation.TRANSLATIONS[string];
if (typeof translated === 'undefined')
return string;
return (typeof translated === 'string') ? translated : translated[0];
},
ngettext : function(singular, plural, n) {
var translated = Documentation.TRANSLATIONS[singular];
if (typeof translated === 'undefined')
return (n == 1) ? singular : plural;
return translated[Documentation.PLURALEXPR(n)];
},
addTranslations : function(catalog) {
for (var key in catalog.messages)
this.TRANSLATIONS[key] = catalog.messages[key];
this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
this.LOCALE = catalog.locale;
},
/**
* add context elements like header anchor links
*/
addContextElements : function() {
$('div[id] > :header:first').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this headline')).
appendTo(this);
});
$('dt[id]').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this definition')).
appendTo(this);
});
},
/**
* workaround a firefox stupidity
* see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
*/
fixFirefoxAnchorBug : function() {
if (document.location.hash && $.browser.mozilla)
window.setTimeout(function() {
document.location.href += '';
}, 10);
},
/**
* highlight the search words provided in the url in the text
*/
highlightSearchWords : function() {
var params = $.getQueryParameters();
var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
if (terms.length) {
var body = $('div.body');
if (!body.length) {
body = $('body');
}
window.setTimeout(function() {
$.each(terms, function() {
body.highlightText(this.toLowerCase(), 'highlighted');
});
}, 10);
$('<p class="highlight-link"><a href="javascript:Documentation.' +
'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
.appendTo($('#searchbox'));
}
},
/**
* init the domain index toggle buttons
*/
initIndexTable : function() {
var togglers = $('img.toggler').click(function() {
var src = $(this).attr('src');
var idnum = $(this).attr('id').substr(7);
$('tr.cg-' + idnum).toggle();
if (src.substr(-9) === 'minus.png')
$(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
else
$(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
}).css('display', '');
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
togglers.click();
}
},
/**
* helper function to hide the search marks again
*/
hideSearchWords : function() {
$('#searchbox .highlight-link').fadeOut(300);
$('span.highlighted').removeClass('highlighted');
},
/**
* make the url absolute
*/
makeURL : function(relativeURL) {
return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
},
/**
* get the current relative url
*/
getCurrentURL : function() {
var path = document.location.pathname;
var parts = path.split(/\//);
$.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
if (this === '..')
parts.pop();
});
var url = parts.join('/');
return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
},
initOnKeyListeners: function() {
$(document).keydown(function(event) {
var activeElementType = document.activeElement.tagName;
// don't navigate when in search box, textarea, dropdown or button
if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT'
&& activeElementType !== 'BUTTON' && !event.altKey && !event.ctrlKey && !event.metaKey
&& !event.shiftKey) {
switch (event.keyCode) {
case 37: // left
var prevHref = $('link[rel="prev"]').prop('href');
if (prevHref) {
window.location.href = prevHref;
return false;
}
break;
case 39: // right
var nextHref = $('link[rel="next"]').prop('href');
if (nextHref) {
window.location.href = nextHref;
return false;
}
break;
}
}
});
}
};
// quick alias for translations
_ = Documentation.gettext;
$(document).ready(function() {
Documentation.init();
}); | zyc | /zyc-1.0.0.tar.gz/zyc-1.0.0/docs/_build/singlehtml/_static/doctools.js | doctools.js |
var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"];
/* Non-minified version is copied as a separate JS file, is available */
/**
* Porter Stemmer
*/
var Stemmer = function() {
var step2list = {
ational: 'ate',
tional: 'tion',
enci: 'ence',
anci: 'ance',
izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
};
var step3list = {
icate: 'ic',
ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
}
}
var splitChars = (function() {
var result = {};
var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648,
1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702,
2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971,
2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345,
3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761,
3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823,
4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125,
8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695,
11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587,
43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141];
var i, j, start, end;
for (i = 0; i < singles.length; i++) {
result[singles[i]] = true;
}
var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709],
[722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161],
[1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568],
[1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807],
[1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047],
[2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383],
[2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450],
[2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547],
[2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673],
[2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820],
[2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946],
[2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023],
[3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173],
[3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332],
[3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481],
[3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718],
[3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791],
[3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095],
[4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205],
[4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687],
[4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968],
[4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869],
[5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102],
[6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271],
[6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592],
[6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822],
[6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167],
[7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959],
[7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143],
[8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318],
[8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483],
[8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101],
[10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567],
[11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292],
[12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444],
[12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783],
[12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311],
[19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511],
[42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774],
[42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071],
[43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263],
[43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519],
[43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647],
[43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967],
[44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295],
[57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274],
[64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007],
[65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381],
[65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]];
for (i = 0; i < ranges.length; i++) {
start = ranges[i][0];
end = ranges[i][1];
for (j = start; j <= end; j++) {
result[j] = true;
}
}
return result;
})();
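// Illustrative sketch (editor's addition): splitQuery below slices the query string on
// any character whose code is flagged in splitChars, so punctuation and whitespace both
// act as separators.
//   splitQuery("sphinx-doc search");   // -> ["sphinx", "doc", "search"]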
function splitQuery(query) {
var result = [];
var start = -1;
for (var i = 0; i < query.length; i++) {
if (splitChars[query.charCodeAt(i)]) {
if (start !== -1) {
result.push(query.slice(start, i));
start = -1;
}
} else if (start === -1) {
start = i;
}
}
if (start !== -1) {
result.push(query.slice(start));
}
return result;
} | zyc | /zyc-1.0.0.tar.gz/zyc-1.0.0/docs/_build/singlehtml/_static/language_data.js | language_data.js |
!function(n,r){"object"==typeof exports&&"undefined"!=typeof module?module.exports=r():"function"==typeof define&&define.amd?define("underscore",r):(n="undefined"!=typeof globalThis?globalThis:n||self,function(){var t=n._,e=n._=r();e.noConflict=function(){return n._=t,e}}())}(this,(function(){
// Underscore.js 1.13.1
// https://underscorejs.org
// (c) 2009-2021 Jeremy Ashkenas, Julian Gonggrijp, and DocumentCloud and Investigative Reporters & Editors
// Underscore may be freely distributed under the MIT license.
var n="1.13.1",r="object"==typeof self&&self.self===self&&self||"object"==typeof global&&global.global===global&&global||Function("return this")()||{},t=Array.prototype,e=Object.prototype,u="undefined"!=typeof Symbol?Symbol.prototype:null,o=t.push,i=t.slice,a=e.toString,f=e.hasOwnProperty,c="undefined"!=typeof ArrayBuffer,l="undefined"!=typeof DataView,s=Array.isArray,p=Object.keys,v=Object.create,h=c&&ArrayBuffer.isView,y=isNaN,d=isFinite,g=!{toString:null}.propertyIsEnumerable("toString"),b=["valueOf","isPrototypeOf","toString","propertyIsEnumerable","hasOwnProperty","toLocaleString"],m=Math.pow(2,53)-1;function j(n,r){return r=null==r?n.length-1:+r,function(){for(var t=Math.max(arguments.length-r,0),e=Array(t),u=0;u<t;u++)e[u]=arguments[u+r];switch(r){case 0:return n.call(this,e);case 1:return n.call(this,arguments[0],e);case 2:return n.call(this,arguments[0],arguments[1],e)}var o=Array(r+1);for(u=0;u<r;u++)o[u]=arguments[u];return o[r]=e,n.apply(this,o)}}function _(n){var r=typeof n;return"function"===r||"object"===r&&!!n}function w(n){return void 0===n}function A(n){return!0===n||!1===n||"[object Boolean]"===a.call(n)}function x(n){var r="[object "+n+"]";return function(n){return a.call(n)===r}}var S=x("String"),O=x("Number"),M=x("Date"),E=x("RegExp"),B=x("Error"),N=x("Symbol"),I=x("ArrayBuffer"),T=x("Function"),k=r.document&&r.document.childNodes;"function"!=typeof/./&&"object"!=typeof Int8Array&&"function"!=typeof k&&(T=function(n){return"function"==typeof n||!1});var D=T,R=x("Object"),F=l&&R(new DataView(new ArrayBuffer(8))),V="undefined"!=typeof Map&&R(new Map),P=x("DataView");var q=F?function(n){return null!=n&&D(n.getInt8)&&I(n.buffer)}:P,U=s||x("Array");function W(n,r){return null!=n&&f.call(n,r)}var z=x("Arguments");!function(){z(arguments)||(z=function(n){return W(n,"callee")})}();var L=z;function $(n){return O(n)&&y(n)}function C(n){return function(){return n}}function K(n){return function(r){var t=n(r);return"number"==typeof t&&t>=0&&t<=m}}function J(n){return function(r){return null==r?void 0:r[n]}}var G=J("byteLength"),H=K(G),Q=/\[object ((I|Ui)nt(8|16|32)|Float(32|64)|Uint8Clamped|Big(I|Ui)nt64)Array\]/;var X=c?function(n){return h?h(n)&&!q(n):H(n)&&Q.test(a.call(n))}:C(!1),Y=J("length");function Z(n,r){r=function(n){for(var r={},t=n.length,e=0;e<t;++e)r[n[e]]=!0;return{contains:function(n){return r[n]},push:function(t){return r[t]=!0,n.push(t)}}}(r);var t=b.length,u=n.constructor,o=D(u)&&u.prototype||e,i="constructor";for(W(n,i)&&!r.contains(i)&&r.push(i);t--;)(i=b[t])in n&&n[i]!==o[i]&&!r.contains(i)&&r.push(i)}function nn(n){if(!_(n))return[];if(p)return p(n);var r=[];for(var t in n)W(n,t)&&r.push(t);return g&&Z(n,r),r}function rn(n,r){var t=nn(r),e=t.length;if(null==n)return!e;for(var u=Object(n),o=0;o<e;o++){var i=t[o];if(r[i]!==u[i]||!(i in u))return!1}return!0}function tn(n){return n instanceof tn?n:this instanceof tn?void(this._wrapped=n):new tn(n)}function en(n){return new Uint8Array(n.buffer||n,n.byteOffset||0,G(n))}tn.VERSION=n,tn.prototype.value=function(){return this._wrapped},tn.prototype.valueOf=tn.prototype.toJSON=tn.prototype.value,tn.prototype.toString=function(){return String(this._wrapped)};var un="[object DataView]";function on(n,r,t,e){if(n===r)return 0!==n||1/n==1/r;if(null==n||null==r)return!1;if(n!=n)return r!=r;var o=typeof n;return("function"===o||"object"===o||"object"==typeof r)&&function n(r,t,e,o){r instanceof tn&&(r=r._wrapped);t instanceof tn&&(t=t._wrapped);var i=a.call(r);if(i!==a.call(t))return!1;if(F&&"[object 
Object]"==i&&q(r)){if(!q(t))return!1;i=un}switch(i){case"[object RegExp]":case"[object String]":return""+r==""+t;case"[object Number]":return+r!=+r?+t!=+t:0==+r?1/+r==1/t:+r==+t;case"[object Date]":case"[object Boolean]":return+r==+t;case"[object Symbol]":return u.valueOf.call(r)===u.valueOf.call(t);case"[object ArrayBuffer]":case un:return n(en(r),en(t),e,o)}var f="[object Array]"===i;if(!f&&X(r)){if(G(r)!==G(t))return!1;if(r.buffer===t.buffer&&r.byteOffset===t.byteOffset)return!0;f=!0}if(!f){if("object"!=typeof r||"object"!=typeof t)return!1;var c=r.constructor,l=t.constructor;if(c!==l&&!(D(c)&&c instanceof c&&D(l)&&l instanceof l)&&"constructor"in r&&"constructor"in t)return!1}o=o||[];var s=(e=e||[]).length;for(;s--;)if(e[s]===r)return o[s]===t;if(e.push(r),o.push(t),f){if((s=r.length)!==t.length)return!1;for(;s--;)if(!on(r[s],t[s],e,o))return!1}else{var p,v=nn(r);if(s=v.length,nn(t).length!==s)return!1;for(;s--;)if(p=v[s],!W(t,p)||!on(r[p],t[p],e,o))return!1}return e.pop(),o.pop(),!0}(n,r,t,e)}function an(n){if(!_(n))return[];var r=[];for(var t in n)r.push(t);return g&&Z(n,r),r}function fn(n){var r=Y(n);return function(t){if(null==t)return!1;var e=an(t);if(Y(e))return!1;for(var u=0;u<r;u++)if(!D(t[n[u]]))return!1;return n!==hn||!D(t[cn])}}var cn="forEach",ln="has",sn=["clear","delete"],pn=["get",ln,"set"],vn=sn.concat(cn,pn),hn=sn.concat(pn),yn=["add"].concat(sn,cn,ln),dn=V?fn(vn):x("Map"),gn=V?fn(hn):x("WeakMap"),bn=V?fn(yn):x("Set"),mn=x("WeakSet");function jn(n){for(var r=nn(n),t=r.length,e=Array(t),u=0;u<t;u++)e[u]=n[r[u]];return e}function _n(n){for(var r={},t=nn(n),e=0,u=t.length;e<u;e++)r[n[t[e]]]=t[e];return r}function wn(n){var r=[];for(var t in n)D(n[t])&&r.push(t);return r.sort()}function An(n,r){return function(t){var e=arguments.length;if(r&&(t=Object(t)),e<2||null==t)return t;for(var u=1;u<e;u++)for(var o=arguments[u],i=n(o),a=i.length,f=0;f<a;f++){var c=i[f];r&&void 0!==t[c]||(t[c]=o[c])}return t}}var xn=An(an),Sn=An(nn),On=An(an,!0);function Mn(n){if(!_(n))return{};if(v)return v(n);var r=function(){};r.prototype=n;var t=new r;return r.prototype=null,t}function En(n){return _(n)?U(n)?n.slice():xn({},n):n}function Bn(n){return U(n)?n:[n]}function Nn(n){return tn.toPath(n)}function In(n,r){for(var t=r.length,e=0;e<t;e++){if(null==n)return;n=n[r[e]]}return t?n:void 0}function Tn(n,r,t){var e=In(n,Nn(r));return w(e)?t:e}function kn(n){return n}function Dn(n){return n=Sn({},n),function(r){return rn(r,n)}}function Rn(n){return n=Nn(n),function(r){return In(r,n)}}function Fn(n,r,t){if(void 0===r)return n;switch(null==t?3:t){case 1:return function(t){return n.call(r,t)};case 3:return function(t,e,u){return n.call(r,t,e,u)};case 4:return function(t,e,u,o){return n.call(r,t,e,u,o)}}return function(){return n.apply(r,arguments)}}function Vn(n,r,t){return null==n?kn:D(n)?Fn(n,r,t):_(n)&&!U(n)?Dn(n):Rn(n)}function Pn(n,r){return Vn(n,r,1/0)}function qn(n,r,t){return tn.iteratee!==Pn?tn.iteratee(n,r):Vn(n,r,t)}function Un(){}function Wn(n,r){return null==r&&(r=n,n=0),n+Math.floor(Math.random()*(r-n+1))}tn.toPath=Bn,tn.iteratee=Pn;var zn=Date.now||function(){return(new Date).getTime()};function Ln(n){var r=function(r){return n[r]},t="(?:"+nn(n).join("|")+")",e=RegExp(t),u=RegExp(t,"g");return function(n){return n=null==n?"":""+n,e.test(n)?n.replace(u,r):n}}var 
$n={"&":"&","<":"<",">":">",'"':""","'":"'","`":"`"},Cn=Ln($n),Kn=Ln(_n($n)),Jn=tn.templateSettings={evaluate:/<%([\s\S]+?)%>/g,interpolate:/<%=([\s\S]+?)%>/g,escape:/<%-([\s\S]+?)%>/g},Gn=/(.)^/,Hn={"'":"'","\\":"\\","\r":"r","\n":"n","\u2028":"u2028","\u2029":"u2029"},Qn=/\\|'|\r|\n|\u2028|\u2029/g;function Xn(n){return"\\"+Hn[n]}var Yn=/^\s*(\w|\$)+\s*$/;var Zn=0;function nr(n,r,t,e,u){if(!(e instanceof r))return n.apply(t,u);var o=Mn(n.prototype),i=n.apply(o,u);return _(i)?i:o}var rr=j((function(n,r){var t=rr.placeholder,e=function(){for(var u=0,o=r.length,i=Array(o),a=0;a<o;a++)i[a]=r[a]===t?arguments[u++]:r[a];for(;u<arguments.length;)i.push(arguments[u++]);return nr(n,e,this,this,i)};return e}));rr.placeholder=tn;var tr=j((function(n,r,t){if(!D(n))throw new TypeError("Bind must be called on a function");var e=j((function(u){return nr(n,e,r,this,t.concat(u))}));return e})),er=K(Y);function ur(n,r,t,e){if(e=e||[],r||0===r){if(r<=0)return e.concat(n)}else r=1/0;for(var u=e.length,o=0,i=Y(n);o<i;o++){var a=n[o];if(er(a)&&(U(a)||L(a)))if(r>1)ur(a,r-1,t,e),u=e.length;else for(var f=0,c=a.length;f<c;)e[u++]=a[f++];else t||(e[u++]=a)}return e}var or=j((function(n,r){var t=(r=ur(r,!1,!1)).length;if(t<1)throw new Error("bindAll must be passed function names");for(;t--;){var e=r[t];n[e]=tr(n[e],n)}return n}));var ir=j((function(n,r,t){return setTimeout((function(){return n.apply(null,t)}),r)})),ar=rr(ir,tn,1);function fr(n){return function(){return!n.apply(this,arguments)}}function cr(n,r){var t;return function(){return--n>0&&(t=r.apply(this,arguments)),n<=1&&(r=null),t}}var lr=rr(cr,2);function sr(n,r,t){r=qn(r,t);for(var e,u=nn(n),o=0,i=u.length;o<i;o++)if(r(n[e=u[o]],e,n))return e}function pr(n){return function(r,t,e){t=qn(t,e);for(var u=Y(r),o=n>0?0:u-1;o>=0&&o<u;o+=n)if(t(r[o],o,r))return o;return-1}}var vr=pr(1),hr=pr(-1);function yr(n,r,t,e){for(var u=(t=qn(t,e,1))(r),o=0,i=Y(n);o<i;){var a=Math.floor((o+i)/2);t(n[a])<u?o=a+1:i=a}return o}function dr(n,r,t){return function(e,u,o){var a=0,f=Y(e);if("number"==typeof o)n>0?a=o>=0?o:Math.max(o+f,a):f=o>=0?Math.min(o+1,f):o+f+1;else if(t&&o&&f)return e[o=t(e,u)]===u?o:-1;if(u!=u)return(o=r(i.call(e,a,f),$))>=0?o+a:-1;for(o=n>0?a:f-1;o>=0&&o<f;o+=n)if(e[o]===u)return o;return-1}}var gr=dr(1,vr,yr),br=dr(-1,hr);function mr(n,r,t){var e=(er(n)?vr:sr)(n,r,t);if(void 0!==e&&-1!==e)return n[e]}function jr(n,r,t){var e,u;if(r=Fn(r,t),er(n))for(e=0,u=n.length;e<u;e++)r(n[e],e,n);else{var o=nn(n);for(e=0,u=o.length;e<u;e++)r(n[o[e]],o[e],n)}return n}function _r(n,r,t){r=qn(r,t);for(var e=!er(n)&&nn(n),u=(e||n).length,o=Array(u),i=0;i<u;i++){var a=e?e[i]:i;o[i]=r(n[a],a,n)}return o}function wr(n){var r=function(r,t,e,u){var o=!er(r)&&nn(r),i=(o||r).length,a=n>0?0:i-1;for(u||(e=r[o?o[a]:a],a+=n);a>=0&&a<i;a+=n){var f=o?o[a]:a;e=t(e,r[f],f,r)}return e};return function(n,t,e,u){var o=arguments.length>=3;return r(n,Fn(t,u,4),e,o)}}var Ar=wr(1),xr=wr(-1);function Sr(n,r,t){var e=[];return r=qn(r,t),jr(n,(function(n,t,u){r(n,t,u)&&e.push(n)})),e}function Or(n,r,t){r=qn(r,t);for(var e=!er(n)&&nn(n),u=(e||n).length,o=0;o<u;o++){var i=e?e[o]:o;if(!r(n[i],i,n))return!1}return!0}function Mr(n,r,t){r=qn(r,t);for(var e=!er(n)&&nn(n),u=(e||n).length,o=0;o<u;o++){var i=e?e[o]:o;if(r(n[i],i,n))return!0}return!1}function Er(n,r,t,e){return er(n)||(n=jn(n)),("number"!=typeof t||e)&&(t=0),gr(n,r,t)>=0}var Br=j((function(n,r,t){var e,u;return D(r)?u=r:(r=Nn(r),e=r.slice(0,-1),r=r[r.length-1]),_r(n,(function(n){var 
o=u;if(!o){if(e&&e.length&&(n=In(n,e)),null==n)return;o=n[r]}return null==o?o:o.apply(n,t)}))}));function Nr(n,r){return _r(n,Rn(r))}function Ir(n,r,t){var e,u,o=-1/0,i=-1/0;if(null==r||"number"==typeof r&&"object"!=typeof n[0]&&null!=n)for(var a=0,f=(n=er(n)?n:jn(n)).length;a<f;a++)null!=(e=n[a])&&e>o&&(o=e);else r=qn(r,t),jr(n,(function(n,t,e){((u=r(n,t,e))>i||u===-1/0&&o===-1/0)&&(o=n,i=u)}));return o}function Tr(n,r,t){if(null==r||t)return er(n)||(n=jn(n)),n[Wn(n.length-1)];var e=er(n)?En(n):jn(n),u=Y(e);r=Math.max(Math.min(r,u),0);for(var o=u-1,i=0;i<r;i++){var a=Wn(i,o),f=e[i];e[i]=e[a],e[a]=f}return e.slice(0,r)}function kr(n,r){return function(t,e,u){var o=r?[[],[]]:{};return e=qn(e,u),jr(t,(function(r,u){var i=e(r,u,t);n(o,r,i)})),o}}var Dr=kr((function(n,r,t){W(n,t)?n[t].push(r):n[t]=[r]})),Rr=kr((function(n,r,t){n[t]=r})),Fr=kr((function(n,r,t){W(n,t)?n[t]++:n[t]=1})),Vr=kr((function(n,r,t){n[t?0:1].push(r)}),!0),Pr=/[^\ud800-\udfff]|[\ud800-\udbff][\udc00-\udfff]|[\ud800-\udfff]/g;function qr(n,r,t){return r in t}var Ur=j((function(n,r){var t={},e=r[0];if(null==n)return t;D(e)?(r.length>1&&(e=Fn(e,r[1])),r=an(n)):(e=qr,r=ur(r,!1,!1),n=Object(n));for(var u=0,o=r.length;u<o;u++){var i=r[u],a=n[i];e(a,i,n)&&(t[i]=a)}return t})),Wr=j((function(n,r){var t,e=r[0];return D(e)?(e=fr(e),r.length>1&&(t=r[1])):(r=_r(ur(r,!1,!1),String),e=function(n,t){return!Er(r,t)}),Ur(n,e,t)}));function zr(n,r,t){return i.call(n,0,Math.max(0,n.length-(null==r||t?1:r)))}function Lr(n,r,t){return null==n||n.length<1?null==r||t?void 0:[]:null==r||t?n[0]:zr(n,n.length-r)}function $r(n,r,t){return i.call(n,null==r||t?1:r)}var Cr=j((function(n,r){return r=ur(r,!0,!0),Sr(n,(function(n){return!Er(r,n)}))})),Kr=j((function(n,r){return Cr(n,r)}));function Jr(n,r,t,e){A(r)||(e=t,t=r,r=!1),null!=t&&(t=qn(t,e));for(var u=[],o=[],i=0,a=Y(n);i<a;i++){var f=n[i],c=t?t(f,i,n):f;r&&!t?(i&&o===c||u.push(f),o=c):t?Er(o,c)||(o.push(c),u.push(f)):Er(u,f)||u.push(f)}return u}var Gr=j((function(n){return Jr(ur(n,!0,!0))}));function Hr(n){for(var r=n&&Ir(n,Y).length||0,t=Array(r),e=0;e<r;e++)t[e]=Nr(n,e);return t}var Qr=j(Hr);function Xr(n,r){return n._chain?tn(r).chain():r}function Yr(n){return jr(wn(n),(function(r){var t=tn[r]=n[r];tn.prototype[r]=function(){var n=[this._wrapped];return o.apply(n,arguments),Xr(this,t.apply(tn,n))}})),tn}jr(["pop","push","reverse","shift","sort","splice","unshift"],(function(n){var r=t[n];tn.prototype[n]=function(){var t=this._wrapped;return null!=t&&(r.apply(t,arguments),"shift"!==n&&"splice"!==n||0!==t.length||delete t[0]),Xr(this,t)}})),jr(["concat","join","slice"],(function(n){var r=t[n];tn.prototype[n]=function(){var n=this._wrapped;return null!=n&&(n=r.apply(n,arguments)),Xr(this,n)}}));var Zr=Yr({__proto__:null,VERSION:n,restArguments:j,isObject:_,isNull:function(n){return null===n},isUndefined:w,isBoolean:A,isElement:function(n){return!(!n||1!==n.nodeType)},isString:S,isNumber:O,isDate:M,isRegExp:E,isError:B,isSymbol:N,isArrayBuffer:I,isDataView:q,isArray:U,isFunction:D,isArguments:L,isFinite:function(n){return!N(n)&&d(n)&&!isNaN(parseFloat(n))},isNaN:$,isTypedArray:X,isEmpty:function(n){if(null==n)return!0;var r=Y(n);return"number"==typeof r&&(U(n)||S(n)||L(n))?0===r:0===Y(nn(n))},isMatch:rn,isEqual:function(n,r){return on(n,r)},isMap:dn,isWeakMap:gn,isSet:bn,isWeakSet:mn,keys:nn,allKeys:an,values:jn,pairs:function(n){for(var r=nn(n),t=r.length,e=Array(t),u=0;u<t;u++)e[u]=[r[u],n[r[u]]];return 
e},invert:_n,functions:wn,methods:wn,extend:xn,extendOwn:Sn,assign:Sn,defaults:On,create:function(n,r){var t=Mn(n);return r&&Sn(t,r),t},clone:En,tap:function(n,r){return r(n),n},get:Tn,has:function(n,r){for(var t=(r=Nn(r)).length,e=0;e<t;e++){var u=r[e];if(!W(n,u))return!1;n=n[u]}return!!t},mapObject:function(n,r,t){r=qn(r,t);for(var e=nn(n),u=e.length,o={},i=0;i<u;i++){var a=e[i];o[a]=r(n[a],a,n)}return o},identity:kn,constant:C,noop:Un,toPath:Bn,property:Rn,propertyOf:function(n){return null==n?Un:function(r){return Tn(n,r)}},matcher:Dn,matches:Dn,times:function(n,r,t){var e=Array(Math.max(0,n));r=Fn(r,t,1);for(var u=0;u<n;u++)e[u]=r(u);return e},random:Wn,now:zn,escape:Cn,unescape:Kn,templateSettings:Jn,template:function(n,r,t){!r&&t&&(r=t),r=On({},r,tn.templateSettings);var e=RegExp([(r.escape||Gn).source,(r.interpolate||Gn).source,(r.evaluate||Gn).source].join("|")+"|$","g"),u=0,o="__p+='";n.replace(e,(function(r,t,e,i,a){return o+=n.slice(u,a).replace(Qn,Xn),u=a+r.length,t?o+="'+\n((__t=("+t+"))==null?'':_.escape(__t))+\n'":e?o+="'+\n((__t=("+e+"))==null?'':__t)+\n'":i&&(o+="';\n"+i+"\n__p+='"),r})),o+="';\n";var i,a=r.variable;if(a){if(!Yn.test(a))throw new Error("variable is not a bare identifier: "+a)}else o="with(obj||{}){\n"+o+"}\n",a="obj";o="var __t,__p='',__j=Array.prototype.join,"+"print=function(){__p+=__j.call(arguments,'');};\n"+o+"return __p;\n";try{i=new Function(a,"_",o)}catch(n){throw n.source=o,n}var f=function(n){return i.call(this,n,tn)};return f.source="function("+a+"){\n"+o+"}",f},result:function(n,r,t){var e=(r=Nn(r)).length;if(!e)return D(t)?t.call(n):t;for(var u=0;u<e;u++){var o=null==n?void 0:n[r[u]];void 0===o&&(o=t,u=e),n=D(o)?o.call(n):o}return n},uniqueId:function(n){var r=++Zn+"";return n?n+r:r},chain:function(n){var r=tn(n);return r._chain=!0,r},iteratee:Pn,partial:rr,bind:tr,bindAll:or,memoize:function(n,r){var t=function(e){var u=t.cache,o=""+(r?r.apply(this,arguments):e);return W(u,o)||(u[o]=n.apply(this,arguments)),u[o]};return t.cache={},t},delay:ir,defer:ar,throttle:function(n,r,t){var e,u,o,i,a=0;t||(t={});var f=function(){a=!1===t.leading?0:zn(),e=null,i=n.apply(u,o),e||(u=o=null)},c=function(){var c=zn();a||!1!==t.leading||(a=c);var l=r-(c-a);return u=this,o=arguments,l<=0||l>r?(e&&(clearTimeout(e),e=null),a=c,i=n.apply(u,o),e||(u=o=null)):e||!1===t.trailing||(e=setTimeout(f,l)),i};return c.cancel=function(){clearTimeout(e),a=0,e=u=o=null},c},debounce:function(n,r,t){var e,u,o,i,a,f=function(){var c=zn()-u;r>c?e=setTimeout(f,r-c):(e=null,t||(i=n.apply(a,o)),e||(o=a=null))},c=j((function(c){return a=this,o=c,u=zn(),e||(e=setTimeout(f,r),t&&(i=n.apply(a,o))),i}));return c.cancel=function(){clearTimeout(e),e=o=a=null},c},wrap:function(n,r){return rr(r,n)},negate:fr,compose:function(){var n=arguments,r=n.length-1;return function(){for(var t=r,e=n[r].apply(this,arguments);t--;)e=n[t].call(this,e);return e}},after:function(n,r){return function(){if(--n<1)return r.apply(this,arguments)}},before:cr,once:lr,findKey:sr,findIndex:vr,findLastIndex:hr,sortedIndex:yr,indexOf:gr,lastIndexOf:br,find:mr,detect:mr,findWhere:function(n,r){return mr(n,Dn(r))},each:jr,forEach:jr,map:_r,collect:_r,reduce:Ar,foldl:Ar,inject:Ar,reduceRight:xr,foldr:xr,filter:Sr,select:Sr,reject:function(n,r,t){return Sr(n,fr(qn(r)),t)},every:Or,all:Or,some:Mr,any:Mr,contains:Er,includes:Er,include:Er,invoke:Br,pluck:Nr,where:function(n,r){return Sr(n,Dn(r))},max:Ir,min:function(n,r,t){var e,u,o=1/0,i=1/0;if(null==r||"number"==typeof r&&"object"!=typeof n[0]&&null!=n)for(var 
a=0,f=(n=er(n)?n:jn(n)).length;a<f;a++)null!=(e=n[a])&&e<o&&(o=e);else r=qn(r,t),jr(n,(function(n,t,e){((u=r(n,t,e))<i||u===1/0&&o===1/0)&&(o=n,i=u)}));return o},shuffle:function(n){return Tr(n,1/0)},sample:Tr,sortBy:function(n,r,t){var e=0;return r=qn(r,t),Nr(_r(n,(function(n,t,u){return{value:n,index:e++,criteria:r(n,t,u)}})).sort((function(n,r){var t=n.criteria,e=r.criteria;if(t!==e){if(t>e||void 0===t)return 1;if(t<e||void 0===e)return-1}return n.index-r.index})),"value")},groupBy:Dr,indexBy:Rr,countBy:Fr,partition:Vr,toArray:function(n){return n?U(n)?i.call(n):S(n)?n.match(Pr):er(n)?_r(n,kn):jn(n):[]},size:function(n){return null==n?0:er(n)?n.length:nn(n).length},pick:Ur,omit:Wr,first:Lr,head:Lr,take:Lr,initial:zr,last:function(n,r,t){return null==n||n.length<1?null==r||t?void 0:[]:null==r||t?n[n.length-1]:$r(n,Math.max(0,n.length-r))},rest:$r,tail:$r,drop:$r,compact:function(n){return Sr(n,Boolean)},flatten:function(n,r){return ur(n,r,!1)},without:Kr,uniq:Jr,unique:Jr,union:Gr,intersection:function(n){for(var r=[],t=arguments.length,e=0,u=Y(n);e<u;e++){var o=n[e];if(!Er(r,o)){var i;for(i=1;i<t&&Er(arguments[i],o);i++);i===t&&r.push(o)}}return r},difference:Cr,unzip:Hr,transpose:Hr,zip:Qr,object:function(n,r){for(var t={},e=0,u=Y(n);e<u;e++)r?t[n[e]]=r[e]:t[n[e][0]]=n[e][1];return t},range:function(n,r,t){null==r&&(r=n||0,n=0),t||(t=r<n?-1:1);for(var e=Math.max(Math.ceil((r-n)/t),0),u=Array(e),o=0;o<e;o++,n+=t)u[o]=n;return u},chunk:function(n,r){if(null==r||r<1)return[];for(var t=[],e=0,u=n.length;e<u;)t.push(i.call(n,e,e+=r));return t},mixin:Yr,default:tn});return Zr._=Zr,Zr})); | zyc | /zyc-1.0.0.tar.gz/zyc-1.0.0/docs/_build/singlehtml/_static/underscore.js | underscore.js |
(function() {
// Baseline setup
// --------------
// Establish the root object, `window` in the browser, or `global` on the server.
var root = this;
// Save the previous value of the `_` variable.
var previousUnderscore = root._;
// Establish the object that gets returned to break out of a loop iteration.
var breaker = {};
// Save bytes in the minified (but not gzipped) version:
var ArrayProto = Array.prototype, ObjProto = Object.prototype, FuncProto = Function.prototype;
// Create quick reference variables for speed access to core prototypes.
var slice = ArrayProto.slice,
unshift = ArrayProto.unshift,
toString = ObjProto.toString,
hasOwnProperty = ObjProto.hasOwnProperty;
// All **ECMAScript 5** native function implementations that we hope to use
// are declared here.
var
nativeForEach = ArrayProto.forEach,
nativeMap = ArrayProto.map,
nativeReduce = ArrayProto.reduce,
nativeReduceRight = ArrayProto.reduceRight,
nativeFilter = ArrayProto.filter,
nativeEvery = ArrayProto.every,
nativeSome = ArrayProto.some,
nativeIndexOf = ArrayProto.indexOf,
nativeLastIndexOf = ArrayProto.lastIndexOf,
nativeIsArray = Array.isArray,
nativeKeys = Object.keys,
nativeBind = FuncProto.bind;
// Create a safe reference to the Underscore object for use below.
var _ = function(obj) { return new wrapper(obj); };
// Export the Underscore object for **Node.js**, with
// backwards-compatibility for the old `require()` API. If we're in
// the browser, add `_` as a global object via a string identifier,
// for Closure Compiler "advanced" mode.
if (typeof exports !== 'undefined') {
if (typeof module !== 'undefined' && module.exports) {
exports = module.exports = _;
}
exports._ = _;
} else {
root['_'] = _;
}
// Current version.
_.VERSION = '1.3.1';
// Collection Functions
// --------------------
// The cornerstone, an `each` implementation, aka `forEach`.
// Handles objects with the built-in `forEach`, arrays, and raw objects.
// Delegates to **ECMAScript 5**'s native `forEach` if available.
var each = _.each = _.forEach = function(obj, iterator, context) {
if (obj == null) return;
if (nativeForEach && obj.forEach === nativeForEach) {
obj.forEach(iterator, context);
} else if (obj.length === +obj.length) {
for (var i = 0, l = obj.length; i < l; i++) {
if (i in obj && iterator.call(context, obj[i], i, obj) === breaker) return;
}
} else {
for (var key in obj) {
if (_.has(obj, key)) {
if (iterator.call(context, obj[key], key, obj) === breaker) return;
}
}
}
};
// Return the results of applying the iterator to each element.
// Delegates to **ECMAScript 5**'s native `map` if available.
_.map = _.collect = function(obj, iterator, context) {
var results = [];
if (obj == null) return results;
if (nativeMap && obj.map === nativeMap) return obj.map(iterator, context);
each(obj, function(value, index, list) {
results[results.length] = iterator.call(context, value, index, list);
});
if (obj.length === +obj.length) results.length = obj.length;
return results;
};
// **Reduce** builds up a single result from a list of values, aka `inject`,
// or `foldl`. Delegates to **ECMAScript 5**'s native `reduce` if available.
_.reduce = _.foldl = _.inject = function(obj, iterator, memo, context) {
var initial = arguments.length > 2;
if (obj == null) obj = [];
if (nativeReduce && obj.reduce === nativeReduce) {
if (context) iterator = _.bind(iterator, context);
return initial ? obj.reduce(iterator, memo) : obj.reduce(iterator);
}
each(obj, function(value, index, list) {
if (!initial) {
memo = value;
initial = true;
} else {
memo = iterator.call(context, memo, value, index, list);
}
});
if (!initial) throw new TypeError('Reduce of empty array with no initial value');
return memo;
};
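  // Illustrative usage (editor's addition, not part of the library source):
  //   _.reduce([1, 2, 3], function(memo, num) { return memo + num; }, 0);   // -> 6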
// The right-associative version of reduce, also known as `foldr`.
// Delegates to **ECMAScript 5**'s native `reduceRight` if available.
_.reduceRight = _.foldr = function(obj, iterator, memo, context) {
var initial = arguments.length > 2;
if (obj == null) obj = [];
if (nativeReduceRight && obj.reduceRight === nativeReduceRight) {
if (context) iterator = _.bind(iterator, context);
return initial ? obj.reduceRight(iterator, memo) : obj.reduceRight(iterator);
}
var reversed = _.toArray(obj).reverse();
if (context && !initial) iterator = _.bind(iterator, context);
return initial ? _.reduce(reversed, iterator, memo, context) : _.reduce(reversed, iterator);
};
// Return the first value which passes a truth test. Aliased as `detect`.
_.find = _.detect = function(obj, iterator, context) {
var result;
any(obj, function(value, index, list) {
if (iterator.call(context, value, index, list)) {
result = value;
return true;
}
});
return result;
};
// Return all the elements that pass a truth test.
// Delegates to **ECMAScript 5**'s native `filter` if available.
// Aliased as `select`.
_.filter = _.select = function(obj, iterator, context) {
var results = [];
if (obj == null) return results;
if (nativeFilter && obj.filter === nativeFilter) return obj.filter(iterator, context);
each(obj, function(value, index, list) {
if (iterator.call(context, value, index, list)) results[results.length] = value;
});
return results;
};
// Return all the elements for which a truth test fails.
_.reject = function(obj, iterator, context) {
var results = [];
if (obj == null) return results;
each(obj, function(value, index, list) {
if (!iterator.call(context, value, index, list)) results[results.length] = value;
});
return results;
};
// Determine whether all of the elements match a truth test.
// Delegates to **ECMAScript 5**'s native `every` if available.
// Aliased as `all`.
_.every = _.all = function(obj, iterator, context) {
var result = true;
if (obj == null) return result;
if (nativeEvery && obj.every === nativeEvery) return obj.every(iterator, context);
each(obj, function(value, index, list) {
if (!(result = result && iterator.call(context, value, index, list))) return breaker;
});
return result;
};
// Determine if at least one element in the object matches a truth test.
// Delegates to **ECMAScript 5**'s native `some` if available.
// Aliased as `any`.
var any = _.some = _.any = function(obj, iterator, context) {
iterator || (iterator = _.identity);
var result = false;
if (obj == null) return result;
if (nativeSome && obj.some === nativeSome) return obj.some(iterator, context);
each(obj, function(value, index, list) {
if (result || (result = iterator.call(context, value, index, list))) return breaker;
});
return !!result;
};
// Determine if a given value is included in the array or object using `===`.
// Aliased as `contains`.
_.include = _.contains = function(obj, target) {
var found = false;
if (obj == null) return found;
if (nativeIndexOf && obj.indexOf === nativeIndexOf) return obj.indexOf(target) != -1;
found = any(obj, function(value) {
return value === target;
});
return found;
};
// Invoke a method (with arguments) on every item in a collection.
_.invoke = function(obj, method) {
var args = slice.call(arguments, 2);
return _.map(obj, function(value) {
return (_.isFunction(method) ? method || value : value[method]).apply(value, args);
});
};
// Convenience version of a common use case of `map`: fetching a property.
_.pluck = function(obj, key) {
return _.map(obj, function(value){ return value[key]; });
};
  // Return the maximum element (or element-based computation).
_.max = function(obj, iterator, context) {
if (!iterator && _.isArray(obj)) return Math.max.apply(Math, obj);
if (!iterator && _.isEmpty(obj)) return -Infinity;
var result = {computed : -Infinity};
each(obj, function(value, index, list) {
var computed = iterator ? iterator.call(context, value, index, list) : value;
computed >= result.computed && (result = {value : value, computed : computed});
});
return result.value;
};
// Return the minimum element (or element-based computation).
_.min = function(obj, iterator, context) {
if (!iterator && _.isArray(obj)) return Math.min.apply(Math, obj);
if (!iterator && _.isEmpty(obj)) return Infinity;
var result = {computed : Infinity};
each(obj, function(value, index, list) {
var computed = iterator ? iterator.call(context, value, index, list) : value;
computed < result.computed && (result = {value : value, computed : computed});
});
return result.value;
};
// Shuffle an array.
_.shuffle = function(obj) {
var shuffled = [], rand;
each(obj, function(value, index, list) {
if (index == 0) {
shuffled[0] = value;
} else {
rand = Math.floor(Math.random() * (index + 1));
shuffled[index] = shuffled[rand];
shuffled[rand] = value;
}
});
return shuffled;
};
// Sort the object's values by a criterion produced by an iterator.
_.sortBy = function(obj, iterator, context) {
return _.pluck(_.map(obj, function(value, index, list) {
return {
value : value,
criteria : iterator.call(context, value, index, list)
};
}).sort(function(left, right) {
var a = left.criteria, b = right.criteria;
return a < b ? -1 : a > b ? 1 : 0;
}), 'value');
};
// Groups the object's values by a criterion. Pass either a string attribute
// to group by, or a function that returns the criterion.
_.groupBy = function(obj, val) {
var result = {};
var iterator = _.isFunction(val) ? val : function(obj) { return obj[val]; };
each(obj, function(value, index) {
var key = iterator(value, index);
(result[key] || (result[key] = [])).push(value);
});
return result;
};
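  // Illustrative usage (editor's addition):
  //   _.groupBy([1.3, 2.1, 2.4], function(num) { return Math.floor(num); });
  //   // -> {1: [1.3], 2: [2.1, 2.4]}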
// Use a comparator function to figure out at what index an object should
// be inserted so as to maintain order. Uses binary search.
_.sortedIndex = function(array, obj, iterator) {
iterator || (iterator = _.identity);
var low = 0, high = array.length;
while (low < high) {
var mid = (low + high) >> 1;
iterator(array[mid]) < iterator(obj) ? low = mid + 1 : high = mid;
}
return low;
};
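  // Illustrative usage (editor's addition): with the identity iterator, the binary
  // search returns the insertion index that keeps the array sorted.
  //   _.sortedIndex([10, 20, 30, 40], 35);   // -> 3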
// Safely convert anything iterable into a real, live array.
_.toArray = function(iterable) {
if (!iterable) return [];
if (iterable.toArray) return iterable.toArray();
if (_.isArray(iterable)) return slice.call(iterable);
if (_.isArguments(iterable)) return slice.call(iterable);
return _.values(iterable);
};
// Return the number of elements in an object.
_.size = function(obj) {
return _.toArray(obj).length;
};
// Array Functions
// ---------------
// Get the first element of an array. Passing **n** will return the first N
// values in the array. Aliased as `head`. The **guard** check allows it to work
// with `_.map`.
_.first = _.head = function(array, n, guard) {
return (n != null) && !guard ? slice.call(array, 0, n) : array[0];
};
  // Returns everything but the last entry of the array. Especially useful on
// the arguments object. Passing **n** will return all the values in
// the array, excluding the last N. The **guard** check allows it to work with
// `_.map`.
_.initial = function(array, n, guard) {
return slice.call(array, 0, array.length - ((n == null) || guard ? 1 : n));
};
// Get the last element of an array. Passing **n** will return the last N
// values in the array. The **guard** check allows it to work with `_.map`.
_.last = function(array, n, guard) {
if ((n != null) && !guard) {
return slice.call(array, Math.max(array.length - n, 0));
} else {
return array[array.length - 1];
}
};
// Returns everything but the first entry of the array. Aliased as `tail`.
// Especially useful on the arguments object. Passing an **index** will return
// the rest of the values in the array from that index onward. The **guard**
// check allows it to work with `_.map`.
_.rest = _.tail = function(array, index, guard) {
return slice.call(array, (index == null) || guard ? 1 : index);
};
// Trim out all falsy values from an array.
_.compact = function(array) {
return _.filter(array, function(value){ return !!value; });
};
// Return a completely flattened version of an array.
_.flatten = function(array, shallow) {
return _.reduce(array, function(memo, value) {
if (_.isArray(value)) return memo.concat(shallow ? value : _.flatten(value));
memo[memo.length] = value;
return memo;
}, []);
};
// Return a version of the array that does not contain the specified value(s).
_.without = function(array) {
return _.difference(array, slice.call(arguments, 1));
};
// Produce a duplicate-free version of the array. If the array has already
// been sorted, you have the option of using a faster algorithm.
// Aliased as `unique`.
_.uniq = _.unique = function(array, isSorted, iterator) {
var initial = iterator ? _.map(array, iterator) : array;
var result = [];
_.reduce(initial, function(memo, el, i) {
if (0 == i || (isSorted === true ? _.last(memo) != el : !_.include(memo, el))) {
memo[memo.length] = el;
result[result.length] = array[i];
}
return memo;
}, []);
return result;
};
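  // Illustrative usage (editor's addition):
  //   _.uniq([1, 2, 1, 3, 1, 4]);   // -> [1, 2, 3, 4]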
// Produce an array that contains the union: each distinct element from all of
// the passed-in arrays.
_.union = function() {
return _.uniq(_.flatten(arguments, true));
};
// Produce an array that contains every item shared between all the
// passed-in arrays. (Aliased as "intersect" for back-compat.)
_.intersection = _.intersect = function(array) {
var rest = slice.call(arguments, 1);
return _.filter(_.uniq(array), function(item) {
return _.every(rest, function(other) {
return _.indexOf(other, item) >= 0;
});
});
};
// Take the difference between one array and a number of other arrays.
// Only the elements present in just the first array will remain.
_.difference = function(array) {
var rest = _.flatten(slice.call(arguments, 1));
return _.filter(array, function(value){ return !_.include(rest, value); });
};
// Zip together multiple lists into a single array -- elements that share
// an index go together.
_.zip = function() {
var args = slice.call(arguments);
var length = _.max(_.pluck(args, 'length'));
var results = new Array(length);
for (var i = 0; i < length; i++) results[i] = _.pluck(args, "" + i);
return results;
};
// If the browser doesn't supply us with indexOf (I'm looking at you, **MSIE**),
// we need this function. Return the position of the first occurrence of an
// item in an array, or -1 if the item is not included in the array.
// Delegates to **ECMAScript 5**'s native `indexOf` if available.
// If the array is large and already in sort order, pass `true`
// for **isSorted** to use binary search.
_.indexOf = function(array, item, isSorted) {
if (array == null) return -1;
var i, l;
if (isSorted) {
i = _.sortedIndex(array, item);
return array[i] === item ? i : -1;
}
if (nativeIndexOf && array.indexOf === nativeIndexOf) return array.indexOf(item);
for (i = 0, l = array.length; i < l; i++) if (i in array && array[i] === item) return i;
return -1;
};
// Delegates to **ECMAScript 5**'s native `lastIndexOf` if available.
_.lastIndexOf = function(array, item) {
if (array == null) return -1;
if (nativeLastIndexOf && array.lastIndexOf === nativeLastIndexOf) return array.lastIndexOf(item);
var i = array.length;
while (i--) if (i in array && array[i] === item) return i;
return -1;
};
// Generate an integer Array containing an arithmetic progression. A port of
// the native Python `range()` function. See
// [the Python documentation](http://docs.python.org/library/functions.html#range).
_.range = function(start, stop, step) {
if (arguments.length <= 1) {
stop = start || 0;
start = 0;
}
step = arguments[2] || 1;
var len = Math.max(Math.ceil((stop - start) / step), 0);
var idx = 0;
var range = new Array(len);
while(idx < len) {
range[idx++] = start;
start += step;
}
return range;
};
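  // Illustrative usage (editor's addition):
  //   _.range(5);          // -> [0, 1, 2, 3, 4]
  //   _.range(0, 10, 2);   // -> [0, 2, 4, 6, 8]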
// Function (ahem) Functions
// ------------------
// Reusable constructor function for prototype setting.
var ctor = function(){};
// Create a function bound to a given object (assigning `this`, and arguments,
  // optionally). Binding with arguments is also known as partial application
  // (often loosely called `curry`).
// Delegates to **ECMAScript 5**'s native `Function.bind` if available.
// We check for `func.bind` first, to fail fast when `func` is undefined.
_.bind = function bind(func, context) {
var bound, args;
if (func.bind === nativeBind && nativeBind) return nativeBind.apply(func, slice.call(arguments, 1));
if (!_.isFunction(func)) throw new TypeError;
args = slice.call(arguments, 2);
return bound = function() {
if (!(this instanceof bound)) return func.apply(context, args.concat(slice.call(arguments)));
ctor.prototype = func.prototype;
var self = new ctor;
var result = func.apply(self, args.concat(slice.call(arguments)));
if (Object(result) === result) return result;
return self;
};
};
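  // Illustrative usage (editor's addition): pre-fill `this` and leading arguments.
  //   var greet = function(greeting) { return greeting + ': ' + this.name; };
  //   var hi = _.bind(greet, {name: 'moe'}, 'hi');
  //   hi();   // -> 'hi: moe'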
// Bind all of an object's methods to that object. Useful for ensuring that
// all callbacks defined on an object belong to it.
_.bindAll = function(obj) {
var funcs = slice.call(arguments, 1);
if (funcs.length == 0) funcs = _.functions(obj);
each(funcs, function(f) { obj[f] = _.bind(obj[f], obj); });
return obj;
};
// Memoize an expensive function by storing its results.
_.memoize = function(func, hasher) {
var memo = {};
hasher || (hasher = _.identity);
return function() {
var key = hasher.apply(this, arguments);
return _.has(memo, key) ? memo[key] : (memo[key] = func.apply(this, arguments));
};
};
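  // Illustrative usage (editor's addition): results are cached keyed by the first
  // argument (the default hasher is the identity function).
  //   var fibonacci = _.memoize(function(n) {
  //     return n < 2 ? n : fibonacci(n - 1) + fibonacci(n - 2);
  //   });
  //   fibonacci(30);   // each distinct n is computed only once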
// Delays a function for the given number of milliseconds, and then calls
// it with the arguments supplied.
_.delay = function(func, wait) {
var args = slice.call(arguments, 2);
return setTimeout(function(){ return func.apply(func, args); }, wait);
};
// Defers a function, scheduling it to run after the current call stack has
// cleared.
_.defer = function(func) {
return _.delay.apply(_, [func, 1].concat(slice.call(arguments, 1)));
};
// Returns a function, that, when invoked, will only be triggered at most once
// during a given window of time.
_.throttle = function(func, wait) {
var context, args, timeout, throttling, more;
var whenDone = _.debounce(function(){ more = throttling = false; }, wait);
return function() {
context = this; args = arguments;
var later = function() {
timeout = null;
if (more) func.apply(context, args);
whenDone();
};
if (!timeout) timeout = setTimeout(later, wait);
if (throttling) {
more = true;
} else {
func.apply(context, args);
}
whenDone();
throttling = true;
};
};
// Returns a function, that, as long as it continues to be invoked, will not
// be triggered. The function will be called after it stops being called for
// N milliseconds.
_.debounce = function(func, wait) {
var timeout;
return function() {
var context = this, args = arguments;
var later = function() {
timeout = null;
func.apply(context, args);
};
clearTimeout(timeout);
timeout = setTimeout(later, wait);
};
};
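  // Illustrative usage (editor's addition; `recalculateLayout` is a hypothetical handler):
  //   var lazyLayout = _.debounce(recalculateLayout, 300);
  //   window.addEventListener('resize', lazyLayout);   // fires 300ms after resizing stops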
// Returns a function that will be executed at most one time, no matter how
// often you call it. Useful for lazy initialization.
_.once = function(func) {
var ran = false, memo;
return function() {
if (ran) return memo;
ran = true;
return memo = func.apply(this, arguments);
};
};
// Returns the first function passed as an argument to the second,
// allowing you to adjust arguments, run code before and after, and
// conditionally execute the original function.
_.wrap = function(func, wrapper) {
return function() {
var args = [func].concat(slice.call(arguments, 0));
return wrapper.apply(this, args);
};
};
// Returns a function that is the composition of a list of functions, each
// consuming the return value of the function that follows.
_.compose = function() {
var funcs = arguments;
return function() {
var args = arguments;
for (var i = funcs.length - 1; i >= 0; i--) {
args = [funcs[i].apply(this, args)];
}
return args[0];
};
};
// Returns a function that will only be executed after being called N times.
_.after = function(times, func) {
if (times <= 0) return func();
return function() {
if (--times < 1) { return func.apply(this, arguments); }
};
};
// Object Functions
// ----------------
// Retrieve the names of an object's properties.
// Delegates to **ECMAScript 5**'s native `Object.keys`
_.keys = nativeKeys || function(obj) {
if (obj !== Object(obj)) throw new TypeError('Invalid object');
var keys = [];
for (var key in obj) if (_.has(obj, key)) keys[keys.length] = key;
return keys;
};
// Retrieve the values of an object's properties.
_.values = function(obj) {
return _.map(obj, _.identity);
};
// Return a sorted list of the function names available on the object.
// Aliased as `methods`
_.functions = _.methods = function(obj) {
var names = [];
for (var key in obj) {
if (_.isFunction(obj[key])) names.push(key);
}
return names.sort();
};
// Extend a given object with all the properties in passed-in object(s).
_.extend = function(obj) {
each(slice.call(arguments, 1), function(source) {
for (var prop in source) {
obj[prop] = source[prop];
}
});
return obj;
};
// Fill in a given object with default properties.
_.defaults = function(obj) {
each(slice.call(arguments, 1), function(source) {
for (var prop in source) {
if (obj[prop] == null) obj[prop] = source[prop];
}
});
return obj;
};
// Create a (shallow-cloned) duplicate of an object.
_.clone = function(obj) {
if (!_.isObject(obj)) return obj;
return _.isArray(obj) ? obj.slice() : _.extend({}, obj);
};
// Invokes interceptor with the obj, and then returns obj.
// The primary purpose of this method is to "tap into" a method chain, in
// order to perform operations on intermediate results within the chain.
_.tap = function(obj, interceptor) {
interceptor(obj);
return obj;
};
// Internal recursive comparison function.
function eq(a, b, stack) {
// Identical objects are equal. `0 === -0`, but they aren't identical.
// See the Harmony `egal` proposal: http://wiki.ecmascript.org/doku.php?id=harmony:egal.
if (a === b) return a !== 0 || 1 / a == 1 / b;
// A strict comparison is necessary because `null == undefined`.
if (a == null || b == null) return a === b;
// Unwrap any wrapped objects.
if (a._chain) a = a._wrapped;
if (b._chain) b = b._wrapped;
// Invoke a custom `isEqual` method if one is provided.
if (a.isEqual && _.isFunction(a.isEqual)) return a.isEqual(b);
if (b.isEqual && _.isFunction(b.isEqual)) return b.isEqual(a);
// Compare `[[Class]]` names.
var className = toString.call(a);
if (className != toString.call(b)) return false;
switch (className) {
// Strings, numbers, dates, and booleans are compared by value.
case '[object String]':
// Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is
// equivalent to `new String("5")`.
return a == String(b);
case '[object Number]':
// `NaN`s are equivalent, but non-reflexive. An `egal` comparison is performed for
// other numeric values.
return a != +a ? b != +b : (a == 0 ? 1 / a == 1 / b : a == +b);
case '[object Date]':
case '[object Boolean]':
// Coerce dates and booleans to numeric primitive values. Dates are compared by their
// millisecond representations. Note that invalid dates with millisecond representations
// of `NaN` are not equivalent.
return +a == +b;
// RegExps are compared by their source patterns and flags.
case '[object RegExp]':
return a.source == b.source &&
a.global == b.global &&
a.multiline == b.multiline &&
a.ignoreCase == b.ignoreCase;
}
if (typeof a != 'object' || typeof b != 'object') return false;
// Assume equality for cyclic structures. The algorithm for detecting cyclic
// structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`.
var length = stack.length;
while (length--) {
// Linear search. Performance is inversely proportional to the number of
// unique nested structures.
if (stack[length] == a) return true;
}
// Add the first object to the stack of traversed objects.
stack.push(a);
var size = 0, result = true;
// Recursively compare objects and arrays.
if (className == '[object Array]') {
// Compare array lengths to determine if a deep comparison is necessary.
size = a.length;
result = size == b.length;
if (result) {
// Deep compare the contents, ignoring non-numeric properties.
while (size--) {
// Ensure commutative equality for sparse arrays.
if (!(result = size in a == size in b && eq(a[size], b[size], stack))) break;
}
}
} else {
// Objects with different constructors are not equivalent.
if ('constructor' in a != 'constructor' in b || a.constructor != b.constructor) return false;
// Deep compare objects.
for (var key in a) {
if (_.has(a, key)) {
// Count the expected number of properties.
size++;
// Deep compare each member.
if (!(result = _.has(b, key) && eq(a[key], b[key], stack))) break;
}
}
// Ensure that both objects contain the same number of properties.
if (result) {
for (key in b) {
if (_.has(b, key) && !(size--)) break;
}
result = !size;
}
}
// Remove the first object from the stack of traversed objects.
stack.pop();
return result;
}
// Perform a deep comparison to check if two objects are equal.
_.isEqual = function(a, b) {
return eq(a, b, []);
};
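  // Illustrative usage (editor's addition): structural, not referential, equality.
  //   _.isEqual({name: 'moe', lucky: [13, 27]}, {name: 'moe', lucky: [13, 27]});   // -> true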
// Is a given array, string, or object empty?
// An "empty" object has no enumerable own-properties.
_.isEmpty = function(obj) {
if (_.isArray(obj) || _.isString(obj)) return obj.length === 0;
for (var key in obj) if (_.has(obj, key)) return false;
return true;
};
// Is a given value a DOM element?
_.isElement = function(obj) {
return !!(obj && obj.nodeType == 1);
};
// Is a given value an array?
// Delegates to ECMA5's native Array.isArray
_.isArray = nativeIsArray || function(obj) {
return toString.call(obj) == '[object Array]';
};
// Is a given variable an object?
_.isObject = function(obj) {
return obj === Object(obj);
};
// Is a given variable an arguments object?
_.isArguments = function(obj) {
return toString.call(obj) == '[object Arguments]';
};
if (!_.isArguments(arguments)) {
_.isArguments = function(obj) {
return !!(obj && _.has(obj, 'callee'));
};
}
// Is a given value a function?
_.isFunction = function(obj) {
return toString.call(obj) == '[object Function]';
};
// Is a given value a string?
_.isString = function(obj) {
return toString.call(obj) == '[object String]';
};
// Is a given value a number?
_.isNumber = function(obj) {
return toString.call(obj) == '[object Number]';
};
// Is the given value `NaN`?
_.isNaN = function(obj) {
// `NaN` is the only value for which `===` is not reflexive.
return obj !== obj;
};
// Is a given value a boolean?
_.isBoolean = function(obj) {
return obj === true || obj === false || toString.call(obj) == '[object Boolean]';
};
// Is a given value a date?
_.isDate = function(obj) {
return toString.call(obj) == '[object Date]';
};
// Is the given value a regular expression?
_.isRegExp = function(obj) {
return toString.call(obj) == '[object RegExp]';
};
// Is a given value equal to null?
_.isNull = function(obj) {
return obj === null;
};
// Is a given variable undefined?
_.isUndefined = function(obj) {
return obj === void 0;
};
// Has own property?
_.has = function(obj, key) {
return hasOwnProperty.call(obj, key);
};
// Utility Functions
// -----------------
// Run Underscore.js in *noConflict* mode, returning the `_` variable to its
// previous owner. Returns a reference to the Underscore object.
_.noConflict = function() {
root._ = previousUnderscore;
return this;
};
// Keep the identity function around for default iterators.
_.identity = function(value) {
return value;
};
// Run a function **n** times.
_.times = function (n, iterator, context) {
for (var i = 0; i < n; i++) iterator.call(context, i);
};
// Escape a string for HTML interpolation.
_.escape = function(string) {
    return (''+string).replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;').replace(/'/g, '&#x27;').replace(/\//g,'&#x2F;');
};
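  // Illustrative usage (editor's addition):
  //   _.escape('Curly, Larry & Moe');   // -> 'Curly, Larry &amp; Moe'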
// Add your own custom functions to the Underscore object, ensuring that
// they're correctly added to the OOP wrapper as well.
_.mixin = function(obj) {
each(_.functions(obj), function(name){
addToWrapper(name, _[name] = obj[name]);
});
};
// Generate a unique integer id (unique within the entire client session).
// Useful for temporary DOM ids.
var idCounter = 0;
_.uniqueId = function(prefix) {
var id = idCounter++;
return prefix ? prefix + id : id;
};
// By default, Underscore uses ERB-style template delimiters, change the
// following template settings to use alternative delimiters.
_.templateSettings = {
evaluate : /<%([\s\S]+?)%>/g,
interpolate : /<%=([\s\S]+?)%>/g,
escape : /<%-([\s\S]+?)%>/g
};
// When customizing `templateSettings`, if you don't want to define an
// interpolation, evaluation or escaping regex, we need one that is
// guaranteed not to match.
var noMatch = /.^/;
// Within an interpolation, evaluation, or escaping, remove HTML escaping
// that had been previously added.
var unescape = function(code) {
return code.replace(/\\\\/g, '\\').replace(/\\'/g, "'");
};
// JavaScript micro-templating, similar to John Resig's implementation.
// Underscore templating handles arbitrary delimiters, preserves whitespace,
// and correctly escapes quotes within interpolated code.
_.template = function(str, data) {
var c = _.templateSettings;
var tmpl = 'var __p=[],print=function(){__p.push.apply(__p,arguments);};' +
'with(obj||{}){__p.push(\'' +
str.replace(/\\/g, '\\\\')
.replace(/'/g, "\\'")
.replace(c.escape || noMatch, function(match, code) {
return "',_.escape(" + unescape(code) + "),'";
})
.replace(c.interpolate || noMatch, function(match, code) {
return "'," + unescape(code) + ",'";
})
.replace(c.evaluate || noMatch, function(match, code) {
return "');" + unescape(code).replace(/[\r\n\t]/g, ' ') + ";__p.push('";
})
.replace(/\r/g, '\\r')
.replace(/\n/g, '\\n')
.replace(/\t/g, '\\t')
+ "');}return __p.join('');";
var func = new Function('obj', '_', tmpl);
if (data) return func(data, _);
return function(data) {
return func.call(this, data, _);
};
};
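  // Illustrative usage (editor's addition): compile once, render with data later.
  //   var compiled = _.template('hello: <%= name %>');
  //   compiled({name: 'moe'});   // -> 'hello: moe'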
// Add a "chain" function, which will delegate to the wrapper.
_.chain = function(obj) {
return _(obj).chain();
};
// The OOP Wrapper
// ---------------
// If Underscore is called as a function, it returns a wrapped object that
// can be used OO-style. This wrapper holds altered versions of all the
// underscore functions. Wrapped objects may be chained.
var wrapper = function(obj) { this._wrapped = obj; };
// Expose `wrapper.prototype` as `_.prototype`
_.prototype = wrapper.prototype;
// Helper function to continue chaining intermediate results.
var result = function(obj, chain) {
return chain ? _(obj).chain() : obj;
};
// A method to easily add functions to the OOP wrapper.
var addToWrapper = function(name, func) {
wrapper.prototype[name] = function() {
var args = slice.call(arguments);
unshift.call(args, this._wrapped);
return result(func.apply(_, args), this._chain);
};
};
// Add all of the Underscore functions to the wrapper object.
_.mixin(_);
// Add all mutator Array functions to the wrapper.
each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) {
var method = ArrayProto[name];
wrapper.prototype[name] = function() {
var wrapped = this._wrapped;
method.apply(wrapped, arguments);
var length = wrapped.length;
if ((name == 'shift' || name == 'splice') && length === 0) delete wrapped[0];
return result(wrapped, this._chain);
};
});
// Add all accessor Array functions to the wrapper.
each(['concat', 'join', 'slice'], function(name) {
var method = ArrayProto[name];
wrapper.prototype[name] = function() {
return result(method.apply(this._wrapped, arguments), this._chain);
};
});
// Start chaining a wrapped Underscore object.
wrapper.prototype.chain = function() {
this._chain = true;
return this;
};
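  // Illustrative usage (editor's addition): chained results stay wrapped until value().
  //   _([1, 2, 3]).chain().map(function(n) { return n * 2; }).value();   // -> [2, 4, 6]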
// Extracts the result from a wrapped and chained object.
wrapper.prototype.value = function() {
return this._wrapped;
};
}).call(this); | zyc | /zyc-1.0.0.tar.gz/zyc-1.0.0/docs/_build/singlehtml/_static/underscore-1.3.1.js | underscore-1.3.1.js |
if (!Scorer) {
/**
* Simple result scoring code.
*/
var Scorer = {
// Implement the following function to further tweak the score for each result
// The function takes a result array [filename, title, anchor, descr, score]
// and returns the new score.
/*
score: function(result) {
return result[4];
},
*/
// query matches the full name of an object
objNameMatch: 11,
// or matches in the last dotted part of the object name
objPartialMatch: 6,
// Additive scores depending on the priority of the object
objPrio: {0: 15, // used to be importantResults
1: 5, // used to be objectResults
2: -5}, // used to be unimportantResults
// Used when the priority is not in the mapping.
objPrioDefault: 0,
// query found in title
title: 15,
partialTitle: 7,
// query found in terms
term: 5,
partialTerm: 2
};
}
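// Worked example (editor's addition, assuming the additive scoring applied elsewhere in
// this file): a hit whose full object name matches the query and that carries priority 1
// would start at objNameMatch + objPrio[1] = 11 + 5 = 16 before any title or term bonuses.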
if (!splitQuery) {
function splitQuery(query) {
return query.split(/\s+/);
}
}
/**
* Search Module
*/
var Search = {
_index : null,
_queued_query : null,
_pulse_status : -1,
htmlToText : function(htmlString) {
var virtualDocument = document.implementation.createHTMLDocument('virtual');
var htmlElement = $(htmlString, virtualDocument);
htmlElement.find('.headerlink').remove();
docContent = htmlElement.find('[role=main]')[0];
if(docContent === undefined) {
console.warn("Content block not found. Sphinx search tries to obtain it " +
"via '[role=main]'. Could you check your theme or template.");
return "";
}
return docContent.textContent || docContent.innerText;
},
init : function() {
var params = $.getQueryParameters();
if (params.q) {
var query = params.q[0];
$('input[name="q"]')[0].value = query;
this.performSearch(query);
}
},
loadIndex : function(url) {
$.ajax({type: "GET", url: url, data: null,
dataType: "script", cache: true,
complete: function(jqxhr, textstatus) {
if (textstatus != "success") {
document.getElementById("searchindexloader").src = url;
}
}});
},
setIndex : function(index) {
var q;
this._index = index;
if ((q = this._queued_query) !== null) {
this._queued_query = null;
Search.query(q);
}
},
hasIndex : function() {
return this._index !== null;
},
deferQuery : function(query) {
this._queued_query = query;
},
stopPulse : function() {
this._pulse_status = 0;
},
startPulse : function() {
if (this._pulse_status >= 0)
return;
function pulse() {
var i;
Search._pulse_status = (Search._pulse_status + 1) % 4;
var dotString = '';
for (i = 0; i < Search._pulse_status; i++)
dotString += '.';
Search.dots.text(dotString);
if (Search._pulse_status > -1)
window.setTimeout(pulse, 500);
}
pulse();
},
/**
* perform a search for something (or wait until index is loaded)
*/
performSearch : function(query) {
// create the required interface elements
this.out = $('#search-results');
this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
this.dots = $('<span></span>').appendTo(this.title);
this.status = $('<p class="search-summary"> </p>').appendTo(this.out);
this.output = $('<ul class="search"/>').appendTo(this.out);
$('#search-progress').text(_('Preparing search...'));
this.startPulse();
// index already loaded, the browser was quick!
if (this.hasIndex())
this.query(query);
else
this.deferQuery(query);
},
/**
* execute search (requires search index to be loaded)
*/
query : function(query) {
var i;
// stem the searchterms and add them to the correct list
var stemmer = new Stemmer();
var searchterms = [];
var excluded = [];
var hlterms = [];
var tmp = splitQuery(query);
var objectterms = [];
for (i = 0; i < tmp.length; i++) {
if (tmp[i] !== "") {
objectterms.push(tmp[i].toLowerCase());
}
if ($u.indexOf(stopwords, tmp[i].toLowerCase()) != -1 || tmp[i] === "") {
// skip this "word"
continue;
}
// stem the word
var word = stemmer.stemWord(tmp[i].toLowerCase());
// prevent stemmer from cutting word smaller than two chars
if(word.length < 3 && tmp[i].length >= 3) {
word = tmp[i];
}
var toAppend;
// select the correct list
if (word[0] == '-') {
toAppend = excluded;
word = word.substr(1);
}
else {
toAppend = searchterms;
hlterms.push(tmp[i].toLowerCase());
}
// only add if not already in the list
if (!$u.contains(toAppend, word))
toAppend.push(word);
}
var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
// console.debug('SEARCH: searching for:');
// console.info('required: ', searchterms);
// console.info('excluded: ', excluded);
// prepare search
var terms = this._index.terms;
var titleterms = this._index.titleterms;
// array of [filename, title, anchor, descr, score]
var results = [];
$('#search-progress').empty();
// lookup as object
for (i = 0; i < objectterms.length; i++) {
var others = [].concat(objectterms.slice(0, i),
objectterms.slice(i+1, objectterms.length));
results = results.concat(this.performObjectSearch(objectterms[i], others));
}
// lookup as search terms in fulltext
results = results.concat(this.performTermsSearch(searchterms, excluded, terms, titleterms));
// let the scorer override scores with a custom scoring function
if (Scorer.score) {
for (i = 0; i < results.length; i++)
results[i][4] = Scorer.score(results[i]);
}
// now sort the results by score (in opposite order of appearance, since the
// display function below uses pop() to retrieve items) and then
// alphabetically
results.sort(function(a, b) {
var left = a[4];
var right = b[4];
if (left > right) {
return 1;
} else if (left < right) {
return -1;
} else {
// same score: sort alphabetically
left = a[1].toLowerCase();
right = b[1].toLowerCase();
return (left > right) ? -1 : ((left < right) ? 1 : 0);
}
});
// for debugging
//Search.lastresults = results.slice(); // a copy
//console.info('search results:', Search.lastresults);
// print the results
var resultCount = results.length;
function displayNextItem() {
// results left, load the summary and display it
if (results.length) {
var item = results.pop();
var listItem = $('<li></li>');
var requestUrl = "";
var linkUrl = "";
if (DOCUMENTATION_OPTIONS.BUILDER === 'dirhtml') {
// dirhtml builder
var dirname = item[0] + '/';
if (dirname.match(/\/index\/$/)) {
dirname = dirname.substring(0, dirname.length-6);
} else if (dirname == 'index/') {
dirname = '';
}
requestUrl = DOCUMENTATION_OPTIONS.URL_ROOT + dirname;
linkUrl = requestUrl;
} else {
// normal html builders
requestUrl = DOCUMENTATION_OPTIONS.URL_ROOT + item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX;
linkUrl = item[0] + DOCUMENTATION_OPTIONS.LINK_SUFFIX;
}
listItem.append($('<a/>').attr('href',
linkUrl +
highlightstring + item[2]).html(item[1]));
if (item[3]) {
listItem.append($('<span> (' + item[3] + ')</span>'));
Search.output.append(listItem);
setTimeout(function() {
displayNextItem();
}, 5);
} else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
$.ajax({url: requestUrl,
dataType: "text",
complete: function(jqxhr, textstatus) {
var data = jqxhr.responseText;
if (data !== '' && data !== undefined) {
listItem.append(Search.makeSearchSummary(data, searchterms, hlterms));
}
Search.output.append(listItem);
setTimeout(function() {
displayNextItem();
}, 5);
}});
} else {
// no source available, just display title
Search.output.append(listItem);
setTimeout(function() {
displayNextItem();
}, 5);
}
}
// search finished, update title and status message
else {
Search.stopPulse();
Search.title.text(_('Search Results'));
if (!resultCount)
Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.'));
else
Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', resultCount));
Search.status.fadeIn(500);
}
}
displayNextItem();
},
/**
* search for object names
*/
performObjectSearch : function(object, otherterms) {
var filenames = this._index.filenames;
var docnames = this._index.docnames;
var objects = this._index.objects;
var objnames = this._index.objnames;
var titles = this._index.titles;
var i;
var results = [];
for (var prefix in objects) {
for (var name in objects[prefix]) {
var fullname = (prefix ? prefix + '.' : '') + name;
var fullnameLower = fullname.toLowerCase()
if (fullnameLower.indexOf(object) > -1) {
var score = 0;
var parts = fullnameLower.split('.');
// check for different match types: exact matches of full name or
// "last name" (i.e. last dotted part)
if (fullnameLower == object || parts[parts.length - 1] == object) {
score += Scorer.objNameMatch;
// matches in last name
} else if (parts[parts.length - 1].indexOf(object) > -1) {
score += Scorer.objPartialMatch;
}
var match = objects[prefix][name];
var objname = objnames[match[1]][2];
var title = titles[match[0]];
// If more than one term searched for, we require other words to be
// found in the name/title/description
if (otherterms.length > 0) {
var haystack = (prefix + ' ' + name + ' ' +
objname + ' ' + title).toLowerCase();
var allfound = true;
for (i = 0; i < otherterms.length; i++) {
if (haystack.indexOf(otherterms[i]) == -1) {
allfound = false;
break;
}
}
if (!allfound) {
continue;
}
}
var descr = objname + _(', in ') + title;
var anchor = match[3];
if (anchor === '')
anchor = fullname;
else if (anchor == '-')
anchor = objnames[match[1]][1] + '-' + fullname;
// add custom score for some objects according to scorer
if (Scorer.objPrio.hasOwnProperty(match[2])) {
score += Scorer.objPrio[match[2]];
} else {
score += Scorer.objPrioDefault;
}
results.push([docnames[match[0]], fullname, '#'+anchor, descr, score, filenames[match[0]]]);
}
}
}
return results;
},
/**
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
*/
escapeRegExp : function(string) {
return string.replace(/[.*+\-?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
},
/**
* search for full-text terms in the index
*/
performTermsSearch : function(searchterms, excluded, terms, titleterms) {
var docnames = this._index.docnames;
var filenames = this._index.filenames;
var titles = this._index.titles;
var i, j, file;
var fileMap = {};
var scoreMap = {};
var results = [];
// perform the search on the required terms
for (i = 0; i < searchterms.length; i++) {
var word = searchterms[i];
var files = [];
var _o = [
{files: terms[word], score: Scorer.term},
{files: titleterms[word], score: Scorer.title}
];
// add support for partial matches
if (word.length > 2) {
var word_regex = this.escapeRegExp(word);
for (var w in terms) {
if (w.match(word_regex) && !terms[word]) {
_o.push({files: terms[w], score: Scorer.partialTerm})
}
}
for (var w in titleterms) {
if (w.match(word_regex) && !titleterms[word]) {
_o.push({files: titleterms[w], score: Scorer.partialTitle})
}
}
}
// no match but word was a required one
if ($u.every(_o, function(o){return o.files === undefined;})) {
break;
}
// found search word in contents
$u.each(_o, function(o) {
var _files = o.files;
if (_files === undefined)
return
if (_files.length === undefined)
_files = [_files];
files = files.concat(_files);
// set score for the word in each file to Scorer.term
for (j = 0; j < _files.length; j++) {
file = _files[j];
if (!(file in scoreMap))
scoreMap[file] = {};
scoreMap[file][word] = o.score;
}
});
// create the mapping
for (j = 0; j < files.length; j++) {
file = files[j];
if (file in fileMap && fileMap[file].indexOf(word) === -1)
fileMap[file].push(word);
else
fileMap[file] = [word];
}
}
// now check if the files don't contain excluded terms
for (file in fileMap) {
var valid = true;
// check if all requirements are matched
var filteredTermCount = // as search terms with length < 3 are discarded: ignore
searchterms.filter(function(term){return term.length > 2}).length
if (
fileMap[file].length != searchterms.length &&
fileMap[file].length != filteredTermCount
) continue;
// ensure that none of the excluded terms is in the search result
for (i = 0; i < excluded.length; i++) {
if (terms[excluded[i]] == file ||
titleterms[excluded[i]] == file ||
$u.contains(terms[excluded[i]] || [], file) ||
$u.contains(titleterms[excluded[i]] || [], file)) {
valid = false;
break;
}
}
// if we have still a valid result we can add it to the result list
if (valid) {
// select one (max) score for the file.
// for better ranking, we should calculate ranking by using words statistics like basic tf-idf...
var score = $u.max($u.map(fileMap[file], function(w){return scoreMap[file][w]}));
results.push([docnames[file], titles[file], '', null, score, filenames[file]]);
}
}
return results;
},
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
* of stemmed words, hlwords is the list of normal, unstemmed
* words. the first one is used to find the occurrence, the
* latter for highlighting it.
*/
makeSearchSummary : function(htmlText, keywords, hlwords) {
var text = Search.htmlToText(htmlText);
var textLower = text.toLowerCase();
var start = 0;
$.each(keywords, function() {
var i = textLower.indexOf(this.toLowerCase());
if (i > -1)
start = i;
});
start = Math.max(start - 120, 0);
var excerpt = ((start > 0) ? '...' : '') +
$.trim(text.substr(start, 240)) +
((start + 240 - text.length) ? '...' : '');
var rv = $('<p class="context"></p>').text(excerpt);
$.each(hlwords, function() {
rv = rv.highlightText(this, 'highlighted');
});
return rv;
}
};
$(document).ready(function() {
Search.init();
}); | zyc | /zyc-1.0.0.tar.gz/zyc-1.0.0/docs/_build/singlehtml/_static/searchtools.js | searchtools.js |
# ansiformat
ANSI Formatted Text for Terminals.
## AnsiFormat for Custom Formatting
### Colors
```python
from zycelium.ansiformat import AnsiFormat
af = AnsiFormat()
print(af("Red text on default background").red)
print(af("Default text on lime background").on.lime)
print(af("Black text on yellow background").black.on.yellow)
print(af("Black text on cyan background").black.on.color("#00ffff"))
print(af("Red text on yellow background").color("#ff0000").on.color("#ffff00"))
```
### Effects
```python
from zycelium.ansiformat import AnsiFormat
af = AnsiFormat()
print(af("Bold").bold)
print(af("Dim").dim)
print(af("Italic").italic)
print(af("Underline").underline)
print(af("Blink").blink)
print(af("Inverse").inverse)
print(af("Hidden").hidden)
print(af("Strike").strike)
```
### Using Colors and Effects Together
```python
from zycelium.ansiformat import AnsiFormat
af = AnsiFormat()
print(af("Red text on default background, bold").red.bold)
print(af("Same, but with shortcut (b) for bold").red.b)
print(af("Same, but with shortcut (i) for italic").red.i)
print(af("Default text on lime background, with strike-through").on.lime.strike)
print(af("Black text on yellow background, underlined").black.on.yellow.underline)
print(af("Black text on cyan background, blinking").black.on.color("#00ffff").blink)
print(af("Red text on yellow background, inversed").color("#ff0000").on.color("#ffff00").inverse)
```
## AnsiMarkup for Quick Formatting
```python
from zycelium.ansiformat import AnsiMarkup, palette
m = AnsiMarkup(palette=palette.midnight_ablaze)
print(m.debug("debug"))
print(m.info("info"))
print(m.ok("ok"))
print(m.warning("warning"))
print(m.error("error"))
print(m.critical("critical"))
print(m.p("paragraph"))
print(m.aside("aside"))
print(m.note("note"))
print(m.alert("alert"))
print(m.h1("heading one"))
print(m.h2("heading two"))
print(m.h3("heading three"))
print(m.h4("heading four"))
print(m.h5("heading five"))
print(m.h6("heading six"))
with m.indent():
print(m.li("list item 1"))
print(m.li("list item 2"))
with m.indent():
print(m.li("list item 2.1"))
print(m.li("list item 2.2"))
``` | zycelium.ansiformat | /zycelium.ansiformat-0.1.1.tar.gz/zycelium.ansiformat-0.1.1/README.md | README.md |
import logging
import os
import platform
import sys
from math import sqrt
from typing import Optional, Tuple, Union
import sty
from colour import Color as _Color
from colour import COLOR_NAME_TO_RGB
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
IntervalValue = Union[int, float]
RGB255Tuple = Tuple[int, ...]
RGBTuple = Tuple[float, ...]
# Constants
CUBE_INTENSITIES = [
0x80,
0x5F,
0x87,
0xAF,
0xD7,
0xFF,
]
ANSI_8_PALETTE = [
"#000000", # black
"#ff0000", # red
"#00ff00", # lime
"#ffff00", # yellow
"#0000ff", # blue
"#ff00ff", # magenta
"#00ffff", # cyan
"#ffffff", # white
]
ANSI_16_PALETTE = [
"#000000", # black
"#880000", # darkred
"#008800", # darkgreen
"#888800", # orange
"#000088", # darkblue
"#880088", # darkmagenta
"#008888", # darkcyan
"#888888", # silver
"#000000", # gray
"#ff0000", # red
"#00ff00", # lime
"#ffff00", # yellow
"#0000ff", # blue
"#ff00ff", # magenta
"#00ffff", # cyan
"#ffffff", # white
]
def generate_256color(index):
if index < 16:
return ANSI_16_PALETTE[index]
elif index < 232:
q1, _b = divmod(index - 16, 6)
q2, _g = divmod(q1, 6)
_, _r = divmod(q2, 6)
r = CUBE_INTENSITIES[_r]
g = CUBE_INTENSITIES[_g]
b = CUBE_INTENSITIES[_b]
return f"#{hex(r)[2:]}{hex(g)[2:]}{hex(b)[2:]}"
else:
i = 10 * (index - 232) + 8
h = hex(i)[2:]
if len(h) == 1:
h = "0" + h
return f"#{h}{h}{h}"
ANSI_256_PALETTE = [generate_256color(i) for i in range(256)]
def map_interval(
from_start: IntervalValue,
from_end: IntervalValue,
to_start: IntervalValue,
to_end: IntervalValue,
value: IntervalValue,
) -> IntervalValue:
"""
Map numbers from an interval to another.
>>> map_interval(0, 1, 0, 255, 0.5)
127.5
>>> map_interval(0, 255, 0, 1, 128) # doctest: +ELLIPSIS
0.50...
:param from_start: lower bound of source interval.
:param from_end: upper bound of source interval.
:param to_start: lower bound of target interval.
:param to_end: upper bound of target interval.
:param value: source value to map to target interval.
:return: value in target interval.
"""
return (value - from_start) * (to_end - to_start) / (
from_end - from_start
) + to_start
def rgb_to_RGB255(rgb: RGBTuple) -> RGB255Tuple:
"""
Convert from Color.rgb's 0-1 range to ANSI RGB (0-255) range.
>>> rgb_to_RGB255((1, 0.5, 0))
(255, 128, 0)
"""
return tuple([int(round(map_interval(0, 1, 0, 255, c))) for c in rgb])
def color_hex_to_int(color):
return int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16)
def distance(color_1, color_2):
r1, g1, b1 = color_hex_to_int(color_1)
r2, g2, b2 = color_hex_to_int(color_2)
return sqrt((r2 - r1) ** 2 + (g2 - g1) ** 2 + (b2 - b1) ** 2)
def match_index(color, palette):
min_index = -1
min_distance = 1000000000
for i, c in enumerate(palette):
d = distance(color, c)
if d < min_distance:
min_index = i
min_distance = d
return min_index
def match_color(color, palette):
index = match_index(color, palette)
return palette[index]
def terminal_colors(stream=sys.stdout) -> int:
"""
Get number of supported ANSI colors for a stream.
Defaults to sys.stdout.
>>> terminal_colors(sys.stderr)
0
"""
colors = 0
if stream.isatty():
if platform.system() == "Windows":
# colorama supports 8 ANSI colors
# (and dim is same as normal)
colors = 8
elif os.environ.get("NO_COLOR", None) is not None:
colors = 0
elif os.environ.get("COLORTERM", "").lower() in {"truecolor", "24bit"}:
colors = 16_777_216
elif os.environ.get("TERM", "") in {"vt100", "vt200", "vt220"}:
colors = 0
elif os.environ.get("TERM", "") in {"xterm"}:
colors = 8
elif os.environ.get("TERM", "") in {"xterm-color", "rxvt", "rxvt-88color"}:
colors = 16
elif os.environ.get("TERM", "") in {"ansi", "xterm-256color"}:
colors = 256
elif os.environ.get("TERM", "").lower() in {"truecolor", "24bit"}:
colors = 16_777_216
else:
# curses is used to autodetect terminal colors on *nix.
try:
from curses import setupterm, tigetnum
setupterm()
colors = max(0, tigetnum("colors"))
except ImportError:
pass
except:
pass
return colors
class Color(_Color):
@property
def RGB(self):
return rgb_to_RGB255(self.rgb)
@property
def ansi8(self):
return (match_index(self.hex_l, ANSI_8_PALETTE),)
@property
def ansi16(self):
return (match_index(self.hex_l, ANSI_16_PALETTE),)
@property
def ansi256(self):
return (match_index(self.hex_l, ANSI_256_PALETTE),)
def match_palette(self, palette):
return Color(match_color(self.hex_l, palette)).RGB | zycelium.ansiformat | /zycelium.ansiformat-0.1.1.tar.gz/zycelium.ansiformat-0.1.1/src/zycelium/ansiformat/color.py | color.py |
from collections import defaultdict
from typing import Optional, Union
import sty
from .color import Color, terminal_colors
SHORTCUTS = {
"b": "ef_bold",
"bold": "ef_bold",
"dim": "ef_dim",
"blink": "ef_blink",
"hidden": "ef_hidden",
"inverse": "ef_inverse",
"strike": "ef_strike",
"i": "ef_italic",
"italic": "ef_italic",
"u": "ef_underl",
"underline": "ef_underl",
}
class AnsiString(str):
def __new__(
cls, text: str, terminal_colors: int = 0, custom_palette: Optional[list] = None
):
return super().__new__(cls, text)
def __init__(
self, text: str, terminal_colors: int = 0, custom_palette: Optional[list] = None
):
self._text = text
self._terminal_colors = terminal_colors
self._custom_palette = custom_palette
self._buffer = []
self._fg_is_set = False
def __str__(self):
if self._buffer:
text = "".join(self._buffer) + self._text
if self._terminal_colors > 0:
text += sty.rs.all
return text
else:
return self._text
def __call__(self, text: str):
if self._text:
self._buffer.append(self._text)
self._text = text
self._fg_is_set = False
return self
def __getattr__(self, name):
if name.startswith("fg_"):
return self.fg(name[3:])
elif name.startswith("bg_"):
return self.bg(name[3:])
elif name.startswith("rs_"):
return self.rs(name[3:])
elif name.startswith("ef_"):
return self.ef(name[3:])
elif name in SHORTCUTS:
return getattr(self, SHORTCUTS[name])
elif name == "color":
return self.color
else:
if self._fg_is_set:
return self.bg(name)
else:
return self.fg(name)
def color(self, name: str):
if self._fg_is_set:
return self.bg(name)
else:
return self.fg(name)
def render_color(self, color: Color):
if self._terminal_colors <= 0:
return None
if isinstance(color, str):
color = Color(color)
else:
color = Color(*color)
if self._terminal_colors == 8:
return color.ansi8
elif self._terminal_colors == 16:
return color.ansi16
elif self._terminal_colors == 256:
return color.ansi256
else:
if self._custom_palette:
return color.match_palette(self._custom_palette)
else:
return color.RGB
def fg(self, color: Union[str, list, tuple]):
if self._terminal_colors == 0:
return self
color = self.render_color(color)
if color is None:
return self
self._buffer.append(sty.fg(*color))
return self
def bg(self, color: Union[str, list, tuple]):
if self._terminal_colors == 0:
return self
color = self.render_color(color)
if color is None:
return self
self._buffer.append(sty.bg(*color))
return self
def rs(self, name: str):
if self._terminal_colors == 0:
return self
ansi = getattr(sty.rs, name)
self._buffer.append(self._text)
self._text = ""
self._buffer.append(ansi)
return self
def ef(self, name: str):
if self._terminal_colors == 0:
return self
ansi = getattr(sty.ef, name)
self._buffer.append(ansi)
return self
@property
def on(self):
self._fg_is_set = True
return self
@property
def raw(self):
return repr(str(self)) | zycelium.ansiformat | /zycelium.ansiformat-0.1.1.tar.gz/zycelium.ansiformat-0.1.1/src/zycelium/ansiformat/string.py | string.py |
class AnsiPalette(object):
@property
def versitile_16(self):
"""
https://lospec.com/palette-list/versitle-16
"""
return [
"#0c0b0f",
"#af3f52",
"#5f3959",
"#2e2937",
"#905c9a",
"#d78d42",
"#93593b",
"#8d92ac",
"#d47fb0",
"#dddd50",
"#75b546",
"#4a6db2",
"#f3cb94",
"#eff0e9",
"#a7d6da",
"#56a1c8",
]
@property
def mega_drive_blue(self):
"""
https://lospec.com/palette-list/mega-drive-blue
"""
return [
"#2b120d",
"#9f1211",
"#fc1400",
"#fc6a00",
"#fcfc00",
"#000000",
"#330445",
"#9c0c9c",
"#ff099d",
"#00072c",
"#040085",
"#0000ff",
"#0066ff",
"#67cdfc",
"#004849",
"#067826",
"#00c908",
"#52ff00",
"#c6f022",
"#352c2e",
"#754944",
"#ad5950",
"#f6757a",
"#fac59b",
"#693623",
"#b0572b",
"#e98e42",
"#fcb448",
"#fffcff",
"#666688",
"#9892ad",
"#b8b8d1",
"#ddd9e6",
"#fcfcfc",
]
@property
def vinik_24(self):
"""
https://lospec.com/palette-list/vinik24
"""
return [
"#000000",
"#6f6776",
"#9a9a97",
"#c5ccb8",
"#8b5580",
"#c38890",
"#a593a5",
"#666092",
"#9a4f50",
"#c28d75",
"#7ca1c0",
"#416aa3",
"#8d6268",
"#be955c",
"#68aca9",
"#387080",
"#6e6962",
"#93a167",
"#6eaa78",
"#557064",
"#9d9f7f",
"#7e9e99",
"#5d6872",
"#433455",
]
@property
def endesga_32(self):
"""
https://lospec.com/palette-list/endesga-32
"""
return [
"#be4a2f",
"#d77643",
"#ead4aa",
"#e4a672",
"#b86f50",
"#733e39",
"#3e2731",
"#a22633",
"#e43b44",
"#f77622",
"#feae34",
"#fee761",
"#63c74d",
"#3e8948",
"#265c42",
"#193c3e",
"#124e89",
"#0099db",
"#2ce8f5",
"#ffffff",
"#c0cbdc",
"#8b9bb4",
"#5a6988",
"#3a4466",
"#262b44",
"#181425",
"#ff0044",
"#68386c",
"#b55088",
"#f6757a",
"#e8b796",
"#c28569",
]
@property
def pear36(self):
"""
https://lospec.com/palette-list/pear36
"""
return [
"#5e315b",
"#8c3f5d",
"#ba6156",
"#f2a65e",
"#ffe478",
"#cfff70",
"#8fde5d",
"#3ca370",
"#3d6e70",
"#323e4f",
"#322947",
"#473b78",
"#4b5bab",
"#4da6ff",
"#66ffe3",
"#ffffeb",
"#c2c2d1",
"#7e7e8f",
"#606070",
"#43434f",
"#272736",
"#3e2347",
"#57294b",
"#964253",
"#e36956",
"#ffb570",
"#ff9166",
"#eb564b",
"#b0305c",
"#73275c",
"#422445",
"#5a265e",
"#80366b",
"#bd4882",
"#ff6b97",
"#ffb5b5",
]
@property
def pollen8(self):
"""
https://lospec.com/palette-list/pollen8
"""
return [
"#73464c",
"#ab5675",
"#ee6a7c",
"#ffa7a5",
"#ffe07e",
"#ffe7d6",
"#72dcbb",
"#34acba",
]
@property
def bubblegum16(self):
"""
https://lospec.com/palette-list/bubblegum-16
"""
return [
"#16171a",
"#7f0622",
"#d62411",
"#ff8426",
"#ffd100",
"#fafdff",
"#ff80a4",
"#ff2674",
"#94216a",
"#430067",
"#234975",
"#68aed4",
"#bfff3c",
"#10d275",
"#007899",
"#002859",
]
@property
def nostalgia(self):
"""
https://lospec.com/palette-list/nostalgia
"""
return [
"#d0d058",
"#a0a840",
"#708028",
"#405010",
]
@property
def midnight_ablaze(self):
"""
https://lospec.com/palette-list/midnight-ablaze
"""
return [
"#ff8274",
"#d53c6a",
"#7c183c",
"#460e2b",
"#31051e",
"#1f0510",
"#130208",
] | zycelium.ansiformat | /zycelium.ansiformat-0.1.1.tar.gz/zycelium.ansiformat-0.1.1/src/zycelium/ansiformat/palette.py | palette.py |
# zycelium.dataconfig
Create [dataclasses](https://docs.python.org/3/library/dataclasses.html) backed by configuration files.
[](https://github.com/zycelium/dataconfig/actions/workflows/python-package.yml)
- `Development Status :: 3 - Alpha`
## Usage
### Use defaults:
Create a new python script and name it `example.py`
```python
from zycelium.dataconfig import dataconfig
@dataconfig
class Config:
name: str = "World"
config = Config().load()
print(f"Hello, {config.name}!")
```
Create a `config.ini` file in the same directory as `example.py`
```ini
name = "DataConfig"
```
Finally, from the same directory, run `python example.py` ,
your console session should look something like this:
```console
$ python example.py
Hello, DataConfig!
```
The defaults here are:
- Config file name: `"config.ini"`
- Paths to look for the config file (current working directory): `["."]`
### Specify file-name for configuration:
```python
from zycelium.dataconfig import dataconfig
@dataconfig(file="custom_config.ini")
class Config:
name: str = "World"
config = Config().load()
print(f"Hello, {config.name}!")
```
In this example, we specify the file-name on this line:
`@dataconfig(file="custom_config.ini")` with keyword arguments
`file="custom_config.ini"` passed to `@dataconfig()`.
### Specify file-lookup-paths:
```python
from zycelium.dataconfig import dataconfig
@dataconfig(paths=[".", "examples", "/usr/local/etc"])
class Config:
name: str = "World"
config = Config().load()
print(f"Hello, {config.name}!")
```
Here, we pass `paths=[".", "examples"]` to `@dataconfig()`
to specify the paths on filesystem where `dataconfig` should
look for the default `"config.ini"` file. We can also specify
the filename along with the paths. Paths can be relative
to current working directory or absolute.
### Save configuration to file:
```python
from zycelium.dataconfig import dataconfig
FILE_NAME = "newconfig.ini"
@dataconfig(file=FILE_NAME)
class Config:
name: str = "World"
config = Config()
config.save()
print(f"Saved config to file: {FILE_NAME}.")
```
Here, we set the config-file-name while creating the class,
when `save()` is called, it will create the file and save
contents of `Config`.
If we try running the same example again, we will get an error:
`FileExistsError: File newconfig.ini exists, refusing to overwrite.`
This is to protect us from accidentally overwriting an existing config file.
To overwrite it, pass `overwrite=True` to `save()` like this:
`config.save(overwrite=True)`
### Frozen configuration:
```python
from zycelium.dataconfig import dataconfig
@dataconfig(frozen=True)
class Config:
name: str = "World"
config = Config().load(replace=True)
print(f"Hello, {config.name}!")
```
To load a frozen config, we need to pass `replace=True` to `load()`,
if we forget, we get the error:
`dataclasses.FrozenInstanceError: cannot assign to field 'name'`
Once loaded, we cannot overwrite the configuration.
### Use with Click Integration for CLI apps:
Here, dataconfig will generate options for click CLI framework,
one to add defaults to all options with names that exist in
the dataconfig class, overridden by values found in the configuration
file. These options can be overridden by passing values as usual
to the command line.
There's also a new option added to the command: "--conf", which
can be used to specify a different configuration file to load
defaults.
And finally, any changes made in the command line are applied to
the dataconfig object, but not saved to the configuration file
unless the `save()` method is called later.
Frozen dataconfig does not work with commandline integration.
```python
import click
from zycelium.dataconfig import dataconfig
@dataconfig
class Config:
name: str = "World"
config = Config()
# No need to load() config when using click_option()
@click.command()
@click.option("--name")
@config.click_option()
def main(name):
print(f"Hello, {name}!")
print(f"Hello, {config.name}!")
main()
```
### For more examples:
Read through the `tests/` directory, where you will find the
expected usage and how and why dataconfig can fail.
## Install
From [PyPI](https://pypi.org/)
```console
pip install zycelium.dataconfig
```
From source:
```console
git clone https://github.com/zycelium/dataconfig.git
cd dataconfig
pip install -e .
```
| zycelium.dataconfig | /zycelium.dataconfig-0.1.1.tar.gz/zycelium.dataconfig-0.1.1/README.md | README.md |
from dataclasses import asdict, dataclass, FrozenInstanceError
from dataclasses import replace as replace_dataclass
from functools import partial
from pathlib import Path
import click
from configobj import ConfigObj
__version__ = "0.1.1"
DEFAULT_FILE = "config.ini"
def locate(file, paths, auto, file_path=""):
if file_path:
return Path(file_path)
elif auto:
for _path in paths:
path = Path(_path).joinpath(file)
if path.exists():
return path
return Path(".").joinpath(file)
else:
raise FileNotFoundError(f"File {file!r} not found at {paths}.")
def load(obj, path="", unrepr=True, replace=False):
path = locate(
file=obj._file, paths=obj._paths, auto=obj._auto, file_path=path
)
config_obj = ConfigObj(str(path), unrepr=unrepr)
return obj.from_dict(config_obj, replace=replace)
def from_dict(obj, data, replace=False):
if not replace:
for k, v in data.items():
if hasattr(obj, k):
setattr(obj, k, v)
return obj
else:
fields = {k: v for k, v in data.items() if hasattr(obj, k)}
return replace_dataclass(obj, **fields)
def to_dict(obj):
fields = asdict(obj)
return fields
def save(obj, path="", unrepr=True, overwrite=False):
path = locate(
file=obj._file, paths=obj._paths, auto=obj._auto, file_path=path
)
config_obj = ConfigObj(unrepr=unrepr)
config_obj.update(asdict(obj))
if path.exists() and not overwrite:
raise FileExistsError(f"File {path} exists, refusing to overwrite.")
with path.open("wb") as outfile:
config_obj.write(outfile)
return obj
def click_option(obj, *param_decls, **attrs):
param_decls = param_decls or ("--conf",)
def wrap(func):
attrs.setdefault("is_eager", True)
attrs.setdefault("help", "Read configuration from FILE")
attrs.setdefault("expose_value", False)
path = attrs.pop("path", obj._file)
attrs["callback"] = partial(_file_option_callback, obj, path=path)
config_update_option = click.option(
"--config-update",
is_eager=False,
expose_value=False,
hidden=True,
callback=partial(_config_update_callback, obj),
)
return config_update_option(click.option(*param_decls, **attrs)(func))
return wrap
def _file_option_callback(obj, ctx, option, value, path):
ctx.default_map = ctx.default_map or {}
path = value or path
obj.load(path=path)
options = obj.to_dict()
ctx.default_map.update(options)
def _config_update_callback(obj, ctx, option, value):
data = {k: v for k, v in ctx.params.items() if v is not None}
obj.from_dict(data)
def dataconfig(
_cls=None,
*,
file=DEFAULT_FILE,
paths=None,
auto=True,
init=True,
repr=True,
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
):
def wrap(cls):
setattr(cls, "_file", file)
setattr(cls, "_paths", paths or ["."])
setattr(cls, "_auto", auto)
setattr(cls, "load", load)
setattr(cls, "save", save)
setattr(cls, "from_dict", from_dict)
setattr(cls, "to_dict", to_dict)
setattr(cls, "click_option", click_option)
wrapped_cls = dataclass(
cls,
init=init,
repr=repr,
eq=eq,
order=order,
unsafe_hash=unsafe_hash,
frozen=frozen,
)
return wrapped_cls
if _cls is None:
return wrap
return wrap(_cls) | zycelium.dataconfig | /zycelium.dataconfig-0.1.1.tar.gz/zycelium.dataconfig-0.1.1/src/zycelium/dataconfig/__init__.py | __init__.py |
# pamda
This is a repo try to copy <https://github.com/ramda/ramda> in python.
## install
For whom wants to use this package.
```bash
> pip install zydmayday-pamda
> pip install zydmayday-pamda -U # get the latest
```
## Usage
```python
>>> from pamda import curry
>>> def sum(a, b, c): return a + b + c
>>> curry(sum)(1)(2, 3)
6
```
```python
>>> import pamda as R # similar to ramda syntax
>>> def sum(a, b, c): return a + b + c
>>> R.curry(sum)(1)(2, 3)
6
```
## Contribute
For whom wants to contribute to this repo.
```bash
# see: https://pre-commit.com/ for more details
$ pre-commit install # install hooks
```
Check the latest branch to be released in [here](https://github.com/zydmayday/pamda/branches).
Checkout new branch from that release branch and create PR.
## CheckList
Functions supported now.
- [x] __
- [x] add
```python
# different from ramda, ramda treat null as 0
>>> R.add(None, None) # float('nan)
```
- [ ] addIndex
- [x] adjust
- [x] all
- Transducer part is not fully tested.
- [ ] allPass
- [x] always
- [x] And (`and` is a keyword in python)
- [ ] andThen
- [x] any
- [ ] anyPass
- [ ] ap
- [ ] aperture
- [x] append
- [ ] apply
- [ ] applySpec
- [ ] applyTo
- [ ] ascend
- [ ] assoc
- [ ] assocPath
- [ ] binary
- [ ] bind
- [ ] both
- [ ] call
- [ ] chain
- [ ] clamp
- [ ] clone
- [ ] collectBy
- [x] comparator
- [ ] complement
- [x] compose
- [ ] composeWith
- [x] concat
- [ ] cond
- [ ] construct
- [ ] constructN
- [ ] converge
- [ ] count
- [x] countBy
- [x] curry
- [x] curryN
- [ ] dec
- [ ] defaultTo
- [ ] descend
- [x] difference
- [x] differenceWith
- [ ] dissoc
- [ ] dissocPath
- [x] divide
- [ ] drop
- [ ] dropLast
- [ ] dropLastWhile
- [ ] dropRepeats
- [ ] dropRepeatsWith
- [ ] dropWhile
- [ ] either
- [ ] empty
- [ ] endsWith
- [ ] eqBy
- [ ] eqProps
- [x] equals
- [ ] evolve
- [ ] F
- [x] filter
- [x] find
- [ ] findIndex
- [ ] findLast
- [ ] findLastIndex
- [ ] flatten
- [ ] flip
- [ ] forEach
- [ ] forEachObjIndexed
- [ ] fromPairs
- [ ] groupBy
- [ ] groupWith
- [ ] gt
- [ ] gte
- [ ] has
- [ ] hasIn
- [ ] hasPath
- [ ] head
- [ ] identical
- [x] identity
- [ ] ifElse
- [ ] inc
- [ ] includes
- [ ] indexBy
- [ ] indexOf
- [ ] init
- [ ] innerJoin
- [ ] insert
- [ ] insertAll
- [ ] intersection
- [ ] intersperse
- [x] into
- [ ] invert
- [ ] invertObj
- [ ] invoker
- [ ] is
- [ ] isEmpty
- [ ] isNil
- [ ] join
- [ ] juxt
- [x] keys
```python
# When using R.keys(obj) and obj is a class instance, we use obj.__dict__ as keys.
class A:
c = 'not included'
def __init__(self):
self.a = 1
self.b = 2
a = A()
R.keys(a) # ['a', 'b']
```
- [ ] keysIn
- [ ] last
- [ ] lastIndexOf
- [ ] length
- [ ] lens
- [ ] lensIndex
- [ ] lensPath
- [ ] lensProp
- [ ] lift
- [ ] liftN
- [ ] lt
- [ ] lte
- [x] Map (`map` is a keyword in python)
- [ ] mapAccum
- [ ] mapAccumRight
- [ ] mapObjIndexed
- [ ] match
- [ ] mathMod
- [ ] max
- [ ] maxBy
- [ ] mean
- [ ] median
- [ ] memoizeWith
- [ ] mergeAll
- [ ] mergeDeepLeft
- [ ] mergeDeepRight
- [ ] mergeDeepWith
- [ ] mergeDeepWithKey
- [ ] mergeLeft
- [ ] mergeRight
- [ ] mergeWith
- [ ] mergeWithKey
- [ ] min
- [ ] minBy
- [ ] modify
- [ ] modifyPath
- [ ] modulo
- [ ] move
- [x] multiply
- [ ] nAry
- [ ] negate
- [ ] none
- [ ] not
- [x] nth
- [ ] nthArg
- [ ] o
- [x] objOf
- [ ] of
- [ ] omit
- [ ] on
- [ ] once
- [ ] or
- [ ] otherwise
- [ ] over
- [ ] pair
- [ ] partial
- [ ] partialObject
- [ ] partialRight
- [ ] partition
- [ ] path
- [ ] pathEq
- [ ] pathOr
- [ ] paths
- [ ] pathSatisfies
- [ ] pick
- [ ] pickAll
- [ ] pickBy
- [x] pipe
- [ ] pipeWith
- [ ] pluck
- [ ] prepend
- [ ] product
- [ ] project
- [ ] promap
- [x] prop
- [ ] propEq
- [ ] propIs
- [ ] propOr
- [ ] props
- [ ] propSatisfies
- [ ] range
- [x] reduce
- [x] reduceBy
- [x] reduced
- [ ] reduceRight
- [ ] reduceWhile
- [x] reject
- [ ] remove
- [ ] repeat
- [ ] replace
- [x] reverse
- [ ] scan
- [ ] sequence
- [ ] set
- [x] slice
- [x] sort
- [ ] sortBy
- [ ] sortWith
- [ ] split
- [ ] splitAt
- [ ] splitEvery
- [ ] splitWhen
- [ ] splitWhenever
- [ ] startsWith
- [ ] subtract
- [ ] sum
- [ ] symmetricDifference
- [ ] symmetricDifferenceWith
- [ ] T
- [x] tail
- [ ] take
- [ ] takeLast
- [ ] takeLastWhile
- [ ] takeWhile
- [ ] tap
- [ ] test
- [ ] thunkify
- [ ] times
- [ ] toLower
- [ ] toPairs
- [ ] toPairsIn
- [ ] toString
- [ ] toUpper
- [ ] transduce
- [ ] transpose
- [ ] traverse
- [ ] trim
- [ ] tryCatch
- [ ] type
- [ ] unapply
- [ ] unary
- [ ] uncurryN
- [ ] unfold
- [ ] union
- [ ] unionWith
- [ ] uniq
- [ ] uniqBy
- [ ] uniqWith
- [ ] unless
- [ ] unnest
- [ ] until
- [ ] unwind
- [ ] update
- [ ] useWith
- [ ] values
- [ ] valuesIn
- [ ] view
- [ ] when
- [ ] where
- [ ] whereAny
- [ ] whereEq
- [ ] without
- [ ] xor
- [ ] xprod
- [ ] zip
- [ ] zipObj
- [ ] zipWith
| zydmayday-pamda | /zydmayday-pamda-0.0.9.tar.gz/zydmayday-pamda-0.0.9/README.md | README.md |
## 安装
> pip install zyf_timer
>
> 或者
>
> pip install zyf_timer -i https://pypi.python.org/simple
## 使用
### 函数计时
#### 示例1:timeit
```python
from zyf import timeit
@timeit
def sleep(seconds: int):
time.sleep(seconds)
```
运行
```bash
>> sleep(1)
Function sleep -> takes 1.001 seconds
```
#### 示例2:repeat_timeit
```python
from zyf import repeat_timeit
@repeat_timeit(number=5)
def list_insert_time_test():
l = []
for i in range(10000):
l.insert(0, i)
@repeat_timeit(repeat=3, number=5)
def list_append_time_test():
l = []
for i in range(1000000):
l.append(i)
return l
@repeat_timeit(number=5, print_detail=True)
def list_gen_time_test():
l = [i for i in range(1000000)]
return l
@repeat_timeit(repeat=3, number=5, print_detail=True)
def list_extend_time_test():
l = []
for i in range(1000000):
l.extend([i])
@repeat_timeit(repeat=3, number=5, print_detail=True, print_table=True)
def list_range_time_test():
l = list(range(1000000))
```
运行
```bash
>> list_insert_time_test()
Function list_insert_time_test -> 5 function calls: average takes 0.097 seconds
>> list_append_time_test()
Function list_append_time_test -> 3 trials with 5 function calls per trial: average trial 3.269 seconds. average function call 0.654 seconds
>> list_gen_time_test()
Time Spend of 5 function calls:
Function -> list_gen_time_test: total 1.550 seconds, average 0.310 seconds
Average: 0.310 seconds
>> list_extend_time_test()
Time Spend of 3 trials with 5 function calls per trial:
Function -> list_extend_time_test:
best: 3.289 seconds, worst: 3.626 seconds, average: 3.442 seconds
Average trial: 3.442 seconds. Average function call: 0.688 seconds
>> list_range_time_test()
Time Spend of 3 trials with 5 function calls per trial:
+----------------------+---------------+---------------+---------------+-----------------------+
| Function | Best trial | Worst trial | Average trial | Average function call |
+----------------------+---------------+---------------+---------------+-----------------------+
| list_range_time_test | 0.640 seconds | 0.714 seconds | 0.677 seconds | 0.135 seconds |
+----------------------+---------------+---------------+---------------+-----------------------+
```
示例3:构建列表效率对比
```python
from zyf import repeat_timeit
@repeat_timeit(number=3)
def list_insert_time_test():
l = []
for i in range(100000):
l.insert(0, i)
@repeat_timeit(number=5)
def list_extend_time_test():
l = []
for i in range(100000):
l.extend([i])
@repeat_timeit(number=5)
def list_append_time_test():
l = []
for i in range(100000):
l.append(i)
return l
@repeat_timeit(number=5)
def list_gen_time_test():
l = [i for i in range(100000)]
return l
@repeat_timeit(number=5)
def list_range_time_test():
l = list(range(100000))
if __name__ == '__main__':
list_range_time_test()
list_gen_time_test()
list_append_time_test()
list_extend_time_test()
list_insert_time_test()
```
运行结果
```bash
Function list_range_time_test -> 5 function calls: average takes 0.012 seconds
Function list_gen_time_test -> 5 function calls: average takes 0.017 seconds
Function list_append_time_test -> 5 function calls: average takes 0.038 seconds
Function list_extend_time_test -> 5 function calls: average takes 0.067 seconds
Function list_insert_time_test -> 3 function calls: average takes 13.747 seconds
```
| zyf-timer | /zyf_timer-1.8.tar.gz/zyf_timer-1.8/README.md | README.md |
import time
from functools import wraps
from prettytable import PrettyTable
def repeat_timeit(repeat: int = 0, number: int = 10, digit: int = 3, print_detail: bool = False,
print_table: bool = False):
def wrap(func):
"""
装饰器: 判断函数执行时间
:param func:
:return:
"""
@wraps(func)
def inner(*args, **kwargs):
func_name, ret = func.__name__, None
if repeat > 0:
r = []
for _ in range(repeat):
end, ret = _timeit(func, number, *args, **kwargs)
r.append(end)
min_time, max_time, avg_time = min(r), max(r), sum(r) / repeat
best_trial_time_string = build_time_print_string(min_time, digit=digit)
worst_trial_time_string = build_time_print_string(max_time, digit=digit)
avg_trial_time_string = build_time_print_string(avg_time, digit=digit)
avg_func_call_time_string = build_time_print_string(avg_time / number, digit)
if print_table:
if print_detail:
print(f'Time Spend of {repeat} trials with {number} function calls per trial:')
table = PrettyTable(
['Function', 'Best trial', 'Worst trial', 'Average trial', 'Average function call'])
table.add_row(
[func_name, best_trial_time_string, worst_trial_time_string, avg_trial_time_string,
avg_func_call_time_string])
else:
table = PrettyTable(['Function', 'Average trial', 'Average function call'])
table.add_row([func_name, avg_trial_time_string, avg_func_call_time_string])
print(table)
else:
if print_detail:
print(
f'Time Spend of {repeat} trials with {number} function calls per trial:\n\tFunction -> {func_name}: \n\t\tbest: {best_trial_time_string}, worst: {worst_trial_time_string}, average: {avg_trial_time_string}')
print(
f'Average trial: {avg_trial_time_string}. Average function call: {avg_func_call_time_string}')
else:
print(
f'Function {func_name} -> {repeat} trials with {number} function calls per trial: average trial {avg_trial_time_string}, average function call {avg_func_call_time_string}')
else:
end, ret = _timeit(func, number, *args, **kwargs)
total_time_string = build_time_print_string(end, digit)
avg_time_string = build_time_print_string(end / number, digit)
if print_table:
if print_detail:
print(f'Time Spend of {number} function calls:')
table = PrettyTable(['Function', 'Total cost', 'Average cost'])
table.add_row([func_name, total_time_string, avg_time_string])
else:
table = PrettyTable(['Function', 'Average cost'])
table.add_row([func_name, avg_time_string])
print(table)
else:
if print_detail:
print(
f'Time Spend of {number} function calls:\n\tFunction -> {func_name}: total {total_time_string}, average {avg_time_string}')
print(f'Average: {avg_time_string}')
else:
print(f'Function {func_name} -> {number} function calls: average takes {avg_time_string}')
return ret
return inner
return wrap
def _timeit(func, number, *args, **kwargs):
start = time.time()
num = 1
while num < number:
func(*args, **kwargs)
num += 1
ret = func(*args, **kwargs)
end = time.time() - start
return end, ret
def build_time_print_string(time_seconds: float, digit: int):
if time_seconds > 60:
minutes, seconds = divmod(time_seconds, 60)
return f'{int(minutes)} minutes {seconds:.{digit}f} seconds'
return f'{time_seconds:.{digit}f} seconds'
def timeit(func):
"""
装饰器: 判断函数执行时间
:param func:
:return:
"""
@wraps(func)
def inner(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
end = time.time() - start
time_string = build_time_print_string(end, digit=3)
print(f'Function {func.__name__} -> takes {time_string}')
return ret
return inner | zyf-timer | /zyf_timer-1.8.tar.gz/zyf_timer-1.8/zyf_timer/timer.py | timer.py |
## 安装
> pip install zyf
>
> 或者
>
> pip install zyf -i https://pypi.python.org/simple
## 使用
### 函数计时
#### 示例1:timeit
```python
from zyf.timer import timeit
@timeit
def sleep(seconds: int):
time.sleep(seconds)
sleep()
```
运行
```
>> sleep(1)
Function sleep -> takes 1.001 seconds
```
#### 示例2:Timeit
```python
from zyf.timer import timeit, Timeit
@Timeit(prefix='跑步')
def run():
time.sleep(3)
run()
```
运行
```
跑步 -> takes 3.000 seconds
```
#### 示例3:repeat_timeit
```python
from zyf.timer import repeat_timeit
@repeat_timeit(number=5)
def list_insert_time_test():
l = []
for i in range(10000):
l.insert(0, i)
@repeat_timeit(repeat=3, number=5)
def list_append_time_test():
l = []
for i in range(1000000):
l.append(i)
return l
@repeat_timeit(number=5, print_detail=True)
def list_gen_time_test():
l = [i for i in range(1000000)]
return l
@repeat_timeit(repeat=3, number=5, print_detail=True)
def list_extend_time_test():
l = []
for i in range(1000000):
l.extend([i])
@repeat_timeit(repeat=3, number=5, print_detail=True, print_table=True)
def list_range_time_test():
l = list(range(1000000))
```
运行
```python
>> list_insert_time_test()
Function list_insert_time_test -> 5 function calls: average takes 0.097 seconds
>> list_append_time_test()
Function list_append_time_test -> 3 trials with 5 function calls per trial: average trial 3.269 seconds. average function call 0.654 seconds
>> list_gen_time_test()
Time Spend of 5 function calls:
Function -> list_gen_time_test: total 1.550 seconds, average 0.310 seconds
Average: 0.310 seconds
>> list_extend_time_test()
Time Spend of 3 trials with 5 function calls per trial:
Function -> list_extend_time_test:
best: 3.289 seconds, worst: 3.626 seconds, average: 3.442 seconds
Average trial: 3.442 seconds. Average function call: 0.688 seconds
>> list_range_time_test()
Time Spend of 3 trials with 5 function calls per trial:
+----------------------+---------------+---------------+---------------+-----------------------+
| Function | Best trial | Worst trial | Average trial | Average function call |
+----------------------+---------------+---------------+---------------+-----------------------+
| list_range_time_test | 0.640 seconds | 0.714 seconds | 0.677 seconds | 0.135 seconds |
+----------------------+---------------+---------------+---------------+-----------------------+
```
#### 示例4:构建列表效率对比
```python
from zyf.timer import repeat_timeit
@repeat_timeit(number=3)
def list_insert_time_test():
l = []
for i in range(100000):
l.insert(0, i)
@repeat_timeit(number=5)
def list_extend_time_test():
l = []
for i in range(100000):
l.extend([i])
@repeat_timeit(number=5)
def list_append_time_test():
l = []
for i in range(100000):
l.append(i)
return l
@repeat_timeit(number=5)
def list_gen_time_test():
l = [i for i in range(100000)]
return l
@repeat_timeit(number=5)
def list_range_time_test():
l = list(range(100000))
if __name__ == '__main__':
list_range_time_test()
list_gen_time_test()
list_append_time_test()
list_extend_time_test()
list_insert_time_test()
```
运行结果
```bash
Function list_range_time_test -> 5 function calls: average takes 0.012 seconds
Function list_gen_time_test -> 5 function calls: average takes 0.017 seconds
Function list_append_time_test -> 5 function calls: average takes 0.038 seconds
Function list_extend_time_test -> 5 function calls: average takes 0.067 seconds
Function list_insert_time_test -> 3 function calls: average takes 13.747 seconds
```
### 请求头
#### user_agent
##### 功能说明
> 支持获取各类请求头,包含移动端和PC端浏览器,可以指定获取某类请求头,也可以随机获取。
##### 使用示例
```python
from zyf.user_agent import UserAgent
ua = UserAgent()
print(ua.random)
print(ua.chrome)
print(ua.firefox)
print(ua.opera)
print(ua.uc)
print(ua.mobile)
```
输出
```bash
Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3
Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6
Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10
Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50
Openwave/ UCWEB7.0.2.37/28/999
Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5
```
### 文件操作
#### scan_directory_contents
##### 功能说明
> 扫描指定文件夹内所有文件,输出文件路径
##### 使用示例
```python
from zyf.file import scan_directory_contents
for file in scan_directory_contents('D:/python/data'):
print(file)
# 可以指定后缀
for file in scan_directory_contents('D:/python/data', suffix='.csv'):
print(file)
```
#### count_word_freq
##### 功能说明
> 对`文献.xlsx`中关键词列的进行`词频统计`,可指定单词分隔符,默认为`; ',也可指定输出词频统计列名,默认为freq和word。
##### 使用示例
```python
from zyf.file import count_word_freq
count_word_freq('文献.xlsx', col_name='关键词', sep='; ', to_col_freq='频数', to_col_word='单词', to_file='文献_关键词_统计.xlsx')
```
### 颜色相关
#### color
##### 功能说明
> 打印功能扩展,添加颜色输出
##### 使用示例
```python
from zyf.color import print_color, Foreground
print_color("这是什么颜色", foreground=Foreground.Red)
print_color("这是什么颜色", foreground=Foreground.White)
print_color("这是什么颜色", foreground=Foreground.Green)
print_color("这是什么颜色", foreground=Foreground.Black)
print_color("这是什么颜色", foreground=Foreground.Blue)
print_color("这是什么颜色", foreground=Foreground.Cyan)
print_color("这是什么颜色", foreground=Foreground.Purplish_red)
print_color("这是什么颜色", foreground=Foreground.Yellow)
```
### 数据下载
#### 政策数据下载
> 根据关键词对政策数据库进行搜索,并将搜索到的政策数据进行下载及字段解析,存储到文件中。
##### 使用说明
```
国务院政策文件库
1. 设置settings中的请求参数 -> gov_policy_params
2. 运行代码
北大法宝
1. 网页登陆之后将cookie复制,修改settings中的cookie信息
2. 根据你的检索词和检索时间修改settings中的QueryBased64Request和Year
3. 运行代码
律商网
1. 网页登陆之后将cookie复制,修改settings中的cookie信息
2. 根据你的检索信息修改settings中的keyword/start/end/page_size
3. 运行代码
```
**注:北大法宝和律商网需要有会员账号才能全部完整政策信息, 所以需要设置cookie信息。**
##### 使用示例
- 国务院政策数据下载
```python
def gov_policy_demo():
from zyf.crawler.policy.goverment_policy import GovPolicyCrawler
spider = GovPolicyCrawler()
spider.run(keyword='疫情', issue_depart=['国务院', '国务院部门', '国务院公报'], page_size=50)
```
- 北大法宝政策数据下载
```python
def pkulaw_policy_demo():
from zyf.crawler.policy.pkulaw_policy import PkulawdCrawler
pkulaw_request_params = {
'cookie': None,
'query_base64_request': {
'疫情': 'eyJGaWVsZE5hbWUiOm51bGwsIlZhbHVlIjpudWxsLCJSdWxlVHlwZSI6NCwiTWFueVZhbHVlU3BsaXQiOiJcdTAwMDAiLCJXb3JkTWF0Y2hUeXBlIjowLCJXb3JkUmF0ZSI6MCwiQ29tYmluYXRpb25UeXBlIjoyLCJDaGlsZE5vZGVzIjpbeyJGaWVsZE5hbWUiOiJLZXl3b3JkU2VhcmNoVHJlZSIsIlZhbHVlIjpudWxsLCJSdWxlVHlwZSI6NCwiTWFueVZhbHVlU3BsaXQiOiJcdTAwMDAiLCJXb3JkTWF0Y2hUeXBlIjowLCJXb3JkUmF0ZSI6MCwiQ29tYmluYXRpb25UeXBlIjoxLCJDaGlsZE5vZGVzIjpbeyJGaWVsZE5hbWUiOiJDaGVja0Z1bGxUZXh0IiwiVmFsdWUiOiLnlqvmg4UiLCJSdWxlVHlwZSI6NCwiTWFueVZhbHVlU3BsaXQiOiJcdTAwMDAiLCJXb3JkTWF0Y2hUeXBlIjoxLCJXb3JkUmF0ZSI6MCwiQ29tYmluYXRpb25UeXBlIjoyLCJDaGlsZE5vZGVzIjpbXSwiQW5hbHl6ZXIiOiJpa19zbWFydCIsIkJvb3N0IjoiMC4xIiwiTWluaW11bV9zaG91bGRfbWF0Y2giOm51bGx9LHsiRmllbGROYW1lIjoiU291cmNlQ2hlY2tGdWxsVGV4dCIsIlZhbHVlIjoi55ar5oOFIiwiUnVsZVR5cGUiOjQsIk1hbnlWYWx1ZVNwbGl0IjoiXHUwMDAwIiwiV29yZE1hdGNoVHlwZSI6MSwiV29yZFJhdGUiOjAsIkNvbWJpbmF0aW9uVHlwZSI6MiwiQ2hpbGROb2RlcyI6W10sIkFuYWx5emVyIjpudWxsLCJCb29zdCI6bnVsbCwiTWluaW11bV9zaG91bGRfbWF0Y2giOm51bGx9XSwiQW5hbHl6ZXIiOm51bGwsIkJvb3N0IjpudWxsLCJNaW5pbXVtX3Nob3VsZF9tYXRjaCI6bnVsbH1dLCJBbmFseXplciI6bnVsbCwiQm9vc3QiOm51bGwsIk1pbmltdW1fc2hvdWxkX21hdGNoIjpudWxsfQ==',
},
'year': [2003, 2004],
'page_size': 100,
}
crawler = PkulawdCrawler(**pkulaw_request_params)
crawler.run()
```
- 律商网政策数据下载
```python
def lexis_policy_demo():
from zyf.crawler.policy.lexis_policy import LexisNexisCrawler
lexis_request_params = {
'cookie': None,
'keywords': '疫情',
'start': '2020-01-01',
'end': '2020-12-31',
'page_size': 100,
}
crawler = LexisNexisCrawler(**lexis_request_params)
crawler.run()
```
- 综合示例
配置文件:settings.py
```python
# 国务院
gov_policy_params = {
'keyword': '医疗联合体',
'min_time': None,
'max_time': None,
'issue_depart': ['国务院', '国务院部门', '国务院公报'],
'searchfield': 'title:content:summary',
'sort': 'pubtime',
'page_size': 50,
'to_file': None
}
# 北大法宝
pkulaw_request_params = {
'cookie': None,
'query_base64_request': {
'疫情': 'eyJGaWVsZE5hbWUiOm51bGwsIlZhbHVlIjpudWxsLCJSdWxlVHlwZSI6NCwiTWFueVZhbHVlU3BsaXQiOiJcdTAwMDAiLCJXb3JkTWF0Y2hUeXBlIjowLCJXb3JkUmF0ZSI6MCwiQ29tYmluYXRpb25UeXBlIjoyLCJDaGlsZE5vZGVzIjpbeyJGaWVsZE5hbWUiOiJLZXl3b3JkU2VhcmNoVHJlZSIsIlZhbHVlIjpudWxsLCJSdWxlVHlwZSI6NCwiTWFueVZhbHVlU3BsaXQiOiJcdTAwMDAiLCJXb3JkTWF0Y2hUeXBlIjowLCJXb3JkUmF0ZSI6MCwiQ29tYmluYXRpb25UeXBlIjoxLCJDaGlsZE5vZGVzIjpbeyJGaWVsZE5hbWUiOiJDaGVja0Z1bGxUZXh0IiwiVmFsdWUiOiLnlqvmg4UiLCJSdWxlVHlwZSI6NCwiTWFueVZhbHVlU3BsaXQiOiJcdTAwMDAiLCJXb3JkTWF0Y2hUeXBlIjoxLCJXb3JkUmF0ZSI6MCwiQ29tYmluYXRpb25UeXBlIjoyLCJDaGlsZE5vZGVzIjpbXSwiQW5hbHl6ZXIiOiJpa19zbWFydCIsIkJvb3N0IjoiMC4xIiwiTWluaW11bV9zaG91bGRfbWF0Y2giOm51bGx9LHsiRmllbGROYW1lIjoiU291cmNlQ2hlY2tGdWxsVGV4dCIsIlZhbHVlIjoi55ar5oOFIiwiUnVsZVR5cGUiOjQsIk1hbnlWYWx1ZVNwbGl0IjoiXHUwMDAwIiwiV29yZE1hdGNoVHlwZSI6MSwiV29yZFJhdGUiOjAsIkNvbWJpbmF0aW9uVHlwZSI6MiwiQ2hpbGROb2RlcyI6W10sIkFuYWx5emVyIjpudWxsLCJCb29zdCI6bnVsbCwiTWluaW11bV9zaG91bGRfbWF0Y2giOm51bGx9XSwiQW5hbHl6ZXIiOm51bGwsIkJvb3N0IjpudWxsLCJNaW5pbXVtX3Nob3VsZF9tYXRjaCI6bnVsbH1dLCJBbmFseXplciI6bnVsbCwiQm9vc3QiOm51bGwsIk1pbmltdW1fc2hvdWxkX21hdGNoIjpudWxsfQ==',
},
'year': [2003, 2004],
'page_size': 100,
}
# 律商网
lexis_request_params = {
'cookie': None,
'keywords': '疫情',
'start': '2020-01-01',
'end': '2020-12-31',
'page_size': 100,
}
```
使用示例
```python
import settings
def policy_spider():
print('请选择政策来源: 1. 国务院政策文件库 2.北大法宝 3.律商网 4. 新冠疫情数据(卫健委)')
choice = input('请选择政策来源(数字)>> ')
if choice == '1':
from zyf.crawler.policy.goverment_policy import GovPolicyCrawler
crawler = GovPolicyCrawler()
crawler.run(**settings.gov_policy_params)
elif choice == '2':
from zyf.crawler.policy.pkulaw_policy import PkulawdCrawler
crawler = PkulawdCrawler(**settings.pkulaw_request_params)
crawler.run()
elif choice == '3':
from zyf.crawler.policy.lexis_policy import LexisNexisCrawler
crawler = LexisNexisCrawler(**settings.lexis_request_params)
crawler.run()
else:
raise Exception('输入的政策来源不正确')
```
#### 图片下载
##### 使用说明

##### 使用示例
```python
from zyf.color import print_color
def start_spider():
print_color('高清壁纸:1. NET牛人(https://ss.netnr.com/) 2. 彼岸图网(https://pic.netbian.com/)')
choice = input('请选择壁纸来源 >> ')
if choice == '1':
from zyf.crawler.image.netnr import NetnrCrawler
crawler = NetnrCrawler(dir_path='images/netnr')
elif choice == '2':
from zyf.crawler.image.netbian import NetbianCrawler
crawler = NetbianCrawler(dir_path='images/netbian')
else:
raise Exception('输入的壁纸来源不正确')
crawler.run()
if __name__ == '__main__':
start_spider()
```
### 数据库连接
#### DBPoolHelper
##### 使用说明
> 提供sqlite3、mysql、postgresql、sqkserver连接池,方便操作,该功能使用依赖于dbutils,需要提前安装,另外,需要安装对应数据库的第三方依赖
>
> postgressql -> psycopg2
>
> mysql -> pymysql
>
> sqlite -> sqlite3
##### 使用示例
```python
from zyf.db import DBPoolHelper
db1 = DBPoolHelper(db_type='postgressql', dbname='student', user='postgres', password='0000', host='localhost', port=5432)
db2 = DBPoolHelper(db_type='mysql', dbname='student', user='root', password='0000', host='localhost', port=3306)
db3 = DBPoolHelper(db_type='sqlite3', dbname='student.db')
```
#### MongoHelper
##### 使用说明
> 为mongodb操作提供便利,需要安装pymongo
##### 使用示例
```python
from zyf.db import MongoHelper
mongo = MongoHelper(mongo_db='flask', mongo_uri='localhost')
data = mongo.read('label')
print(data.head())
condition = {"药品ID": 509881}
data = mongo.dbFind('label', condition)
print(data)
for i in data:
print(i)
for item in mongo.findAll():
print(item)
``` | zyf | /zyf-1.2.tar.gz/zyf-1.2/README.md | README.md |
import functools
import inspect
import os
import pytest
from testit_adapter_pytest import utils as testit
from jira import JIRA, JIRAError
from threading import Lock
__all__ = [
"check",
"equal",
"not_equal",
"is_true",
"is_false",
"is_none",
"is_not_none",
"is_in",
"is_not_in",
"greater",
"greater_equal",
"less",
"less_equal",
"check_func",
"check_dict_values",
"check_status_code"
]
_stop_on_fail = False
_failures = []
class Singleton(type):
""" Класс, реализующий механизм создания одного экземпляра объекта. """
_instances = {}
_lock = Lock()
def __call__(cls, *args, **kwargs):
        # take the lock so that concurrent calls cannot create several instances
with cls._lock:
if cls not in cls._instances:
                # create the instance if it has not been created yet
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
else:
                # if the instance already exists, re-initialize it with the new arguments
cls._instances[cls].__init__(*args, **kwargs)
return cls._instances[cls]
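# Usage sketch (illustrative only, not executed here): any class that declares
# Singleton as its metaclass always hands back the same shared instance, e.g.
#   class Config(metaclass=Singleton): ...
#   assert Config() is Config()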
class JiraConnection(metaclass=Singleton):
def __init__(self):
try:
self.client = JIRA(
server=os.environ.get('JIRA_SERVER'),
token_auth=os.environ.get('AUTH_JIRA_TOKEN'))
self.client.myself()
except Exception:
pytest.fail(
"Ошибка авторизации в Jira! Тест падает по дефекту, мы уже работаем над его исправлением!",
pytrace=False)
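# JiraConnection reads the JIRA_SERVER and AUTH_JIRA_TOKEN environment variables.
# Because of the Singleton metaclass, repeated JiraConnection() calls return the same
# instance (re-initialized on each call), e.g.
#   jira_client = JiraConnection().client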
def clear_failures():
global _failures
_failures = []
def get_failures():
return _failures
def set_stop_on_fail(stop_on_fail):
global _stop_on_fail
_stop_on_fail = stop_on_fail
class CheckContextManager(object):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
__tracebackhide__ = True
if exc_type is not None and issubclass(exc_type, AssertionError):
if _stop_on_fail:
return
else:
log_failure(exc_val)
return True
check = CheckContextManager()
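# Soft-assertion usage sketch: an AssertionError raised inside a `with check:` block
# is recorded via log_failure() and suppressed unless stop-on-fail mode is active,
# so the test continues past the failed assertion, e.g.
#   with check:
#       assert response.status_code == 200   # `response` is hypothetical, for illustration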
def check_func(func):
@functools.wraps(func)
def wrapper(*args, **kwds):
__tracebackhide__ = True
try:
func(*args, **kwds)
return True
except AssertionError as e:
if _stop_on_fail:
if kwds.get('bug_link'):
check_issue(kwds.get('bug_link'), e)
log_failure(e)
raise e
if kwds.get('bug_link'):
check_issue(kwds.get('bug_link'), e)
else:
log_failure(e)
return False
return wrapper
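# check_func turns a checker into a soft assertion: a failed assert is logged (and,
# when bug_link is passed, routed through check_issue) and the wrapper reports the
# outcome as True/False instead of raising, unless stop-on-fail mode is active, e.g.
#   passed = equal(actual_value=2 + 2, expected_value=4)   # passed is True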
@check_func
def equal(
actual_value: any,
expected_value: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
    Check that two values are equal. \n
    :param actual_value: the actual value.
    :param expected_value: the expected value.
    :param msg: error message. By default a message of the following form is used:
                'Error! The actual value must be equal to the expected one.\n
                Actual value = '{actual_value}',\n
                Expected value = '{expected_value}'.'
    :param bug_link: link to a bug report. Defaults to None.
    :param stop_on_fail: whether to fail the test immediately after the first failed check.
        Defaults to False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = "\nОшибка! Фактическое значение должно быть равно ожидаемому.\n" \
f"Фактическое значение = '{actual_value}',\n" \
f"Ожидаемое значение = '{expected_value}'."
assert actual_value == expected_value, msg
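# Example (illustrative): equal(actual_value=resp.json()["id"], expected_value=42,
#                               msg="Unexpected id", bug_link="PROJ-123")
# `resp` and the issue key "PROJ-123" are hypothetical placeholders used only for illustration.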
@check_func
def not_equal(
actual_value: any,
expected_value: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
    Check that two values are not equal. \n
    :param actual_value: the actual value.
    :param expected_value: the expected value.
    :param msg: error message. By default a message of the following form is used:
                'Error! The actual value must not be equal to the expected one.\n
                Actual value = '{actual_value}',\n
                Expected value = '{expected_value}'.'
    :param bug_link: link to a bug report. Defaults to None.
    :param stop_on_fail: whether to fail the test immediately after the first failed check.
        Defaults to False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = "\nОшибка! Фактическое значение должно быть не равно ожидаемому.\n" \
f"Фактическое значение = '{actual_value}',\n" \
f"Ожидаемое значение = '{expected_value}'."
assert actual_value != expected_value, msg
@check_func
def is_true(
result: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что результат выполнения операции равен True. \n
:param result: результат выполнения операции.
:param msg: сообщение об ошибке. По умолчанию = None.
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = f"\nОшибка! Значение должно быть равно 'True'. Фактическое значение = '{result}'."
assert bool(result), msg
@check_func
def is_false(
result: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что результат выполнения операции равен False. \n
:param result: результат выполнения операции.
:param msg: сообщение об ошибке. По умолчанию = None.
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = f"\nОшибка! Значение должно быть равно 'False'. Фактическое значение = '{result}'."
assert not bool(result), msg
@check_func
def is_none(
value: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что значение равно None. \n
:param value: проверяемое значение.
:param msg: сообщение об ошибке. По умолчанию используется сообщение вида:
'Ошибка! Значение должно быть равно 'None'.\n
Фактическое значение = '{value}'.'
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = f"\nОшибка! Значение должно быть равно 'None'. Фактическое значение = '{value}'."
assert value is None, msg
@check_func
def is_not_none(
value: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что значение не равно None. \n
:param value: проверяемое значение.
:param msg: сообщение об ошибке. По умолчанию используется сообщение вида:
'Ошибка! Значение должно быть равно 'None'.\n
Фактическое значение = '{value}'.'
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = f"\nОшибка! Значение не должно быть равно 'None'. Фактическое значение = '{value}'."
assert value is not None, msg
@check_func
def is_in(
value: any,
sequence: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что значение есть в последовательности. \n
:param value: значение.
:param sequence: последовательность (строка, список, кортеж, множество или словарь).
:param msg: сообщение об ошибке. По умолчанию используется сообщение вида:
'Ошибка! Последовательность '{sequence}' должна содержать значение '{value}'.'
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = f"\nОшибка! Последовательность '{sequence}' должна содержать значение '{value}'."
assert value in sequence, msg
@check_func
def is_not_in(
value: any,
sequence: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что значения нет в последовательности. \n
:param value: значение.
:param sequence: последовательность (строка, список, кортеж, множество или словарь).
:param msg: сообщение об ошибке. По умолчанию используется сообщение вида:
'Ошибка! Последовательность '{sequence}' не должна содержать значение '{value}'.'
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = f"\nОшибка! Последовательность '{sequence}' не должна содержать значение '{value}'."
assert value not in sequence, msg
@check_func
def greater(
first_value: any,
second_value: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что первое значение больше второго значения. \n
:param first_value: первое значение.
:param second_value: второе значение.
:param msg: сообщение об ошибке. По умолчанию используется сообщение вида:
Ошибка! Значение '{first_value}' должно быть больше значения '{second_value}'.
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = f"\nОшибка! Значение '{first_value}' должно быть больше значения '{second_value}'."
assert first_value > second_value, msg
@check_func
def greater_equal(
first_value: any,
second_value: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что первое значение больше или равно второму значению. \n
:param first_value: первое значение.
:param second_value: второе значение.
:param msg: сообщение об ошибке. По умолчанию используется сообщение вида:
Ошибка! Значение '{first_value}' должно быть больше или равно значению '{second_value}'.
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = f"\nОшибка! Значение '{first_value}' должно быть больше или равно значению '{second_value}'."
assert first_value >= second_value, msg
@check_func
def less(
first_value: any,
second_value: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что первое значение меньше второго значения. \n
:param first_value: первое значение.
:param second_value: второе значение.
:param msg: сообщение об ошибке. По умолчанию используется сообщение вида:
Ошибка! Значение '{first_value}' должно быть меньше значения '{second_value}'.
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = f"\nОшибка! Значение '{first_value}' должно быть меньше значения '{second_value}'."
assert first_value < second_value, msg
@check_func
def less_equal(
first_value: any,
second_value: any,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что первое значение меньше или равно второму значению. \n
:param first_value: первое значение.
:param second_value: второе значение.
:param msg: сообщение об ошибке. По умолчанию используется сообщение вида:
Ошибка! Значение '{first_value}' должно быть меньше или равно значению '{second_value}'.
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = f"\nОшибка! Значение '{first_value}' должно быть меньше или равно значению '{second_value}'."
assert first_value <= second_value, msg
@check_func
def check_dict_values(
actual_data: dict,
expected_data: dict,
verified_fields: list = None,
unverified_fields: list = None,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что все значения из словаря с ожидаемыми данными равны значениям из словаря с фактическими данными. \n
:param actual_data: словарь с фактическими данными.
:param expected_data: словарь с ожидаемыми данными.
:param verified_fields: список полей, которые нужно проверить.
:param unverified_fields: список полей, которые не нужно проверять.
:param msg: сообщение об ошибке. По умолчанию используется сообщение вида:
'Ошибка! Фактическое значение должно быть равно ожидаемому.\n
Фактическое значение = '{actual_value}',\n
Ожидаемое значение = '{expected_value}'.'
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
verified_keys = expected_data.keys()
if verified_fields:
verified_keys = verified_fields
elif unverified_fields:
verified_keys -= unverified_fields
    for key in verified_keys:
        key_msg = msg
        if not key_msg:
            key_msg = f"\nОшибка! Фактическое значение поля словаря '{key}' не соответствует ожидаемому.\n" \
                      f"Фактическое значение = '{actual_data.get(key)}',\n" \
                      f"Ожидаемое значение = '{expected_data.get(key)}'."
        assert actual_data.get(key) == expected_data.get(key), key_msg
@check_func
def check_status_code(
actual_code: int,
expected_code: int,
msg: str = None,
stop_on_fail: bool = False,
bug_link: str = None):
"""
Проверить, что фактический статус-код соответстует ожидаемому. \n
:param actual_code: фактический статус-код.
:param expected_code: ожидаемый статус-код.
:param msg: сообщение об ошибке. По умолчанию используется сообщение вида:
'Ошибка! Фактический статус-код не соответствует ожидаемому.\n
Фактический статус-код = '{actual_code}',\n
Ожидаемый статус-код = '{expected_code}'.'
:param bug_link: ссылка на баг. По умолчанию = None.
:param stop_on_fail: параметр, отвечающий за необходимость фейлить тест после первой проваленной проверки.
По умолчанию = False.
"""
set_stop_on_fail(stop_on_fail)
if not msg:
msg = f"Ошибка! Фактический статус-код не соответствует ожидаемому.\n" \
f"Фактический статус-код = '{actual_code}',\n" \
f"Ожидаемый статус-код = '{expected_code}'."
assert actual_code == expected_code, msg
def get_full_context(level):
(_, filename, line, funcname, contextlist) = inspect.stack()[level][0:5]
filename = os.path.relpath(filename)
context = contextlist[0].strip()
return (filename, line, funcname, context)
def log_failure(msg):
__tracebackhide__ = True
level = 3
pseudo_trace = []
func = ""
while "test_" not in func:
(file, line, func, context) = get_full_context(level)
if "site-packages" in file:
break
line = "{}:{} in {}() -> {}\n".format(file, line, func, context)
pseudo_trace.append(line)
level += 1
pseudo_trace_str = "\n".join(reversed(pseudo_trace))
entry = "FAILURE: {}\n{}".format(msg if msg else "", pseudo_trace_str)
_failures.append(entry)
def check_issue(issue_number: str, exception: AssertionError):
"""
Проверить актуальность дефектов. \n
:param issue_number: номер задачи.
:param exception: данные об ошибке сравнения.
"""
jira_connection = JiraConnection()
try:
issue_info = jira_connection.client.issue(issue_number, fields="status, fixVersions, priority, resolutiondate")
except JIRAError:
pytest.fail(f"Ошибка! Задача с номером '{issue_number}' не найдена в Jira!", pytrace=False)
unfixed_bug_msg, fixed_bug_msg = '', ''
status_name = issue_info.fields.status.name
if status_name != 'Готово':
unfixed_bug_msg = \
f"\nТест падает по дефекту: {os.environ.get('JIRA_SERVER')}/browse/{issue_info.key},\n" \
f"Приоритет задачи: '{issue_info.fields.priority}'!\n" \
f"Статус задачи: '{status_name}'!\n"
elif status_name == 'Готово':
versions = ', '.join([service.name for service in issue_info.fields.fixVersions])
fixed_bug_msg = \
f"\nВоспроизводится дефект: {os.environ.get('JIRA_SERVER')}/browse/{issue_info.key},\n" \
f"Статус задачи: '{status_name}',\n" \
f"Дата решения задачи: '{issue_info.fields.resolutiondate}',\n" \
f"Баг исправлен в версиях: '{versions}'!\n"
if unfixed_bug_msg:
testit.addLink(type=testit.LinkType.DEFECT, url=f"{os.environ.get('JIRA_SERVER')}/browse/{issue_number}")
reason = exception.args[0] + unfixed_bug_msg
log_failure(reason)
pytest.xfail(reason=reason)
elif fixed_bug_msg:
reason = exception.args[0] + fixed_bug_msg
log_failure(reason) | zyfra-check | /zyfra-check-0.0.9.tar.gz/zyfra-check-0.0.9/src/zyfra_check/check_methods.py | check_methods.py |
import environ
env = environ.Env()
PRODUCTION = env.bool("DJANGO_PRODUCTION", default=False)
"""
:annotation: = False
Whether or not the app is running in production mode.
If ``True``, ``DEBUG`` is explicitly set to ``False`` to avoid leaking information.
.. note::
Controlled by the environment variable ``DJANGO_PRODUCTION`` by default
"""
DEBUG = False if PRODUCTION else env.bool("DJANGO_DEBUG", default=True)
"""
:annotation: = True
Used internally by Django to decide how much debugging context is sent to the browser when a failure occurs.
Cannot be ``True`` if ``PRODUCTION`` is ``True``
.. note::
Controlled by the environment variable ``DJANGO_DEBUG`` by default
"""
def prod_required_env(key, default, method="str"):
"""
Throw an exception if PRODUCTION is true and the environment key is not provided
:type key: str
:param key: Name of the environment variable to fetch
:type default: any
:param default: Default value for non-prod environments
:type method: str
:param method: django-environ instance method, used to type resulting data
.. seealso::
- `django-environ <https://github.com/joke2k/django-environ>`_
- `django-environ supported types <https://github.com/joke2k/django-environ#supported-types>`_
"""
if PRODUCTION:
default = environ.Env.NOTSET
return getattr(env, method)(key, default)
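# Illustrative usage only: DJANGO_SENTRY_DSN / SENTRY_DSN below are hypothetical names, not settings
# defined by this module. The call makes the variable mandatory in production and optional elsewhere.
# SENTRY_DSN = prod_required_env("DJANGO_SENTRY_DSN", default="", method="str")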
ALLOWED_HOSTS = [prod_required_env("DJANGO_ALLOWED_HOST", default="*")]
"""
:annotation: = ['*']
Sets the list of valid ``HOST`` header values. Typically this is handled by a reverse proxy in front of the deployed Django application. In development, this is provided by the Caddy reverse proxy.
.. warning:: Requires ``DJANGO_ALLOWED_HOST`` to be set in production mode
"""
db_config = env.db_url("DATABASE_URL", default="postgres://postgres:postgres@db/postgres")
"""
:annotation: = env.db_url("DATABASE_URL", default="postgres://postgres:postgres@db/postgres")
Parses the ``DATABASE_URL`` environment variable into a Django `databases`_ dictionary.
Uses a standard database URI schema.
"""
DATABASES = {"default": db_config}
"""
Django `databases <https://docs.djangoproject.com/en/3.1/ref/settings/#databases>`_ configuration value.
The default entry is generated automatically from :py:data:`db_config`.
.. note::
If you need more than one database or a different default setup, you can modify this value in your application's ``settings.py`` file.
"""
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField" | zygoat-django | /zygoat_django-1.0.1-py3-none-any.whl/zygoat_django/settings/environment.py | environment.py |
# zygoat
<img src="https://user-images.githubusercontent.com/640862/75250233-e287ea80-57a5-11ea-9d9f-553662a17706.jpeg" />
## What is zygoat?
`zygoat` is a command line tool used to bootstrap and configure a React/Django/Postgres stack web application.
Linting, test configuration, boilerplate, and development environment are automatically taken care of using `zygoat` so that you can get up and running faster.
`zygoat` also includes a preset deployment configuration to allow you to deploy your stack to an AWS environment with a single command. You'll get a full serverless AWS stack to keep things inexpensive and nimble.
## How does it work?
`zygoat` works by defining `Components` (discrete parts of a project) and then describing how each component behaves when you're creating a new project, updating an existing project, or deleting a component that's no longer needed.
For instance, for the python backend, we want to include `black`, which is a tool for automatically formatting python code in a standard way to make it pep8 compliant. To install `black` for the python backend part of the project, we create a `Component` for it, specifically a `FileComponent`, which defines how we treat files that we need in projects. Then we register the `Black` component (defined in [black.py](https://github.com/bequest/zygoat/blob/master/zygoat/components/backend/black.py)) with the `Backend` component (defined in [backend/\_\_init\_\_.py](https://github.com/bequest/zygoat/blob/master/zygoat/components/backend/__init__.py)) as a sub component. This way, whenever you create, update, or delete a project with the `Backend` component, the same 'phase' is applied to the `Black` component.
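As a rough illustration, a component definition looks something like the sketch below. The class names `FileComponent`, `Black`, and `Backend` come from the paragraph above, but the import path, attribute names, and method names shown here are assumptions for illustration, not zygoat's documented API.
```python
# Hypothetical sketch only: the import path, attributes, and hooks below are assumptions.
from zygoat.components import FileComponent  # assumed import location

class Black(FileComponent):
    """Owns the black configuration file for the python backend."""
    filename = "black-config.toml"  # assumed attribute: the file this component manages

    def create(self):
        ...  # write the config when `zg new` runs

    def update(self):
        ...  # refresh the config when `zg update` runs

    def delete(self):
        ...  # remove the config when `zg delete` runs

# Registering an instance as a sub component of Backend (assumed mechanism) means every
# phase run against Backend is also run against Black.
black = Black()
```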
## Installation
```bash
pip install --upgrade zygoat
```
## Usage
```bash
mkdir my-cool-new-app && cd my-cool-new-app
git init
zg new my-cool-new-app
```
For more customization and configuration, [check out the official documentation](https://zygoat.readthedocs.io/en/latest/).
## How do I develop changes for it?
Make a new git repository somewhere; we'll call it test-zg
```bash
mkdir test-zg && cd test-zg
git init
```
Install the zygoat package locally
```bash
pip install --user --upgrade ~/Projects/zygoat # Or wherever you have it
```
If you're using the asdf version manager, reshim
```bash
asdf reshim python
```
Run zg commands, see if they fail
```bash
zg new test
zg update
zg delete
```
---
## Contributing
`zygoat` is developed using the [Poetry](https://python-poetry.org/docs/) packaging framework for Python projects to make development as simple and portable as possible.
---
## Documentation
[Available on ReadTheDocs](https://zygoat.readthedocs.io/en/latest/)
| zygoat | /zygoat-1.19.0.tar.gz/zygoat-1.19.0/README.md | README.md |
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from proto import zhiyan_rpc_pb2 as proto_dot_zhiyan__rpc__pb2
class ZhiYanServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.zymod = channel.unary_unary(
'/zhiyan_rpc.ZhiYanService/zymod',
request_serializer=proto_dot_zhiyan__rpc__pb2.ZhiYanRequest.SerializeToString,
response_deserializer=proto_dot_zhiyan__rpc__pb2.ZhiYanResponse.FromString,
)
self.zyregistermod = channel.unary_unary(
'/zhiyan_rpc.ZhiYanService/zyregistermod',
request_serializer=proto_dot_zhiyan__rpc__pb2.ZhiYanRegisterModuleRequest.SerializeToString,
response_deserializer=proto_dot_zhiyan__rpc__pb2.ZhiYanRegisterModuleResponse.FromString,
)
self.zyevent = channel.unary_unary(
'/zhiyan_rpc.ZhiYanService/zyevent',
request_serializer=proto_dot_zhiyan__rpc__pb2.ZhiYanEventRequest.SerializeToString,
response_deserializer=proto_dot_zhiyan__rpc__pb2.ZhiYanEventResponse.FromString,
)
class ZhiYanServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def zymod(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def zyregistermod(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def zyevent(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ZhiYanServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'zymod': grpc.unary_unary_rpc_method_handler(
servicer.zymod,
request_deserializer=proto_dot_zhiyan__rpc__pb2.ZhiYanRequest.FromString,
response_serializer=proto_dot_zhiyan__rpc__pb2.ZhiYanResponse.SerializeToString,
),
'zyregistermod': grpc.unary_unary_rpc_method_handler(
servicer.zyregistermod,
request_deserializer=proto_dot_zhiyan__rpc__pb2.ZhiYanRegisterModuleRequest.FromString,
response_serializer=proto_dot_zhiyan__rpc__pb2.ZhiYanRegisterModuleResponse.SerializeToString,
),
'zyevent': grpc.unary_unary_rpc_method_handler(
servicer.zyevent,
request_deserializer=proto_dot_zhiyan__rpc__pb2.ZhiYanEventRequest.FromString,
response_serializer=proto_dot_zhiyan__rpc__pb2.ZhiYanEventResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'zhiyan_rpc.ZhiYanService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ZhiYanService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def zymod(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/zhiyan_rpc.ZhiYanService/zymod',
proto_dot_zhiyan__rpc__pb2.ZhiYanRequest.SerializeToString,
proto_dot_zhiyan__rpc__pb2.ZhiYanResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def zyregistermod(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/zhiyan_rpc.ZhiYanService/zyregistermod',
proto_dot_zhiyan__rpc__pb2.ZhiYanRegisterModuleRequest.SerializeToString,
proto_dot_zhiyan__rpc__pb2.ZhiYanRegisterModuleResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def zyevent(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/zhiyan_rpc.ZhiYanService/zyevent',
proto_dot_zhiyan__rpc__pb2.ZhiYanEventRequest.SerializeToString,
proto_dot_zhiyan__rpc__pb2.ZhiYanEventResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | zygrpc | /zygrpc-0.0.1.15-py3-none-any.whl/proto/zhiyan_rpc_pb2_grpc.py | zhiyan_rpc_pb2_grpc.py |
==========
Zyklop ◎
==========
This program is a wrapper around rsync. It will help you:
* if you need to sync files from a remote server frequently
* if you can't keep the exact location of every file in your head - it finds
  them for you.
Requirements
==============
* Python >= 2.6 (Python >= 2.7 for tests)
* rsync installed
* locate installed with up-to-date database on the remote system
First Steps
===========
If you are new to ssh, set up an ssh configuration first. If you are
dealing with a lot of servers, giving them aliases makes them easier to
remember and saves you a lot of typing.
#. Create an ssh configuration in your SSH home, e.g.::
vim ~/.ssh/config
You can use the following example as a starter::
Host spameggs
Hostname 12.112.11.122
Compression yes
CompressionLevel 9
User guido
but be sure to check the `documentation
<https://duckduckgo.com/?q=ssh+config+documentation&t=canonical>`_
or the man page (5) for `ssh_config`
#. Make the config only readable for the owner::
chmod 600 ~/.ssh/config
#. Test if you can login to your configured host using only your
alias::
ssh spameggs
Examples
========
#. Syncing ZODB from remote server configured in ``~/.ssh/config``
as spameggs. We choose not the first database, but the second::
$ zyklop spameggs:Data.fs .
Use /opt/otherbuildout/var/filestorage/Data.fs? Y(es)/N(o)/A(bort) n
Use /opt/buildout/var/filestorage/Data.fs? Y(es)/N(o)/A(bort) y
#. Syncing a directory providing a path segment::
$ zyklop spameggs:buildout/var/filestorage$ .
#. Syncing a directory which ends with `blobstorage``, excluding any
other `blobstorage` directories with postfixes in the name (e.g.
`blobstorage.old`)::
$ zyklop spameggs:blobstorage$ .
#. Use an **absolute path** if you know exactly where to copy from::
$ zyklop spameggs:/tmp/Data.fs .
#. Syncing a directory which needs higher privileges. We use the
``-s`` argument::
$ zyklop -s spameggs:blobstorage$ .
#. **Dry run** prints out all found remote paths and just exits::
$ zyklop -d spameggs:blobstorage$ .
/opt/otherbuildout/var/blobstorage
/opt/otherbuildout/var/blobstorage.old
/opt/buildout/var/blobstorage
#. Sync the first result zyklop finds automatically **without
prompting**::
$ zyklop -y spameggs:blobstorage$ .
Known Problems
--------------
Zyklop just hangs
This can be caused by paramiko and an insufficient SSH setup. Make
sure you can log in without problems by simply issuing a::
ssh myhost
If that does not solve your problem, try to provide an absolute path
as the source. Sometimes users don't have many privileges on the
remote server and paramiko just waits for the output of a remote
command::
zyklop myhost:/path/to/file .
Motivation
==========
I'm dealing with Zope servers most of my time. Some of them have a
*huge* Data.fs - an object oriented database. In 99% of the cases I
already have an older version of the client's database on my PC.
Copying the whole database would take ages, whereas rsync only
transfers a binary patch, which makes updating my local database quick.
To summarize, with zyklop I'd like to address two things:
1. Downloading large ZODBs takes a long time and
bandwidth. I simply don't want to wait that long and download that
much.
2. Most of the time I can not remember the exact path where the item
to copy is on the remote server.
TODO
====
* tty support: sometimes needed if SSH is configured to only allow
tty's to connect.
* Don't hang if only password auth is configured for SSH
Development
===========
If you're interested in hacking, clone zyklop on github:
https://github.com/romanofski/zyklop
| zyklop | /zyklop-0.5.2.zip/zyklop-0.5.2/README.rst | README.rst |
import os, shutil, sys, tempfile, urllib2
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
is_jython = sys.platform.startswith('java')
# parsing arguments
parser = OptionParser()
parser.add_option("-v", "--version", dest="version",
help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
action="store_true", dest="distribute", default=False,
help="Use Disribute rather than Setuptools.")
parser.add_option("-c", None, action="store", dest="config_file",
help=("Specify the path to the buildout configuration "
"file to be used."))
options, args = parser.parse_args()
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args += ['-c', options.config_file]
if options.version is not None:
VERSION = '==%s' % options.version
else:
VERSION = ''
# We decided to always use distribute, make sure this is the default for us
# USE_DISTRIBUTE = options.distribute
USE_DISTRIBUTE = True
args = args + ['bootstrap']
to_reload = False
try:
import pkg_resources
if not hasattr(pkg_resources, '_distribute'):
to_reload = True
raise ImportError
except ImportError:
ez = {}
if USE_DISTRIBUTE:
exec urllib2.urlopen('http://python-distribute.org/distribute_setup.py'
).read() in ez
ez['use_setuptools'](to_dir=tmpeggs, download_delay=0, no_fake=True)
else:
exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py'
).read() in ez
ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)
if to_reload:
reload(pkg_resources)
else:
import pkg_resources
if sys.platform == 'win32':
def quote(c):
if ' ' in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
def quote (c):
return c
cmd = 'from setuptools.command.easy_install import main; main()'
ws = pkg_resources.working_set
if USE_DISTRIBUTE:
requirement = 'distribute'
else:
requirement = 'setuptools'
if is_jython:
import subprocess
assert subprocess.Popen([sys.executable] + ['-c', quote(cmd), '-mqNxd',
quote(tmpeggs), 'zc.buildout' + VERSION],
env=dict(os.environ,
PYTHONPATH=
ws.find(pkg_resources.Requirement.parse(requirement)).location
),
).wait() == 0
else:
assert os.spawnle(
os.P_WAIT, sys.executable, quote (sys.executable),
'-c', quote (cmd), '-mqNxd', quote (tmpeggs), 'zc.buildout' + VERSION,
dict(os.environ,
PYTHONPATH=
ws.find(pkg_resources.Requirement.parse(requirement)).location
),
) == 0
ws.add_entry(tmpeggs)
ws.require('zc.buildout' + VERSION)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs) | zyklop | /zyklop-0.5.2.zip/zyklop-0.5.2/bootstrap.py | bootstrap.py |
.. zyklop documentation master file, created by
sphinx-quickstart on Thu Feb 9 18:32:45 2012.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
.. moduleauthor:: Róman Joost <[email protected]>
.. default-domain:: py
.. toctree::
:maxdepth: 2
.. include:: ../README.rst
.. include:: CHANGES.txt
API
===
.. automodule:: zyklop
:members:
SSH
---
.. automodule:: zyklop.ssh
:members:
Implemented Search
------------------
.. automodule:: zyklop.search
:members:
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| zyklop | /zyklop-0.5.2.zip/zyklop-0.5.2/docs/index.rst | index.rst |
# Zyklus
A simple event loop for executing functions within the loop's thread.
## Usage
### Current thread
```python
from zyklus import Zyklus
def output(what):
print(what)
zyklus = Zyklus()
zyklus.post(lambda: output(1))
zyklus.post(lambda: output(2))
zyklus.post(lambda: output(3))
zyklus.post_delayed(lambda: output(5), 1)
zyklus.post(lambda: output(4))
zyklus.post_delayed(zyklus.terminate, 1.1)
zyklus.loop()
output("done")
```
output:
```
1
2
3
4
5
done
```
### In background
```python
from zyklus import Zyklus
import threading
def output(what):
print(what)
zyklus = Zyklus()
zyklusThread = threading.Thread(target=zyklus.loop)
zyklusThread.start()
zyklus.post(lambda: output(1))
zyklus.post(lambda: output(2))
zyklus.post(lambda: output(3))
zyklus.post_delayed(lambda: output(5), 1)
zyklus.post(lambda: output(4))
zyklus.post_delayed(zyklus.terminate, 1.5)
zyklusThread.join()
output("done")
```
output:
```
1
2
3
4
5
done
```
## Installation
```
pip install zyklus
``` | zyklus | /zyklus-0.2.tar.gz/zyklus-0.2/README.md | README.md |
from .data_utils.analysis import Analyzer
# data
from .data_utils.processing import Processor
from .data_utils.text_processing import MyTokenizer
from .data_utils.text_processing import TextProcessor
from .data_utils.html_processing import HtmlProcessor
from .data_utils.image_processing import ImageProcessor
from .data_utils.pdf_processing import PDFProcessor
# processing
split_data_evenly = Processor.split_data_evenly  # split data evenly
split_train_eval = Processor.split_train_eval  # split into train and eval sets
two_classification_sampling = Processor.two_classification_sampling  # sampling for binary classification
remove_some_model_files = Processor.remove_some_model_files  # remove redundant model files
save_dataframe_to_excel = Processor.save_dataframe_to_excel  # save a DataFrame to an Excel sheet
# text processing
# sentence and word splitting: my_tokenizer = MyTokenizer() \ my_tokenizer.cut_paragraph_to_sentences, my_tokenizer.cut_sentence_to_words
clean_text = TextProcessor.clean_text  # clean text
ner_find = TextProcessor.ner_find  # find entities in a text --- to be improved
remove_illegal_chars = TextProcessor.remove_illegal_chars  # remove illegal characters
remove_invisible_chars = TextProcessor.remove_invisible_chars  # remove invisible characters
remove_html_tags = TextProcessor.remove_html_tags  # remove html tags --- to be improved
# analysis
get_text_language = Analyzer.get_text_language  # language of a text
get_text_string_length = Analyzer.get_text_string_length  # string length of a text
get_text_token_length = Analyzer.get_text_token_length  # model_token length of a text
show_dataframe_base_info = Analyzer.show_dataframe_base_info  # basic info of a DataFrame
show_dataframe_completely = Analyzer.show_dataframe_completely  # display a DataFrame in full
show_plt_completely = Analyzer.show_plt_completely  # matplotlib display settings
analyze_numerical_array = Analyzer.analyze_numerical_array  # analyze a numerical array
analyze_category_array = Analyzer.analyze_category_array  # analyze a categorical array
show_bio_data_info = Analyzer.show_bio_data_info  # analyze BIO-format NER data
# image processing
ImgProcessor = ImageProcessor()
show_image = ImgProcessor.show_image
format_image = ImgProcessor.format_image
read_image = ImgProcessor.read_image
save_image = ImgProcessor.save_image
get_text_from_one_image = ImgProcessor.get_text_from_one_image
get_tables_from_image = ImgProcessor.get_tables_from_image
# html processing
turn_html_content_to_pdf = HtmlProcessor.turn_html_content_to_pdf
# pdf processing
extract_tables_from_non_scanned_pdf = PDFProcessor.extract_tables_from_non_scanned_pdf
get_text_from_pdf_area = PDFProcessor.get_text_from_pdf_area
get_texts_and_tables_from_pdf = PDFProcessor.get_texts_and_tables_from_pdf
#########################################################################
# model
from .model_utils.model_utils import ModelUtils
# model_uitls
get_best_cuda_device = ModelUtils.get_best_cuda_device  # get the best cuda device(s)
fix_torch_multiprocessing = ModelUtils.fix_torch_multiprocessing  # fix_torch_multiprocessing
predict_with_multi_gpus = ModelUtils.predict_with_multi_gpus
# models
from .model_utils.models.ner_bio import NerBIO, NerBIOModel
from .model_utils.models.ner_t5 import NerT5
# metric
from .model_utils.metrics.ner_metric import entity_recognition_metrics  # T5 entity-recognition evaluation metric
# algorithm
from .model_utils.algorithms.sunday_match import sunday_match  # subsequence matching | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/__init__.py | __init__.py
import ast
import base64
import io
import PIL
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pytesseract
import requests
from PIL import Image
from cv2 import cv2
from collections import Counter
class ImageProcessor:
def __init__(self):
self.pharmcube_ocr_url = 'http://localhost/2txt'
# self.pharmcube_ocr_url ='http://101.201.249.176:1990/2txt'
# self.pharmcube_ocr_url = 'http://localhost/2txt_CV'
self.baidu_ocr_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/accurate"
self.request_url, self.headers = None,None
def get_baidu_ocr_config(self):
        # fetch an access_token: client_id is the AK (API key) and client_secret is the SK (secret key) from the Baidu console
appid = "25533636"
client_id = "PLvUz16ePip4txCcYXk2Ablh"
client_secret = "8HXb8DIo2t7eNaw1aD6XGZi4U1Kytj41"
token_url = "https://aip.baidubce.com/oauth/2.0/token"
host = f"{token_url}?grant_type=client_credentials&client_id={client_id}&client_secret={client_secret}"
response = requests.get(host)
access_token = response.json().get("access_token")
request_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/accurate"
headers = {"Content-Type": "application/x-www-form-urlencoded"}
request_url = f"{request_url}?access_token={access_token}"
return request_url, headers
@staticmethod
def read_image(image: str, method='cv2'):
        # opencv reads an image as a numpy.ndarray with shape (height, width, channels)
        # PIL returns a PIL.Image.Image (width, height, channels); the Image object supports crop(), i.e. image cropping
        if method == 'PIL':
            # PIL.PngImagePlugin.PngImageFile; PIL reads in RGB order and .convert() selects the mode: 1: bitmap, L: grayscale, RGB: color
            image = Image.open(image)
        elif method == 'cv2':
            image = cv2.imread(image, flags=1)  # ndarray, opencv reads in BGR order; flags=1: color (default), 0: grayscale
        else:
            image = mpimg.imread(image)  # ndarray, 2-D rgb, 3 channels
return image
@staticmethod
def show_image(img, other_mode=False):
        # display in RGB: cv2.imshow() displays BGR, PIL's img.show() uses RGB, plt.imshow() expects RGB
        if isinstance(img, str):  # a path to an image file
img = ImageProcessor.read_image(img, method='cv2') # ndarray
try:
if other_mode:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # BGR转RGB
finally:
plt.imshow(img)
plt.xticks([]), plt.yticks([])
plt.show()
def format_image(self, image, format='Image'):
if format == 'Image':
if isinstance(image, str):
image = Image.open(image)
elif isinstance(image, np.ndarray):
                image = Image.fromarray(image)  # convert a numpy array into an Image object
else:
if isinstance(image, str):
image = cv2.imread(image, 1)
            elif isinstance(image, PIL.PpmImagePlugin.PpmImageFile) or isinstance(image, PIL.Image.Image):
                image = np.array(image)  # convert an Image object into a numpy array
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return image
def save_image(self):
        # PIL.Image.save() saves an RGB image
        # cv2.imwrite() (opencv) saves an image, which effectively applies BGR2RGB
pass
def get_text_from_one_image(self, image, method='pharmcube_ocr'):
"""
使用ocr提取图像中的文本
Args:
image: 图像-路径/Image/ndarray
method: pharmcube_ocr/baidu_ocr/pytesseract
Returns:
"""
        image = self.format_image(image, 'Image')  # an RGB PIL Image object
        if image.mode != 'RGB':
image = image.convert('RGB')
if method == 'pharmcube_ocr':
buf = io.BytesIO()
image.save(buf, format='JPEG')
byte_im = buf.getvalue()
response = requests.post(self.pharmcube_ocr_url, files={'file': byte_im})
text = ast.literal_eval(response.text)
text = '\n'.join(text)
        elif method == 'baidu_ocr':  # paid service
image = np.array(image)
image = cv2.imencode('.jpg', image)[1]
image = image.tobytes()
image = base64.b64encode(image).decode('utf8')
body = {
"image": image,
"language_type": "auto_detect",
"recognize_granularity": "small",
"detect_direction": "true",
"vertexes_location": "true",
"paragraph": "true",
"probability": "true",
}
if not self.request_url:
self.request_url, self.headers, = self.get_baidu_ocr_config()
response = requests.post(self.request_url, headers=self.headers, data=body)
content = response.content.decode("UTF-8")
content = eval(content)
text = ''
if 'words_result' in content.keys():
content= content['words_result']
for c in content:
text += (c['words'].replace(' ', '') + '\n')
else: # pytesseract
text = pytesseract.image_to_string(image, lang="chi_sim") # png
text = text.replace(' ', '')
return text
def get_tables_from_image(self, image, ocr_method=None):
"""
从图像中获取若干表格的位置以及表格内容
Args:
image:
ocr_method: 使用ocr识别单元格文本
Returns:
"""
image = self.format_image(image, 'cv2')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # convert raw to gray picture and binary
binary = cv2.adaptiveThreshold(~gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 35, -5)
# get horizontal and vertical line
rows, cols = binary.shape
scale = 40
        # detect horizontal lines:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (cols // scale, 1))
eroded = cv2.erode(binary, kernel, iterations=1)
dilated_col = cv2.dilate(eroded, kernel, iterations=1)
        # detect vertical lines
scale = 40 # can use different threshold
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, rows // scale))
eroded = cv2.erode(binary, kernel, iterations=1)
dilated_row = cv2.dilate(eroded, kernel, iterations=1)
        mat_mask = dilated_col + dilated_row  # the table grid lines (horizontal + vertical)
        bitwise_and = cv2.bitwise_and(dilated_col, dilated_row)  # the intersection points
ys, xs = np.where(bitwise_and > 0)
# '''get the start coordinate of each line'''
# lines_pos = np.where(dilated_col > 0)
# linepos = Counter(lines_pos[0])
# start = 0
# starts = []
# for i in linepos:
# num = linepos[i]
# tmp = lines_pos[1][start:start + num][0]
# start += num
# starts.append(tmp)
# start_pos = min(starts)
#
# '''mark left margin if it do not been recognized'''
# linecols = Counter(ys)
# st = 0
# for i in linecols:
# ys = np.insert(ys, st, i)
# xs = np.insert(xs, st, start_pos)
# st += linecols[i]
# st += 1
contours, hierarchy = cv2.findContours(mat_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
tables_location = []
tables = []
for c in contours:
if c.size > 4:
if cv2.contourArea(c) > image.shape[1]:
left = np.min(c, axis=0)[0][0]
top = np.min(c, axis=0)[0][1]
right = np.max(c, axis=0)[0][0]
bottom = np.max(c, axis=0)[0][1]
tmp_xs = []
tmp_ys = []
for x, y in zip(xs, ys):
if ((left - 10) < x < (right + 10)) and ((top - 10) < y < (bottom + 10)):
tmp_xs.append(x)
                            tmp_ys.append(y)  # the intersection points come ordered from left to right
if (not tmp_ys) | (not tmp_xs):
continue
tmp_xs = self._format_coordinates(tmp_xs)
tmp_ys = self._format_coordinates(tmp_ys)
table = self._get_table_from_coordinates(tmp_xs, tmp_ys)
tables_location.append((left, top, right, bottom))
if ocr_method:
tmp_table = []
for row in table:
t = []
for cell in row:
cell_image = gray[max(0,cell[1]-5):cell[3], cell[0]:cell[2]]
t.append(self.get_text_from_one_image(cell_image, ocr_method))
tmp_table.append(t)
tables.append(tmp_table)
else:
tables.append(table)
        # tables are ordered from top to bottom in the image
sorted_tables = []
tmp_tables_location = {t[1]: e for e, t in enumerate(tables_location)}
for t in sorted(tmp_tables_location.keys()):
sorted_tables.append(tables[tmp_tables_location.get(t)])
tables_location.sort(key=lambda x: x[1])
return sorted_tables, tables_location
def _format_coordinates(self, coordinates):
        # for one table, normalize the grid coordinates, e.g. [0,1,40,10,11,40] -> [0,0,10,10,40,40]
sorted_coordinates = np.sort(coordinates)
format_dict = {sorted_coordinates[0]: sorted_coordinates[0]}
start_point = sorted_coordinates[0]
for i in range(len(sorted_coordinates) - 1):
if sorted_coordinates[i + 1] - sorted_coordinates[i] > 10:
start_point = sorted_coordinates[i + 1]
format_dict.update({sorted_coordinates[i + 1]: start_point})
return [format_dict.get(c) for c in coordinates] # 有重复
def _get_table_from_coordinates(self, xs, ys):
"""
# 对于一个表格,根据横向和纵向坐标,扣取其中的单元格坐标信息
Args:
xs: 横向坐标
ys: 竖向坐标
Returns:格式化的表格,列表,每个元素是一行(列表),每行中有若干(left, top, right, bottom)
【[(left, top, right, bottom)]】
"""
table_dict = dict()
table = []
column = None
for x, y in zip(xs, ys):
if y != column:
table_dict[y] = {x}
column = y
else:
table_dict[y].add(x)
        # assumes there is no case where a single header cell on top corresponds to two value cells below
if len(table_dict) > 1:
columns = sorted(list(table_dict.keys()))
for c in range(len(columns) - 1):
top = columns[c]
bottom = columns[c + 1]
all_xs = table_dict.get(top) & table_dict.get(bottom)
all_xs = sorted(list(all_xs))
t = []
if len(all_xs) >= 2:
for x in range(len(all_xs) - 1):
left = all_xs[x]
right = all_xs[x + 1]
t.append((left, top, right, bottom))
table.append(t)
return table
if __name__ == '__main__':
img = "/home/zyl/disk/PharmAI/pharm_ai/intel/data/test.PNG"
i_p = ImageProcessor()
t, t_l = i_p.get_tables_from_image(img,'pharmcube_ocr')
print(t)
print(t_l)
# t, t_l = i_p.get_tables_from_image(img, 'baidu_ocr')
# print(t)
# print(t_l)
#
# t, t_l = i_p.get_tables_from_image(img, 'tr_ocr')
# print(t)
# print(t_l) | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/data_utils/image_processing.py | image_processing.py |
import pandas as pd
class Processor:
def __init__(self):
pass
@staticmethod
def split_data_evenly(data, num) -> list:
"""
split_data_evenly,顺序均分数据,遵循最后一份最少的原则
Args:
data: may be list,dataframe,tuple... should have __len__
num: the number of sub_data
Returns:
list of sub_data
"""
data_length = len(data)
step = int(data_length / num)
other_data = data_length % num
if data_length <= num:
print('Warning: data_length <= data_num')
return data
if other_data == 0:
return [data[i:i + step] for i in range(0, data_length, step)]
else:
first_part_data = [data[i:i + step + 1] for i in range(0, int((step + 1) * other_data), step + 1)]
second_part_list = [data[i:i + step] for i in range(int((step + 1) * other_data), data_length, step)]
first_part_data.extend(second_part_list)
return first_part_data
@staticmethod
def split_train_eval(data: pd.DataFrame, max_eval_size=5000):
"""
切分训练集和评估集
Args:
data: pd.DataFrame
max_eval_size: 评估集最大size
Returns:
train,eval
"""
from sklearn.utils import resample
raw_data = resample(data, replace=False)
cut_point = min(max_eval_size, int(0.2 * len(raw_data)))
eval_df = raw_data[0:cut_point]
train_df = raw_data[cut_point:]
return train_df, eval_df
@staticmethod
def two_classification_sampling(train_df: pd.DataFrame, column='labels', neg_label='|', mode='up_sampling'):
"""
训练集二分类采样:上采样和下采样
Args:
train_df: pd.DataFrame
column: the column to sampling
neg_label: neg_label
mode:up_sampling/down_sampling
Returns:
data: pd.DataFrame
"""
import pandas as pd
from sklearn.utils import resample
negative_df = train_df[train_df[column] == neg_label]
neg_len = negative_df.shape[0]
positive_df = train_df[train_df[column] != neg_label]
pos_len = positive_df.shape[0]
if neg_len > pos_len:
if mode == 'down_sampling':
down_sampling_df = resample(negative_df, replace=False, n_samples=pos_len, random_state=242)
train_df = pd.concat([positive_df, down_sampling_df], ignore_index=True)
else:
up_sampling_df = resample(positive_df, replace=True, n_samples=(neg_len - pos_len), random_state=242)
train_df = pd.concat([train_df, up_sampling_df], ignore_index=True)
elif neg_len < pos_len:
if mode == 'down_sampling':
down_sampling_df = resample(positive_df, replace=False, n_samples=neg_len, random_state=242)
train_df = pd.concat([down_sampling_df, negative_df], ignore_index=True)
else:
up_sampling_df = resample(negative_df, replace=True, n_samples=(pos_len - neg_len), random_state=242)
train_df = pd.concat([train_df, up_sampling_df], ignore_index=True)
train_df = resample(train_df, replace=False)
return train_df
@staticmethod
def remove_some_model_files(model_args):
"""
simple-transformer 根据模型参数自动删除模型相关文件
Args:
model_args: simple-transformer的args
Returns:
"""
import os
if os.path.isdir(model_args.output_dir):
cmd = 'rm -rf ' + model_args.output_dir.split('outputs')[0] + 'outputs/'
os.system(cmd)
if os.path.isdir(model_args.output_dir.split('outputs')[0] + '__pycache__/'):
cmd = 'rm -rf ' + model_args.output_dir.split('outputs')[0] + '__pycache__/'
os.system(cmd)
if os.path.isdir(model_args.output_dir.split('outputs')[0] + 'cache/'):
cmd = 'rm -rf ' + model_args.output_dir.split('outputs')[0] + 'cache/'
os.system(cmd)
@staticmethod
def save_dataframe_to_excel(dataframe, excel_path, sheet_name='default'):
"""
df添加sheet
Args:
dataframe: df
excel_path: path
sheet_name: sheet
Returns:
"""
try:
from openpyxl import load_workbook
book = load_workbook(excel_path)
writer = pd.ExcelWriter(excel_path, engine='openpyxl')
writer.book = book
except:
writer = pd.ExcelWriter(excel_path, engine='openpyxl')
dataframe.to_excel(writer, sheet_name=sheet_name, index=False)
writer.save()
if __name__ == '__main__':
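    # Illustrative usage only: the tiny DataFrame below and its column names are made-up example data,
    # not anything shipped with the library.
    demo_df = pd.DataFrame({'text': ['a', 'b', 'c', 'd', 'e', 'f'],
                            'labels': ['|', '|', '|', '|', 'pos', 'pos']})
    train_df, eval_df = Processor.split_train_eval(demo_df, max_eval_size=2)
    balanced_df = Processor.two_classification_sampling(train_df, column='labels', neg_label='|',
                                                        mode='up_sampling')
    print(balanced_df['labels'].value_counts())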
print(Processor.split_data_evenly([0, 2, 3, 4, 5], 3)) | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/data_utils/processing.py | processing.py |
import html
import re
import langid
class MyTokenizer:
def __init__(self,do_lower_case=False):
        # split hyphenated tokens; whitespace is also treated as a token
self.sentences_tokenizer_en = self.get_sentences_tokenizer_en()
self.words_tokenizer_en = self.get_word_tokenizer_en(do_lower_case=do_lower_case)
@staticmethod
def cut_paragraph_to_sentences_zh(para: str, drop_empty_line=True, strip=True, deduplicate=False):
"""
中文切句
Args:
para: 输入段落文本
drop_empty_line: 是否丢弃空行
strip: 是否对每一句话做一次strip
deduplicate: 是否对连续标点去重,帮助对连续标点结尾的句子分句
Returns:
sentences: list[str]
"""
if deduplicate:
para = re.sub(r"([。!?\!\?])\1+", r"\1", para)
        para = re.sub('([。!?\?!])([^”’])', r"\1\n\2", para)  # single-character sentence terminators
        para = re.sub('(\.{6})([^”’])', r"\1\n\2", para)  # English ellipsis
        para = re.sub('(\…{2})([^”’])', r"\1\n\2", para)  # Chinese ellipsis
para = re.sub('([。!?\?!][”’])([^,。!?\?])', r'\1\n\2', para)
        # a closing double quote only ends a sentence if a terminator precedes it, so the split marker \n goes after the quote;
        # note that the rules above carefully keep the quotes
        para = para.rstrip()  # drop any extra trailing \n at the end of the paragraph
        # many rule sets also consider the semicolon ;, but it is ignored here, as are dashes, English double quotes, etc.;
        # simple adjustments can be made where needed
sentences = para.split("\n")
if strip:
sentences = [sent.strip() for sent in sentences]
if drop_empty_line:
sentences = [sent for sent in sentences if len(sent.strip()) > 0]
return sentences
@staticmethod
def get_sentences_tokenizer_en():
"""
the tokenizer for cutting paragraph to sentences
Returns:
tokenizer
"""
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
punkt_param = PunktParameters()
abbreviation = ['et al.', 'i.e.', 'e.g.', 'etc.', 'i.e', 'e.g', 'etc', ' et al']
punkt_param.abbrev_types = set(abbreviation)
tokenizer = PunktSentenceTokenizer(punkt_param)
return tokenizer
@staticmethod
def cut_sentence_to_words_zh(sentence: str):
"""
cut_sentence_to_words_zh
Args:
sentence: a sentence ,str
Returns:
sentences: list[str]
"""
english = 'abcdefghijklmnopqrstuvwxyz0123456789αγβδεζηθικλμνξοπρστυφχψω'
output = []
buffer = ''
for s in sentence:
            if s in english or s in english.upper():  # an English letter or a digit
                buffer += s
            else:  # a Chinese character
if buffer:
output.append(buffer)
buffer = ''
output.append(s)
if buffer:
output.append(buffer)
return output
@staticmethod
def get_word_tokenizer_en(do_lower_case=False):
"""
the tokenizer for cutting sentence to words
Returns:
tokenizer
"""
from transformers import BasicTokenizer
return BasicTokenizer(do_lower_case=do_lower_case)
# from nltk import WordPunctTokenizer
        # return WordPunctTokenizer()  # cannot split ').' properly, so it is not used
def cut_sentence_to_words(self, sentence: str,return_starts = False):
if langid.classify(sentence)[0] == 'zh':
words = self.cut_sentence_to_words_zh(sentence)
else:
words = self.words_tokenizer_en.tokenize(sentence)
if return_starts:
            starts = []  # the start position of each word in the sentence
i = 0
for j in words:
while i < len(sentence):
if sentence[i:i + len(j)] == j:
starts.append(i)
i += len(j)
break
else:
i += 1
return words,starts
return words
def cut_paragraph_to_sentences(self, paragraph: str):
if langid.classify(paragraph)[0] == 'zh':
return self.cut_paragraph_to_sentences_zh(paragraph)
else:
return self.sentences_tokenizer_en.tokenize(paragraph)
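# Illustrative usage only (the sample strings are made up): MyTokenizer routes each input to the
# Chinese or English splitter via langid, and can also return the start offset of every word.
# my_tokenizer = MyTokenizer()
# sentences = my_tokenizer.cut_paragraph_to_sentences('我爱吃苹果。I also like pears. How about you?')
# words, starts = my_tokenizer.cut_sentence_to_words(sentences[1], return_starts=True)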
class TextProcessor:
def __init__(self):
pass
@staticmethod
def clean_text(text: str):
"""
清洗数据
Args:
text: 文本
Returns:
text
"""
import re
        text = re.sub('<[^<]+?>', '', text).replace('\n', '').strip()  # strip html <...> tags
text = ' '.join(text.split()).strip()
return text
@staticmethod
def ner_find(text: str, entities: dict, ignore_nested=True):
"""
find the loaction of entities in a text
Args:
text: a text, like '我爱吃苹果、大苹果,小苹果,苹果【II】,梨子,中等梨子,雪梨,梨树。'
entities: {'entity_type1':{entity_str1,entity_str2...},
'entity_type2':{entity_str1,entity_str2...},
...}
like : {'apple': ['苹果', '苹果【II】'], 'pear': ['梨', '梨子'],}
ignore_nested: if nested
#>>>IndexedRuleNER().ner(text, entities, False)
Returns:
indexed_entities:{'entity_type1':[[start_index,end_index,entity_str],
[start_index,end_index,entity_str]...]
'entity_type2':[[start_index,end_index,entity_str],
[start_index,end_index,entity_str]...]
...}
#>>>{'apple': [[3, 5, '苹果'], [7, 9, '苹果'], [11, 13, '苹果'], [14, 16, '苹果'], [14, 20, '苹果【II】']],
'pear': [[21, 22, '梨'], [26, 27, '梨'], [30, 31, '梨'], [32, 33, '梨'], [21, 23, '梨子'], [26, 28, '梨子']]}
"""
indexed_entities = dict()
for every_type, every_value in entities.items():
every_type_value = []
for every_entity in list(every_value):
special_character = set(re.findall('\W', str(every_entity)))
for i in special_character:
every_entity = every_entity.replace(i, '\\' + i)
re_result = re.finditer(every_entity, text)
for i in re_result:
res = [i.span()[0], i.span()[1], i.group()]
if res != []:
every_type_value.append([i.span()[0], i.span()[1], i.group()])
indexed_entities[every_type] = every_type_value
if ignore_nested:
for key, value in indexed_entities.items():
all_indexs = [set(range(i[0], i[1])) for i in value]
for i in range(len(all_indexs)):
for j in range(i, len(all_indexs)):
if i != j and all_indexs[j].issubset(all_indexs[i]):
value.remove(value[j])
indexed_entities[key] = value
elif i != j and all_indexs[i].issubset(all_indexs[j]):
value.remove(value[i])
indexed_entities[key] = value
return indexed_entities
@staticmethod
def remove_illegal_chars(text: str):
"""
移除非法字符
Args:
text:
Returns:
"""
ILLEGAL_CHARACTERS_RE = re.compile(r'[\000-\010]|[\013-\014]|[\016-\037]')
        return ILLEGAL_CHARACTERS_RE.sub(r'', text)  # illegal characters
@staticmethod
def remove_invisible_chars(text, including_char=('\t', '\n', '\r')):
"""移除所有不可见字符,除'\t', '\n', '\r'外"""
str = ''
for t in text:
if (t not in including_char) and (not t.isprintable()):
str += ' '
else:
str += t
return str
@staticmethod
def remove_html_tags(text):
# soup = BeautifulSoup(raw_str, features="html.parser")
# return ''.join([s.text.replace('\n', '') for s in soup.contents if hasattr(s, 'text') and s.text])
        # text = re.sub('<[^<]+?>', '', text).replace('\n', '').strip()  # strip html <...> tags
# text = ' '.join(text.split()).strip()
        return html.unescape(text)  # unescape html entities | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/data_utils/text_processing.py | text_processing.py
from pdf2image import convert_from_path
from zyl_utils.data_utils.image_processing import ImageProcessor
IMAGEPROCESSOR = ImageProcessor()
import fitz
import pdfplumber
from tabula import read_pdf
class PDFProcessor:
def __init__(self):
pass
@staticmethod
def extract_tables_from_non_scanned_pdf(pdf: str, start_page: int = 0, end_page: int = None,
method='tabula'):
"""extract tables from a pdf
Args:
pdf: PDF File
start_page: the first page to begin extract,from 0 to start
end_page: the last page to extract
method:
Returns:
table_list : list/dict
"""
pdf_object = pdfplumber.open(pdf)
pdf_pages = pdf_object.pages[start_page:] if end_page is None else pdf_object.pages[
start_page:end_page + 1]
tables = []
for i in range(len(pdf_pages)):
if method == 'tabula':
tables_df = read_pdf(pdf, pages=start_page + i + 1, multiple_tables=True)
for t in tables_df:
table = []
table.append(list(t.columns))
for j in range(len(t)):
table.append(list(t.iloc[j]))
tables.append(table)
else: # 'pdfplumber'
table = pdf_pages[i].extract_tables()
for t in table:
if t:
tables.append(t)
return tables
@staticmethod
def get_text_from_pdf_area(pdf_page, left, top, right, bottom, mode='text'):
# clip = fitz.Rect(0, start_height, pdf_page.rect.width, tables[i]['top'])
clip = fitz.Rect(left, top, right, bottom)
if mode == 'text':
ss = '\n'
else:
ss = ' '
text = ''
lines_texts = pdf_page.get_textpage(clip=clip).extractBLOCKS()
last_line_bottom = 0
for l in range(len(lines_texts)):
if (last_line_bottom - lines_texts[l][1]) < 0.1 * (lines_texts[l][3] - lines_texts[l][1]):
text += '\n'
last_line_bottom = max(last_line_bottom, lines_texts[l][3])
spans = lines_texts[l][4].split('\n')
for s in range(len(spans) - 1):
if spans[s] in spans[s + 1]:
continue
else:
text += (str(spans[s]) + ss)
return text
@staticmethod
def get_texts_and_tables_from_pdf(pdf, ocr_method='pharmcube_ocr'):
images = convert_from_path(pdf, dpi=72)
pdf_doc = fitz.Document(pdf)
pdf_texts = ''
all_tables = []
for pg in range(0, len(images)):
img = images[pg]
pdf_page = pdf_doc.load_page(pg)
if not pdf_page.get_text():
is_scanned = True
img = img.crop((10, 10, pdf_page.rect.width - 10, pdf_page.rect.height - 10))
else:
is_scanned = False
tables, tables_location = IMAGEPROCESSOR.get_tables_from_image(img, ocr_method)
all_tables.extend(tables)
text_page = ''
if tables_location:
start_height = 0
for i in range(len(tables_location)):
if tables_location[i][1] < start_height:
continue
if is_scanned:
text_area = IMAGEPROCESSOR.get_text_from_one_image(img, method=ocr_method)
text_page += text_area
else:
text_area = PDFProcessor.get_text_from_pdf_area(pdf_page, left=0, top=start_height,
right=pdf_page.rect.width,
bottom=tables_location[i][1])
text_page += (text_area + '\n<表格>\n')
start_height = tables_location[i][-1]
if i == (len(tables_location) - 1):
text_area = PDFProcessor.get_text_from_pdf_area(pdf_page, left=0, top=start_height,
right=pdf_page.rect.width,
bottom=pdf_page.rect.height)
text_page += text_area
else:
if is_scanned:
text_page = IMAGEPROCESSOR.get_text_from_one_image(img, method=ocr_method)
else:
text_page = PDFProcessor.get_text_from_pdf_area(pdf_page, left=0, top=0,
right=pdf_page.rect.width,
bottom=pdf_page.rect.height)
pdf_texts += (text_page + '\n')
return pdf_texts, all_tables
if __name__ == '__main__':
pdf = "/home/zyl/disk/PharmAI/pharm_ai/intel/data/v1/test_dt_pdfs/6310ee78a81a81d4d4a6de3169ccb40d.pdf"
print(PDFProcessor.extract_tables_from_non_scanned_pdf(pdf)) | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/data_utils/pdf_processing.py | pdf_processing.py |
import langid
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class Analyzer:
def __init__(self):
pass
@staticmethod
def get_text_language(text: str):
"""
注意:短语尽量不要输入判断,越短越不准,# ’癌症‘判断为‘ja'
Args:
text:
Returns:
"""
return langid.classify(text)[0]
@staticmethod
def get_text_string_length(text: str):
return len(text)
@staticmethod
def get_text_token_length(text: str, model_tokenizer=None):
if not model_tokenizer:
from transformers import BertTokenizer
model_tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
return len(model_tokenizer.tokenize(text))
@staticmethod
def show_dataframe_base_info(df: pd.DataFrame, column=None):
if column:
df = df[column]
print(df.describe())
print(df.info())
@staticmethod
def show_dataframe_completely():
"""
完全显示pandas的dataframe的所有值
Returns:
"""
import pandas as pd
        pd.set_option('max_colwidth', 500)  # set the display width of values to 500 (default is 50)
        pd.set_option('display.max_columns', None)  # show all columns
        pd.set_option('display.max_rows', None)  # show all rows
@staticmethod
def show_plt_completely():
"""
plt显示问题
Returns:
"""
plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号
@staticmethod
def analyze_numerical_array(data):
"""
分析数值数组
Args:
data:
Returns:
"""
Analyzer.show_plt_completely()
if not isinstance(data, np.ndarray):
data = np.array(data)
        q1 = np.percentile(data, 25)  # first quartile (25% from the smallest), the lower quartile
        q2 = np.percentile(data, 50)  # second quartile (50%), the median
        q3 = np.percentile(data, 75)  # third quartile (75%), the upper quartile
        iqr = q3 - q1  # interquartile range (IQR) = upper quartile - lower quartile
lower_limit = q1 - 1.5 * iqr
upper_limit = q3 + 1.5 * iqr
print(f"""
计数: {len(data)}
均值: {data.mean()}
标准差: {data.std()}
方差: {data.var()}
最大值: {np.max(data)}
最小值: {np.min(data)}
下四分位数: {q1}
中位数: {q2}
上四分位数: {q3}
下异常值界限:{lower_limit} ,异常值数:{len(np.where(data < lower_limit)[0])}
上异常值界限:{upper_limit} ,异常值数:{len(np.where(data > upper_limit)[0])}
"""
)
plt.subplot(211)
plt.hist(data)
plt.subplot(212)
plt.boxplot(data, vert=False)
plt.show()
@staticmethod
def analyze_category_array(data: pd.Series):
"""
分析类型数据
Args:
data:
Returns:
"""
Analyzer.show_plt_completely()
if not isinstance(data, pd.Series):
data = pd.Series(data)
data_value_counts = data.value_counts()
data_pie = data_value_counts / len(data)
print(f"""
data:
{data_value_counts}
data_percent:
        {data_pie.sort_values(ascending=False)}
"""
)
plt.subplot()
data_value_counts.plot.bar()
plt.show()
plt.subplot()
data_pie.plot.pie(autopct='%.1f%%', title='pie', )
plt.show()
@staticmethod
def show_bio_data_info(bio_dt: pd.DataFrame, label='DISEASE'):
"""
show bio format data info
Args:
bio_dt: ["sentence_id", "words", "labels"]
label: entity cls
Returns:
info
"""
labels = bio_dt.groupby(by=['sentence_id'], sort=False)
from zyl_utils.model_utils.models.ner_bio import NerBIO
labels = labels.apply(lambda x: x['labels'].tolist())
y_true = [set(NerBIO.get_id_entity(l, label=label)) for l in labels]
pos = [y for y in y_true if y != set()]
neg = [y for y in y_true if y == set()]
        print(f'dataset size (sentences): {len(labels)}')
        print(f'sentences with at least one entity: {len(pos)}')
        print(f'sentences without any entity: {len(neg)}')
        print(f'dataset size (tokens): {len(bio_dt)}')
        print(f"tokens labeled 'O': {len(bio_dt[bio_dt['labels'] == 'O'])}")
        print(f"tokens labeled 'B-*': {len(bio_dt[bio_dt['labels'].str.startswith('B')])}")
        print(f"tokens labeled 'I-*': {len(bio_dt[bio_dt['labels'].str.startswith('I')])}")
if __name__ == '__main__':
df = pd.read_hdf('/home/zyl/disk/PharmAI/pharm_ai/panel/data/v4/processing_v4_4.h5',
'disease_eval_bio')
Analyzer.show_bio_data_info(df, label='DISEASE') | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/data_utils/analysis.py | analysis.py |
import re
import langid
import pandas as pd
class MyTokenizer:
def __init__(self):
        # note: hyphen-connected spans ('-') are split into separate tokens
self.sentences_tokenizer_zh = self._cut_paragraph_to_sentences_zh
self.sentences_tokenizer_en = self._cut_paragraph_to_sentences_en().tokenize
self.words_tokenizer_zh = self._cut_sentence_to_words_zh
self.words_tokenizer_en = self._cut_sentence_to_words_en().tokenize
def _cut_paragraph_to_sentences_zh(self, para: str, drop_empty_line=True, strip=True, deduplicate=False):
"""
Args:
para: 输入文本
drop_empty_line: 是否丢弃空行
strip: 是否对每一句话做一次strip
deduplicate: 是否对连续标点去重,帮助对连续标点结尾的句子分句
Returns:
sentences: list of str
"""
if deduplicate:
para = re.sub(r"([。!?\!\?])\1+", r"\1", para)
        para = re.sub('([。!?\?!])([^”’])', r"\1\n\2", para)  # single-character sentence terminators
        para = re.sub('(\.{6})([^”’])', r"\1\n\2", para)  # English ellipsis
        para = re.sub('(\…{2})([^”’])', r"\1\n\2", para)  # Chinese ellipsis
        para = re.sub('([。!?\?!][”’])([^,。!?\?])', r'\1\n\2', para)
        # if a terminator precedes a closing quote, the quote ends the sentence, so the newline is
        # placed after the quote; note that the rules above carefully keep the quotes attached
        para = para.rstrip()  # drop any trailing newlines at the end of the paragraph
        # many rule sets also handle the semicolon ';', but it is ignored here, as are dashes and
        # English double quotes; adjust the rules if you need them
sentences = para.split("\n")
if strip:
sentences = [sent.strip() for sent in sentences]
if drop_empty_line:
sentences = [sent for sent in sentences if len(sent.strip()) > 0]
return sentences
def _cut_paragraph_to_sentences_en(self):
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
punkt_param = PunktParameters()
abbreviation = ['et al.', 'i.e.', 'e.g.', 'etc.', 'i.e', 'e.g', 'etc', ' et al']
punkt_param.abbrev_types = set(abbreviation)
tokenizer = PunktSentenceTokenizer(punkt_param)
return tokenizer
def _cut_sentence_to_words_zh(self, sentence: str):
english = 'abcdefghijklmnopqrstuvwxyz0123456789αγβδεζηθικλμνξοπρστυφχψω'
output = []
buffer = ''
for s in sentence:
            if s in english or s in english.upper():  # latin/greek letter or digit: keep buffering
                buffer += s
            else:  # any other character (e.g. Chinese): flush the buffer and emit the char on its own
if buffer:
output.append(buffer)
buffer = ''
output.append(s)
if buffer:
output.append(buffer)
return output
def _cut_sentence_to_words_en(self):
from nltk import WordPunctTokenizer
# from transformers import BasicTokenizer
# BasicTokenizer(do_lower_case=False).tokenize()
return WordPunctTokenizer()
def cut_sentence_to_words(self, sentence: str):
if langid.classify(sentence)[0] == 'zh':
return self.words_tokenizer_zh(sentence)
else:
return self.words_tokenizer_en(sentence)
def cut_paragraph_to_sentences(self, paragraph: str):
if langid.classify(paragraph)[0] == 'zh':
return self.sentences_tokenizer_zh(paragraph)
else:
return self.sentences_tokenizer_en(paragraph)
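# A minimal usage sketch (with assumed inputs) for MyTokenizer:
# >>> tokenizer = MyTokenizer()
# >>> tokenizer.cut_paragraph_to_sentences('今天天气很好。我们去公园吧!')
# expected (assuming langid detects Chinese): ['今天天气很好。', '我们去公园吧!']
# >>> tokenizer.cut_sentence_to_words('COVID19疫苗研发')
# expected (assuming langid detects Chinese): ['COVID19', '疫', '苗', '研', '发']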
class NlpUtils:
def __init__(self):
pass
@staticmethod
def show_all():
import pandas as pd
        # show up to 250 characters per cell (default is 50)
        pd.set_option('max_colwidth', 250)
        # show all columns
        pd.set_option('display.max_columns', None)
        # show all rows
pd.set_option('display.max_rows', None)
@staticmethod
def df_clean_language(df, column_name, language_list=('en', 'zh')):
        # keep only the rows whose text in `column_name` is detected as one of `language_list`
import langid
df['language'] = df[column_name].apply(lambda x: langid.classify(str(x))[0])
df = df[df['language'].isin(language_list)]
df = df.drop(['language'], axis=1)
return df
@staticmethod
def split_data_evenly(dt, num):
dt_length = len(dt)
step = int(dt_length / num)
other_dt = dt_length % num
if dt_length <= num:
print('dt_length <= dt_num')
return dt
if other_dt == 0:
return [dt[i:i + step] for i in range(0, dt_length, step)]
else:
first_dt = [dt[i:i + step + 1] for i in range(0, int((step + 1) * other_dt), step + 1)]
second_list = [dt[i:i + step] for i in range(int((step + 1) * other_dt), dt_length, step)]
first_dt.extend(second_list)
return first_dt
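    # A minimal sketch (with an assumed input) of split_data_evenly: 10 items into 3 chunks
    # >>> NlpUtils.split_data_evenly(list(range(10)), 3)
    # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]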
@staticmethod
def clean_text(text):
import re
        text = re.sub('<[^<]+?>', '', text).replace('\n', '').strip()  # strip HTML tags
text = ' '.join(text.split()).strip()
return text
@staticmethod
def cut_train_eval(all_df):
from sklearn.utils import resample
raw_df = resample(all_df, replace=False)
cut_point = min(5000, int(0.2 * len(raw_df)))
eval_df = raw_df[0:cut_point]
train_df = raw_df[cut_point:]
return train_df, eval_df
@staticmethod
def two_classification_sampling(train_df, column='labels', pos_label=1, mode='up_sampling'):
import pandas as pd
from sklearn.utils import resample
negative_df = train_df[train_df[column] != pos_label]
neg_len = negative_df.shape[0]
positive_df = train_df[train_df[column] == pos_label]
pos_len = positive_df.shape[0]
if neg_len > pos_len:
if mode == 'down_sampling':
down_sampling_df = resample(negative_df, replace=False, n_samples=pos_len, random_state=242)
return pd.concat([positive_df, down_sampling_df], ignore_index=True)
else:
up_sampling_df = resample(positive_df, replace=True, n_samples=(neg_len - pos_len), random_state=242)
return pd.concat([train_df, up_sampling_df], ignore_index=True)
elif neg_len < pos_len:
if mode == 'down_sampling':
down_sampling_df = resample(positive_df, replace=False, n_samples=neg_len, random_state=242)
return pd.concat([down_sampling_df, negative_df], ignore_index=True)
else:
up_sampling_df = resample(negative_df, replace=True, n_samples=(pos_len - neg_len), random_state=242)
return pd.concat([train_df, up_sampling_df], ignore_index=True)
else:
return train_df
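    # A minimal sketch (with an assumed dataframe) of two_classification_sampling:
    # >>> train = pd.DataFrame({'text': ['a', 'b', 'c', 'd'], 'labels': [1, 0, 0, 0]})
    # >>> balanced = NlpUtils.two_classification_sampling(train, mode='up_sampling')
    # up-samples the minority class so positive and negative counts end up equal (here 3 vs. 3)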
@staticmethod
def find_index(raw_text, find_text, label='label'):
# special_character = set(re.findall('\W', str(raw_text)))
# for i in special_character:
# raw_text = raw_text.replace(i, '\\' + i)
re_result = re.finditer(find_text, raw_text)
starts = []
for i in re_result:
starts.append(i.span()[0])
return [{'label': label, 'start': s, 'offset': len(find_text)} for s in starts]
@staticmethod
def ner_find(text: str, entities: dict, ignore_nested=True):
"""
        find the location of entities in a text
Args:
text: a text, like '我爱吃苹果、大苹果,小苹果,苹果【II】,梨子,中等梨子,雪梨,梨树。'
entities: {'entity_type1':{entity_str1,entity_str2...},
'entity_type2':{entity_str1,entity_str2...},
...}
like : {'apple': ['苹果', '苹果【II】'], 'pear': ['梨', '梨子'],}
ignore_nested: if nested
#>>>IndexedRuleNER().ner(text, entities, False)
Returns:
indexed_entities:{'entity_type1':[[start_index,end_index,entity_str],
[start_index,end_index,entity_str]...]
'entity_type2':[[start_index,end_index,entity_str],
[start_index,end_index,entity_str]...]
...}
#>>>{'apple': [[3, 5, '苹果'], [7, 9, '苹果'], [11, 13, '苹果'], [14, 16, '苹果'], [14, 20, '苹果【II】']],
'pear': [[21, 22, '梨'], [26, 27, '梨'], [30, 31, '梨'], [32, 33, '梨'], [21, 23, '梨子'], [26, 28, '梨子']]}
"""
indexed_entities = dict()
for every_type, every_value in entities.items():
every_type_value = []
for every_entity in list(every_value):
special_character = set(re.findall('\W', str(every_entity)))
for i in special_character:
every_entity = every_entity.replace(i, '\\' + i)
re_result = re.finditer(every_entity, text)
                for i in re_result:
                    every_type_value.append([i.span()[0], i.span()[1], i.group()])
indexed_entities[every_type] = every_type_value
        if ignore_nested:
            for key, value in indexed_entities.items():
                all_indexs = [set(range(i[0], i[1])) for i in value]
                # collect the indices of spans that are nested inside a longer span first,
                # then drop them, instead of removing items from the list while iterating over it
                nested = set()
                for i in range(len(all_indexs)):
                    for j in range(len(all_indexs)):
                        if i != j and all_indexs[j] < all_indexs[i]:
                            nested.add(j)
                indexed_entities[key] = [v for idx, v in enumerate(value) if idx not in nested]
        return indexed_entities
@staticmethod
def remove_some_model_files(args):
import os
if os.path.isdir(args.output_dir):
cmd = 'rm -rf ' + args.output_dir.split('outputs')[0] + 'outputs/'
os.system(cmd)
if os.path.isdir(args.output_dir.split('outputs')[0] + '__pycache__/'):
cmd = 'rm -rf ' + args.output_dir.split('outputs')[0] + '__pycache__/'
os.system(cmd)
if os.path.isdir(args.output_dir.split('outputs')[0] + 'cache/'):
cmd = 'rm -rf ' + args.output_dir.split('outputs')[0] + 'cache/'
os.system(cmd)
# @staticmethod
# def sunday_match(target, pattern):
# """
#
# Args:
# target:
# pattern:
#
# Returns:
#
# """
# len_target = len(target)
# len_pattern = len(pattern)
#
# if len_pattern > len_target:
# return list()
#
# index = 0
# starts = []
# while index < len_target:
# if pattern == target[index:index + len_pattern]:
# starts.append(index)
# index += 1
# else:
# if (index + len(pattern)) >= len_target:
# return starts
# else:
# if target[index + len(pattern)] not in pattern:
# index += (len_pattern + 1)
# else:
# index += 1
# return starts
# @staticmethod
# def transfomer_data_format_from_t5_to_ner(df: pd.DataFrame, delimiter='|',
# keep_addition_info=('id', 'text_type')):
# """
#
# Args:
# df: dataframe,must have the columns-['prefix','input_text','target_text']
#
# Returns:
#
# """
# all_cls = df.value_counts('prefix').index.to_list()
# custom_labels = ['O']
# for c in all_cls:
# custom_labels.append('B-' + c.upper())
# custom_labels.append('I-' + c.upper())
# sentence_id = 0
# res_li = []
# my_tokenizer = MyTokenizer()
#
# df = df.drop_duplicates(subset=['input_text'])
# for input_text, sub_df in tqdm(df.groupby('input_text', sort=False)):
# words = my_tokenizer.cut_sentence_to_word_piece(input_text)
# labels = ['O'] * len(words)
#
# for _, d in sub_df.iterrows():
# if keep_addition_info:
# for k in range(len(keep_addition_info)):
# exec(f'info_{k} = d[keep_addition_info[{k}]]')
#
# cls = d['prefix']
# sub_label = set(d['target_text'].split(delimiter))
# while '' in sub_label:
# sub_label.remove('')
# if sub_label:
# for every_entity in sub_label:
# entity = my_tokenizer.cut_sentence_to_word_piece(every_entity)
# res_starts = sunday_match(target=words, pattern=entity)
# if res_starts:
# for r in res_starts:
# labels[r] = 'B-' + cls.upper()
# if len(entity) > 1:
# labels[r + 1: r + len(entity)] = ['I-' + cls.upper()] * (len(entity) - 1)
#
# sentence_ner = []
# for w, l in zip(words, labels):
# r = {'sentence_id': sentence_id, 'words': w, 'labels': l}
# if keep_addition_info:
# for k in range(len(keep_addition_info)):
# r.update({keep_addition_info[k]: eval(f'info_{k}')})
# sentence_ner.append(r)
#
# res_li.extend(sentence_ner)
# sentence_id += 1
#
# df = pd.DataFrame(res_li)
# return df
if __name__ == '__main__':
test_df = pd.read_excel("/home/zyl/disk/PharmAI/pharm_ai/panel/data/v2.4.c/processed_0820.xlsx", 'eval')[0:100]
print('1')
# DTUtils.transfomer_data_format_from_t5_to_ner(test_df)
pass
# class Project(MyModel):
# def __init__(self):
# super(Project, self).__init__()
# self.start_time = '...'
# self.end_time = '...'
#
# self.wandb_proj = 'test'
# self.use_model = 'classification' # mt5 /classification
# self.model_type = 'bert'
# self.pretrained_model = ConfigFilePaths.bert_dir_remote
#
# def run(self):
# self.train_test()
#
# def train_test(self):
# self.model_version = 'vtest'
# self.pretrained_model = '/home/zyl/disk/PharmAI/pharm_ai/po/best_model/v4.2.0.4/'
# self.args = MyModel.set_model_parameter(model_version=self.model_version,
# args=ClassificationArgs(),
# save_dir='po')
# os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3"
# self.cuda_device = 0
# self.args.n_gpu = 3
#
# self.args.num_train_epochs = 1
# self.args.learning_rate = 5e-5
# self.args.train_batch_size = 64 # 512
# self.args.eval_batch_size = 32 # 256
# self.args.max_seq_length = 512
# self.args.gradient_accumulation_steps = 8 # 256
#
# train_df = pd.read_excel('./data/processed_0825.xlsx', 'train')
# eval_df = pd.read_excel('./data/processed_0825.xlsx', 'test')
# self.train(train_df=train_df, eval_df=eval_df)
#
#
# pass
# # d = range(0, 10)
# # num = 5
# # print(DTUtils.split_data_evenly(d, 5))
# # print('1')
# r = ['a',' ','','df','x',]
# f = ['','df']
# g = DTUtils.find_index(r, f)
# print(g)
# for i in g:
# print(r[i['start']:i['start']+i['offset']])
# print(r[22:25]) | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/data_utils/nlp_utils.py | nlp_utils.py |
from enum import Enum
from typing import List, Set, Optional, Dict, Union
from fastapi import Body, FastAPI, Query
from fastapi import Depends # depends依赖项
from fastapi import File, UploadFile
from fastapi import Form
from fastapi.responses import HTMLResponse
from pydantic import BaseModel, Field, EmailStr
# use html #############################################
# app = FastAPI()
# app.mount("/static", StaticFiles(directory="static"), name="static")
# templates = Jinja2Templates(directory="templates")
# @app.get("/items/{id}", response_class=HTMLResponse)
# async def read_item(request: Request, id: str):
# return templates.TemplateResponse("demo.html", {"request": request, "id": id})
# #####################################
# 1. Instantiate the app #################################
app = FastAPI(title="Fastapi",
version="0.0.1",
contact={
"name": "张玉良",
"url": "https://github.com/ZYuliang/",
"email": "[email protected]",
},
license_info={
"name": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0.html",
},
description="项目描述,接口说明,日志更新记录",
openapi_tags=[
{
"name": "interface1",
"description": "接口1说明",
},
{
"name": "interface2",
"description": "接口2说明",
"externalDocs": {
"description": "添加外部文档说明",
"url": "https://fastapi.tiangolo.com/",
},
},
],
)
# 2. Define request/response models ##############################
class RequestItem(BaseModel):  # note: fields with defaults and validators belong on a pydantic model, not an Enum
name: str = Field(..., example="Foo", title="The description of the item", max_length=300, alias="other_name",
description="Query string for the items to search in the database that have a good match",
regex=None)
num: Optional[float] = Query(..., min_length=3)
# image: Optional[List[Image]] = None
tags: Set[str] = set()
class ResponseItem(BaseModel):
url: str
name: str
class ModelName(str, Enum):
alexnet = "alexnet"
resnet = "resnet"
lenet = "lenet"
class Image(BaseModel):
url: str
name: str
# Request body --- parameter types, default values, constraints, descriptions
class Item(BaseModel):
    # an attribute with a default value is optional; otherwise it is required. Use item.dict() to serialize.
name: str = Field(..., example="Foo")
    description: Optional[str] = None  # optional field, defaults to None
price: float
tax: Optional[float] = None
    q: str = Query(..., min_length=3)  # '...' marks a required parameter
    q2: List[str] = Query(["foo", "bar"])  # validated with Query
q3: list = Query([])
q4: Optional[str] = Query(
None,
alias="item-query", # 别名
title="Query string", # 标题
description="Query string for the items to search in the database that have a good match", # 描述
min_length=3,
deprecated=True, # 表明该参数已经弃用
regex="^fixedquery$" # 字符串正则表达式
)
    size: float = Query(..., gt=0, lt=10.5)  # numeric bounds for int/float: greater-than / less-than
description2: Optional[str] = Field(
None, title="The description of the item", max_length=300
)
price: float = Field(..., gt=0, description="The price must be greater than zero")
tags: Set[str] = set()
    image: Optional[List[Image]] = None  # nested request body
    # example shown in the generated docs
class Config:
schema_extra = {
"example": {
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
}
}
class User(BaseModel):
username: str
full_name: Optional[str] = None
class UserIn(BaseModel):
username: str
password: str
email: EmailStr
full_name: Optional[str] = None
class BaseItem(BaseModel):
description: str
type: str
class CarItem(BaseItem):
type = "car"
class PlaneItem(BaseItem):
type = "plane"
size: int
# 3. Endpoint functions #####################################
# response_model_exclude_unset=True: the response omits fields left at their defaults and only contains
# values that were actually set; alternatively use response_model_include={"name", "description"}
@app.post("/items/", response_model=UserIn, response_model_exclude_unset=True)
async def create_item(item: Item, img: List[Image], weights: Dict[int, float], importance: int = Body(...),
response_model=Union[PlaneItem, CarItem], status_code=201):
print(item.dict())
return item
@app.post("/login/")
async def login(username: str = Form(...), password: str = Form(...)):
    # username and password are sent as form fields
return {"username": username}
@app.post("/files/")
async def create_file(file: bytes = File(...)):  # receive the whole file content as bytes
return {"file_size": len(file)}
@app.post("/uploadfile/")
async def create_upload_file(file: UploadFile = File(...)):  # better for large files (images, video, binaries): the whole file is not held in memory
    # filename: the uploaded file name as a str, e.g. myimage.jpg
    # content_type: the content type (MIME/media type) as a str, e.g. image/jpeg
    # file: a SpooledTemporaryFile (file-like object); a real Python file that can be passed to any
    #       function or library expecting a file-like object
    # UploadFile exposes the following async methods (backed by the internal SpooledTemporaryFile):
    # write(data): write data (str or bytes) to the file
    # read(size): read size (int) bytes/characters from the file
    # seek(offset): move to byte position offset (int); e.g. await file.seek(0) rewinds to the start,
    #               which is handy when you need to read the content again after await file.read()
    # close(): close the file
    contents = await file.read()  # or: contents = file.file.read()
return {"filename": file.filename}
@app.post("/files/", tags=["items"], summary="Create an item",
description="Create an item with all the information, name, description, price, tax and a set of unique tags",
response_description="The created item", deprecated=True)
# tags: groups the endpoint under a section in the generated docs
# summary: a short summary of the endpoint
# description: a longer description of the endpoint
# response_description: description of the response
# deprecated=True: marks the endpoint as deprecated
async def create_files(files: List[bytes] = File(...)):
"""
    The docstring written here becomes the endpoint description and is rendered as markdown.
Create an item with all the information:
- **name**: each item must have a name
- **description**: a long description
- **price**: required
- **tax**: if the item doesn't have tax, you can omit this
- **tags**: a set of unique tag strings for this item
"""
return {"file_sizes": [len(file) for file in files]}
@app.get("/")
async def main():
content = """
<body>
<form action="/files/" enctype="multipart/form-data" method="post">
<input name="files" type="file" multiple>
<input type="submit">
</form>
<form action="/uploadfiles/" enctype="multipart/form-data" method="post">
<input name="files" type="file" multiple>
<input type="submit">
</form>
</body>
"""
return HTMLResponse(content=content)
async def common_parameters(q: Optional[str] = None, skip: int = 0, limit: int = 100):
return {"q": q, "skip": skip, "limit": limit}
@app.get("/items/")
async def read_items(commons: dict = Depends(common_parameters)):
return commons
# 4. Tests ###############################
# from fastapi.testclient import TestClient
#
# from .main import app
#
# client = TestClient(app)
#
# def test_read_main():
# response = client.get("/")
# assert response.status_code == 200
# assert response.json() == {"msg": "Hello World"}
if __name__ == '__main__':
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=3243) | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/api_utils/api_utils.py | api_utils.py |
import copy
from concurrent.futures import ThreadPoolExecutor, as_completed
import torch
from simpletransformers.t5 import T5Model, DDPT5Model
from zyl_utils.data_utils.nlp_utils import DTUtils
class MyT5(T5Model):
"""
add function: use-multi-gpu
"""
def __init__(self, model_type, model_name, args=None, tokenizer=None, use_cuda=True, cuda_device=-1, **kwargs):
super(MyT5, self).__init__(model_type=model_type, model_name=model_name, args=args,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
def get_funcs(self, gpus):
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
    def predict_gpu(self, to_predict, gpus: list = None):
        # gpus can be like: ["1","2"]
        # fall back to single-GPU predict when no gpu list is given, only one gpu is listed,
        # or there are fewer samples than gpus
        if gpus and (len(to_predict) <= len(gpus)):
            gpus = None
        if gpus and (len(gpus) == 1):
            gpus = None
if not gpus:
outputs = self.predict(to_predict=to_predict)
else:
            if not getattr(self, 'funcs', None):
self.get_funcs(gpus)
print('Start processing data...')
max_workers = len(gpus)
sub_data_sets = DTUtils.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
for f in as_completed(futures): # not block,iterator
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
outputs = []
for i in sorted(res.keys()):
for j in res[i]:
outputs.append(j)
return outputs
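# A minimal usage sketch (model path, args and GPU ids are assumptions) for MyT5.predict_gpu:
# >>> model = MyT5('mt5', './best_model/', args={'eval_batch_size': 16})
# >>> preds = model.predict_gpu(['prefix: some input text'] * 1000, gpus=[0, 1])
# the inputs are split evenly across the listed GPUs and the per-GPU results are merged back in order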
class MyDDPT5(DDPT5Model):
"""
add function: use-multi-gpu
"""
def __init__(self, model_type, model_name, args=None, tokenizer=None, use_cuda=True, cuda_device=-1, **kwargs):
super(MyDDPT5, self).__init__(model_type=model_type, model_name=model_name, args=args,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
def get_funcs(self, gpus):
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
    def predict_gpu(self, to_predict, gpus: list = None):
        # gpus can be like: ["1","2"]
        # fall back to single-GPU predict when no gpu list is given, only one gpu is listed,
        # or there are fewer samples than gpus
        if gpus and (len(to_predict) <= len(gpus)):
            gpus = None
        if gpus and (len(gpus) == 1):
            gpus = None
if not gpus:
outputs = self.predict(to_predict=to_predict)
else:
            if not getattr(self, 'funcs', None):
self.get_funcs(gpus)
print('Start processing data...')
max_workers = len(gpus)
sub_data_sets = DTUtils.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
for f in as_completed(futures): # not block,iterator
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
outputs = []
for i in sorted(res.keys()):
for j in res[i]:
outputs.append(j)
return outputs | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/my_T5model.py | my_T5model.py |
import pandas as pd
class NERUtils:
# ner utils for mt5 model
def __init__(self):
# eval_entity_recognition ------评估
# revise_target_texts。
# revise_target_text
# keep_entities_in_input_text
# predict_entity_recognition-------预测
# split_texts_with_sliding_window
# model.predict_gpu
# combine_pred_target_texts_by_ids
# revise_target_texts
# revise_target_text
# keep_entities_in_input_text
# entity_recognition_v2-----标准
pass
@staticmethod
def eval_entity_recognition(model, eval_df: pd.DataFrame, check_in_input_text: bool, delimiter='|', tokenizer=None,
use_sliding_window=False, sliding_window=512, stride=0.8, pos_neg_ratio=None,
use_multi_gpus=None, self_metric=False):
"""eval entity recognition in mt5 model, version-v2 , reference: https://docs.qq.com/doc/DYXRYQU1YbkVvT3V2
Args:
model: a mt5 model
eval_df: a pd.Dataframe , must have columns ['prefix','input_text','target_text']
check_in_input_text: if the entities are in input_texts
delimiter: the delimiter in target_text to split different entities
use_sliding_window: if truncate the input text when predict
sliding_window: truncating_size
stride: overlapping_size
use_multi_gpus:use_multi_gpus
pos_neg_ratio : the ratio of positive and negative sample importance
self_metric:self_metric
tokenizer: tokenizer to split sentence
Returns:
show report and res, {prefix:res_df},type:dict
"""
prefixes = eval_df['prefix'].to_list()
input_texts = eval_df['input_text'].tolist()
target_texts = eval_df['target_text'].tolist()
revised_target_texts = NERUtils.revise_target_texts(target_texts=target_texts,
input_texts=input_texts, delimiter=delimiter,
check_in_input_text=check_in_input_text)
pred_target_texts = NERUtils.predict_entity_recognition(model, prefixes, input_texts, tokenizer=tokenizer,
use_sliding_window=use_sliding_window,
sliding_window=sliding_window, stride=stride,
delimiter=delimiter, use_multi_gpus=use_multi_gpus)
revised_pred_target_texts = NERUtils.revise_target_texts(target_texts=pred_target_texts,
input_texts=input_texts, delimiter=delimiter,
check_in_input_text=check_in_input_text)
eval_df['true_target_text'] = revised_target_texts
eval_df['pred_target_text'] = revised_pred_target_texts
eval_res = {}
for prefix in set(prefixes):
prefix_df = eval_df[eval_df['prefix'] == prefix]
y_true = prefix_df['true_target_text'].tolist()
y_pred = prefix_df['pred_target_text'].tolist()
print(f'{prefix} report:')
res_df = NERUtils.entity_recognition_v2(y_true, y_pred, pos_neg_ratio=pos_neg_ratio,
self_metric=self_metric)
eval_res[prefix] = res_df
print(f'sum report:')
res_df = NERUtils.entity_recognition_v2(revised_target_texts, revised_pred_target_texts,
pos_neg_ratio=pos_neg_ratio, self_metric=self_metric)
eval_res['sum'] = res_df
return eval_res # {prefix:res_df},type:dict
@staticmethod
def predict_entity_recognition(model, prefixes: list, input_texts: list, use_sliding_window=False,
sliding_window=512, stride=0.8, tokenizer=None,
delimiter='|', use_multi_gpus=None) -> list:
"""predict entity recognition in mt5 model,
Args:
model: a mt5 model
prefixes: prefixes
input_texts: input_texts
use_sliding_window: if use_sliding_window
sliding_window: sliding_window,the max token length for the model input(max_sequence_length)
tokenizer: tokenizer
stride: stride,(1-stride)*sliding_window for overlapping
delimiter: the delimiter in target_text to split different entities,default: '|'
use_multi_gpus: use_multi_gpus
Returns:
pred_target_texts:list,every element in pred_target_texts corresponds a prefix and an input_text
"""
if len(input_texts) == 1:
use_multi_gpus = None
assert len(prefixes) == len(input_texts)
if use_sliding_window:
t_ids, t_prefixes, t_input_texts = NERUtils.split_texts_with_sliding_window(input_texts, prefixes,
tokenizer=tokenizer,
sliding_window=sliding_window,
stride=stride)
to_predict_texts = [i + ': ' + j for i, j in zip(t_prefixes, t_input_texts)]
if not use_multi_gpus:
pred_target_texts = model.predict(to_predict_texts)
else:
pred_target_texts = model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
pred_target_texts = NERUtils.combine_pred_target_texts_by_ids(pred_target_texts, t_ids, delimiter)
else:
to_predict_texts = [i + ': ' + j for i, j in zip(prefixes, input_texts)]
if not use_multi_gpus:
pred_target_texts = model.predict(to_predict_texts)
else:
pred_target_texts = model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
assert len(pred_target_texts) == len(input_texts)
return pred_target_texts # type:list[str]
@staticmethod
def split_text_with_sliding_window(text: str, sliding_window=128, tokenizer=None, stride=0.8) -> list:
""" any sequence exceeding the max_seq_length will be split into several windows (sub-sequences),
each of length max_seq_length. The windows will typically overlap each other to a certain degree to
minimize any information loss that may be caused by hard cutoffs.
Args:
text: a str text
sliding_window: truncating_size:sliding window, max_seq_length
tokenizer: tokenizer
stride: The amount of overlap between the windows,The stride can be specified in terms of either a fraction
of the max_seq_length, or as an absolute number of tokens.
Returns:
truncated_input_text: the list of truncated_input_text
"""
if not isinstance(text, str):
text = str(text)
if not tokenizer:
try:
from transformers.models.t5 import T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained("mt5-base")
except Exception:
print('no tokenizer....')
tokens = tokenizer.tokenize(text)
if len(tokens) <= sliding_window:
return [text]
else:
split_text = []
if stride < 1:
step_size = int(sliding_window * stride)
else:
step_size = int(stride)
steps = int(len(tokens) / step_size)
for i in range(0, steps + 1):
text_i_tokens = tokens[i * step_size:i * step_size + sliding_window]
if text_i_tokens:
text_i = ''.join(text_i_tokens).replace('▁', ' ').strip()
split_text.append(text_i)
if (len(split_text) > 1) and (
len(tokenizer.tokenize(split_text[-1])) < (sliding_window - step_size)):
split_text = split_text[0:-1]
return split_text
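    # A minimal sketch (tokenizer and text are assumptions) of split_text_with_sliding_window:
    # >>> from transformers import T5Tokenizer
    # >>> tok = T5Tokenizer.from_pretrained('google/mt5-base')
    # >>> NERUtils.split_text_with_sliding_window(some_long_text, sliding_window=128, tokenizer=tok, stride=0.8)
    # returns overlapping windows of at most 128 tokens, advancing int(0.8 * 128) tokens at a time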
@staticmethod
def split_texts_with_sliding_window(input_texts: list, prefixes: list, tokenizer=None,
sliding_window=512, stride=0.8):
""" for every input_text in input_texts, split it and record the split_ids for combining
Args:
input_texts: the list of many input_text
prefixes: the prefix list of the input_texts list
sliding_window: sliding_window,the max token length for the model input(max_sequence_length)
tokenizer: tokenizer
stride: stride,(1-stride)*sliding_window for overlapping
Returns:
split_ids, split_prefixes, split_input_texts
"""
assert len(input_texts) == len(prefixes) # every input_text corresponds a prefix
input_texts_ids = range(len(input_texts))
split_ids = []
split_prefixes = []
split_input_texts = []
if not tokenizer:
try:
from transformers.models.t5 import T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained("mt5-base")
except Exception:
print('no tokenizer....')
for i_t_d, p, i_t in zip(input_texts_ids, prefixes, input_texts):
split_input_text = NERUtils.split_text_with_sliding_window(i_t, sliding_window, tokenizer, stride)
for t_i_t in split_input_text:
split_ids.append(i_t_d)
split_input_texts.append(t_i_t)
split_prefixes.append(p)
return split_ids, split_prefixes, split_input_texts # type:tuple[list[int],list[str],list[str]]
@staticmethod
def combine_pred_target_texts_by_ids(pred_target_texts, split_ids, delimiter: str = '|') -> list:
"""combine truncated_predicted_target_texts split_ids
Args:
pred_target_texts: the result of predicting the truncated input_texts
split_ids: get the truncated_ids when truncating input_texts
delimiter: the delimiter in target_text to split different entities
Returns:
pred_target_texts: predicted target_texts
"""
ids_target_text_dict = dict()
for i, j in zip(split_ids, pred_target_texts):
if not ids_target_text_dict.get(i):
ids_target_text_dict[i] = delimiter + j + delimiter
else:
ids_target_text_dict[i] = ids_target_text_dict[i] + j + delimiter
pred_target_texts = [ids_target_text_dict[k] for k in sorted(ids_target_text_dict.keys())]
return pred_target_texts # type:list
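    # A minimal sketch (with assumed values) of combine_pred_target_texts_by_ids: predictions of
    # windows that came from the same original text (same split id) are re-joined with the delimiter
    # >>> NERUtils.combine_pred_target_texts_by_ids(['e1|e2', 'e2|e3', 'e4'], [0, 0, 1])
    # ['|e1|e2|e2|e3|', '|e4|']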
@staticmethod
def revise_target_texts(target_texts: list, input_texts: list, check_in_input_text: bool = False, delimiter='|'):
"""revise the target texts,
Args:
target_texts: the list of the target_texts
input_texts: the list of the input_texts
check_in_input_text: if check the entities in input_text
delimiter: the delimiter in target_text to split different entities
Returns:
revised_target_texts = list[set]
"""
revised_target_texts = [NERUtils.revise_target_text(t_t, return_format='set', delimiter=delimiter) for
t_t in target_texts] # type:list[set,...]
if check_in_input_text:
revised_target_texts = NERUtils.keep_entities_in_input_text(input_texts, revised_target_texts)
return revised_target_texts # type:list[set]
@staticmethod
def revise_target_text(target_text: str, delimiter: str = '|', return_format='set'):
""" revise the target text
Args:
target_text: str, target_text
return_format: 'set' means:'every entity is an element in a set', 'str' means: different entities are split
by the delimiter
delimiter: the delimiter in target_text to split different entities
Returns:
revised_target_text : set or list
"""
assert isinstance(target_text, str)
target_text = target_text.split(delimiter)
target_text = set([' '.join(e.strip().split()) for e in target_text])
if '' in target_text:
target_text.remove('')
if return_format == 'set':
revised_target_text = target_text
elif return_format == 'list':
revised_target_text = list(target_text)
else: # return_format == 'str'
revised_target_text = '|'
if target_text != set():
for entity in list(target_text):
revised_target_text += (str(entity) + '|')
return revised_target_text
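    # A minimal sketch (with an assumed value) of revise_target_text:
    # >>> NERUtils.revise_target_text('lung cancer |  gastric cancer|', return_format='set')
    # {'lung cancer', 'gastric cancer'}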
@staticmethod
def keep_entities_in_input_text(input_texts: list, target_texts: list):
"""for each sample, for every entity ,keep the entities that are in the input text,and remove other entities
Args:
input_texts: the list of many input_text,and every input text is a string
target_texts: the list of many target_text,and evert target text is a set
Returns:
revise_target_texts: list[str]
"""
revised_target_texts = []
for input_text, target_text in zip(input_texts, target_texts):
if target_text != set():
elements = list(target_text)
for e in elements:
if str(e) not in input_text:
target_text.remove(e) # type:set
revised_target_texts.append(target_text)
return revised_target_texts # type:list[set]
@staticmethod
def entity_recognition_v2(y_true: list, y_pred: list, pos_neg_ratio: str = None, self_metric=False):
"""the metric of entity_recognition, version-v2, reference: https://docs.qq.com/doc/DYXRYQU1YbkVvT3V2
Args:
y_true: list[set],the list of true target texts,each element is a set
y_pred: list[set],the list of pred target texts,each element is a set
pos_neg_ratio: the ratio of positive and negative sample importance, default: the ratio of positive and
negative sample sizes, you can set it,like"7:3"
self_metric: self_metric
Returns:
show report and res
"""
neg_data = 0
neg_correct_dt = 0
neg_wrong_dt = 0
neg_redundant_entities = 0
pos_data = 0
pos_correct_dt = 0
pos_wrong_dt = 0
pos_correct_entities = 0
pos_wrong_entities = 0
pos_omitted_entities = 0
pos_redundant_entities = 0
for i, j in zip(y_true, y_pred):
if i == set():
neg_data += 1
if j == set():
neg_correct_dt += 1
else:
neg_wrong_dt += 1
neg_redundant_entities += len(j)
else:
pos_data += 1
true_pred = len(i & j)
pos_correct_entities += true_pred
if i == j:
pos_correct_dt += 1
elif len(i) >= len(j):
pos_wrong_dt += 1
pos_wrong_entities += (len(j) - true_pred)
pos_omitted_entities += (len(i) - len(j))
else:
pos_wrong_dt += 1
pos_redundant_entities += (len(j) - len(i))
pos_wrong_entities += (len(i) - true_pred)
all_pos_entities = pos_correct_entities + pos_wrong_entities + pos_omitted_entities + pos_redundant_entities
if neg_data == 0:
neg_metric = 0
else:
neg_metric = neg_correct_dt / (neg_correct_dt + neg_redundant_entities)
if pos_data == 0:
pos_metric = 0
else:
pos_metric = pos_correct_entities / all_pos_entities
sum_metric_micro = (pos_correct_entities + neg_correct_dt) / (
neg_correct_dt + neg_redundant_entities + all_pos_entities)
# sum_metric_macro = neg_metric * 0.5 + pos_metric * 0.5
if pos_neg_ratio:
pos_all = float(pos_neg_ratio.split(':')[0])
neg_all = float(pos_neg_ratio.split(':')[1])
pos_ratio = pos_all / (pos_all + neg_all)
neg_ratio = neg_all / (pos_all + neg_all)
else:
pos_ratio = pos_data / (pos_data + neg_data)
neg_ratio = neg_data / (pos_data + neg_data)
sum_metric_weighted = pos_ratio * pos_metric + neg_ratio * neg_metric
# pos_precision = pos_correct_dt / (neg_correct_dt + pos_correct_dt)
# recall = pos_correct_dt / pos_data
tp = pos_correct_dt
fn = pos_wrong_dt
fp = neg_wrong_dt
tn = neg_correct_dt
accuracy = (tp + tn) / (tp + fn + fp + tn)
# precision = tp / (tp + fp)
# recall = tp / (tp + fn)
# f1 = 2 / (1 / precision + 1 / recall)
r = {
'positive data': [str(pos_data), pos_correct_dt, pos_wrong_dt, pos_correct_entities,
pos_wrong_entities, pos_omitted_entities, pos_redundant_entities, pos_metric],
'negative data': [neg_data, neg_correct_dt, neg_wrong_dt, '-', '-', '-', neg_redundant_entities,
neg_metric],
'all data ': [str(pos_data + neg_data), neg_correct_dt + pos_correct_dt, neg_wrong_dt + pos_wrong_dt,
pos_correct_entities, pos_wrong_entities, pos_omitted_entities,
pos_redundant_entities + neg_redundant_entities,
sum_metric_micro],
# 'precision': ['', '', '', '', '', '', '', precision],
# 'recall': ['', '', '', '', '', '', '', recall],
# 'f1 score': ['', '', '', '', '', '', '', (2 * precision * recall) / (precision + recall)],
# 'accuracy score': ['', '', '', '', '', '', '', (neg_correct_dt + pos_correct_dt) / (pos_data + neg_data)],
# 'micro score': ['', '', '', '', '', '', '', sum_metric_micro],
# 'macro score': ['', '', '', '', '', '', '', sum_metric_macro],
'weighted score': ['', '', '', '', '', '', '', sum_metric_weighted],
}
index = ['| data_num', '| correct_data', '| wrong_data', '| correct_entities', '| wrong_entities',
'| omitted_entities', '| redundant_entities', '| score']
res_df = pd.DataFrame(r, index=index).T
pd.set_option('precision', 4)
pd.set_option('display.width', None)
pd.set_option('display.max_columns', None)
pd.set_option("colheader_justify", "center")
print(res_df)
print(
f"正样本集得分为:{pos_correct_entities} / "
f"({pos_correct_entities}+{pos_wrong_entities}+{pos_omitted_entities}+"
f"{pos_redundant_entities}) = {round(pos_metric, 4)},负样本集得分为:{neg_correct_dt} / ({neg_correct_dt} + "
f"{neg_redundant_entities})={round(neg_metric, 4)},",
f"总体得分为: ({pos_correct_entities} + {neg_correct_dt}) / "
f"({all_pos_entities}+{neg_correct_dt + neg_redundant_entities})={round(sum_metric_micro, 4)}",
f"准确率:{accuracy}",
)
print('\n')
        if self_metric:
            more_not_error_pos = (pos_correct_entities + pos_redundant_entities) / (
                    pos_correct_entities + pos_wrong_entities + pos_omitted_entities + pos_redundant_entities)
            print(
                f"self-metric: positive-sample score: {pos_correct_entities + pos_redundant_entities} /"
                f" ({pos_correct_entities}+{pos_wrong_entities}+{pos_omitted_entities}+"
                f"{pos_redundant_entities}) = {round(more_not_error_pos, 4)}, negative-sample score: {round(1, 4)},"
            )
            print('\n')
return res_df # type:pd.DataFrame
if __name__ == '__main__':
pass | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/ner_utils.py | ner_utils.py |
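    # Minimal sketch (toy data, purely illustrative) of the entity_recognition_v2 metric:
    # each element is the entity set extracted from one sample; an empty set marks a negative sample
    y_true = [{'lung cancer'}, set(), {'diabetes', 'obesity'}]
    y_pred = [{'lung cancer'}, {'asthma'}, {'diabetes'}]
    NERUtils.entity_recognition_v2(y_true, y_pred)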
pass
# ##################################################################
# @staticmethod
# def eval_entry_match(model, eval_df: pd.DataFrame, my_dict, delimiter='|', use_dict_match=True,
# pos_neg_ratio=None, keep_entry_in_dict=True, use_multi_gpus=None):
# prefixes = eval_df['prefix'].tolist()
# input_texts = eval_df['input_text'].tolist()
# target_texts = eval_df['target_text'].tolist()
#
# revised_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# pred_target_texts = NERUtils.predict_entry_match(em_model=model, prefix_match_dict=my_dict.prefix_match_dict,
# prefixes=prefixes, input_texts=input_texts,
# use_multi_gpus=use_multi_gpus,
# use_dict_match=use_dict_match)
#
# revised_pred_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=pred_target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# eval_df['true_target_text'] = revised_target_texts
# eval_df['pred_target_text'] = revised_pred_target_texts
#
# eval_res = {}
# for prefix in set(prefixes):
# prefix_df = eval_df[eval_df['prefix'] == prefix]
# y_true = prefix_df['true_target_text'].tolist()
# y_pred = prefix_df['pred_target_text'].tolist()
# print(f'{prefix} report:')
# res_df = NERUtils.entity_recognition_v2(y_true, y_pred, pos_neg_ratio=pos_neg_ratio)
# eval_res[prefix] = res_df
#
# print(f'sum report:')
# res_df = NERUtils.entity_recognition_v2(revised_target_texts, revised_pred_target_texts,
# pos_neg_ratio=pos_neg_ratio)
# eval_res['sum'] = res_df
# return eval_res
#
#
# @staticmethod
# def predict_entry_match(em_model, prefix_match_dict, prefixes: list, input_texts: list, use_dict_match=True,
# use_multi_gpus=None):
# if len(input_texts) == 1:
# use_multi_gpus = None
# if use_dict_match:
# pred_by_dict = []
# for p, i in zip(prefixes, input_texts):
# pred_by_dict.append(
# NERUtils.predict_entry_match_by_dict_match(str(i).strip(), dictionary=prefix_match_dict.get(p),
# use_edit_distance=False))
#
# # i = i.lower() # modify
#
# # if p == 'disease_em':
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=di_dict, use_edit_distance=False))
# # else:
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=tar_dict, use_edit_distance=False))
# else:
# pred_by_dict = [None] * len(input_texts)
#
# to_predict_texts = [i + ': ' + j for i, j in zip(prefixes, input_texts)]
# if not use_multi_gpus:
# pred_by_model = em_model.predict(to_predict_texts)
# else:
# pred_by_model = em_model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
# # pred_by_model = em_model.predict(to_predict_texts)
# assert len(pred_by_model) == len(pred_by_dict)
# pred_target_texts = [d if d else m for d, m in zip(pred_by_dict, pred_by_model)]
# return pred_target_texts
#
#
# @staticmethod
# def predict_entry_match_by_dict_match(input_text: str, dictionary: dict, use_edit_distance: bool = False):
# """predict the entry of a string by using dictionary match
#
# Args:
# input_text: a string
# dictionary: the dict, {entity:entry}
# use_edit_distance: True or False
#
# Returns:
# None or entry(str)
# """
# entry = dictionary.get(input_text)
# if not entry:
# if use_edit_distance:
# import Levenshtein
# max_score = 0
# for every_entity in dictionary.keys():
# score = Levenshtein.ratio(every_entity, input_text)
# if score >= max_score and score > 0.80: # 42-->43-->52
# max_score = score
# entry = dictionary.get(every_entity)
# return entry # None or entry
#
#
# @staticmethod
# def em_revise_target_texts(prefixes, target_texts, prefix_dict, delimiter='|', keep_entry_in_dict=False):
# revised_target_texts = [NERUtils.revise_target_text(t_t, return_format='set', delimiter=delimiter) for
# t_t in target_texts] # type:list[set,...]
#
# if keep_entry_in_dict:
# result = []
# for p, r_t_t in zip(prefixes, revised_target_texts):
# res = set()
# if r_t_t:
# for j in list(r_t_t):
# if j in prefix_dict.get(p):
# res.add(j)
# result.append(res)
# return result
# return revised_target_texts # type:list[set]
# @staticmethod
# def eval_by_auto_batch_size(job, eval_df, initial_eval_batch_size=600):
# """
#
# Args:
# job: you function. if run error, return None.
# eval_df: eval dataframe
# initial_eval_batch_size:
#
# Returns:
#
# """
# eval_batch_size = initial_eval_batch_size
# q = mp.Queue()
# pl = {'eval_batch_size': eval_batch_size}
# res = None
# while not res:
# eval_batch_size = int(eval_batch_size * 0.8)
# print(f'try eval_batch_size: {eval_batch_size}')
# pl['eval_batch_size'] = eval_batch_size
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# res = q.get()
# print(res)
#
# @staticmethod
# def eval_by_different_parameters(job, parameter_cfg: dict, eval_df):
# q = mp.Queue()
# parameters_list = NERUtils.get_parameters_list(parameter_cfg)
# for pl in parameters_list:
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# print(q.get())
#
# @staticmethod
# def get_parameters_list(parameter_cfg: dict):
# """
#
# Args:
# parameter_cfg: like:{'truncating_size': [100,10], 'overlapping_size': [10],'max_seq_length':[100,30]}
#
# Returns:[{'truncating_size': 100, 'overlapping_size': 10, 'max_seq_length': 100}, {'truncating_size': 100,
# 'overlapping_size': 10, 'max_seq_length': 30}, {'truncating_size': 10, 'overlapping_size': 10,
# 'max_seq_length': 100}, {'truncating_size': 10, 'overlapping_size': 10, 'max_seq_length': 30}]
#
# """
# parameters_list = []
# keys = []
# values = []
# for i, j in parameter_cfg.items():
# keys.append(i)
# values.append(j)
# for para in product(*values): # 求多个可迭代对象的笛卡尔积
# cfg = dict(zip(keys, para))
# parameters_list.append(cfg)
# return parameters_list # type:list
# @staticmethod
# def cut_entities(input_entities: list, prefixes: list):
# assert len(input_entities) == len(prefixes) # a input_text corresponds a prefix
# input_texts_ids = range(len(input_entities))
#
# cut_ids = []
# cut_input_entities = []
# cut_prefixes = []
# for id, i_e, p in zip(input_texts_ids, input_entities, prefixes):
# if not isinstance(i_e, set):
# cut_i_e = NERUtils.revise_target_text(target_text=i_e, return_format='set', delimiter='|')
# else:
# cut_i_e = i_e
# if cut_i_e != set():
# for c_i_t in cut_i_e:
# cut_ids.append(id)
# cut_input_entities.append(c_i_t)
# cut_prefixes.append(p)
# return cut_ids, cut_input_entities, cut_prefixes # type:list
#
# @staticmethod
# def combine_cut_entities(input_entities: list, cut_entities: list, cut_ids: list):
# dic = dict()
# for i, j in zip(cut_ids, cut_entities):
# if i not in dic.keys():
# dic[i] = j
# else:
# if isinstance(j, str):
# dic[i] = dic[i] + '|' + j
# else:
# dic[i].update(j)
#
# res = []
# all_keys = list(dic.keys())
# for i in range(len(input_entities)):
# if i in all_keys:
# res.append(dic[i])
# else:
# res.append(set())
# return res
###################################
# eval_entry_match
# em_revise_target_texts
# predict_entry_match
# predict_entry_match_by_dict_match
# model.predict_gpu
# | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/entry_match.py | entry_match.py |
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.ner import NERModel
class NerModel:
"""
ner model for train and eval
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.describe = " use simple-transformers--ner-model"
self.show_running_loss = False
self.wandb_proj = 'ner'
self.save_dir = './'
self.model_version = 'v0.0.0.0' # to save model or best model
        # version scheme a.b.c.d: a = raw-data batch, b = modeling approach (e.g. mt5 vs. classification),
        # c = data-processing variant (e.g. same input, text output vs. index output), d = hyper-parameter iteration
self.model_type = 'roberta'
        self.pretrained_model = 'roberta-base'  # pretrained model path / model_name
self.use_cuda = True
self.cuda_device = 0
self.labels = ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
self.model_args = self.my_config()
def my_config(self):
return {
'train_batch_size': 8,
'use_multiprocessing': False,
'use_multiprocessing_for_evaluation': False,
# multiprocess
# base config
'reprocess_input_data': True,
'use_cached_eval_features': False,
'fp16': False,
'manual_seed': 234,
            'gradient_accumulation_steps': 1,  # effectively increases the batch size, trading time for memory
# save
'no_save': False,
'save_eval_checkpoints': False,
'save_model_every_epoch': False,
'save_optimizer_and_scheduler': True,
'save_steps': -1,
# eval
'evaluate_during_training': True,
'evaluate_during_training_verbose': True,
'no_cache': False,
'use_early_stopping': False,
'encoding': None,
'do_lower_case': False,
'dynamic_quantize': False,
'quantized_model': False,
'silent': False,
'overwrite_output_dir': True,
'output_dir': self.save_dir + 'outputs/' + self.model_version + '/',
'cache_dir': self.save_dir + 'cache/' + self.model_version + '/',
'best_model_dir': self.save_dir + 'best_model/' + self.model_version + '/',
'tensorboard_dir': self.save_dir + 'runs/' + self.model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/',
}
@staticmethod
def deal_with_df(df):
df = df[["sentence_id", "words", "labels"]]
df = df.astype({'sentence_id': 'int', 'words': 'str', 'labels': 'str'})
return df
def train(self, train_data: pd.DataFrame, eval_data: pd.DataFrame):
# deal with dt
train_data = NerModel.deal_with_df(train_data)
eval_data = NerModel.deal_with_df(eval_data)
train_size = len(set(train_data['sentence_id'].tolist()))
eval_size = len(set(eval_data['sentence_id'].tolist()))
all_steps = train_size / self.model_args.get('train_batch_size')
self.model_args.update(
{
'train_size': train_size,
'eval_size': eval_size,
'logging_steps': int(max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'evaluate_during_training_steps': int(
max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train']
}
}
)
# get model
model = NERModel(model_type=self.model_type, model_name=self.pretrained_model, labels=self.labels,
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device)
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version}')
model.train_model(train_data=train_data, eval_data=eval_data)
logger.info('training finished!!!')
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
def train_example(self):
train_file = './test.xlsx'
eval_file = './test.xlsx'
train_data = pd.read_excel(train_file)
eval_data = pd.read_excel(eval_file)
self.save_dir = './'
self.model_version = 'erv4.2.0.2'
self.model_type = 'bert'
        self.pretrained_model = 'bert-base-multilingual-cased'  # pretrained model path / model_name
self.use_cuda = True
self.cuda_device = 0
self.labels = ["O", "B-DISEASE", "I-DISEASE"]
self.model_args = self.my_config()
self.model_args.update(
{
'train_file': train_file,
'eval_file': eval_file,
'num_train_epochs': 3,
'learning_rate': 1e-3,
'train_batch_size': 24, # 28
'gradient_accumulation_steps': 16,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.train(train_data,eval_data)
@staticmethod
def eval_decoration(eval_func):
# #############################################################
# examples: should set : self.wandb_proj , self.ver , self.args.hyper_args
# >>> @eval_decoration
# >>> def eval(eval_df,a,b):
# >>> eval_res = func... a,b
# >>> return eval_res
# ############################################################
def eval_method(self, eval_df, *args, **kwargs):
evel_size = self.model_args.get('eval_size')
# wand_b
wandb.init(project=self.wandb_proj, config=self.model_args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
try:
start_time = time.time()
logger.info(f'start eval: model_version---{self.model_version},eval size---{evel_size}')
eval_res = eval_func(self, eval_df, *args, **kwargs) # type:dict
logger.info('eval finished!!!')
end_time = time.time()
need_time = round((end_time - start_time) / evel_size, 5)
eval_time = round(need_time * evel_size, 4)
print(f'eval results: {eval_res}')
logger.info(f'eval time: {need_time} s * {evel_size} = {eval_time} s')
assert isinstance(eval_res, dict) == True
eval_res.update({"eval_length": evel_size})
wandb.log(eval_res)
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
eval_res = dict()
finally:
wandb.finish()
return eval_res
return eval_method
@staticmethod
def get_entity(pred_list, label='DISEASE'):
if not label:
label = ''
entities = []
e = ''
is_entity = 0
for index, p in enumerate(pred_list):
            if p == 'O':  # the 'O' (outside) label, not the digit zero
                if is_entity == 1:
                    entities.append(e)
                    e = ''
                is_entity = 0
elif p.startswith('B-' + label):
if is_entity == 1:
if e:
entities.append(e)
e = '-' + str(index)
is_entity = 1
elif p.startswith('I-' + label):
e = e + ('-' + str(index))
if is_entity == 1:
entities.append(e)
return entities
def eval(self, eval_df: pd.DataFrame):
eval_data = NerModel.deal_with_df(eval_df)
eval_size = len(set(eval_df['sentence_id'].tolist()))
self.model_args.update(
{
'eval_size': eval_size,
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'eval']
}
}
)
model = NERModel(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device)
result, model_outputs, preds_list = model.eval_model(eval_data)
wandb.init(
project=self.wandb_proj,
config = self.model_args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval']
)
wandb.log({"f1_score": result.get('f1_score')})
return result
def eval_sample(self):
eval_file = './test.xlsx'
eval_data = pd.read_excel(eval_file)
self.save_dir = './'
self.model_version = 'erv4.2.0.2'
self.model_type = 'bert'
self.use_cuda = True
self.cuda_device = 1
self.model_args = self.my_config()
self.model_args.update(
{
'eval_file': eval_file,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.eval(eval_data)
if __name__ == '__main__':
s = ['O', 'O', 'O', 'B-DISEASE', 'I-DISEASE', 'O', 'B-DISEASE', 'B-DISEASE', 'B-DISEASE', 'I-DISEASE',
'I-DISEASE', 'O', 'B-DISEASE', 'O', 'I-DISEASE', 'I-DISEASE', 'B-DISEASE', 'I-DISEASE']
print(NerModel.get_entity(s)) | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/ner_model.py | ner_model.py |
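    # A minimal training sketch (file paths and labels are assumptions) for NerModel:
    # >>> ner = NerModel()
    # >>> ner.labels = ['O', 'B-DISEASE', 'I-DISEASE']
    # >>> ner.pretrained_model = 'bert-base-multilingual-cased'
    # >>> train_df = pd.read_excel('./train.xlsx')  # columns: sentence_id, words, labels
    # >>> eval_df = pd.read_excel('./eval.xlsx')
    # >>> ner.train(train_df, eval_df)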
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.classification import ClassificationModel, ClassificationArgs, DDPClassificationModel
from simpletransformers.t5 import T5Args
from zyl_utils.model_utils.my_T5model import MyT5, MyDDPT5
class MyModel:
"""
my model for train and eval
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.wandb_proj = 'test'
self.model_version = 'test' # to save model or best model
        # version scheme a.b.c.d: a = raw-data batch, b = modeling approach (e.g. mt5 vs. classification),
        # c = data-processing variant (e.g. same input, text output vs. index output), d = hyper-parameter iteration
self.use_model = 'classification' # mt5 /classification
self.model_type = 'bert'
        self.pretrained_model = './best/v1.1.1.1/'  # pretrained model path
self.use_cuda = True
self.cuda_device = 0
self.num_labels = 2
self.args = MyModel.set_model_parameter(model_version=self.model_version,
args=self._set_args(), save_dir='./')
def _set_args(self):
if self.use_model == 't5' or self.use_model == 'mt5':
return T5Args()
else:
return ClassificationArgs()
@staticmethod
def set_model_parameter(model_version='test', args=ClassificationArgs(), save_dir='./'):
# multiprocess
args.use_multiprocessing = False
args.use_multiprocessing_for_evaluation = False
# base config
args.reprocess_input_data = True
args.use_cached_eval_features = False
args.fp16 = False
args.manual_seed = 234
        args.gradient_accumulation_steps = 2  # effectively increases the batch size, trading time for memory
# save
args.no_save = False
args.save_eval_checkpoints = False
args.save_model_every_epoch = False
args.save_optimizer_and_scheduler = True
args.save_steps = -1
# eval
args.evaluate_during_training = True
args.evaluate_during_training_verbose = True
args.no_cache = False
args.use_early_stopping = False
args.encoding = None
args.do_lower_case = False
args.dynamic_quantize = False
args.quantized_model = False
args.silent = False
args.overwrite_output_dir = True
args.output_dir = save_dir + 'outputs/' + model_version + '/'
args.cache_dir = save_dir + 'cache/' + model_version + '/'
args.best_model_dir = save_dir + 'best_model/' + model_version + '/'
args.tensorboard_dir = save_dir + 'runs/' + model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/'
return args
def get_train_model(self):
if self.args.n_gpu <= 1:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyT5(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args)
else:
return ClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyDDPT5(model_type=self.model_type, model_name=self.pretrained_model, use_cuda=True,
cuda_device=-1, args=self.args)
elif self.use_model == 'classification':
return ClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
return DDPClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=True, args=self.args, num_labels=self.num_labels)
@staticmethod
def deal_with_df(df, use_model='cls'):
if use_model == 't5' or use_model == 'mt5':
df = df[['prefix', 'input_text', 'target_text']]
df = df.astype('str')
elif use_model == 'sentence_pair':
df = df[['text_a', 'text_b', 'labels']]
df = df.astype({'text_a': 'str', 'text_b': 'str', 'labels': 'int'})
else:
df = df.astype({'text': 'str', 'labels': 'int'})
df = df[['text', 'labels']]
return df
def train(self, train_df: pd.DataFrame, eval_df: pd.DataFrame, if_send_message=False):
# deal with dt
train_df = MyModel.deal_with_df(train_df, use_model=self.use_model)
eval_df = MyModel.deal_with_df(eval_df, use_model=self.use_model)
# config some parameters
train_size = train_df.shape[0]
self.args.update_from_dict({'train_length': train_size})
all_steps = train_size / self.args.train_batch_size
self.args.logging_steps = int(max(all_steps / 10 / self.args.gradient_accumulation_steps, 1))
self.args.evaluate_during_training_steps = int(
max(all_steps / 10 / self.args.gradient_accumulation_steps, 1))
self.args.wandb_project = self.wandb_proj
self.args.wandb_kwargs = {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train']}
# get model
model = self.get_train_model()
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version},train length---{train_size}')
if self.use_model == 't5' or self.use_model == 'mt5':
model.train_model(train_data=train_df, eval_data=eval_df)
else:
model.train_model(train_df=train_df, eval_df=eval_df)
logger.info('training finished!!!')
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
if if_send_message:
print(f'train failed!!! ERROR:{error}')
# ModelUtils.send_to_me(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
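    # Minimal usage sketch (train_df / eval_df are hypothetical DataFrames shaped as expected by
    # deal_with_df above, i.e. 'text' and 'labels' columns for the default classification branch):
    # >>> m = MyModel()
    # >>> m.train(train_df, eval_df)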
def get_predict_model(self):
if self.args.n_gpu <= 1:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyT5(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args)
else:
return ClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyDDPT5(model_type=self.model_type, model_name=self.args.best_model_dir, use_cuda=True,
cuda_device=-1, args=self.args)
elif self.use_model == 'sentence_pair':
return ClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
return DDPClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=True, args=self.args, num_labels=self.num_labels)
@staticmethod
def eval_decoration(eval_func):
# #############################################################
        # example: the class using this decorator must define self.wandb_proj, self.model_version and self.args
        # >>> @eval_decoration
        # >>> def eval(self, eval_df, a, b):
        # >>>     eval_res = ...  # compute metrics from eval_df, a and b
        # >>>     return eval_res  # must return a dict
# ############################################################
def eval_method(self, eval_df, *args, **kwargs):
eval_length = eval_df.shape[0]
            # wandb
wandb.init(project=self.wandb_proj, config=self.args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
try:
start_time = time.time()
logger.info(f'start eval: model_version---{self.model_version},eval length---{eval_length}')
eval_res = eval_func(self, eval_df, *args, **kwargs) # type:dict
logger.info('eval finished!!!')
end_time = time.time()
need_time = round((end_time - start_time) / eval_length, 5)
eval_time = round(need_time * eval_length, 4)
print(f'eval results: {eval_res}')
logger.info(f'eval time: {need_time} s * {eval_length} = {eval_time} s')
                assert isinstance(eval_res, dict)
eval_res.update({"eval_length": eval_length})
wandb.log(eval_res)
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
eval_res = dict()
finally:
wandb.finish()
return eval_res
return eval_method | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/my_model.py | my_model.py |
import logging
import math
import os
import random
from dataclasses import asdict
import pandas as pd
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from simpletransformers.t5.t5_model import T5Model
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm, trange
from transformers.optimization import AdamW, Adafactor
from transformers.optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
class DDPT5Model(T5Model):
"""The DDP version of T5Model"""
def __init__(
self,
model_type,
model_name,
args=None,
tokenizer=None,
use_cuda=True,
cuda_device=-1,
**kwargs,
):
"""
Initializes a DDP T5Model model. Turn off multi-processing settings.
Args:
model_type: The type of model (t5, mt5)
model_name: The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
super().__init__(model_type, model_name, args, tokenizer, use_cuda, cuda_device, **kwargs)
self.args.use_multiprocessing = False
self.args.use_multiprocessing_for_evaluation = False
if self.args.n_gpu == 1:
raise ValueError("You are using DDP with single GPU.")
def train_model(
self,
train_data,
output_dir=None,
show_running_loss=True,
args=None,
eval_data=None,
verbose=True,
**kwargs,
):
"""
Trains the model using 'train_data'
Args:
train_data: Pandas DataFrame containing the 3 columns - `prefix`, `input_text`, `target_text`.
- `prefix`: A string indicating the task to perform. (E.g. `"question"`, `"stsb"`)
- `input_text`: The input text sequence. `prefix` is automatically prepended to form the full input. (<prefix>: <input_text>)
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
            verbose (optional): whether to print progress and logging output.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.
Returns:
""" # noqa: ignore flake8"
if args:
self.args.update_from_dict(args)
if self.args.evaluate_during_training and eval_data is None:
raise ValueError(
"evaluate_during_training is enabled but eval_data is not specified."
" Pass eval_data to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args.overwrite_output_dir = True to overcome.".format(output_dir)
)
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
os.environ['MASTER_ADDR'] = 'localhost'
port = random.randint(10000, 20000)
os.environ['MASTER_PORT'] = str(port)
mp.spawn(self.train_each_proc, nprocs=self.args.n_gpu,
args=(train_dataset, output_dir,
show_running_loss, eval_data, verbose, kwargs))
# self.save_model(model=self.model)
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args.model_name, output_dir))
def train_each_proc(self, process_index, train_dataset, *train_args):
"""
A wrapper function of train() for each process of DDP.
        :param process_index: the rank of the spawned process, supplied by mp.spawn().
        :param train_dataset: the training set.
        :param train_args: the other positional arguments passed on to train().
        :return: the same as train().
"""
self._local_rank = process_index
self._world_size = self.args.n_gpu
self.train(train_dataset, *train_args[:-1], **train_args[-1])
def train(
self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
args = self.args
self.device = torch.device(f"cuda:{self._local_rank}")
self._move_model_to_device()
torch.distributed.init_process_group(
backend='nccl',
init_method='env://',
world_size=self._world_size,
rank=self._local_rank
)
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self._local_rank])
model = self.model
if self._local_rank == 0:
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
train_sampler = DistributedSampler(
train_dataset,
num_replicas=self._world_size,
rank=self._local_rank
)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size // self._world_size,
pin_memory=True
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop("params")
custom_parameter_names.update(params)
param_group = {**group}
param_group["params"] = [p for n, p in model.named_parameters() if n in params]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop("layer")
layer = f"layer.{layer_number}."
group_d = {**group}
group_nd = {**group}
group_nd["weight_decay"] = 0.0
params_d = []
params_nd = []
for n, p in model.named_parameters():
if n not in custom_parameter_names and layer in n:
if any(nd in n for nd in no_decay):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d["params"] = params_d
group_nd["params"] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if not self.args.train_custom_parameters_only:
optimizer_grouped_parameters.extend(
[
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
)
warmup_steps = math.ceil(t_total * args.warmup_ratio)
args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps
if 0 < args.save_after < 1:
args.save_after = math.ceil(t_total * args.save_after)
if args.optimizer == "AdamW":
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
elif args.optimizer == "Adafactor":
optimizer = Adafactor(
optimizer_grouped_parameters,
lr=args.learning_rate,
eps=args.adafactor_eps,
clip_threshold=args.adafactor_clip_threshold,
decay_rate=args.adafactor_decay_rate,
beta1=args.adafactor_beta1,
weight_decay=args.weight_decay,
scale_parameter=args.adafactor_scale_parameter,
relative_step=args.adafactor_relative_step,
warmup_init=args.adafactor_warmup_init,
)
if self._local_rank == 0:
print("Using Adafactor for T5")
else:
raise ValueError(
"{} is not a valid optimizer class. Please use one of ('AdamW', 'Adafactor') instead.".format(
args.optimizer
)
)
if args.scheduler == "constant_schedule":
scheduler = get_constant_schedule(optimizer)
elif args.scheduler == "constant_schedule_with_warmup":
scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps)
elif args.scheduler == "linear_schedule_with_warmup":
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
elif args.scheduler == "cosine_schedule_with_warmup":
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "cosine_with_hard_restarts_schedule_with_warmup":
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "polynomial_decay_schedule_with_warmup":
scheduler = get_polynomial_decay_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
lr_end=args.polynomial_decay_schedule_lr_end,
power=args.polynomial_decay_schedule_power,
)
else:
raise ValueError("{} is not a valid scheduler.".format(args.scheduler))
if (
args.model_name
and os.path.isfile(os.path.join(args.model_name, "optimizer.pt"))
and os.path.isfile(os.path.join(args.model_name, "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name, "scheduler.pt")))
if self._local_rank == 0:
logger.info(" Training started")
global_step = 0
training_progress_scores = None
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch",
disable=args.silent or self._local_rank != 0, mininterval=0)
epoch_number = 0
best_eval_metric = None
current_loss = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
stop_training = False
if args.model_name and os.path.exists(args.model_name):
try:
# set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name.split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args.wandb_project and self._local_rank == 0:
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
wandb.watch(self.model)
if args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
for epoch in train_iterator:
model.train()
train_sampler.set_epoch(epoch)
if epochs_trained > 0:
epochs_trained -= 1
continue
if self._local_rank == 0:
train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")
batch_iterator = tqdm(
train_dataloader,
desc=f"Running Epoch {epoch_number} of {args.num_train_epochs} on process {self._local_rank}",
disable=args.silent or self._local_rank != 0,
mininterval=0,
)
for step, batch in enumerate(batch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs = self._get_inputs_dict(batch)
if args.fp16:
with amp.autocast():
loss = self.compute_loss(model, args, inputs)
else:
loss = self.compute_loss(model, args, inputs)
loss_ = loss.clone()
torch.distributed.barrier()
torch.distributed.reduce(loss_, 0)
current_loss = loss_.item() / self._world_size
if show_running_loss and self._local_rank == 0:
batch_iterator.set_description(
f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
scaler.unscale_(optimizer)
if args.optimizer == "AdamW":
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0 and self._local_rank == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.wandb_project or self.is_sweeping:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_last_lr()[0]
},
step=global_step
)
if args.save_steps > 0 and global_step % args.save_steps == 0 and self._local_rank == 0:
# Save model checkpoint
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and (
args.evaluate_during_training_steps > 0
and global_step % args.evaluate_during_training_steps == 0
):
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent or self._local_rank != 0,
**kwargs,
)
if self._local_rank == 0:
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args.save_eval_checkpoints:
self.save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
stop_training, best_eval_metric, early_stopping_counter = self.logging_and_saving(
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter)
torch.distributed.barrier()
stop_training_tensor = torch.tensor([stop_training], device=self.device)
torch.distributed.broadcast(stop_training_tensor, src=0)
stop_training = bool(stop_training_tensor.cpu()[0])
if stop_training:
break
model.train()
if stop_training:
break
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if (args.save_model_every_epoch or args.evaluate_during_training) and self._local_rank == 0:
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch and self._local_rank == 0:
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and args.evaluate_each_epoch:
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent or self._local_rank != 0,
**kwargs,
)
if self._local_rank == 0:
if args.save_eval_checkpoints:
self.save_model(output_dir_current, optimizer, scheduler, results=results)
stop_training, best_eval_metric, early_stopping_counter = self.logging_and_saving(
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter)
torch.distributed.barrier()
stop_training_tensor = torch.tensor([stop_training], device=self.device)
torch.distributed.broadcast(stop_training_tensor, src=0)
stop_training = bool(stop_training_tensor.cpu()[0])
if stop_training:
break
# close tensorboard writer to avoid EOFError.
if self._local_rank == 0:
tb_writer.close()
wandb.finish()
def eval_model(
self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs
):
"""
Evaluates the model on eval_data. Saves results to output_dir.
Args:
eval_data: Pandas DataFrame containing the 3 columns - `prefix`, `input_text`, `target_text`.
- `prefix`: A string indicating the task to perform. (E.g. `"question"`, `"stsb"`)
- `input_text`: The input text sequence. `prefix` is automatically prepended to form the full input. (<prefix>: <input_text>)
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
results: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args.output_dir
eval_dataset = self.load_and_cache_examples(
eval_data, evaluate=True, verbose=verbose, silent=silent
)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(
eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs
)
self.results.update(result)
if self.args.evaluate_generated_text:
if self.args.preprocess_inputs:
to_predict = [
prefix + ": " + input_text
for prefix, input_text in zip(
eval_data["prefix"], eval_data["input_text"]
)
]
else:
to_predict = [
prefix + input_text
for prefix, input_text in zip(
eval_data["prefix"], eval_data["input_text"]
)
]
preds = self.predict(to_predict)
result = self.compute_metrics(
eval_data["target_text"].tolist(), preds, **kwargs
)
self.results.update(result)
if verbose:
logger.info(self.results)
return self.results
def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_dataset.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
eval_sampler = DistributedSampler(
eval_dataset,
num_replicas=self._world_size,
rank=self._local_rank
)
eval_dataloader = DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=args.eval_batch_size // self._world_size,
pin_memory=True
)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
if self.args.fp16:
from torch.cuda import amp
for batch in tqdm(
eval_dataloader,
disable=args.silent or silent,
desc="Running Evaluation"
):
inputs = self._get_inputs_dict(batch)
with torch.no_grad():
if self.args.fp16:
with amp.autocast():
outputs = model(**inputs)
loss = outputs[0]
else:
outputs = model(**inputs)
loss = outputs[0]
torch.distributed.barrier()
torch.distributed.reduce(loss, 0)
eval_loss += loss.item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps / self._world_size
if self._local_rank == 0:
print(eval_loss)
results["eval_loss"] = eval_loss
if self._local_rank == 0:
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
return results
def logging_and_saving(
self,
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter):
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args.output_dir, "training_progress_scores.csv"), index=False,
)
if args.wandb_project or self.is_sweeping:
wandb.log(self._get_last_metrics(training_progress_scores), step=global_step)
stop_training = False
if global_step > args.save_after:
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if args.early_stopping_metric_minimize:
if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
stop_training, early_stopping_counter = \
self.check_early_stopping(early_stopping_counter, args, train_iterator, verbose)
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
stop_training, early_stopping_counter = \
self.check_early_stopping(early_stopping_counter, args, train_iterator, verbose)
return stop_training, best_eval_metric, early_stopping_counter
def check_early_stopping(self, early_stopping_counter, args, train_iterator, verbose):
stop_training = False
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
stop_training = True
return stop_training, early_stopping_counter
def compute_loss(self, model, args, inputs):
outputs = model(**inputs)
if args.r_drop:
outputs_ = model(**inputs)
loss = self.compute_r_drop_loss(
outputs['loss'],
outputs_['loss'],
outputs['logits'],
outputs_['logits'],
inputs['attention_mask'],
args.r_drop_alpha
)
else:
loss = outputs[0]
return loss
def compute_kl_loss(self, p, q, pad_mask=None, reduction='mean'):
p_loss = F.kl_div(F.log_softmax(p, dim=-1), F.softmax(q, dim=-1), reduction='none')
q_loss = F.kl_div(F.log_softmax(q, dim=-1), F.softmax(p, dim=-1), reduction='none')
if pad_mask is not None:
p_loss.masked_fill_(pad_mask.to(bool).unsqueeze(-1), 0.)
q_loss.masked_fill_(pad_mask.to(bool).unsqueeze(-1), 0.)
if reduction == 'mean':
p_loss = p_loss.mean()
q_loss = q_loss.mean()
elif reduction == 'sum':
p_loss = p_loss.sum()
q_loss = q_loss.sum()
else:
raise ValueError('Only mean or sum reduction is supported in computing KL Divergence!')
loss = (p_loss + q_loss) / 2
return loss
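    # R-Drop objective as implemented by compute_r_drop_loss below (a sketch, not an official
    # reference): the batch is forwarded twice with different dropout masks, and
    #   loss = 0.5 * (ce1 + ce2) + alpha * 0.5 * (KL(p || q) + KL(q || p))
    # where p and q are the two output distributions and padding positions are excluded from the KL terms.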
def compute_r_drop_loss(self, ce1, ce2, logit1, logit2, attention_mask, alpha, reduction='mean'):
        # `attention_mask` is 1 for real tokens and 0 for padding, so invert it to obtain the pad mask
        kl_loss = self.compute_kl_loss(logit1, logit2, pad_mask=(attention_mask == 0), reduction=reduction)
ce_loss = 0.5 * (ce1 + ce2)
return ce_loss + alpha * kl_loss | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/DDPT5model.py | DDPT5model.py |
import copy
from concurrent.futures import ThreadPoolExecutor, as_completed
import torch
from loguru import logger
from ..data_utils.processing import Processor
class ModelUtils:
def __init__(self):
pass
@staticmethod
def get_best_cuda_device(gpu_num=1):
"""
        Get the indices of the `gpu_num` GPUs that currently have the most free memory.
        Args:
            gpu_num: how many GPU indices to return
        Returns:
            a comma-separated string of GPU indices, e.g. '1,2'
"""
import pynvml
import numpy as np
pynvml.nvmlInit()
deviceCount = pynvml.nvmlDeviceGetCount()
deviceMemory = dict()
for i in range(deviceCount):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
            deviceMemory.update({i: mem_info.free / 1024 / 1024})  # free memory in MB
deviceMemory = sorted(deviceMemory.items(), key=lambda x: x[1], reverse=True)
deviceMemory = np.array(deviceMemory, dtype=np.int64).tolist()
deviceMemory_tuple = deviceMemory[0:gpu_num]
deviceMemory = ','.join([str(d[0]) for d in deviceMemory_tuple])
logger.info(f'Use (gpus, memories): {deviceMemory_tuple}M')
return deviceMemory
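    # Example (hypothetical usage): pick the two GPUs with the most free memory before building a
    # model; the returned string matches the format expected by CUDA_VISIBLE_DEVICES.
    # >>> ModelUtils.get_best_cuda_device(gpu_num=2)
    # '1,2'
    # >>> import os; os.environ['CUDA_VISIBLE_DEVICES'] = ModelUtils.get_best_cuda_device(gpu_num=2)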
@staticmethod
def fix_torch_multiprocessing():
"""
        This function disables PyTorch's shared-memory usage in DataLoader collation and tensor pickling
        to work around `OSError: [Errno 12] Cannot allocate memory`,
        which can occur when multiprocessing is used to convert data into transformers features.
        Call this function at the top of `train.py`, or before loading a transformer model.
Reference:
- https://github.com/huaweicloud/dls-example/issues/26#issuecomment-411990039
- https://github.com/pytorch/fairseq/issues/1171#issuecomment-549345884
"""
import sys
import torch
from torch.utils.data import dataloader
from torch.multiprocessing.reductions import ForkingPickler
default_collate_func = dataloader.default_collate
def default_collate_override(batch):
dataloader._use_shared_memory = False
return default_collate_func(batch)
setattr(dataloader, 'default_collate', default_collate_override)
for t in torch._storage_classes:
if sys.version_info[0] == 2:
if t in ForkingPickler.dispatch:
del ForkingPickler.dispatch[t]
else:
if t in ForkingPickler._extra_reducers:
del ForkingPickler._extra_reducers[t]
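    # Usage sketch (hypothetical model construction): call once before any model or DataLoader is built,
    # so that everything created afterwards uses the patched collation.
    # >>> ModelUtils.fix_torch_multiprocessing()
    # >>> model = MyT5('mt5', 'google/mt5-base')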
@staticmethod
def predict_with_multi_gpus(self, to_predict, gpus: list = None):
"""
        Predict with multiple GPUs. The model class must set `self.funcs = None` in its __init__.
        Args:
            self: a model instance (a simpletransformers-style model with a predict() method)
            to_predict: a list of items to predict
            gpus: a list of GPU indices, e.g. ["1", "2"]
        Returns:
            the prediction results
"""
        if gpus and (len(to_predict) <= len(gpus)):
gpus = None
if gpus and (len(gpus) == 1):
gpus = None
if not gpus:
outputs = self.predict(to_predict=to_predict)
else:
if not self.funcs:
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
print('Start processing data...')
max_workers = len(gpus)
sub_data_sets = Processor.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
                for f in as_completed(futures):  # yields each future as it completes (non-blocking)
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
outputs = []
for i in sorted(res.keys()):
for j in res[i]:
outputs.append(j)
return outputs
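    # Usage sketch (hypothetical model path and GPU ids): the wrapper classes below (MyT5 / MyNer)
    # expose this helper as a predict_with_multi_gpus method.
    # >>> model = MyT5('mt5', './best_model/v1/', use_cuda=True, cuda_device=0)
    # >>> preds = model.predict_with_multi_gpus(to_predict, gpus=['0', '1'])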
from simpletransformers.t5 import T5Model
from simpletransformers.ner import NERModel
class MyT5(T5Model):
def __init__(self, model_type, model_name, args=None, tokenizer=None, use_cuda=True, cuda_device=-1, **kwargs):
super(MyT5, self).__init__(model_type=model_type, model_name=model_name, args=args,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
self.funcs = []
def predict_with_multi_gpus(self, to_predict, gpus: list = None):
return ModelUtils.predict_with_multi_gpus(self, to_predict, gpus)
class MyNer(NERModel):
def __init__(self, model_type, model_name, args=None, labels=None, tokenizer=None, use_cuda=True, cuda_device=-1,
**kwargs):
super(MyNer, self).__init__(model_type=model_type, model_name=model_name, args=args, labels=labels,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
self.funcs = []
def predict_with_multi_gpus(self, to_predict, gpus: list = None):
return ModelUtils.predict_with_multi_gpus(self, to_predict, gpus)
# ##################################################################
# @staticmethod
# def eval_entry_match(model, eval_df: pd.DataFrame, my_dict, delimiter='|', use_dict_match=True,
# pos_neg_ratio=None, keep_entry_in_dict=True, use_multi_gpus=None):
# prefixes = eval_df['prefix'].tolist()
# input_texts = eval_df['input_text'].tolist()
# target_texts = eval_df['target_text'].tolist()
#
# revised_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# pred_target_texts = NERUtils.predict_entry_match(em_model=model, prefix_match_dict=my_dict.prefix_match_dict,
# prefixes=prefixes, input_texts=input_texts,
# use_multi_gpus=use_multi_gpus,
# use_dict_match=use_dict_match)
#
# revised_pred_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=pred_target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# eval_df['true_target_text'] = revised_target_texts
# eval_df['pred_target_text'] = revised_pred_target_texts
#
# eval_res = {}
# for prefix in set(prefixes):
# prefix_df = eval_df[eval_df['prefix'] == prefix]
# y_true = prefix_df['true_target_text'].tolist()
# y_pred = prefix_df['pred_target_text'].tolist()
# print(f'{prefix} report:')
# res_df = NERUtils.entity_recognition_v2(y_true, y_pred, pos_neg_ratio=pos_neg_ratio)
# eval_res[prefix] = res_df
#
# print(f'sum report:')
# res_df = NERUtils.entity_recognition_v2(revised_target_texts, revised_pred_target_texts,
# pos_neg_ratio=pos_neg_ratio)
# eval_res['sum'] = res_df
# return eval_res
#
#
# @staticmethod
# def predict_entry_match(em_model, prefix_match_dict, prefixes: list, input_texts: list, use_dict_match=True,
# use_multi_gpus=None):
# if len(input_texts) == 1:
# use_multi_gpus = None
# if use_dict_match:
# pred_by_dict = []
# for p, i in zip(prefixes, input_texts):
# pred_by_dict.append(
# NERUtils.predict_entry_match_by_dict_match(str(i).strip(), dictionary=prefix_match_dict.get(p),
# use_edit_distance=False))
#
# # i = i.lower() # modify
#
# # if p == 'disease_em':
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=di_dict, use_edit_distance=False))
# # else:
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=tar_dict, use_edit_distance=False))
# else:
# pred_by_dict = [None] * len(input_texts)
#
# to_predict_texts = [i + ': ' + j for i, j in zip(prefixes, input_texts)]
# if not use_multi_gpus:
# pred_by_model = em_model.predict(to_predict_texts)
# else:
# pred_by_model = em_model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
# # pred_by_model = em_model.predict(to_predict_texts)
# assert len(pred_by_model) == len(pred_by_dict)
# pred_target_texts = [d if d else m for d, m in zip(pred_by_dict, pred_by_model)]
# return pred_target_texts
#
#
# @staticmethod
# def predict_entry_match_by_dict_match(input_text: str, dictionary: dict, use_edit_distance: bool = False):
# """predict the entry of a string by using dictionary match
#
# Args:
# input_text: a string
# dictionary: the dict, {entity:entry}
# use_edit_distance: True or False
#
# Returns:
# None or entry(str)
# """
# entry = dictionary.get(input_text)
# if not entry:
# if use_edit_distance:
# import Levenshtein
# max_score = 0
# for every_entity in dictionary.keys():
# score = Levenshtein.ratio(every_entity, input_text)
# if score >= max_score and score > 0.80: # 42-->43-->52
# max_score = score
# entry = dictionary.get(every_entity)
# return entry # None or entry
#
#
# @staticmethod
# def em_revise_target_texts(prefixes, target_texts, prefix_dict, delimiter='|', keep_entry_in_dict=False):
# revised_target_texts = [NERUtils.revise_target_text(t_t, return_format='set', delimiter=delimiter) for
# t_t in target_texts] # type:list[set,...]
#
# if keep_entry_in_dict:
# result = []
# for p, r_t_t in zip(prefixes, revised_target_texts):
# res = set()
# if r_t_t:
# for j in list(r_t_t):
# if j in prefix_dict.get(p):
# res.add(j)
# result.append(res)
# return result
# return revised_target_texts # type:list[set]
# @staticmethod
# def eval_by_auto_batch_size(job, eval_df, initial_eval_batch_size=600):
# """
#
# Args:
# job: you function. if run error, return None.
# eval_df: eval dataframe
# initial_eval_batch_size:
#
# Returns:
#
# """
# eval_batch_size = initial_eval_batch_size
# q = mp.Queue()
# pl = {'eval_batch_size': eval_batch_size}
# res = None
# while not res:
# eval_batch_size = int(eval_batch_size * 0.8)
# print(f'try eval_batch_size: {eval_batch_size}')
# pl['eval_batch_size'] = eval_batch_size
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# res = q.get()
# print(res)
#
# @staticmethod
# def eval_by_different_parameters(job, parameter_cfg: dict, eval_df):
# q = mp.Queue()
# parameters_list = NERUtils.get_parameters_list(parameter_cfg)
# for pl in parameters_list:
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# print(q.get())
#
# @staticmethod
# def get_parameters_list(parameter_cfg: dict):
# """
#
# Args:
# parameter_cfg: like:{'truncating_size': [100,10], 'overlapping_size': [10],'max_seq_length':[100,30]}
#
# Returns:[{'truncating_size': 100, 'overlapping_size': 10, 'max_seq_length': 100}, {'truncating_size': 100,
# 'overlapping_size': 10, 'max_seq_length': 30}, {'truncating_size': 10, 'overlapping_size': 10,
# 'max_seq_length': 100}, {'truncating_size': 10, 'overlapping_size': 10, 'max_seq_length': 30}]
#
# """
# parameters_list = []
# keys = []
# values = []
# for i, j in parameter_cfg.items():
# keys.append(i)
# values.append(j)
# for para in product(*values): # 求多个可迭代对象的笛卡尔积
# cfg = dict(zip(keys, para))
# parameters_list.append(cfg)
# return parameters_list # type:list
# @staticmethod
# def cut_entities(input_entities: list, prefixes: list):
# assert len(input_entities) == len(prefixes) # a input_text corresponds a prefix
# input_texts_ids = range(len(input_entities))
#
# cut_ids = []
# cut_input_entities = []
# cut_prefixes = []
# for id, i_e, p in zip(input_texts_ids, input_entities, prefixes):
# if not isinstance(i_e, set):
# cut_i_e = NERUtils.revise_target_text(target_text=i_e, return_format='set', delimiter='|')
# else:
# cut_i_e = i_e
# if cut_i_e != set():
# for c_i_t in cut_i_e:
# cut_ids.append(id)
# cut_input_entities.append(c_i_t)
# cut_prefixes.append(p)
# return cut_ids, cut_input_entities, cut_prefixes # type:list
#
# @staticmethod
# def combine_cut_entities(input_entities: list, cut_entities: list, cut_ids: list):
# dic = dict()
# for i, j in zip(cut_ids, cut_entities):
# if i not in dic.keys():
# dic[i] = j
# else:
# if isinstance(j, str):
# dic[i] = dic[i] + '|' + j
# else:
# dic[i].update(j)
#
# res = []
# all_keys = list(dic.keys())
# for i in range(len(input_entities)):
# if i in all_keys:
# res.append(dic[i])
# else:
# res.append(set())
# return res
###################################
# eval_entry_match
# em_revise_target_texts
# predict_entry_match
# predict_entry_match_by_dict_match
# model.predict_gpu | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/model_utils.py | model_utils.py
import copy
from concurrent.futures import ThreadPoolExecutor, as_completed
import torch
from simpletransformers.t5 import T5Model
try:
from zyl_utils.model_utils.models.DDPT5model import DDPT5Model
except:
print()
from zyl_utils.data_utils.nlp_utils import DTUtils
class MyT5(T5Model):
"""
add function: use-multi-gpu
"""
def __init__(self, model_type, model_name, args=None, tokenizer=None, use_cuda=True, cuda_device=-1, **kwargs):
super(MyT5, self).__init__(model_type=model_type, model_name=model_name, args=args,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
def get_funcs(self, gpus):
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
def predict_gpu(self, to_predict, gpus: list = None):
# gpus can be like: ["1","2"]
        if gpus and (len(to_predict) <= len(gpus)):
gpus = None
if gpus and (len(gpus) == 1):
gpus = None
if not gpus:
outputs = self.predict(to_predict=to_predict)
else:
if not self.funcs:
self.get_funcs(gpus)
print('Start processing data...')
max_workers = len(gpus)
sub_data_sets = DTUtils.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
                for f in as_completed(futures):  # yields each future as it completes (non-blocking)
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
outputs = []
for i in sorted(res.keys()):
for j in res[i]:
outputs.append(j)
return outputs
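    # Usage sketch (hypothetical model path and GPU list): predict_gpu falls back to a plain
    # self.predict() when `gpus` is None, has a single element, or has at least as many entries
    # as `to_predict`.
    # >>> model = MyT5('mt5', './best_model/v1/', use_cuda=True, cuda_device=0)
    # >>> outputs = model.predict_gpu(texts, gpus=['0', '1'])  # texts: a (hypothetical) list of inputs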
class MyDDPT5(DDPT5Model):
"""
add function: use-multi-gpu
"""
def __init__(self, model_type, model_name, args=None, tokenizer=None, use_cuda=True, cuda_device=-1, **kwargs):
super(MyDDPT5, self).__init__(model_type=model_type, model_name=model_name, args=args,
tokenizer=tokenizer, use_cuda=use_cuda, cuda_device=cuda_device, **kwargs)
def get_funcs(self, gpus):
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
def predict_gpu(self, to_predict, gpus: list = None):
# gpus can be like: ["1","2"]
        if gpus and (len(to_predict) <= len(gpus)):
gpus = None
if gpus and (len(gpus) == 1):
gpus = None
if not gpus:
outputs = self.predict(to_predict=to_predict)
else:
if not self.funcs:
self.get_funcs(gpus)
print('Start processing data...')
max_workers = len(gpus)
sub_data_sets = DTUtils.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
                for f in as_completed(futures):  # yields each future as it completes (non-blocking)
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
outputs = []
for i in sorted(res.keys()):
for j in res[i]:
outputs.append(j)
return outputs | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/my_T5model.py | my_T5model.py |
pass
# ##################################################################
# @staticmethod
# def eval_entry_match(model, eval_df: pd.DataFrame, my_dict, delimiter='|', use_dict_match=True,
# pos_neg_ratio=None, keep_entry_in_dict=True, use_multi_gpus=None):
# prefixes = eval_df['prefix'].tolist()
# input_texts = eval_df['input_text'].tolist()
# target_texts = eval_df['target_text'].tolist()
#
# revised_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# pred_target_texts = NERUtils.predict_entry_match(em_model=model, prefix_match_dict=my_dict.prefix_match_dict,
# prefixes=prefixes, input_texts=input_texts,
# use_multi_gpus=use_multi_gpus,
# use_dict_match=use_dict_match)
#
# revised_pred_target_texts = NERUtils.em_revise_target_texts(prefixes=prefixes, target_texts=pred_target_texts,
# prefix_dict=my_dict.prefix_dict,
# delimiter=delimiter,
# keep_entry_in_dict=keep_entry_in_dict)
#
# eval_df['true_target_text'] = revised_target_texts
# eval_df['pred_target_text'] = revised_pred_target_texts
#
# eval_res = {}
# for prefix in set(prefixes):
# prefix_df = eval_df[eval_df['prefix'] == prefix]
# y_true = prefix_df['true_target_text'].tolist()
# y_pred = prefix_df['pred_target_text'].tolist()
# print(f'{prefix} report:')
# res_df = NERUtils.entity_recognition_v2(y_true, y_pred, pos_neg_ratio=pos_neg_ratio)
# eval_res[prefix] = res_df
#
# print(f'sum report:')
# res_df = NERUtils.entity_recognition_v2(revised_target_texts, revised_pred_target_texts,
# pos_neg_ratio=pos_neg_ratio)
# eval_res['sum'] = res_df
# return eval_res
#
#
# @staticmethod
# def predict_entry_match(em_model, prefix_match_dict, prefixes: list, input_texts: list, use_dict_match=True,
# use_multi_gpus=None):
# if len(input_texts) == 1:
# use_multi_gpus = None
# if use_dict_match:
# pred_by_dict = []
# for p, i in zip(prefixes, input_texts):
# pred_by_dict.append(
# NERUtils.predict_entry_match_by_dict_match(str(i).strip(), dictionary=prefix_match_dict.get(p),
# use_edit_distance=False))
#
# # i = i.lower() # modify
#
# # if p == 'disease_em':
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=di_dict, use_edit_distance=False))
# # else:
# # pred_by_dict.append(
# # NERUtils.predict_entry_match_by_dict_match(i, dictionary=tar_dict, use_edit_distance=False))
# else:
# pred_by_dict = [None] * len(input_texts)
#
# to_predict_texts = [i + ': ' + j for i, j in zip(prefixes, input_texts)]
# if not use_multi_gpus:
# pred_by_model = em_model.predict(to_predict_texts)
# else:
# pred_by_model = em_model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
# # pred_by_model = em_model.predict(to_predict_texts)
# assert len(pred_by_model) == len(pred_by_dict)
# pred_target_texts = [d if d else m for d, m in zip(pred_by_dict, pred_by_model)]
# return pred_target_texts
#
#
# @staticmethod
# def predict_entry_match_by_dict_match(input_text: str, dictionary: dict, use_edit_distance: bool = False):
# """predict the entry of a string by using dictionary match
#
# Args:
# input_text: a string
# dictionary: the dict, {entity:entry}
# use_edit_distance: True or False
#
# Returns:
# None or entry(str)
# """
# entry = dictionary.get(input_text)
# if not entry:
# if use_edit_distance:
# import Levenshtein
# max_score = 0
# for every_entity in dictionary.keys():
# score = Levenshtein.ratio(every_entity, input_text)
# if score >= max_score and score > 0.80: # 42-->43-->52
# max_score = score
# entry = dictionary.get(every_entity)
# return entry # None or entry
#
#
# @staticmethod
# def em_revise_target_texts(prefixes, target_texts, prefix_dict, delimiter='|', keep_entry_in_dict=False):
# revised_target_texts = [NERUtils.revise_target_text(t_t, return_format='set', delimiter=delimiter) for
# t_t in target_texts] # type:list[set,...]
#
# if keep_entry_in_dict:
# result = []
# for p, r_t_t in zip(prefixes, revised_target_texts):
# res = set()
# if r_t_t:
# for j in list(r_t_t):
# if j in prefix_dict.get(p):
# res.add(j)
# result.append(res)
# return result
# return revised_target_texts # type:list[set]
# @staticmethod
# def eval_by_auto_batch_size(job, eval_df, initial_eval_batch_size=600):
# """
#
# Args:
# job: you function. if run error, return None.
# eval_df: eval dataframe
# initial_eval_batch_size:
#
# Returns:
#
# """
# eval_batch_size = initial_eval_batch_size
# q = mp.Queue()
# pl = {'eval_batch_size': eval_batch_size}
# res = None
# while not res:
# eval_batch_size = int(eval_batch_size * 0.8)
# print(f'try eval_batch_size: {eval_batch_size}')
# pl['eval_batch_size'] = eval_batch_size
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# res = q.get()
# print(res)
#
# @staticmethod
# def eval_by_different_parameters(job, parameter_cfg: dict, eval_df):
# q = mp.Queue()
# parameters_list = NERUtils.get_parameters_list(parameter_cfg)
# for pl in parameters_list:
# eval_process = mp.Process(target=job, args=(pl, q, eval_df,))
# eval_process.start()
# eval_process.join()
# print(q.get())
#
# @staticmethod
# def get_parameters_list(parameter_cfg: dict):
# """
#
# Args:
# parameter_cfg: like:{'truncating_size': [100,10], 'overlapping_size': [10],'max_seq_length':[100,30]}
#
# Returns:[{'truncating_size': 100, 'overlapping_size': 10, 'max_seq_length': 100}, {'truncating_size': 100,
# 'overlapping_size': 10, 'max_seq_length': 30}, {'truncating_size': 10, 'overlapping_size': 10,
# 'max_seq_length': 100}, {'truncating_size': 10, 'overlapping_size': 10, 'max_seq_length': 30}]
#
# """
# parameters_list = []
# keys = []
# values = []
# for i, j in parameter_cfg.items():
# keys.append(i)
# values.append(j)
# for para in product(*values): # 求多个可迭代对象的笛卡尔积
# cfg = dict(zip(keys, para))
# parameters_list.append(cfg)
# return parameters_list # type:list
# @staticmethod
# def cut_entities(input_entities: list, prefixes: list):
# assert len(input_entities) == len(prefixes) # a input_text corresponds a prefix
# input_texts_ids = range(len(input_entities))
#
# cut_ids = []
# cut_input_entities = []
# cut_prefixes = []
# for id, i_e, p in zip(input_texts_ids, input_entities, prefixes):
# if not isinstance(i_e, set):
# cut_i_e = NERUtils.revise_target_text(target_text=i_e, return_format='set', delimiter='|')
# else:
# cut_i_e = i_e
# if cut_i_e != set():
# for c_i_t in cut_i_e:
# cut_ids.append(id)
# cut_input_entities.append(c_i_t)
# cut_prefixes.append(p)
# return cut_ids, cut_input_entities, cut_prefixes # type:list
#
# @staticmethod
# def combine_cut_entities(input_entities: list, cut_entities: list, cut_ids: list):
# dic = dict()
# for i, j in zip(cut_ids, cut_entities):
# if i not in dic.keys():
# dic[i] = j
# else:
# if isinstance(j, str):
# dic[i] = dic[i] + '|' + j
# else:
# dic[i].update(j)
#
# res = []
# all_keys = list(dic.keys())
# for i in range(len(input_entities)):
# if i in all_keys:
# res.append(dic[i])
# else:
# res.append(set())
# return res
###################################
# eval_entry_match
# em_revise_target_texts
# predict_entry_match
# predict_entry_match_by_dict_match
# model.predict_gpu
# | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/entry_match.py | entry_match.py |
import copy
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import pandas as pd
import torch
import wandb
from loguru import logger
from simpletransformers.ner import NERModel
from zyl_utils.data_utils.processing import Processor
from ..metrics.ner_metric import entity_recognition_metrics
from tqdm import tqdm
class NerBIO:
"""
ner model for train and eval---bio--simple-trainsformers
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.describe = " use simple-transformers--ner-model"
self.wandb_proj = 'ner'
self.save_dir = './'
self.model_version = 'v0.0.0.0' # to save model or best model
        # version format a.b.c.d: a = raw-data batch, b = model/method batch,
        # c = data-preparation batch for the model, d = hyper-parameter tuning iteration
self.model_type = 'roberta'
        self.pretrained_model = 'roberta-base'  # path or name of the pretrained model (model_name)
self.labels = ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
self.use_cuda = True
self.cuda_device = 0
self.model_args = self.my_config()
self.funcs = None
self.model = None
self.my_tokenizer =None
def my_config(self):
return {
'train_batch_size': 8,
'use_multiprocessing': False,
'use_multiprocessing_for_evaluation': False,
# multiprocess
# base config
'reprocess_input_data': True,
'use_cached_eval_features': False,
'fp16': False,
'manual_seed': 234,
            'gradient_accumulation_steps': 1,  # effectively increases the batch size, trading time for memory
# save
'no_save': False,
'save_eval_checkpoints': False,
'save_model_every_epoch': False,
'save_optimizer_and_scheduler': True,
'save_steps': -1,
# eval
'evaluate_during_training': True,
'evaluate_during_training_verbose': True,
'no_cache': False,
'use_early_stopping': False,
'encoding': None,
'do_lower_case': False,
'dynamic_quantize': False,
'quantized_model': False,
'silent': False,
'overwrite_output_dir': True,
'output_dir': self.save_dir + 'outputs/' + self.model_version + '/',
'cache_dir': self.save_dir + 'cache/' + self.model_version + '/',
'best_model_dir': self.save_dir + 'best_model/' + self.model_version + '/',
'tensorboard_dir': self.save_dir + 'runs/' + self.model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/',
}
@staticmethod
def deal_with_df(df: pd.DataFrame):
df = df[["sentence_id", "words", "labels"]]
df = df.astype({'sentence_id': 'int', 'words': 'str', 'labels': 'str'})
return df
def train(self, train_data: pd.DataFrame, eval_data: pd.DataFrame, wandb_log=None):
# deal with dt
train_data = NerBIO.deal_with_df(train_data)
eval_data = NerBIO.deal_with_df(eval_data)
train_size = len(set(train_data['sentence_id'].tolist()))
eval_size = len(set(eval_data['sentence_id'].tolist()))
# update args
all_steps = train_size / self.model_args.get('train_batch_size')
self.model_args.update(
{
'logging_steps': int(max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'evaluate_during_training_steps': int(
max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train']
}
}
)
# get model
model = NERModel(model_type=self.model_type, model_name=self.pretrained_model, labels=self.labels,
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device)
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version},train_size---{train_size}')
model.train_model(train_data=train_data, eval_data=eval_data)
logger.info('training finished!!!')
wandb.log({'train_size': train_size, 'eval_size': eval_size})
if wandb_log:
wandb.log(wandb_log)
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
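    # Minimal usage sketch (hypothetical label set and DataFrames; the frames need the
    # 'sentence_id', 'words' and 'labels' columns enforced by deal_with_df above):
    # >>> ner = NerBIO()
    # >>> ner.labels = ['O', 'B-DISEASE', 'I-DISEASE']
    # >>> ner.train(train_df, eval_df, wandb_log={'data_version': 'v1'})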
@staticmethod
def get_id_entity(pred_list, label='DISEASE'):
"""
        Extract index-based entity spans from a BIO-tagged sequence, e.g. ['O', 'O', 'O', 'B-DISEASE', 'I-DISEASE', 'O'] ----> ['-3-4']
Args:
pred_list: ['O', 'O', 'O', 'B-DISEASE', 'I-DISEASE', 'O', ]
label: DISEASE
Returns:
['-3-4']
"""
if not label:
label = ''
entities = []
e = ''
is_entity = 0
for index, p in enumerate(pred_list):
if p == 'O':
if is_entity == 1:
entities.append(e)
is_entity = 0
elif p.startswith('B-' + label):
if is_entity == 1:
if e:
entities.append(e)
e = '-' + str(index)
is_entity = 1
elif p.startswith('I-' + label):
e = e + ('-' + str(index))
if is_entity == 1:
entities.append(e)
return entities # list or []
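    # Example (doctest-style sketch): consecutive B-/I- tags are merged into one '-start-...-end'
    # index string, and an entity that runs to the end of the sequence is still returned.
    # >>> NerBIO.get_id_entity(['O', 'B-DISEASE', 'I-DISEASE', 'O', 'B-DISEASE'], label='DISEASE')
    # ['-1-2', '-4']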
def eval(self, eval_df: pd.DataFrame, ner_t5_metric=False, wandb_log=None):
eval_data = NerBIO.deal_with_df(eval_df)
eval_size = len(set(eval_df['sentence_id'].tolist()))
        # wandb
wandb.init(project=self.wandb_proj, config=self.model_args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
model = NERModel(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device,
labels=self.labels)
result, model_outputs, preds_list = model.eval_model(eval_data)
if wandb_log:
wandb.log(wandb_log)
wandb.log({"f1_score": result.get('f1_score'), 'eval_size': eval_size})
if ner_t5_metric:
all_entities_cls = set()
for c in self.labels:
if c.startswith('B'):
all_entities_cls.add(c.split('-')[-1])
labels = eval_data.groupby(by=['sentence_id'], sort=False)
labels = labels.apply(lambda x: x['labels'].tolist())
for c in all_entities_cls:
y_pred = [set(NerBIO.get_id_entity(p, label=c)) for p in preds_list]
y_true = [set(NerBIO.get_id_entity(l, label=c)) for l in labels]
print(c + ": \n")
res_df = entity_recognition_metrics(y_true, y_pred)
wandb.log({c + "_" + "ner_t5_metric": res_df.iloc[2, -1]})
def predict_with_multi_gpus(self, to_predict, gpus: list = None, **kwargs):
"""
        Multi-GPU prediction, intended for large-scale evaluation. Requires ``self.funcs = None``
        to be set in ``__init__``.
        Args:
            to_predict: the items to predict, a list
            gpus: a list of GPU ids, e.g. ["1", "2"]. If the model's own cuda device is not in
                this list, only the GPUs listed here are used for prediction.
        Returns:
            the prediction results
"""
if not self.model:
self.model = NERModel(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device,
labels=self.labels)
        if gpus and (len(to_predict) <= len(gpus)):
            gpus = None
        if gpus and (len(gpus) == 1):
            gpus = None
if not gpus:
preds, model_outputs = self.model.predict(to_predict=to_predict, **kwargs)
else:
if not self.funcs:
self.funcs = []
for i in gpus:
if i != self.model.device.index:
other_m = copy.deepcopy(self.model)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.model.predict)
max_workers = len(gpus)
sub_data_sets = Processor.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt, **kwargs): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
for f in as_completed(futures): # not block,iterator
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
preds = []
model_outputs = []
for i in sorted(res.keys()):
preds.extend(res[i][0])
model_outputs.extend(res[i][1])
return preds, model_outputs
    def predict_texts(self, to_predict, split_on_space=False, if_cut_sentences=False):
if not self.model:
self.model = NERModel(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device,
labels=self.labels)
if not self.my_tokenizer:
from zyl_utils.data_utils.text_processing import MyTokenizer
self.my_tokenizer = MyTokenizer()
        predict_ids = list(range(len(to_predict)))  # sample ids
        sentence_ids = []  # the sample id each sentence belongs to
sentences = []
if if_cut_sentences:
for t,i in zip(to_predict,predict_ids):
tmp_sentences = self.my_tokenizer.cut_paragraph_to_sentences(t) # [str]
for s in tmp_sentences:
words = self.my_tokenizer.cut_sentence_to_words(s, return_starts=False)
sentences.append(words)
sentence_ids.append(i)
else:
for t,i in zip(to_predict,predict_ids):
words = self.my_tokenizer.cut_sentence_to_words(t, return_starts=False)
sentences.append(words)
sentence_ids.append(i)
pred_res, _ = self.model.predict(sentences, split_on_space=split_on_space)
labels = set()
for l in self.labels:
if l!='O':
labels.add(l.split('-')[-1])
if split_on_space:
split_symbol = ' '
else:
split_symbol = ''
results = []
for p_i in predict_ids:
res = {l:set() for l in labels}
for p_r,s_i in zip(pred_res,sentence_ids):
if p_i == s_i:
words = [list(_.keys())[0] for _ in p_r]
pred = [list(_.values())[0] for _ in p_r] # ['B-DISEASE','I'....]
for l in labels:
entities_ids = NerBIO.get_id_entity(pred, label=l) # ['-0-1-2','-3-4'...]
for entity_id in entities_ids:
starts_id = int(entity_id.split('-')[1])
end_id = int(entity_id.split('-')[-1])
res[l].add(split_symbol.join(words[starts_id:end_id+1]))
results.append(res)
return results # [{'TENDEREE': {'临沂市人民医院'}}]
# pred = NerBIO.get_id_entity(pred, label=label)
# pred = [list(p.values())[0] for p in pred[0]]
# preds = []
# for text in tqdm(to_predict):
# if if_cut_sentences:
#
# else:
# sentences = [text]
# entities_in_one_text = []
# for sentence in sentences:
# words, starts = self.my_tokenizer.cut_sentence_to_words(sentence, return_starts=True)
#
# pred, _ = self.predict_with_multi_gpus([words], split_on_space=split_on_space) # [{'entity':'B-DISEASE'...}]
# pred = [list(p.values())[0] for p in pred[0]] # ['B-DISEASE','I'....]
# pred = NerBIO.get_id_entity(pred, label=label) # ['-0-1-2','-3-5'...]
#
# entities_in_one_sentence = []
# if pred:
# for entity in pred:
# starts_id = int(entity.split('-')[1])
# end_id = int(entity.split('-')[-1])
# entities_in_one_sentence.append(sentence[starts[starts_id]:
# starts[end_id] + len(words[end_id])]) # ['癌症'...]
# entities_in_one_text.extend(entities_in_one_sentence)
# preds.append(entities_in_one_text)
# return preds
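# --- Illustrative helper (an addition for clarity; not part of the original package) -----------
# NerBIO.get_id_entity returns index spans such as '-3-4'. The sketch below shows how such spans
# can be mapped back to surface text, mirroring the slicing done inside predict_texts above.
def _demo_spans_to_text(words, span_ids, joiner=''):
    """Join the tokens covered by each '-start-...-end' span id into a surface string."""
    results = []
    for span in span_ids:
        parts = span.split('-')  # e.g. '-3-4' -> ['', '3', '4']
        start_id, end_id = int(parts[1]), int(parts[-1])
        results.append(joiner.join(words[start_id:end_id + 1]))
    return results
# Example: _demo_spans_to_text(['pancreatic', 'cancer', 'patient'], ['-0-1'], joiner=' ') -> ['pancreatic cancer']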
class NerBIOModel(NERModel):
def __init__(self, model_type, model_name, labels=None, weight=None, args=None, use_cuda=True, cuda_device=-1,
onnx_execution_provider=None, **kwargs, ):
super(NerBIOModel, self).__init__(model_type, model_name, labels=labels, weight=weight, args=args,
use_cuda=use_cuda,
cuda_device=cuda_device, onnx_execution_provider=onnx_execution_provider,
**kwargs)
self.funcs = None
from zyl_utils.data_utils.text_processing import MyTokenizer
self.my_tokenizer = MyTokenizer()
def predict_with_multi_gpus(self, to_predict, gpus: list = None, **kwargs):
"""
        Multi-GPU prediction. Requires ``self.funcs = None`` to be set in ``__init__``.
        Args:
            to_predict: the items to predict, a list
            gpus: a list of GPU ids, e.g. ["1", "2"]
        Returns:
            the prediction results
"""
        if gpus and (len(to_predict) <= len(gpus)):
            gpus = None
        if gpus and (len(gpus) == 1):
            gpus = None
if not gpus:
preds, model_outputs = self.predict(to_predict=to_predict, **kwargs)
else:
if not self.funcs:
self.funcs = []
for i in gpus:
if i != self.device.index:
other_m = copy.deepcopy(self)
other_m.device = torch.device(f"cuda:{i}")
self.funcs.append(other_m.predict)
else:
self.funcs.append(self.predict)
max_workers = len(gpus)
sub_data_sets = Processor.split_data_evenly(to_predict, len(gpus))
res = dict()
with ThreadPoolExecutor(max_workers=max_workers) as executor:
assert len(self.funcs) == len(sub_data_sets)
futures = {executor.submit(self.funcs[n], dt, **kwargs): n for dt, n in
zip(sub_data_sets, list(range(len(sub_data_sets))))}
for f in as_completed(futures): # not block,iterator
f.dt_id = futures[f]
res.update({f.dt_id: f.result()})
preds = []
model_outputs = []
for i in sorted(res.keys()):
preds.extend(res[i][0])
model_outputs.extend(res[i][1])
return preds, model_outputs
def predict_texts(self, to_predict, split_on_space=False, label='DISEASE'):
from tqdm import tqdm
preds = []
for text in tqdm(to_predict):
sentences = self.my_tokenizer.cut_paragraph_to_sentences(text)
entities_in_one_text = []
for sentence in sentences:
words, starts = self.my_tokenizer.cut_sentence_to_words(sentence, return_starts=True)
pred, _ = self.predict([words], split_on_space=split_on_space) # [{'entity':'B-DISEASE'...}]
pred = [list(p.values())[0] for p in pred[0]] # ['B-DISEASE','I'....]
pred = NerBIO.get_id_entity(pred, label=label) # ['-0-1-2','-3-5'...]
entities_in_one_sentence = []
if pred:
for entity in pred:
starts_id = int(entity.split('-')[1])
end_id = int(entity.split('-')[-1])
entities_in_one_sentence.append(sentence[starts[starts_id]:
starts[end_id] + len(words[end_id])]) # ['癌症'...]
entities_in_one_text.extend(entities_in_one_sentence)
preds.append(entities_in_one_text)
return preds
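# Usage sketch for NerBIOModel.predict_texts (hypothetical checkpoint path and labels; kept as a
# comment because it requires a trained model on disk):
#   model = NerBIOModel(model_type='bert', model_name='./best_model/v0.0.0.0/',
#                       labels=["O", "B-DISEASE", "I-DISEASE"])
#   model.predict_texts(["The patient was diagnosed with lung cancer."], label='DISEASE')
#   -> [['lung cancer']]   # one list of entity strings per input text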
if __name__ == '__main__':
from zyl_utils import get_best_cuda_device
class M(NerBIO):
def __init__(self):
super(M, self).__init__()
self.wandb_proj = 'test'
self.use_cuda = True
self.cuda_device = get_best_cuda_device()
self.save_dir = './'
def train_sample(self):
train_file = './test.xlsx'
eval_file = './test.xlsx'
train_df = pd.read_excel(train_file) # type:pd.DataFrame
eval_df = pd.read_excel(eval_file) # type:pd.DataFrame
self.model_version = 'v0.0.0.0'
self.model_type = 'bert'
            self.pretrained_model = 'bert-base-multilingual-cased'  # pretrained model location (model_name)
self.model_args = self.my_config()
self.model_args.update(
{
'num_train_epochs': 3,
'learning_rate': 3e-4,
'train_batch_size': 24, # 28
'gradient_accumulation_steps': 16,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.labels = ["O", "B-DISEASE", "I-DISEASE"]
self.train(train_df, eval_df, wandb_log=None)
def eval_sample(self):
eval_file = './test.xlsx'
eval_data = pd.read_excel(eval_file)
self.model_version = 'erv4.2.0.2'
self.model_type = 'bert'
self.model_args = self.my_config()
self.model_args.update(
{
# 'best_model_dir':'./',
'eval_batch_size': 16,
}
)
self.eval(eval_data, ner_t5_metric=True, wandb_log={'eval_file': eval_file}) | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/ner_bio.py | ner_bio.py |
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.t5 import T5Model
from ..metrics.ner_metric import entity_recognition_metrics
class NerT5:
"""
    NER model for training and evaluation --- T5 --- simpletransformers
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.describe = " use simple-transformers--t5-model"
self.wandb_proj = 'mt5'
self.save_dir = './' # save output_file
self.model_version = 'v0.0.0.0' # to save model or best model
        # version format a.b.c.d: a = raw-data batch, b = model/method batch, c = data-processing batch, d = hyper-parameter tuning iteration
self.model_type = 't5'
        self.pretrained_model = 't5-base'  # pretrained model location (model_name)
self.use_cuda = True
self.cuda_device = 0
self.model_args = self.my_config()
def my_config(self):
return {
'train_batch_size': 8,
'max_seq_length': 256,
# multiprocess
'use_multiprocessing': False,
'use_multiprocessing_for_evaluation': False,
# base config
'reprocess_input_data': True,
'use_cached_eval_features': False,
'fp16': False,
'manual_seed': 234,
            'gradient_accumulation_steps': 1,  # effectively increases the batch size; trades time for memory
# save
'no_save': False,
'save_eval_checkpoints': False,
'save_model_every_epoch': False,
'save_optimizer_and_scheduler': True,
'save_steps': -1,
# eval
'evaluate_during_training': True,
'evaluate_during_training_verbose': True,
# normal
'no_cache': False,
'use_early_stopping': False,
'encoding': None,
'do_lower_case': False,
'dynamic_quantize': False,
'quantized_model': False,
'silent': False,
# save
'overwrite_output_dir': True,
'output_dir': self.save_dir + 'outputs/' + self.model_version + '/',
'cache_dir': self.save_dir + 'cache/' + self.model_version + '/',
'best_model_dir': self.save_dir + 'best_model/' + self.model_version + '/',
'tensorboard_dir': self.save_dir + 'runs/' + self.model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/',
# t5 args
'use_multiprocessed_decoding': False,
'num_beams': 1,
'length_penalty': 2.0,
'max_length': 20,
'num_return_sequences': 1,
'preprocess_inputs': True,
'repetition_penalty': 1.0,
'special_tokens_list': [],
'top_k': None,
'top_p': None,
}
def _deal_with_df(self, data, sliding_window=False, delimiter='|', up_sampling=False):
data = data[['prefix', 'input_text', 'target_text']]
data = data.astype('str')
if sliding_window:
from transformers import T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained(self.pretrained_model)
data['input_text'] = data['input_text'].apply(NerT5._split_text_with_sliding_window,
args=(self.model_args.get('max_seq_length'),
tokenizer, 0.8))
data = data.explode('input_text')
res = []
for i, t in zip(data['input_text'].tolist(), data['target_text'].tolist()):
if t != delimiter:
all_entities = list(set(t.split(delimiter)))
if '' in all_entities:
all_entities.remove('')
r = delimiter
if all_entities:
for e in all_entities:
if str(e) in str(i):
r = r + str(e) + delimiter
res.append(r)
else:
res.append(t)
data['target_text'] = res
if up_sampling:
pos_data = data[data['target_text'] != '|']
from sklearn.utils import resample
up_sampling_data = resample(pos_data, replace=True, n_samples=(len(data) - len(pos_data) - len(pos_data)))
data = pd.concat([data, up_sampling_data], ignore_index=True)
data = resample(data, replace=False)
data.dropna(inplace=True)
return data
def train(self, train_data: pd.DataFrame, eval_data: pd.DataFrame, sliding_window=False, up_sampling=False,
wandb_log=None):
# deal with dt
train_raw_size = train_data.shape[0]
eval_raw_size = eval_data.shape[0]
logger.info('processing data...')
train_data = self._deal_with_df(train_data, sliding_window=sliding_window, delimiter='|',
up_sampling=up_sampling)
eval_data = self._deal_with_df(eval_data, sliding_window=sliding_window, delimiter='|')
train_size = train_data.shape[0]
all_steps = train_size / self.model_args.get('train_batch_size')
self.model_args.update(
{
'logging_steps': int(max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'evaluate_during_training_steps': int(
max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train'],
}
}
)
model = T5Model(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.model_args)
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version},train_size---{train_raw_size}')
model.train_model(train_data=train_data, eval_data=eval_data)
logger.info('training finished!!!')
wandb.log({"eval_size": eval_raw_size, 'train_size': train_raw_size})
if wandb_log:
wandb.log(wandb_log)
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
def eval(self, eval_data: pd.DataFrame, check_in_input_text: bool = False, delimiter='|',
tokenizer=None, use_sliding_window=False, sliding_window=None, stride=0.8,
pos_neg_ratio=None, use_multi_gpus=None, self_metric=False, wandb_log=None):
# deal_with_dt
eval_data = self._deal_with_df(eval_data, sliding_window=False)
eval_size = eval_data.shape[0]
# wand_b
wandb.init(project=self.wandb_proj, config=self.model_args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
try:
start_time = time.time()
logger.info(f'start eval: model_version---{self.model_version},eval size---{eval_size}')
model = T5Model(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.model_args)
eval_res = NerT5._eval_entity_recognition(model, eval_data=eval_data, delimiter=delimiter,
check_in_input_text=check_in_input_text,
tokenizer=tokenizer, use_sliding_window=use_sliding_window,
sliding_window=sliding_window, stride=stride,
pos_neg_ratio=pos_neg_ratio, use_multi_gpus=use_multi_gpus,
self_metric=self_metric)
if wandb_log:
wandb.log(wandb_log)
wandb_log = {"eval_size": eval_size}
for k, v in eval_res.items():
wandb_log.update({k: v.iloc[2, -1]})
wandb.log(wandb_log)
logger.info('eval finished!!!')
end_time = time.time()
need_time = round((end_time - start_time) / eval_size, 5)
eval_time = round(need_time * eval_size, 4)
print(f'eval results: {eval_res}')
logger.info(f'eval time: {need_time} s * {eval_size} = {eval_time} s')
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
finally:
wandb.finish()
@staticmethod
def _eval_entity_recognition(model, eval_data: pd.DataFrame, check_in_input_text: bool, delimiter='|',
tokenizer=None, use_sliding_window=False, sliding_window=512, stride=0.8,
pos_neg_ratio=None, use_multi_gpus=None, self_metric=False):
"""eval entity recognition in mt5 model, version-v2 , reference: https://docs.qq.com/doc/DYXRYQU1YbkVvT3V2
Args:
model: a mt5 model
eval_data: a pd.Dataframe , must have columns ['prefix','input_text','target_text']
check_in_input_text: if the entities are in input_texts
delimiter: the delimiter in target_text to split different entities
use_sliding_window: if truncate the input text when predict
sliding_window: truncating_size
stride: overlapping_size
use_multi_gpus:use_multi_gpus
pos_neg_ratio : the ratio of positive and negative sample importance
self_metric:self_metric
tokenizer: tokenizer to split sentence
Returns:
show report and res, {prefix:res_df},type:dict
"""
eval_data = eval_data[['prefix', 'input_text', 'target_text']]
eval_data = eval_data.astype('str')
prefixes = eval_data['prefix'].to_list()
input_texts = eval_data['input_text'].tolist()
target_texts = eval_data['target_text'].tolist()
revised_target_texts = NerT5._revise_target_texts(target_texts=target_texts,
input_texts=input_texts, delimiter=delimiter,
check_in_input_text=check_in_input_text)
pred_target_texts = NerT5.predict_entity_recognition(model, prefixes, input_texts, tokenizer=tokenizer,
use_sliding_window=use_sliding_window,
sliding_window=sliding_window, stride=stride,
delimiter=delimiter, use_multi_gpus=use_multi_gpus)
revised_pred_target_texts = NerT5._revise_target_texts(target_texts=pred_target_texts,
input_texts=input_texts, delimiter=delimiter,
check_in_input_text=check_in_input_text)
eval_data['true_target_text'] = revised_target_texts
eval_data['pred_target_text'] = revised_pred_target_texts
eval_res = {}
for prefix in set(prefixes):
prefix_df = eval_data[eval_data['prefix'] == prefix]
y_true = prefix_df['true_target_text'].tolist()
y_pred = prefix_df['pred_target_text'].tolist()
print(f'{prefix} report:')
res_df = entity_recognition_metrics(y_true, y_pred, pos_neg_ratio=pos_neg_ratio,
self_metric=self_metric)
eval_res[prefix] = res_df
print(f'sum report:')
res_df = entity_recognition_metrics(revised_target_texts, revised_pred_target_texts,
pos_neg_ratio=pos_neg_ratio, self_metric=self_metric)
eval_res['ner_t5_metric'] = res_df
return eval_res # {prefix:res_df},type:dict
@staticmethod
def predict_entity_recognition(model, prefixes: list, input_texts: list, use_sliding_window=False,
sliding_window=None, stride=0.8, tokenizer=None,
delimiter='|', use_multi_gpus=None) -> list:
"""predict entity recognition in mt5 model,
Args:
model: a mt5 model
prefixes: prefixes
input_texts: input_texts
use_sliding_window: if use_sliding_window
sliding_window: sliding_window,the max token length for the model input(max_sequence_length)
tokenizer: tokenizer
stride: stride,(1-stride)*sliding_window for overlapping
delimiter: the delimiter in target_text to split different entities,default: '|'
use_multi_gpus: use_multi_gpus
Returns:
pred_target_texts:list,every element in pred_target_texts corresponds a prefix and an input_text
"""
if not sliding_window:
sliding_window = model.args.max_seq_length
if len(input_texts) == 1:
use_multi_gpus = None
assert len(prefixes) == len(input_texts)
if use_sliding_window:
t_ids, t_prefixes, t_input_texts = NerT5._split_texts_with_sliding_window(input_texts, prefixes,
tokenizer=tokenizer,
sliding_window=sliding_window,
stride=stride)
to_predict_texts = [i + ': ' + j for i, j in zip(t_prefixes, t_input_texts)]
if not use_multi_gpus:
pred_target_texts = model.predict(to_predict_texts)
else:
pred_target_texts = model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
pred_target_texts = NerT5._combine_pred_target_texts_by_ids(pred_target_texts, t_ids, delimiter)
else:
to_predict_texts = [i + ': ' + j for i, j in zip(prefixes, input_texts)]
if not use_multi_gpus:
pred_target_texts = model.predict(to_predict_texts)
else:
pred_target_texts = model.predict_gpu(to_predict_texts, gpus=use_multi_gpus)
assert len(pred_target_texts) == len(input_texts)
return pred_target_texts # type:list[str]
@staticmethod
def _split_text_with_sliding_window(text: str, sliding_window=128, tokenizer=None, stride=0.8) -> list:
""" any sequence exceeding the max_seq_length will be split into several windows (sub-sequences),
each of length max_seq_length. The windows will typically overlap each other to a certain degree to
minimize any information loss that may be caused by hard cutoffs.
Args:
text: a str text
sliding_window: truncating_size:sliding window, max_seq_length
tokenizer: tokenizer
stride: The amount of overlap between the windows,The stride can be specified in terms of either a fraction
of the max_seq_length, or as an absolute number of tokens.
Returns:
truncated_input_text: the list of truncated_input_text
"""
        sliding_window = sliding_window - 8  # reserve room for special tokens such as </s>, <sep>, etc.
if not isinstance(text, str):
text = str(text)
if not tokenizer:
try:
from simpletransformers.t5 import T5Model
tokenizer = T5Model('mt5', 'google/mt5-base').tokenizer
except Exception:
print('no tokenizer....')
tokens = tokenizer.tokenize(text)
if len(tokens) <= sliding_window:
return [text]
else:
split_text = []
if stride < 1:
step_size = int(sliding_window * stride)
else:
step_size = int(stride)
steps = int(len(tokens) / step_size)
for i in range(0, steps + 1):
text_i_tokens = tokens[i * step_size:i * step_size + sliding_window]
if text_i_tokens:
text_i = ''.join(text_i_tokens).replace('▁', ' ').strip()
split_text.append(text_i)
if (len(split_text) > 1) and (
len(tokenizer.tokenize(split_text[-1])) < (sliding_window - step_size)):
split_text = split_text[0:-1]
return split_text
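    # Windowing arithmetic of the method above, with illustrative numbers: sliding_window=128
    # leaves an effective window of 120 tokens; stride=0.8 gives step_size = int(120 * 0.8) = 96,
    # so a 300-token text produces windows over tokens [0:120], [96:216] and [192:300] (a trailing
    # 12-token window [288:300] is dropped by the final length check).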
@staticmethod
def _split_texts_with_sliding_window(input_texts: list, prefixes: list, tokenizer=None,
sliding_window=512, stride=0.8):
""" for every input_text in input_texts, split it and record the split_ids for combining
Args:
input_texts: the list of many input_text
prefixes: the prefix list of the input_texts list
sliding_window: sliding_window,the max token length for the model input(max_sequence_length)
tokenizer: tokenizer
stride: stride,(1-stride)*sliding_window for overlapping
Returns:
split_ids, split_prefixes, split_input_texts
"""
assert len(input_texts) == len(prefixes) # every input_text corresponds a prefix
input_texts_ids = range(len(input_texts))
split_ids = []
split_prefixes = []
split_input_texts = []
if not tokenizer:
try:
from transformers.models.t5 import T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained("google/mt5-base")
except Exception:
print('no tokenizer....')
for i_t_d, p, i_t in zip(input_texts_ids, prefixes, input_texts):
split_input_text = NerT5._split_text_with_sliding_window(i_t, sliding_window, tokenizer, stride)
for t_i_t in split_input_text:
split_ids.append(i_t_d)
split_input_texts.append(t_i_t)
split_prefixes.append(p)
return split_ids, split_prefixes, split_input_texts # type:tuple[list[int],list[str],list[str]]
@staticmethod
def _combine_pred_target_texts_by_ids(pred_target_texts, split_ids, delimiter: str = '|') -> list:
"""combine truncated_predicted_target_texts split_ids
Args:
pred_target_texts: the result of predicting the truncated input_texts
split_ids: get the truncated_ids when truncating input_texts
delimiter: the delimiter in target_text to split different entities
Returns:
pred_target_texts: predicted target_texts
"""
ids_target_text_dict = dict()
for i, j in zip(split_ids, pred_target_texts):
if not ids_target_text_dict.get(i):
ids_target_text_dict[i] = delimiter + j + delimiter
else:
ids_target_text_dict[i] = ids_target_text_dict[i] + j + delimiter
pred_target_texts = [ids_target_text_dict[k] for k in sorted(ids_target_text_dict.keys())]
return pred_target_texts # type:list
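    # Sketch of the combine step with delimiter '|':
    #   _combine_pred_target_texts_by_ids(['a', 'b'], split_ids=[0, 0]) -> ['|a|b|']
    # i.e. the predictions of all windows that came from the same original text are concatenated
    # back into a single delimiter-separated target string.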
@staticmethod
def _revise_target_texts(target_texts: list, input_texts: list, check_in_input_text: bool = False, delimiter='|'):
"""revise the target texts,
Args:
target_texts: the list of the target_texts
input_texts: the list of the input_texts
check_in_input_text: if check the entities in input_text
delimiter: the delimiter in target_text to split different entities
Returns:
revised_target_texts = list[set]
"""
revised_target_texts = [NerT5._revise_target_text(t_t, return_format='set', delimiter=delimiter) for
t_t in target_texts] # type:list[set,...]
if check_in_input_text:
revised_target_texts = NerT5._keep_entities_in_input_text(input_texts, revised_target_texts)
return revised_target_texts # type:list[set]
@staticmethod
def _revise_target_text(target_text: str, delimiter: str = '|', return_format='set'):
""" revise the target text
Args:
target_text: str, target_text
return_format: 'set' means:'every entity is an element in a set', 'str' means: different entities are split
by the delimiter
delimiter: the delimiter in target_text to split different entities
Returns:
revised_target_text : set or list
"""
assert isinstance(target_text, str)
target_text = target_text.split(delimiter)
target_text = set([' '.join(e.strip().split()) for e in target_text])
if '' in target_text:
target_text.remove('')
if return_format == 'set':
revised_target_text = target_text
elif return_format == 'list':
revised_target_text = list(target_text)
else: # return_format == 'str'
revised_target_text = '|'
if target_text != set():
for entity in list(target_text):
revised_target_text += (str(entity) + '|')
return revised_target_text
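    # Example: _revise_target_text('lung cancer| high  blood pressure |', delimiter='|', return_format='set')
    #   -> {'lung cancer', 'high blood pressure'}   (whitespace is normalised, empty entries are dropped)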
@staticmethod
def _keep_entities_in_input_text(input_texts: list, target_texts: list):
"""for each sample, for every entity ,keep the entities that are in the input text,and remove other entities
Args:
input_texts: the list of many input_text,and every input text is a string
target_texts: the list of many target_text,and evert target text is a set
Returns:
revise_target_texts: list[str]
"""
revised_target_texts = []
for input_text, target_text in zip(input_texts, target_texts):
if target_text != set():
elements = list(target_text)
for e in elements:
if str(e) not in input_text:
target_text.remove(e) # type:set
revised_target_texts.append(target_text)
return revised_target_texts # type:list[set]
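# Data-format sketch (an assumption that mirrors how train()/eval() consume their DataFrames):
#   prefix       input_text                                          target_text
#   'disease'    'The patient has lung cancer and hypertension.'     '|lung cancer|hypertension|'
# target_text lists the entities of the prefix class separated by the delimiter '|'; a sample with
# no entities carries the bare delimiter '|' as its target.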
if __name__ == '__main__':
from zyl_utils import get_best_cuda_device
class M(NerT5):
def __init__(self):
super(M, self).__init__()
self.wandb_proj = 'test'
self.save_dir = './'
self.model_type = 'mt5' # t5
self.use_cuda = True
self.cuda_device = get_best_cuda_device()
def train_sample(self):
train_file = './test.xlsx'
eval_file = './test.xlsx'
train_df = pd.read_excel(train_file) # type:pd.DataFrame
eval_df = pd.read_excel(eval_file) # type:pd.DataFrame
self.model_version = 'v0.0.0.0'
            self.pretrained_model = 'google/mt5-base'  # pretrained model location (model_name)
self.model_args = self.my_config()
self.model_args.update(
{
'num_train_epochs': 3,
'learning_rate': 3e-4,
'train_batch_size': 24, # 28
'gradient_accumulation_steps': 16,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.train(train_df, eval_df, sliding_window=True,
wandb_log={'train_file': train_file, 'eval_file': eval_file})
def eval_sample(self):
eval_file = './test.xlsx'
eval_data = pd.read_excel(eval_file)
self.model_version = 'erv0.0.0.0'
self.model_args = self.my_config()
self.model_args.update(
{
'eval_batch_size': 16,
# 'best_model_dir':'./'
}
)
self.eval(eval_data, check_in_input_text=False, delimiter='|',
tokenizer=None, use_sliding_window=False) | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/ner_t5.py | ner_t5.py |
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.ner import NERModel
class NerModel:
"""
ner model for train and eval
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.describe = " use simple-transformers--ner-model"
self.show_running_loss = False
self.wandb_proj = 'ner'
self.save_dir = '../'
self.model_version = 'v0.0.0.0' # to save model or best model
        # version format a.b.c.d: a = raw-data batch, b = model/method batch (e.g. mt5 vs. classification),
        # c = data-processing batch (e.g. whether the output is text or an index), d = hyper-parameter tuning iteration
self.model_type = 'roberta'
        self.pretrained_model = 'roberta-base'  # pretrained model location (model_name)
self.use_cuda = True
self.cuda_device = 0
self.labels = ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
self.model_args = self.my_config()
def my_config(self):
return {
'train_batch_size': 8,
'use_multiprocessing': False,
'use_multiprocessing_for_evaluation': False,
# multiprocess
# base config
'reprocess_input_data': True,
'use_cached_eval_features': False,
'fp16': False,
'manual_seed': 234,
            'gradient_accumulation_steps': 1,  # effectively increases the batch size; trades time for memory
# save
'no_save': False,
'save_eval_checkpoints': False,
'save_model_every_epoch': False,
'save_optimizer_and_scheduler': True,
'save_steps': -1,
# eval
'evaluate_during_training': True,
'evaluate_during_training_verbose': True,
'no_cache': False,
'use_early_stopping': False,
'encoding': None,
'do_lower_case': False,
'dynamic_quantize': False,
'quantized_model': False,
'silent': False,
'overwrite_output_dir': True,
'output_dir': self.save_dir + 'outputs/' + self.model_version + '/',
'cache_dir': self.save_dir + 'cache/' + self.model_version + '/',
'best_model_dir': self.save_dir + 'best_model/' + self.model_version + '/',
'tensorboard_dir': self.save_dir + 'runs/' + self.model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/',
}
@staticmethod
def deal_with_df(df):
df = df[["sentence_id", "words", "labels"]]
df = df.astype({'sentence_id': 'int', 'words': 'str', 'labels': 'str'})
return df
def train(self, train_data: pd.DataFrame, eval_data: pd.DataFrame):
# deal with dt
train_data = NerModel.deal_with_df(train_data)
eval_data = NerModel.deal_with_df(eval_data)
train_size = len(set(train_data['sentence_id'].tolist()))
eval_size = len(set(eval_data['sentence_id'].tolist()))
all_steps = train_size / self.model_args.get('train_batch_size')
self.model_args.update(
{
'train_size': train_size,
'eval_size': eval_size,
'logging_steps': int(max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'evaluate_during_training_steps': int(
max(all_steps / 10 / self.model_args.get('gradient_accumulation_steps'), 1)),
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train']
}
}
)
# get model
model = NERModel(model_type=self.model_type, model_name=self.pretrained_model, labels=self.labels,
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device)
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version}')
model.train_model(train_data=train_data, eval_data=eval_data)
logger.info('training finished!!!')
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
def train_example(self):
train_file = './test.xlsx'
eval_file = './test.xlsx'
train_data = pd.read_excel(train_file)
eval_data = pd.read_excel(eval_file)
self.save_dir = '../'
self.model_version = 'erv4.2.0.2'
self.model_type = 'bert'
        self.pretrained_model = 'bert-base-multilingual-cased'  # pretrained model location (model_name)
self.use_cuda = True
self.cuda_device = 0
self.labels = ["O", "B-DISEASE", "I-DISEASE"]
self.model_args = self.my_config()
self.model_args.update(
{
'train_file': train_file,
'eval_file': eval_file,
'num_train_epochs': 3,
'learning_rate': 1e-3,
'train_batch_size': 24, # 28
'gradient_accumulation_steps': 16,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.train(train_data, eval_data)
@staticmethod
def eval_decoration(eval_func):
# #############################################################
# examples: should set : self.wandb_proj , self.ver , self.args.hyper_args
# >>> @eval_decoration
# >>> def eval(eval_df,a,b):
# >>> eval_res = func... a,b
# >>> return eval_res
# ############################################################
def eval_method(self, eval_df, *args, **kwargs):
            eval_size = self.model_args.get('eval_size')
            # wandb
            wandb.init(project=self.wandb_proj, config=self.model_args,
                       name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
                       tags=[self.model_version, 'eval'])
            try:
                start_time = time.time()
                logger.info(f'start eval: model_version---{self.model_version},eval size---{eval_size}')
                eval_res = eval_func(self, eval_df, *args, **kwargs)  # type:dict
                logger.info('eval finished!!!')
                end_time = time.time()
                need_time = round((end_time - start_time) / eval_size, 5)
                eval_time = round(need_time * eval_size, 4)
                print(f'eval results: {eval_res}')
                logger.info(f'eval time: {need_time} s * {eval_size} = {eval_time} s')
                assert isinstance(eval_res, dict)
                eval_res.update({"eval_length": eval_size})
wandb.log(eval_res)
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
eval_res = dict()
finally:
wandb.finish()
return eval_res
return eval_method
@staticmethod
def get_entity(pred_list, label='DISEASE'):
if not label:
label = ''
entities = []
e = ''
is_entity = 0
for index, p in enumerate(pred_list):
            if p == 'O':
if is_entity == 1:
entities.append(e)
is_entity = 0
elif p.startswith('B-' + label):
if is_entity == 1:
if e:
entities.append(e)
e = '-' + str(index)
is_entity = 1
elif p.startswith('I-' + label):
e = e + ('-' + str(index))
if is_entity == 1:
entities.append(e)
return entities
    def eval(self, eval_df: pd.DataFrame, use_t5_matric=False):
eval_data = NerModel.deal_with_df(eval_df)
eval_size = len(set(eval_df['sentence_id'].tolist()))
self.model_args.update(
{
'eval_size': eval_size,
'wandb_project': self.wandb_proj,
'wandb_kwargs': {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'eval']
}
}
)
model = NERModel(model_type=self.model_type, model_name=self.model_args.get('best_model_dir'),
args=self.model_args, use_cuda=self.use_cuda, cuda_device=self.cuda_device)
result, model_outputs, preds_list = model.eval_model(eval_data)
if use_t5_matric:
            labels = eval_data.groupby(by=['sentence_id'], sort=False)
            labels = labels.apply(lambda x: x['labels'].tolist())
            preds_list = [set(NerModel.get_entity(p)) for p in preds_list]
            labels = [set(NerModel.get_entity(l)) for l in labels]
            from zyl_utils.model_utils.ner_utils import NERUtils
            NERUtils.entity_recognition_v2(labels, preds_list)
print('1')
# # wandb updata
# wandb.init(
# project=self.wandb_proj,
# config = self.model_args,
# name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
# tags=[self.model_version, 'eval']
# )
# wandb.log({"f1_score": result.get('f1_score')})
def eval_sample(self):
eval_file = './test.xlsx'
eval_data = pd.read_excel(eval_file)
self.save_dir = '../'
self.model_version = 'erv4.2.0.2'
self.model_type = 'bert'
self.use_cuda = True
self.cuda_device = 1
self.model_args = self.my_config()
self.model_args.update(
{
'eval_file': eval_file,
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
self.eval(eval_data)
if __name__ == '__main__':
s = ['O', 'O', 'O', 'B-DISEASE', 'I-DISEASE', 'O', 'B-DISEASE', 'B-DISEASE', 'B-DISEASE', 'I-DISEASE',
'I-DISEASE', 'O', 'B-DISEASE', 'O', 'I-DISEASE', 'I-DISEASE', 'B-DISEASE', 'I-DISEASE']
print(NerModel.get_entity(s)) | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/ner_model.py | ner_model.py |
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.classification import ClassificationModel, ClassificationArgs, DDPClassificationModel
from simpletransformers.t5 import T5Args
from zyl_utils.model_utils.models.my_T5model import MyT5, MyDDPT5
class MyModel:
"""
my model for train and eval
"""
def __init__(self):
self.start_time = '...'
self.end_time = '...'
self.wandb_proj = 'test'
self.model_version = 'test' # to save model or best model
        # version format a.b.c.d: a = raw-data batch, b = model/method batch (e.g. mt5 vs. classification),
        # c = data batch for the model (e.g. whether the output is text or an index), d = hyper-parameter tuning iteration
self.use_model = 'classification' # mt5 /classification
self.model_type = 'bert'
        self.pretrained_model = './best/v1.1.1.1/'  # pretrained model location
self.use_cuda = True
self.cuda_device = 0
self.num_labels = 2
self.args = MyModel.set_model_parameter(model_version=self.model_version,
args=self._set_args(), save_dir='../')
def _set_args(self):
if self.use_model == 't5' or self.use_model == 'mt5':
return T5Args()
else:
return ClassificationArgs()
@staticmethod
def set_model_parameter(model_version='test', args=ClassificationArgs(), save_dir='./'):
# multiprocess
args.use_multiprocessing = False
args.use_multiprocessing_for_evaluation = False
# base config
args.reprocess_input_data = True
args.use_cached_eval_features = False
args.fp16 = False
args.manual_seed = 234
        args.gradient_accumulation_steps = 2  # effectively increases the batch size; trades time for memory
# save
args.no_save = False
args.save_eval_checkpoints = False
args.save_model_every_epoch = False
args.save_optimizer_and_scheduler = True
args.save_steps = -1
# eval
args.evaluate_during_training = True
args.evaluate_during_training_verbose = True
args.no_cache = False
args.use_early_stopping = False
args.encoding = None
args.do_lower_case = False
args.dynamic_quantize = False
args.quantized_model = False
args.silent = False
args.overwrite_output_dir = True
args.output_dir = save_dir + 'outputs/' + model_version + '/'
args.cache_dir = save_dir + 'cache/' + model_version + '/'
args.best_model_dir = save_dir + 'best_model/' + model_version + '/'
args.tensorboard_dir = save_dir + 'runs/' + model_version + '/' + time.strftime("%Y%m%d_%H%M%S",
time.localtime()) + '/'
return args
def get_train_model(self):
if self.args.n_gpu <= 1:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyT5(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args)
else:
return ClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyDDPT5(model_type=self.model_type, model_name=self.pretrained_model, use_cuda=True,
cuda_device=-1, args=self.args)
elif self.use_model == 'classification':
return ClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
return DDPClassificationModel(model_type=self.model_type, model_name=self.pretrained_model,
use_cuda=True, args=self.args, num_labels=self.num_labels)
@staticmethod
def deal_with_df(df, use_model='cls'):
if use_model == 't5' or use_model == 'mt5':
df = df[['prefix', 'input_text', 'target_text']]
df = df.astype('str')
elif use_model == 'sentence_pair':
df = df[['text_a', 'text_b', 'labels']]
df = df.astype({'text_a': 'str', 'text_b': 'str', 'labels': 'int'})
else:
df = df.astype({'text': 'str', 'labels': 'int'})
df = df[['text', 'labels']]
return df
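    # Expected input columns per use_model (a summary of deal_with_df above):
    #   't5' / 'mt5'      -> ['prefix', 'input_text', 'target_text']   (all str)
    #   'sentence_pair'   -> ['text_a', 'text_b', 'labels']            (labels: int)
    #   anything else     -> ['text', 'labels']                        (labels: int)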
def train(self, train_df: pd.DataFrame, eval_df: pd.DataFrame, if_send_message=False):
# deal with dt
train_df = MyModel.deal_with_df(train_df, use_model=self.use_model)
eval_df = MyModel.deal_with_df(eval_df, use_model=self.use_model)
# config some parameters
train_size = train_df.shape[0]
self.args.update_from_dict({'train_length': train_size})
all_steps = train_size / self.args.train_batch_size
self.args.logging_steps = int(max(all_steps / 10 / self.args.gradient_accumulation_steps, 1))
self.args.evaluate_during_training_steps = int(
max(all_steps / 10 / self.args.gradient_accumulation_steps, 1))
self.args.wandb_project = self.wandb_proj
self.args.wandb_kwargs = {
'name': self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
'tags': [self.model_version, 'train']}
# get model
model = self.get_train_model()
# train
try:
start_time = time.time()
logger.info(f'start training: model_version---{self.model_version},train length---{train_size}')
if self.use_model == 't5' or self.use_model == 'mt5':
model.train_model(train_data=train_df, eval_data=eval_df)
else:
model.train_model(train_df=train_df, eval_df=eval_df)
logger.info('training finished!!!')
end_time = time.time()
logger.info(f'train time: {round(end_time - start_time, 4)} s')
except Exception as error:
logger.error(f'train failed!!! ERROR:{error}')
if if_send_message:
print(f'train failed!!! ERROR:{error}')
# ModelUtils.send_to_me(f'train failed!!! ERROR:{error}')
finally:
wandb.finish()
# ModelUtils.remove_some_model_files(model.args)
def get_predict_model(self):
if self.args.n_gpu <= 1:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyT5(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args)
else:
return ClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
if self.use_model == 't5' or self.use_model == 'mt5':
self.args.use_multiprocessed_decoding = False
return MyDDPT5(model_type=self.model_type, model_name=self.args.best_model_dir, use_cuda=True,
cuda_device=-1, args=self.args)
elif self.use_model == 'sentence_pair':
return ClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=self.use_cuda, cuda_device=self.cuda_device, args=self.args,
num_labels=self.num_labels)
else:
return DDPClassificationModel(model_type=self.model_type, model_name=self.args.best_model_dir,
use_cuda=True, args=self.args, num_labels=self.num_labels)
@staticmethod
def eval_decoration(eval_func):
# #############################################################
# examples: should set : self.wandb_proj , self.ver , self.args.hyper_args
# >>> @eval_decoration
# >>> def eval(eval_df,a,b):
# >>> eval_res = func... a,b
# >>> return eval_res
# ############################################################
def eval_method(self, eval_df, *args, **kwargs):
eval_length = eval_df.shape[0]
# wand_b
wandb.init(project=self.wandb_proj, config=self.args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
try:
start_time = time.time()
logger.info(f'start eval: model_version---{self.model_version},eval length---{eval_length}')
eval_res = eval_func(self, eval_df, *args, **kwargs) # type:dict
logger.info('eval finished!!!')
end_time = time.time()
need_time = round((end_time - start_time) / eval_length, 5)
eval_time = round(need_time * eval_length, 4)
print(f'eval results: {eval_res}')
logger.info(f'eval time: {need_time} s * {eval_length} = {eval_time} s')
assert isinstance(eval_res, dict) == True
eval_res.update({"eval_length": eval_length})
wandb.log(eval_res)
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
eval_res = dict()
finally:
wandb.finish()
return eval_res
return eval_method | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/my_model.py | my_model.py |
import math
from dataclasses import dataclass, field
from typing import Dict
from sentence_transformers import InputExample
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import \
CESoftmaxAccuracyEvaluator, CECorrelationEvaluator, CEBinaryClassificationEvaluator
from simpletransformers.config.model_args import ModelArgs
from torch.utils.data import DataLoader
from zyl_utils import get_best_cuda_device
MODEL_TYPE = [
    'two_classification',  # binary classification: outputs 0 or 1
    'sts',  # semantic textual similarity: continuous score in [0, 1], unordered
    'nli'  # natural language inference: relation between the two sentences, ordered, outputs 0, 1 or 2
]
@dataclass
class ReRankerCrossEncoderArgs(ModelArgs):
"""
Model args for a ReRankerCrossEncoder
    num_labels: Number of labels of the classifier. If 1, the CrossEncoder is a regression model that outputs a
        continuous score 0...1. If > 1, it outputs several scores that can be soft-maxed to get probability
        scores for the different classes.
"""
cuda_device: str = get_best_cuda_device(gpu_num=1)
train_batch_size: int = 16
max_seq_length: int = 128
    tokenizer_args: Dict = field(default_factory=dict)
    default_activation_function = None
    num_labels: int = 1
class ReRankerCrossEncoderModel:
def __init__(self, model_type='two_classification',
model_name="sentence-transformers/distiluse-base-multilingual-cased-v1", args=None):
"""
Args:
            model_type: 'two_classification' (outputs 0 or 1), 'sts' (semantic similarity, a continuous
                score in [0, 1], unordered), or 'nli' (natural language inference, the ordered relation
                between the two sentences, outputs 0, 1 or 2)
model_name: "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
args: dict
"""
self.args = self._load_model_args(model_name)
self.args.model_type = model_type
self.args.model_name = model_name
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, ReRankerCrossEncoderArgs):
self.args = args
if self.args.model_type == 'sts':
self.args.num_labels = 1
elif self.args.model_type == 'two_classification':
self.args.num_labels = 1
else:
self.args.num_labels = 3
# loss_fct = nn.BCEWithLogitsLoss() if self.config.num_labels == 1 else nn.CrossEntropyLoss()
        # num_labels: int = 1  # number of soft-maxed classes; the default of 1 means a continuous score
self.model = self.get_model()
def get_model(self):
return CrossEncoder(model_name=self.args.model_name, num_labels=self.args.num_labels,
max_length=self.args.max_seq_length, device=f'cuda:{self.args.cuda_device}',
tokenizer_args=self.args.tokenizer_args,
default_activation_function=self.args.default_activation_function)
def _load_model_args(self, input_dir):
args = ReRankerCrossEncoderArgs()
args.load(input_dir)
return args
def train(self, train_dt, eval_dt):
"""
loss_fct = nn.BCEWithLogitsLoss() if self.config.num_labels == 1 else nn.CrossEntropyLoss()
Args:
train_dt: df,['mention','entries'],'mention' is string text,'entries' is a list of entries.
eval_dt:
Returns:
"""
self.model = self.get_model()
train_samples = self.get_samples(train_dt)
print(f'train_sample_length:{len(train_samples)}')
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=self.args.train_batch_size)
eval_samples = self.get_samples(eval_dt)
evaluator = self.get_evaluator(eval_samples)
warmup_steps = math.ceil(
len(train_dataloader) * self.args.num_train_epochs * 0.1) # 10% of train data for warm-up
evaluation_steps = math.ceil(len(train_dataloader) * 0.1)
self.model.fit(train_dataloader=train_dataloader, evaluator=evaluator, epochs=self.args.num_train_epochs,
warmup_steps=warmup_steps, evaluation_steps=evaluation_steps, save_best_model=True,
output_path=self.args.best_model_dir, use_amp=False, callback=self.call_back,
show_progress_bar=True, optimizer_params={'lr': self.args.learning_rate})
def get_samples(self, df):
samples = []
if self.args.model_type == 'nli':
for _, sub_df in df.iterrows():
candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
if sub_df['entries']:
entries_length = len(sub_df['entries'])
                    if entries_length > 1:
                        label_id = 1  # entailment: the mention maps to several entries
                    else:
                        label_id = 2  # equivalence: the mention maps to exactly one entry
for e in sub_df['entries']:
samples.append(InputExample(texts=[sub_df['mention'], e], label=label_id))
if e in candidate_entries:
candidate_entries.remove(e)
for c_e in candidate_entries:
samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
elif self.args.model_type == 'sts':
for _, sub_df in df.iterrows():
candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
if sub_df['entries']:
entries_length = len(sub_df['entries'])
if 'label' in sub_df.index:
score = sub_df['label']
else:
score = round(1 / entries_length, 4)
for e in sub_df['entries']:
samples.append(InputExample(texts=[sub_df['mention'], e], label=score))
samples.append(InputExample(texts=[e, sub_df['mention']], label=score))
if e in candidate_entries:
candidate_entries.remove(e)
for c_e in candidate_entries:
samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
else:
for _, sub_df in df.iterrows():
candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
if sub_df['entries']:
                    for e in sub_df['entries']:
                        samples.append(InputExample(texts=[sub_df['mention'], e], label=1))
                        samples.append(InputExample(texts=[e, sub_df['mention']], label=1))
                        if e in candidate_entries:
                            candidate_entries.remove(e)
for c_e in candidate_entries:
samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
return samples
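    # Sketch of the generated samples for one row {'mention': 'm', 'entries': ['e1']} with candidate
    # entries ['e1', 'e2'] (hypothetical values):
    #   'two_classification' -> (m, e1) and (e1, m) with label 1; (m, e2) and (e2, m) with label 0
    #   'sts'                -> positive pairs get score 1 / len(entries) (or the provided 'label'),
    #                           remaining candidates get score 0
    #   'nli'                -> label 2 (equivalence) for a single entry, 1 (entailment) for several,
    #                           0 for the remaining candidates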
    def get_candidade_entries(self, query):
        # placeholder: subclasses are expected to override this and return a list of candidate entries
        candidate_entries = query
        return candidate_entries  # type:list
def get_evaluator(self, eval_samples):
if self.args.model_type == 'nli':
return CECorrelationEvaluator.from_input_examples(eval_samples, name='eval')
elif self.args.model_type == 'two_classification':
return CEBinaryClassificationEvaluator.from_input_examples(eval_samples, name='eval')
else:
return CESoftmaxAccuracyEvaluator.from_input_examples(eval_samples, name='eval')
# class RerankerTrainer:
# def __init__(self):
# self.model_path = "distiluse-base-multilingual-cased-v1"
# self.dimensions = 512
# self.cuda_device = get_best_cuda_device(gpu_num=1)
# self.max_seqence_length = 128
# self.use_st_model = True
# self.train_batch_size = 16
# self.epoch = 5
# self.learning_rate = 1e-5
# self.all_scores = []
# self.best_score = 0
# self.label2int = {"contradiction": 0, "entailment": 1, "neutral": 1}
# self.train_num_labels = len(set(self.label2int.values()))
# pass
#
# def train(self, train_df, dev_df, save_model="./best_model/test/", loss_func='SoftmaxLoss',
# evaluator_func='MyEvaluator2', top_k=30):
#
# self.save_model = save_model
# model = self.get_model()
#
# train_dataloader, train_loss = self.get_train_objectives(train_df, model, loss_func=loss_func,
# top_k=top_k)
#
# evaluator = self.get_evaluator(dev_df, evaluator_func=evaluator_func)
#
# warmup_steps = math.ceil(len(train_dataloader) * self.epoch * 0.1) # 10% of train data for warm-up
# evaluation_steps = math.ceil(len(train_dataloader) * 0.1)
#
# print('start train...')
# # Which loss function to use for training. If None, will use nn.BCEWithLogitsLoss() if self.config.num_labels == 1 else nn.CrossEntropyLoss()
# model.fit(train_dataloader=train_dataloader, epochs=self.epoch, warmup_steps=warmup_steps,
# evaluator=evaluator, save_best_model=True,
# output_path=save_model,
# evaluation_steps=evaluation_steps,
# callback=self.call_back,
# loss_fct=train_loss,
# optimizer_params={'lr': self.learning_rate})
#
# df = pd.DataFrame(self.all_scores)
# df.to_excel(save_model + 'my_score.xlsx')
# RerankerTrainer.save_parameters(self, save_model=f'{save_model}parameters.json')
#
# def get_retrieval_model(self):
# from sentence_transformers import SentenceTransformer
# model = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/v2/"
# model = SentenceTransformer(self.model_path, device=f'cuda:{self.cuda_device}')
# return model
#
# def get_evaluator(self, dev_df, evaluator_func='MyEvaluator2', collection='t1'):
# from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
# from sklearn.utils import resample
#
# self.evaluator_func = evaluator_func
# dev_df = resample(dev_df, replace=False)
#
# if evaluator_func == 'MyEvaluator':
# from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator
# from sentence_transformers import InputExample
# dev_df = dev_df[dev_df['label'] != 0.0] # type:pd.DataFrame
# dev_df = dev_df.groupby('entity').apply(lambda x: x['entry'].tolist())
# scores = dev_df.index.tolist()
# eval_examples = []
# dev_samples = []
# for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# eval_examples.append(InputExample(texts=[t, r]))
# evaluator = MyEvaluator.from_input_examples(eval_examples, name='sts-eval', collection=collection)
#
# elif evaluator_func == 'EmbeddingSimilarityEvaluator':
# sentences_1 = []
# sentences_2 = []
# scores = []
# dev_samples = []
# for _, sub_df in dev_df.iterrows():
# if sub_df['label'] != 0.0:
# sentences_1.append(sub_df['entity'])
# sentences_2.append(sub_df['entry'])
# scores.append(sub_df['label'])
#
# evaluator = EmbeddingSimilarityEvaluator(sentences_1, sentences_2, scores)
# else:
# from sentence_transformers import InputExample
# from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator2
# dev_samples = []
# for _, sub_df in dev_df.iterrows():
# if sub_df['label'] == 1:
# dev_samples.append(
# InputExample(texts=[sub_df['entity'], sub_df['entry']], label=1))
# elif sub_df['label'] > 0:
# dev_samples.append(
# InputExample(texts=[sub_df['entity'], sub_df['entry']], label=1))
# else:
# dev_samples.append(
# InputExample(texts=[sub_df['entity'], sub_df['entry']], label=0))
# evaluator = MyEvaluator2.from_input_examples(dev_samples, name='AllNLI-dev')
#
# print(f'dev_length:{len(dev_samples)}')
# self.dev_length = len(dev_samples)
# return evaluator
#
# @staticmethod
# def save_parameters(para_obj, save_model='./test.json'):
# """
# 存储一个对象的参数,对象参数可以是模型参数或超参数
# Args:
# para_obj: 要存储的参数的对象
# save_model: 保存路径
#
# Returns:
#
# """
# para_list = para_obj.__dir__()
# # save_para_list = ['best_score','device','max_seq_length','tokenizer']
# para = {}
# for p in para_list:
# if not p.startswith('_'):
# # if p in save_para_list:
# r = getattr(para_obj, p)
# if isinstance(r, int) or isinstance(r, str) or isinstance(r, float) or isinstance(r, list) \
# or isinstance(r, bool):
# para[p] = r
#
# with open(save_model, "w", encoding='utf-8') as f:
# # indent 超级好用,格式化保存字典,默认为None,小于0为零个空格
# # f.write(json.dumps(para,indent=4))
# json.dump(para, f, indent=4) # 传入文件描述符,和dumps一样的结果
#
# para.pop("all_scores")
# with open(log_file, "a", encoding='utf-8') as f:
# json.dump(para, f, indent=4)
# f.write('\n')
#
# def call_back(self, score, epoch, steps):
# self.all_scores.append({str(epoch) + '-' + str(steps): score})
# if score > self.best_score:
# self.best_score = score
# print(f'epoch:{epoch}: score:{score} ')
#
# class TrainerV1(RerankerTrainer):
# def __init__(self):
# super(TrainerV1, self).__init__()
#
# def run(self):
# self.train_1011()
#
# def train_1011(self):
# def deal_with_df(df, corpus):
# df['entry'] = df['entry'].astype('str')
# df['entity'] = df['entity'].astype('str')
# m = self.get_retrieval_model()
# qs = df['entity'].tolist()
# res = RetrievalEvaluator.query_result(model=m, corpus=corpus, queries=qs, top_k=10)
# li = []
# for i, r in zip(qs, res):
# for _ in r:
# li.append({'entity': i, 'entry': _, 'label': 0})
# df_ = pd.DataFrame(li)
# print(len(df))
# df = pd.concat([df, df_], ignore_index=True)
# print(len(df))
# df.drop_duplicates(subset=['entity', 'entry'], keep='first', inplace=True)
# print(len(df))
# return df
#
# self.train_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5"
# train_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
# 'train')
# corpus = list(set(train_df['entry'].tolist()))
# corpus = [str(c) for c in corpus]
# train_df = deal_with_df(train_df, corpus=corpus)
#
# self.dev_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5"
# dev_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
# 'eval')
# dev_df = deal_with_df(dev_df, corpus=corpus)
#
# self.model_path = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# # self.model_path = "./best_model/di_reranker_v2.0/"
#
# # self.model_path = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/em9/"
# # self.model_path = '/large_files/pretrained_pytorch/mt5_zh_en/'
#
# # self.model_path = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
# # self.model_path = "./best_model/v2/v2.2.1/"
#
# # self.model_path = "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
#
# self.cuda_device = get_best_cuda_device(gpu_num=1)
# self.dimensions = 768
# self.max_seqence_length = 64
# self.use_st_model = True
# self.train_batch_size = 32
# self.epoch = 3
# self.learning_rate = 1e-5
# self.train(train_df, dev_df, save_model="./best_model/di_reranker_2/",
# loss_func='CrossEntropyLoss', # CrossEntropyLoss,BCEWithLogitsLoss,nli
# evaluator_func="MyEvaluator2",
# top_k=10)
#
# # def train_cross_model(self):
# # self.train_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # train_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # 'train')
# # m = self.get_retrieval_model()
# # RetrievalEvaluator.query_result(model=model, corpus=corpus, queries=queries, top_k=1)
# #
# # self.dev_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # dev_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # 'eval')
# #
# # # self.train_file = "./data/v2/train_2.csv.gz"
# # # train_df = pd.read_csv(self.train_file, compression='gzip', sep='|')
# # # self.dev_file = "./data/v2/eval.csv.gz"
# # # dev_df = pd.read_csv(self.dev_file, compression='gzip', sep='|')
# #
# #
# # # self.model_path = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# # self.model_path = "./best_model/di_reranker_v2.0/"
# #
# # # self.model_path = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/em9/"
# # # self.model_path = '/large_files/pretrained_pytorch/mt5_zh_en/'
# #
# # # self.model_path = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
# # # self.model_path = "./best_model/v2/v2.2.1/"
# #
# # # self.model_path = "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
# #
# #
# #
# # self.dimensions = 768
# # self.max_seqence_length = 128
# # self.use_st_model = True
# # self.train_batch_size = 32
# # self.epoch = 3
# # self.learning_rate = 2e-5
# # self.train(train_df, dev_df, save_model="./best_model/v2/v2.2.2/",
# # loss_func='CrossEntropyLoss', # CrossEntropyLoss,BCEWithLogitsLoss,nli
# # evaluator_func="MyEvaluator2",
# # top_k=10)
def call_back(self, score, epoch, steps):
print(f'epoch:{epoch}----step:{steps}----score:{score} ')
if __name__ == '__main__':
import pandas as pd
class Test(ReRankerCrossEncoderModel):
def __init__(self):
super(Test, self).__init__()
def get_candidade_entries(self, query):
candidate_entries = []
            # fuzzy string matching
            # semantic search
return candidate_entries
def test_train(self):
train_file = './test.xlsx'
eval_file = './test.xlsx'
train_df = pd.read_excel(train_file) # type:pd.DataFrame
eval_df = pd.read_excel(eval_file) # type:pd.DataFrame
self.model_version = 'v0.0.0.0'
self.args.update_from_dict(
{
                'model_type': 'two_classification',
                'model_name': "sentence-transformers/distiluse-base-multilingual-cased-v1",
'num_train_epochs': 3,
'learning_rate': 3e-4,
'train_batch_size': 24, # 28
'eval_batch_size': 16,
'max_seq_length': 512,
}
)
            self.train(train_df, eval_df)

# ---- file: zyl_utils/model_utils/models/reranker_cross_encoder.py (end) ----
# ---- file: zyl_utils/model_utils/models/retrieval_bi_encoder.py (start) ----
import math
from dataclasses import dataclass, field
from typing import Dict
import pandas as pd
from sentence_transformers import datasets
from sentence_transformers import losses
from sentence_transformers import models
from simpletransformers.config.model_args import ModelArgs
from tqdm import tqdm
from zyl_utils import get_best_cuda_device
MODEL_TYPE = [
    'sts',  # semantic similarity between two texts
    'nli',  # sentence-pair relations, many-to-many; only entailment and contradiction
    'paraphrase',  # paraphrasing (find sentences with similar meaning within a collection)
    'duplicate_text',  # duplicate-text sets, many-to-one
    'information retrieval',  # information retrieval
]
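
# Illustrative sketch (not part of the original module): how the two stages that this package
# combines typically fit together with the sentence-transformers API -- a bi-encoder retrieves
# candidate entries and a cross-encoder reranks them. The model names and the tiny corpus below
# are placeholder assumptions and may need to be swapped for the project's own checkpoints.
def _example_retrieve_then_rerank():
    from sentence_transformers import CrossEncoder, SentenceTransformer, util

    corpus = ['lung cancer', 'breast cancer', 'diabetes mellitus']
    query = 'carcinoma of the lung'

    # stage 1: bi-encoder retrieval (fast, approximate)
    bi_encoder = SentenceTransformer('sentence-transformers/distiluse-base-multilingual-cased-v1')
    hits = util.semantic_search(bi_encoder.encode([query], convert_to_tensor=True),
                                bi_encoder.encode(corpus, convert_to_tensor=True),
                                top_k=3)[0]
    candidates = [corpus[hit['corpus_id']] for hit in hits]

    # stage 2: cross-encoder reranking (slower, more accurate)
    reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
    scores = reranker.predict([(query, candidate) for candidate in candidates])
    return sorted(zip(candidates, scores), key=lambda pair: pair[1], reverse=True)
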
# from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances
# from scipy.stats import pearsonr, spearmanr
# from sentence_transformers.readers import InputExample
from sentence_transformers import SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import BinaryClassificationEvaluator
@dataclass
class ReTrievalBiEncoderArgs(ModelArgs):
"""
    Model args for a ReTrievalBiEncoderModel.
    num_labels: Number of labels of the classifier. If 1, the model is a regression model that outputs a
        continuous score between 0 and 1. If > 1, it outputs several scores that can be soft-maxed to get
        probability scores for the different classes.
"""
cuda_device: str = get_best_cuda_device(gpu_num=1)
train_batch_size: int = 16
max_seq_length: int = 128
use_sbert_model: bool = True
    tokenizer_args: Dict = field(default_factory=dict)
default_activation_function = None
num_labels: int = 1
output_path: str = './'
model_version: str = 'test'
loss_func: str = 'MultipleNegativesRankingLossHard'
evaluator_func: str = 'BinaryClassificationEvaluator'
show_encode_progress_bar: bool = True
    learning_rate: float = 1e-4
query_chunk_size: int = 100
    retrieval_top_k: int = 10  # maximum number of candidates to retrieve from the corpus per query
    retrieval_score: float = -1  # only candidates scoring above this threshold are returned
    at_least_top_k: int = -1  # minimum number of candidates to return per query
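
# Illustrative sketch (an assumption, not original code): the args can be tuned either by
# constructing the dataclass directly, as below, or by passing a plain dict as `args` to
# ReTrievalBiEncoderModel. Up to `retrieval_top_k` hits are fetched per query, hits scoring
# below `retrieval_score` are dropped, and the result is padded back up to `at_least_top_k`.
def _example_build_args() -> ReTrievalBiEncoderArgs:
    args = ReTrievalBiEncoderArgs()
    args.retrieval_top_k = 50    # fetch at most 50 candidates per query
    args.retrieval_score = 0.3   # keep only candidates scoring above 0.3
    args.at_least_top_k = 5      # but always return at least 5 candidates
    return args
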
# class RecallEvaluator(SentenceEvaluator):
# """
# Evaluate a model based on the similarity of the embeddings by calculating the Spearman and Pearson rank correlation
# in comparison to the gold standard labels.
# The metrics are the cosine similarity as well as euclidean and Manhattan distance
# The returned score is the Spearman correlation with a specified metric.
#
# The results are written in a CSV. If a CSV already exists, then values are appended.
# """
#
# def __init__(self, to_predict_texts: List[str], labels: List[str], corpus, batch_size: int = 16,
# main_similarity: SimilarityFunction = None, name: str = '', show_progress_bar: bool = False,
# write_csv: bool = True, top_k=100, encode_batch_size=128):
# """
# Constructs an evaluator based for the dataset
#
# The labels need to indicate the similarity between the sentences.
#
# :param to_predict_texts: List with the first sentence in a pair
# :param labels: List with the second sentence in a pair
# :param scores: Similarity score between to_predict_texts[i] and labels[i]
# :param write_csv: Write results to a CSV file
# """
# self.corpus = corpus
# self.to_predict_texts = to_predict_texts
# self.labels = labels
# self.write_csv = write_csv
# self.top_k = top_k
# self.encode_batch_size = encode_batch_size
# assert len(self.to_predict_texts) == len(self.labels)
#
# self.main_similarity = main_similarity
# self.name = name
#
# self.batch_size = batch_size
# if show_progress_bar is None:
# show_progress_bar = (
# logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG)
# self.show_progress_bar = show_progress_bar
#
# self.csv_file = "similarity_evaluation" + ("_" + name if name else '') + "_results.csv"
# self.csv_headers = ["epoch", "steps", "score"]
#
# @classmethod
# def from_input_examples(cls, examples: List[InputExample], **kwargs):
# to_predict_texts = []
# labels = []
#
# for example in examples:
# to_predict_texts.append(example.texts[0])
# labels.append(example.texts[1])
# return cls(to_predict_texts, labels, **kwargs)
#
# @staticmethod
# def caculate_recall(y_true, y_pred):
# recall = 0
# for t, p in zip(y_true, y_pred):
# if len(t) == 0:
# recall += 1
# else:
# recall += (len(set(t) & set(p)) / len(t))
# return recall / len(y_true)
#
# def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1):
# res = RetrievalEvaluator.query_result(model, queries=self.to_predict_texts, corpus=self.corpus,
# corpus_embeddings=None, top_k=self.top_k, return_format='result')
# y_true = [set(i) for i in self.labels]
#
# res_1 = [r[0:1] for r in res]
#
# res_10 = [r[0:10] for r in res]
# res_50 = [r[0:50] for r in res]
# res_100 = [r[0:100] for r in res]
#
# recall_1 = RecallEvaluator.caculate_recall(y_true, res_1)
# recall_10 = RecallEvaluator.caculate_recall(y_true, res_10)
# recall_50 = RecallEvaluator.caculate_recall(y_true, res_50)
# recall_100 = RecallEvaluator.caculate_recall(y_true, res_100)
# print(f'\nrecall@1 {recall_1}\n'
# f'recall@10 {recall_10}\n'
# f'recall@50 {recall_50}\n'
# f'recall@100 {recall_100}\n')
# return recall_10
import random
class ReTrievalBiEncoderModel:
    def __init__(self, model_name="sentence-transformers/distiluse-base-multilingual-cased-v1", args=None):
        """
        Args:
            model_type: 'two_classification' (outputs 0 or 1), 'sts' (semantic similarity, a continuous
                score in [0, 1], unordered), or 'nli' (natural language inference, the relation between
                the two sentences, ordered; outputs 0, 1 or 2)
            model_name: e.g. "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
            args: a dict of argument overrides, or a ReTrievalBiEncoderArgs instance
"""
self.score_function = util.dot_score
self.args = self._load_model_args(model_name)
self.args.model_name = model_name
self.corpus_embeddings = None
self.mention_corpus = self.get_mention_corpus()
self.entries_corpus = self.get_entries_corpus()
self.corpus_dict = self.get_corpus_dict()
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, ReTrievalBiEncoderArgs):
self.args = args
self.model = None
def _load_model_args(self, input_dir):
args = ReTrievalBiEncoderArgs()
args.load(input_dir)
return args
def train(self, train_dt, eval_dt):
"""
Args:
train_dt: df,['mention','entries'],'mention' is string text,'entries' is a list of entries.
eval_dt:
Returns:
"""
self.model = self.get_model()
self.args.best_model_dir = self.args.output_dir + 'best_model/' + self.args.model_version + '/'
train_objectives = self.get_train_objects(train_dt) # type:list
evaluator = self.get_evaluator(eval_dt)
        train_dataloader = train_objectives[0][0]  # each objective is a (dataloader, loss) pair
        warmup_steps = math.ceil(
            len(train_dataloader) * self.args.num_train_epochs * 0.1)  # 10% of train data for warm-up
        evaluation_steps = math.ceil(len(train_dataloader) * 0.1)
self.model.fit(train_objectives=train_objectives, evaluator=evaluator, epochs=self.args.num_train_epochs,
warmup_steps=warmup_steps, evaluation_steps=evaluation_steps, save_best_model=True,
output_path=self.args.best_model_dir, use_amp=False, callback=self.call_back,
show_progress_bar=True, optimizer_params={'lr': self.args.learning_rate})
def get_model(self):
if self.args.use_sbert_model:
            # used for prediction, and when training a sentence-transformers model as-is
model = SentenceTransformer(self.args.model_name, device=f'cuda:{str(self.args.cuda_device)}')
else:
            # used at training time to modify the model structure (e.g. the output); the result is still a SentenceTransformer model
# max_seq_length,model_args,cache_dir,tokenizer_args, do_lower_case,tokenizer_name_or_path
word_embedding_model = models.Transformer(self.args.model_name, max_seq_length=self.args.max_seq_length, )
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode='mean')
model = SentenceTransformer(modules=[word_embedding_model, pooling_model],
device=f'cuda:{str(self.args.cuda_device)}')
# dense_layer = models.Dense(in_features=pooling_model.get_sentence_embedding_dimension(),
# out_features=self.output_dimension, activation_function=nn.Tanh())
# normalize_layer = models.Normalize()
# model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dense_layer, normalize_layer],
# device=f'cuda:{str(self.cuda_device)}')
# from sentence_transformers.models.T5 import T5
# word_embedding_model = T5(self.model_path,max_seq_length=self.max_seqence_length)
# dense_model = models.Dense(in_features=word_embedding_model.get_word_embedding_dimension(),
# out_features=word_embedding_model.get_word_embedding_dimension(),
# activation_function=nn.Tanh())
return model
def get_train_objects(self, df):
"""
Args:
            df: input DataFrame with columns ['mention', 'entries']; 'mention' is a text string and 'entries' is a list of matching entries.
Returns:
"""
if self.args.loss_func == 'MultipleNegativesRankingLossHard':
            df = df[df['entries'].apply(len).gt(0)]  # drop rows whose entries list is empty
train_samples = []
for _, sub_df in tqdm(df.iterrows()):
contradiction_entries = self.get_candidate_entries(sub_df['mention'])
contradiction_entries = [c for c in contradiction_entries if c not in sub_df['entries']]
for e in sub_df['entries']:
train_samples.append(
InputExample(texts=[sub_df['mention'], e, random.choice(contradiction_entries)]))
train_samples.append(
InputExample(texts=[e, sub_df['mention'], random.choice(contradiction_entries)]))
train_dataloader = datasets.NoDuplicatesDataLoader(train_samples, batch_size=self.args.train_batch_size)
train_loss = losses.MultipleNegativesRankingLoss(model=self.model, scale=20.0,
similarity_fct=util.dot_score)
train_obj = [(train_dataloader, train_loss)]
        elif self.args.loss_func == 'MultipleNegativesRankingLoss':
            df = df[df['entries'].apply(len).gt(0)]  # drop rows whose entries list is empty
            df = df.explode('entries')
            train_samples = []
            for _, sub_df in tqdm(df.iterrows()):
                train_samples.append(InputExample(texts=[sub_df['entries'], sub_df['mention']]))
            print(len(train_samples))
            # Special data loader that avoids duplicates within a batch
            train_dataloader = datasets.NoDuplicatesDataLoader(train_samples, batch_size=self.args.train_batch_size)
            train_loss = losses.MultipleNegativesRankingLoss(model=self.model, scale=20.0,
                                                             similarity_fct=util.dot_score)
            train_obj = [(train_dataloader, train_loss)]
        else:
            # default: build the same (positive entry, mention) pairs as 'MultipleNegativesRankingLoss'
            df = df[df['entries'].apply(len).gt(0)]  # drop rows whose entries list is empty
            df = df.explode('entries')
            train_samples = []
            for _, sub_df in tqdm(df.iterrows()):
                train_samples.append(InputExample(texts=[sub_df['entries'], sub_df['mention']]))
            print(len(train_samples))
            # Special data loader that avoids duplicates within a batch
            train_dataloader = datasets.NoDuplicatesDataLoader(train_samples, batch_size=self.args.train_batch_size)
            train_loss = losses.MultipleNegativesRankingLoss(model=self.model, scale=20.0,
                                                             similarity_fct=util.dot_score)
            train_obj = [(train_dataloader, train_loss)]
return train_obj
def get_mention_corpus(self):
        # corpus of entity mentions (not dictionary entries) used at evaluation / query time: all mentions
mention_corpus = []
return mention_corpus
def get_entries_corpus(self):
        # corpus of all dictionary entries
entries_corpus = []
return entries_corpus
def get_corpus_dict(self):
        # mapping from each corpus mention to its dictionary entry, used at evaluation time
        # evaluation uses the dictionary built from the training set; the serving interface uses the dictionary built from all data
corpus_dict = {'entity': 'entry'}
return corpus_dict
def get_candidate_entries(self, text):
        # for a given text, fetch the most similar entries from the full entry dictionary
candidate_entries = []
return candidate_entries
def call_back(self, score, epoch, steps):
print(f'epoch:{epoch}: score:{score}, steps:{steps} ')
    def query(self, queries, return_format='result'):
        if isinstance(queries, str):  # allow a single query string as well as a list of queries
            queries = [queries]
        if not self.model:
            self.model = self.get_model()
        # retrieve the most similar items from the corpus
if not self.mention_corpus:
self.mention_corpus = self.get_mention_corpus()
        if self.corpus_embeddings is None:
self.corpus_embeddings = self.model.encode(self.mention_corpus, self.args.eval_batch_size,
self.args.show_encode_progress_bar,
'sentence_embedding',
True, True, f'cuda:{self.args.cuda_device}', False)
self.corpus_embeddings = util.normalize_embeddings(self.corpus_embeddings)
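        # both corpus and query embeddings are L2-normalized, so util.dot_score used below is
        # equivalent to cosine similarity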
queries_embeddings = self.model.encode(queries, self.args.eval_batch_size,
self.args.show_encode_progress_bar,
'sentence_embedding',
True, True, f'cuda:{self.args.cuda_device}', False)
queries_embeddings = util.normalize_embeddings(queries_embeddings)
hits = util.semantic_search(queries_embeddings, self.corpus_embeddings,
top_k=self.args.retrieval_top_k,
corpus_chunk_size=len(self.mention_corpus),
query_chunk_size=self.args.query_chunk_size,
                                    score_function=self.score_function)  # hits are sorted by score, descending
if return_format == 'result':
res = []
for h in hits:
r = []
for i in h:
if i['score'] > self.args.retrieval_score:
r.append(self.mention_corpus[i['corpus_id']])
                if len(r) < self.args.at_least_top_k:
                    # pad with the next-best hits (ignoring the score threshold) so that at
                    # least `at_least_top_k` candidates are always returned
                    for hit in h[len(r):self.args.at_least_top_k]:
                        r.append(self.mention_corpus[hit['corpus_id']])
res.append(r)
return res
else:
return hits
@staticmethod
def caculate_recall(y_true, y_pred):
recall = 0
for t, p in zip(y_true, y_pred):
if len(t) == 0:
recall += 1
else:
recall += (len(set(t) & set(p)) / len(t))
return recall / len(y_true)
    def eval(self, to_predicts, labels, batch_size=16, retrieval_top_k=100, at_least_top_k=10,
             retrieval_score=0.1):
        # query() reads its retrieval settings from self.args, so apply the overrides there first
        self.args.eval_batch_size = batch_size
        self.args.retrieval_top_k = retrieval_top_k
        self.args.at_least_top_k = at_least_top_k
        self.args.retrieval_score = retrieval_score
        pred = self.query(to_predicts, return_format='result')
res_1 = [r[0:1] for r in pred]
res_10 = [r[0:10] for r in pred]
res_50 = [r[0:50] for r in pred]
res_100 = [r[0:100] for r in pred]
recall_1 = ReTrievalBiEncoderModel.caculate_recall(labels, res_1)
recall_10 = ReTrievalBiEncoderModel.caculate_recall(labels, res_10)
recall_50 = ReTrievalBiEncoderModel.caculate_recall(labels, res_50)
recall_100 = ReTrievalBiEncoderModel.caculate_recall(labels, res_100)
print(f'\nrecall@1 {recall_1}\n'
f'recall@10 {recall_10}\n'
f'recall@50 {recall_50}\n'
f'recall@100 {recall_100}\n')
return recall_10
def get_evaluator(self, dev_df):
if self.args.evaluator_func == 'BinaryClassificationEvaluator':
eval_samples = []
for _, sub_df in tqdm(dev_df.iterrows()):
for e in sub_df['entries']:
eval_samples.append(InputExample(texts=[sub_df['mention'], e], label=1))
contradiction_entries = self.get_candidate_entries(sub_df['mention'])
contradiction_entries = [c for c in contradiction_entries if c not in sub_df['entries']]
for e in contradiction_entries:
eval_samples.append(InputExample(texts=[sub_df['mention'], e], label=0))
evaluator = BinaryClassificationEvaluator.from_input_examples(examples=eval_samples,
name='eval',
batch_size=self.args.eval_batch_size,
show_progress_bar=True)
else:
eval_samples = []
for _, sub_df in tqdm(dev_df.iterrows()):
for e in sub_df['entries']:
eval_samples.append(InputExample(texts=[sub_df['mention'], e], label=1))
contradiction_entries = self.get_candidate_entries(sub_df['mention'])
contradiction_entries = [c for c in contradiction_entries if c not in sub_df['entries']]
for e in contradiction_entries:
eval_samples.append(InputExample(texts=[sub_df['mention'], e], label=0))
evaluator = BinaryClassificationEvaluator.from_input_examples(examples=eval_samples,
name='eval',
batch_size=self.args.eval_batch_size,
show_progress_bar=True,
)
return evaluator
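
# Illustrative sketch (an assumption, not original code): the minimal DataFrame shape that
# train() / get_train_objects() expect -- one row per mention, with `entries` holding the list
# of matching dictionary entries. The values below are made-up placeholders.
def _example_training_frame() -> pd.DataFrame:
    return pd.DataFrame(
        {
            'mention': ['NSCLC', 'breast carcinoma'],
            'entries': [['non-small cell lung cancer'], ['breast cancer']],
        }
    )
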
if __name__ == '__main__':
class Test(ReTrievalBiEncoderModel):
def __init__(self):
super(Test, self).__init__()
def get_mention_corpus(self):
# disease_dict = pd.read_excel("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_v2_1221.xlsx")
disease_dict = pd.read_hdf(
"/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
'train')
corpus = disease_dict['entity'].tolist()
return [str(c) for c in set(corpus)]
def get_entries_corpus(self):
disease_dict = pd.read_hdf(
"/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
'train')
corpus = disease_dict['entity'].tolist()
return [str(c) for c in set(corpus)]
def get_corpus_dict(self):
disease_dict = pd.read_hdf(
"/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
'train')
disease_dict = dict(zip(disease_dict['entity'].tolist(), disease_dict['entry'].tolist()))
return disease_dict
def get_candidate_entries(self, one_text):
            # for a given text, fetch the most similar entries from the full entry dictionary
candidate_entries = self.query(one_text, return_format='result')[0]
return candidate_entries
def test_train(self):
self.args.update_from_dict(
{
'model_name':"sentence-transformers/distiluse-base-multilingual-cased-v1",
'cuda_device': '1',
'train_batch_size': 16,
'max_seq_length': 128,
'loss_func': 'MultipleNegativesRankingLossHard',
'evaluator_func': 'BinaryClassificationEvaluator',
'learning_rate': 1e-4,
'output_path':'/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/',
'model_version': 'test',
}
)
train_dt = pd.read_hdf('/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5',
'train')
train_dt.rename(columns={'entity':'mention','entry':'entries'},inplace=True)
eval_dt = pd.read_hdf('/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5',
'eval')
eval_dt.rename(columns={'entity': 'mention', 'entry': 'entries'}, inplace=True)
self.train(train_dt, eval_dt)
def test_predict(self, to_predict):
self.args.model_name = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/di_retrieval_v2.1/"
self.args.update_from_dict(
{}
)
self.model = self.get_model()
res = self.query(to_predict, return_format='result')
print(res)
def test_eval(self):
self.args.model_name = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/di_retrieval_v2.1/"
self.model = self.get_model()
dev_df = pd.read_hdf(
"/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
'eval')
to_predict = dev_df['entity'].tolist()
labels = dev_df['entry'].tolist()
self.eval(to_predict, labels, batch_size=16, retrieval_top_k=100, at_least_top_k=10,
retrieval_score=-1)
# Test().test_predict(['肿瘤', 'cancer'])
Test().test_train()
# def get_samples(self, df):
# samples = []
# if self.args.loss=='MultipleNegativesRankingLoss':
#
# # entry , entity ,other_entry
#
#
# elif self.args.loss=='MultipleNegativesRankingLossHard':
#
# elif self.args.loss=='OnlineContrastiveLoss':
#
# elif self.args.loss ==
#
# if self.args.model_type == 'nli':
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# entries_length = len(sub_df['entries'])
# if entries_length > 1:
# label_id = 1 # 蕴含关系
# else:
# label_id = 2 # 等价关系
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=label_id))
# if e in candidate_entries:
# candidate_entries.remove(e)
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# elif self.args.model_type == 'sts':
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# entries_length = len(sub_df['entries'])
# if 'label' in sub_df.index:
# score = sub_df['label']
# else:
# score = round(1 / entries_length, 4)
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=score))
# samples.append(InputExample(texts=[e, sub_df['mention']], label=score))
# if e in candidate_entries:
# candidate_entries.remove(e)
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
# else:
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=1))
# samples.append(InputExample(texts=[e, sub_df['mention']], label=1))
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
# return samples
#
# def get_candidade_entries(self, query):
# candidate_entries = query
# return candidate_entries # type:list
#
# def get_evaluator(self, eval_samples):
# if self.args.model_type == 'nli':
# return CECorrelationEvaluator.from_input_examples(eval_samples, name='eval')
# elif self.args.model_type == 'two_classification':
# return CEBinaryClassificationEvaluator.from_input_examples(eval_samples, name='eval')
# else:
# return CESoftmaxAccuracyEvaluator.from_input_examples(eval_samples, name='eval')
#
# # class RerankerTrainer:
# # def __init__(self):
# # self.model_path = "distiluse-base-multilingual-cased-v1"
# # self.dimensions = 512
# # self.cuda_device = get_best_cuda_device(gpu_num=1)
# # self.max_seqence_length = 128
# # self.use_st_model = True
# # self.train_batch_size = 16
# # self.epoch = 5
# # self.learning_rate = 1e-5
# # self.all_scores = []
# # self.best_score = 0
# # self.label2int = {"contradiction": 0, "entailment": 1, "neutral": 1}
# # self.train_num_labels = len(set(self.label2int.values()))
# # pass
# #
# # def train(self, train_df, dev_df, save_model="./best_model/test/", loss_func='SoftmaxLoss',
# # evaluator_func='MyEvaluator2', top_k=30):
# #
# # self.save_model = save_model
# # model = self.get_model()
# #
# # train_dataloader, train_loss = self.get_train_objectives(train_df, model, loss_func=loss_func,
# # top_k=top_k)
# #
# # evaluator = self.get_evaluator(dev_df, evaluator_func=evaluator_func)
# #
# # warmup_steps = math.ceil(len(train_dataloader) * self.epoch * 0.1) # 10% of train data for warm-up
# # evaluation_steps = math.ceil(len(train_dataloader) * 0.1)
# #
# # print('start train...')
# # # Which loss function to use for training. If None, will use nn.BCEWithLogitsLoss() if self.config.num_labels == 1 else nn.CrossEntropyLoss()
# # model.fit(train_dataloader=train_dataloader, epochs=self.epoch, warmup_steps=warmup_steps,
# # evaluator=evaluator, save_best_model=True,
# # output_path=save_model,
# # evaluation_steps=evaluation_steps,
# # callback=self.call_back,
# # loss_fct=train_loss,
# # optimizer_params={'lr': self.learning_rate})
# #
# # df = pd.DataFrame(self.all_scores)
# # df.to_excel(save_model + 'my_score.xlsx')
# # RerankerTrainer.save_parameters(self, save_model=f'{save_model}parameters.json')
# #
# # def get_retrieval_model(self):
# # from sentence_transformers import SentenceTransformer
# # model = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/v2/"
# # model = SentenceTransformer(self.model_path, device=f'cuda:{self.cuda_device}')
# # return model
# #
# # def get_evaluator(self, dev_df, evaluator_func='MyEvaluator2', collection='t1'):
# # from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
# # from sklearn.utils import resample
# #
# # self.evaluator_func = evaluator_func
# # dev_df = resample(dev_df, replace=False)
# #
# # if evaluator_func == 'MyEvaluator':
# # from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator
# # from sentence_transformers import InputExample
# # dev_df = dev_df[dev_df['label'] != 0.0] # type:pd.DataFrame
# # dev_df = dev_df.groupby('entity').apply(lambda x: x['entry'].tolist())
# # scores = dev_df.index.tolist()
# # eval_examples = []
# # dev_samples = []
# # for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# # eval_examples.append(InputExample(texts=[t, r]))
# # evaluator = MyEvaluator.from_input_examples(eval_examples, name='sts-eval', collection=collection)
# #
# # elif evaluator_func == 'EmbeddingSimilarityEvaluator':
# # sentences_1 = []
# # sentences_2 = []
# # scores = []
# # dev_samples = []
# # for _, sub_df in dev_df.iterrows():
# # if sub_df['label'] != 0.0:
# # sentences_1.append(sub_df['entity'])
# # sentences_2.append(sub_df['entry'])
# # scores.append(sub_df['label'])
# #
# # evaluator = EmbeddingSimilarityEvaluator(sentences_1, sentences_2, scores)
# # else:
# # from sentence_transformers import InputExample
# # from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator2
# # dev_samples = []
# # for _, sub_df in dev_df.iterrows():
# # if sub_df['label'] == 1:
# # dev_samples.append(
# # InputExample(texts=[sub_df['entity'], sub_df['entry']], label=1))
# # elif sub_df['label'] > 0:
# # dev_samples.append(
# # InputExample(texts=[sub_df['entity'], sub_df['entry']], label=1))
# # else:
# # dev_samples.append(
# # InputExample(texts=[sub_df['entity'], sub_df['entry']], label=0))
# # evaluator = MyEvaluator2.from_input_examples(dev_samples, name='AllNLI-dev')
# #
# # print(f'dev_length:{len(dev_samples)}')
# # self.dev_length = len(dev_samples)
# # return evaluator
# #
# # @staticmethod
# # def save_parameters(para_obj, save_model='./test.json'):
# # """
# # 存储一个对象的参数,对象参数可以是模型参数或超参数
# # Args:
# # para_obj: 要存储的参数的对象
# # save_model: 保存路径
# #
# # Returns:
# #
# # """
# # para_list = para_obj.__dir__()
# # # save_para_list = ['best_score','device','max_seq_length','tokenizer']
# # para = {}
# # for p in para_list:
# # if not p.startswith('_'):
# # # if p in save_para_list:
# # r = getattr(para_obj, p)
# # if isinstance(r, int) or isinstance(r, str) or isinstance(r, float) or isinstance(r, list) \
# # or isinstance(r, bool):
# # para[p] = r
# #
# # with open(save_model, "w", encoding='utf-8') as f:
# # # indent 超级好用,格式化保存字典,默认为None,小于0为零个空格
# # # f.write(json.dumps(para,indent=4))
# # json.dump(para, f, indent=4) # 传入文件描述符,和dumps一样的结果
# #
# # para.pop("all_scores")
# # with open(log_file, "a", encoding='utf-8') as f:
# # json.dump(para, f, indent=4)
# # f.write('\n')
# #
# # def call_back(self, score, epoch, steps):
# # self.all_scores.append({str(epoch) + '-' + str(steps): score})
# # if score > self.best_score:
# # self.best_score = score
# # print(f'epoch:{epoch}: score:{score} ')
# #
# # class TrainerV1(RerankerTrainer):
# # def __init__(self):
# # super(TrainerV1, self).__init__()
# #
# # def run(self):
# # self.train_1011()
# #
# # def train_1011(self):
# # def deal_with_df(df, corpus):
# # df['entry'] = df['entry'].astype('str')
# # df['entity'] = df['entity'].astype('str')
# # m = self.get_retrieval_model()
# # qs = df['entity'].tolist()
# # res = RetrievalEvaluator.query_result(model=m, corpus=corpus, queries=qs, top_k=10)
# # li = []
# # for i, r in zip(qs, res):
# # for _ in r:
# # li.append({'entity': i, 'entry': _, 'label': 0})
# # df_ = pd.DataFrame(li)
# # print(len(df))
# # df = pd.concat([df, df_], ignore_index=True)
# # print(len(df))
# # df.drop_duplicates(subset=['entity', 'entry'], keep='first', inplace=True)
# # print(len(df))
# # return df
# #
# # self.train_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5"
# # train_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
# # 'train')
# # corpus = list(set(train_df['entry'].tolist()))
# # corpus = [str(c) for c in corpus]
# # train_df = deal_with_df(train_df, corpus=corpus)
# #
# # self.dev_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5"
# # dev_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval.h5",
# # 'eval')
# # dev_df = deal_with_df(dev_df, corpus=corpus)
# #
# # self.model_path = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# # # self.model_path = "./best_model/di_reranker_v2.0/"
# #
# # # self.model_path = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/em9/"
# # # self.model_path = '/large_files/pretrained_pytorch/mt5_zh_en/'
# #
# # # self.model_path = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
# # # self.model_path = "./best_model/v2/v2.2.1/"
# #
# # # self.model_path = "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
# #
# # self.cuda_device = get_best_cuda_device(gpu_num=1)
# # self.dimensions = 768
# # self.max_seqence_length = 64
# # self.use_st_model = True
# # self.train_batch_size = 32
# # self.epoch = 3
# # self.learning_rate = 1e-5
# # self.train(train_df, dev_df, save_model="./best_model/di_reranker_2/",
# # loss_func='CrossEntropyLoss', # CrossEntropyLoss,BCEWithLogitsLoss,nli
# # evaluator_func="MyEvaluator2",
# # top_k=10)
# #
# # # def train_cross_model(self):
# # # self.train_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # # train_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # # 'train')
# # # m = self.get_retrieval_model()
# # # RetrievalEvaluator.query_result(model=model, corpus=corpus, queries=queries, top_k=1)
# # #
# # # self.dev_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # # dev_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # # 'eval')
# # #
# # # # self.train_file = "./data/v2/train_2.csv.gz"
# # # # train_df = pd.read_csv(self.train_file, compression='gzip', sep='|')
# # # # self.dev_file = "./data/v2/eval.csv.gz"
# # # # dev_df = pd.read_csv(self.dev_file, compression='gzip', sep='|')
# # #
# # #
# # # # self.model_path = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# # # self.model_path = "./best_model/di_reranker_v2.0/"
# # #
# # # # self.model_path = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/em9/"
# # # # self.model_path = '/large_files/pretrained_pytorch/mt5_zh_en/'
# # #
# # # # self.model_path = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
# # # # self.model_path = "./best_model/v2/v2.2.1/"
# # #
# # # # self.model_path = "sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking"
# # #
# # #
# # #
# # # self.dimensions = 768
# # # self.max_seqence_length = 128
# # # self.use_st_model = True
# # # self.train_batch_size = 32
# # # self.epoch = 3
# # # self.learning_rate = 2e-5
# # # self.train(train_df, dev_df, save_model="./best_model/v2/v2.2.2/",
# # # loss_func='CrossEntropyLoss', # CrossEntropyLoss,BCEWithLogitsLoss,nli
# # # evaluator_func="MyEvaluator2",
# # # top_k=10)
#
# def call_back(self, score, epoch, steps):
# print(f'epoch:{epoch}----step:{steps}----score:{score} ')
# class RetrievalDT:
# def __init__(self):
# pass
#
# @staticmethod
# def convert_dt_for_MultipleNegativesRankingLoss(train_data: pd.DataFrame, neg_data=2, corpus=None,
# mode='sentence_pair'):
# train_data = v
# train_data.dropna(inplace=True)
# if mode == 'sentence_pair':
# return train_data
# else:
# new_train_data = []
# for _, sub_df in tqdm(train_data.iterrows()):
# count = 1
# while count <= neg_data / 2:
# neg_entity = random.choice(corpus)
# if train_data[
# (train_data['entry'] == neg_entity) & (train_data['entity'] == sub_df['entity'])].empty:
# new_train_data.append({
# 'entry': sub_df['entry'],
# 'pos_entity': sub_df['entity'],
# 'neg_entity': neg_entity,
# })
# new_train_data.append({
# 'entry': sub_df['entity'],
# 'pos_entity': sub_df['entry'],
# 'neg_entity': neg_entity,
# })
# count += 1
# return pd.DataFrame(new_train_data)
# class RetrievalBiEncoder:
# def __init__(self):
# self.pretrained_model = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# self.output_dimension = 768 # 输出向量维度
# self.cuda_device = get_best_cuda_device(gpu_num=1)
# self.max_seqence_length = 128 # 输入长度
# self.use_sbert_model = True # 是否改变模型结构
# self.train_batch_size = 16
# self.epoch = 5
# self.data_top_k = 3 # 负样本数
# self.learning_rate = 1e-5
#
# self.save_model = "./best_model/" # 模型保存路径
# self.model_version = 'test' # 版本号,最好模型路径
#
# self.logging_scores = []
# self.logging_best_score = 0
# self.log_file = './best_model/retrieval_logging.json'
#
# def train_model(self, train_df, dev_df, loss_func='CosineSimilarityLoss',
# evaluator_func='EmbeddingSimilarityEvaluator',
# eval_batch_size=128):
#
# model = self.get_model()
# train_samples = self.get_samples(train_dt)
#
# corpus = self.get_corpus()
# corpus = [str(c) for c in corpus]
# train_obj = self.get_train_objectives(train_df, model, loss_func=loss_func, corpus=corpus)
#
# self.train_size = 9999999999
# for t in train_obj:
# self.train_size = min(len(t[0]), self.train_size)
#
# print(f'train_size:{self.train_size}')
# evaluator = self.get_evaluator(dev_df, evaluator_func=evaluator_func, corpus=corpus,
# encode_batch_size=encode_batch_size)
#
# warmup_steps = math.ceil(self.train_size * 1 * 0.1) # 10% of train data for warm-up
# evaluation_steps = math.ceil(self.train_size * 0.1)
#
# print('start train...')
# print(f"save to :{self.save_model + self.model_version + '/'}")
# model.fit(train_objectives=train_obj, epochs=self.epoch, warmup_steps=warmup_steps,
# evaluator=evaluator,
# save_best_model=True,
# output_path=self.save_model + self.model_version + '/',
# evaluation_steps=evaluation_steps,
# callback=self.call_back,
# optimizer_params={'lr': self.learning_rate})
#
# df = pd.DataFrame(self.all_scores)
# df.to_excel(self.save_model + self.model_version + '/my_score.xlsx')
# TrainRetrieval.save_parameters(self,
# save_model=f"{self.save_model + self.model_version + '/'}parameters.json")
#
# def get_model(self):
# print(f'use_pretrained_model: {self.pretrained_model}')
# if self.use_sbert_model:
# model = SentenceTransformer(self.pretrained_model, device=f'cuda:{str(self.cuda_device)}')
# else:
# word_embedding_model = models.Transformer(self.pretrained_model, max_seq_length=self.max_seqence_length)
# # from sentence_transformers.models.T5 import T5
# # word_embedding_model = T5(self.model_path,max_seq_length=self.max_seqence_length)
# # dense_model = models.Dense(in_features=word_embedding_model.get_word_embedding_dimension(),
# # out_features=word_embedding_model.get_word_embedding_dimension(),
# # activation_function=nn.Tanh())
# pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
# pooling_mode_cls_token=False, pooling_mode_max_tokens=False,
# pooling_mode_mean_tokens=True, pooling_mode_mean_sqrt_len_tokens=False, )
# dense_layer = models.Dense(in_features=pooling_model.get_sentence_embedding_dimension(),
# out_features=self.output_dimension, activation_function=nn.Tanh())
# normalize_layer = models.Normalize()
# model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dense_layer, normalize_layer],
# device=f'cuda:{str(self.cuda_device)}')
# self.output_dimension = model.get_sentence_embedding_dimension()
# return model
#
# def get_samples(self, df):
# samples = []
# if self.args.model_type == 'nli':
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# entries_length = len(sub_df['entries'])
# if entries_length > 1:
# label_id = 1 # 蕴含关系
# else:
# label_id = 2 # 等价关系
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=label_id))
# if e in candidate_entries:
# candidate_entries.remove(e)
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# elif self.args.model_type == 'sts':
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# entries_length = len(sub_df['entries'])
# if 'label' in sub_df.index:
# score = sub_df['label']
# else:
# score = round(1 / entries_length, 4)
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=score))
# samples.append(InputExample(texts=[e, sub_df['mention']], label=score))
# if e in candidate_entries:
# candidate_entries.remove(e)
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
# else:
# for _, sub_df in df.iterrows():
# candidate_entries = self.get_candidade_entries(query=sub_df['mention'])
# if sub_df['entries']:
# for e in sub_df['entries']:
# samples.append(InputExample(texts=[sub_df['mention'], e], label=1))
# samples.append(InputExample(texts=[e, sub_df['mention']], label=1))
# for c_e in candidate_entries:
# samples.append(InputExample(texts=[sub_df['mention'], c_e], label=0))
# samples.append(InputExample(texts=[c_e, sub_df['mention']], label=0))
# return samples
#
#
# def get_train_objectives(self, train_data, model, loss_func='MultipleNegativesRankingLoss', corpus=None):
# """
#
# Args:
# train_data: ['entity','entry'],entity:要查询的文本,entry:匹配到的词条列表,可以多条
# model:
# loss_func:
# corpus: 输入的语料库用以构建负样本
#
# Returns:
# train_obj = [(train_dataloader, train_loss)]
# """
# train_samples = []
# self.loss_func = loss_func
# if loss_func == 'MultipleNegativesRankingLoss':
# train_data = RetrievalDT.convert_dt_for_MultipleNegativesRankingLoss(train_data, neg_data=2, corpus=corpus)
# # Special data loader that avoid duplicates within a batch
#
# train_dataloader = datasets.NoDuplicatesDataLoader(train_samples, batch_size=self.train_batch_size)
# train_loss = losses.MultipleNegativesRankingLoss(model=model)
# train_obj = [(train_dataloader, train_loss)]
# return train_obj
# elif loss_func == 'MultipleNegativesRankingLoss2':
# for _, sub_df in tqdm(train_data.iterrows()):
# if sub_df['label'] != 0:
# train_samples.append(InputExample(texts=[sub_df['entity'], sub_df['entry']]))
#
# print(len(train_samples))
# # Special data loader that avoid duplicates within a batch
# train_dataloader = datasets.NoDuplicatesDataLoader(train_samples, batch_size=self.train_batch_size)
# train_loss = losses.MultipleNegativesRankingLoss(model=model)
# train_obj = [(train_dataloader, train_loss)]
# return train_obj
# elif loss_func == 'OnlineContrastiveLoss':
# train_data = train_data[train_data['label'] != 0.0] # type:pd.DataFrame
#
# dev_df = train_data.groupby('entity').apply(lambda x: x['entry'].tolist())
#
# scores = dev_df.index.tolist()
# eval_examples = []
# for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# eval_examples.append(InputExample(texts=[t, r]))
#
# for _, sub_df in train_data.iterrows():
# if sub_df['label'] > 0:
# label = 1
# train_samples.append(InputExample(texts=[sub_df['entity'], sub_df['entry']], label=label))
# train_samples.append(InputExample(texts=[sub_df['entry'], sub_df['entity']], label=label))
# else:
# label = 0
# train_samples.append(InputExample(texts=[sub_df['entity'], sub_df['entry']], label=label))
#
# train_loss = losses.OnlineContrastiveLoss(model=model)
# elif loss_func == 'multi-task':
# train_samples_MultipleNegativesRankingLoss = []
# train_samples_ConstrativeLoss = []
#
# for _, sub_df in train_data.iterrows():
# if sub_df['label'] > 0:
# label = 1
# else:
# label = 0
# train_samples_ConstrativeLoss.append(
# InputExample(texts=[sub_df['entity'], sub_df['entry']], label=label))
# if str(label) == '1':
# for _ in range(int(self.data_top_k / 2)):
# train_samples_MultipleNegativesRankingLoss.append(
# InputExample(texts=[sub_df['entity'], sub_df['entry']], label=1))
# train_samples_MultipleNegativesRankingLoss.append(
# InputExample(texts=[sub_df['entry'], sub_df['entity']], label=1))
#
# # Create data loader and loss for MultipleNegativesRankingLoss
# train_dataset_MultipleNegativesRankingLoss = SentencesDataset(
# train_samples_MultipleNegativesRankingLoss,
# model=model)
# train_dataloader_MultipleNegativesRankingLoss = DataLoader(train_dataset_MultipleNegativesRankingLoss,
# shuffle=True,
# batch_size=self.train_batch_size)
# train_loss_MultipleNegativesRankingLoss = losses.MultipleNegativesRankingLoss(model)
#
# # Create data loader and loss for OnlineContrastiveLoss
# train_dataset_ConstrativeLoss = SentencesDataset(train_samples_ConstrativeLoss, model=model)
# train_dataloader_ConstrativeLoss = DataLoader(train_dataset_ConstrativeLoss, shuffle=True,
# batch_size=self.train_batch_size)
#
# # As distance metric, we use cosine distance (cosine_distance = 1-cosine_similarity)
# distance_metric = losses.SiameseDistanceMetric.COSINE_DISTANCE
# # Negative pairs should have a distance of at least 0.5
# margin = 0.5
# train_loss_ConstrativeLoss = losses.OnlineContrastiveLoss(model=model, distance_metric=distance_metric,
# margin=margin)
# train_object = [
# (train_dataloader_MultipleNegativesRankingLoss, train_loss_MultipleNegativesRankingLoss),
# (train_dataloader_ConstrativeLoss, train_loss_ConstrativeLoss)]
#
# return train_object
# elif loss_func == 'BatchHardSoftMarginTripletLoss':
# ### There are 4 triplet loss variants:
# ### - BatchHardTripletLoss
# ### - BatchHardSoftMarginTripletLoss
# ### - BatchSemiHardTripletLoss
# ### - BatchAllTripletLoss
#
# from sentence_transformers.datasets.SentenceLabelDataset import SentenceLabelDataset
#
# guid = 1
# self.label_map_file = "./data/v2/label_dict.xlsx"
# label_map = pd.read_excel(self.label_map_file)
# label_map = dict(zip(label_map['entry'].tolist(), label_map['label_num'].tolist()))
# train_samples = []
# for _, sub_df in train_data.iterrows():
# if sub_df['label'] != 0:
# train_samples.append(InputExample(guid=str(guid), texts=[sub_df['entity']],
# label=label_map.get(sub_df['entry'])))
# guid += 1
#
# print(f'train_length:{len(train_samples)}')
# self.train_length = len(train_samples)
#
# train_dataset = SentenceLabelDataset(train_samples)
# train_dataloader = DataLoader(train_dataset, batch_size=self.train_batch_size, drop_last=True)
# train_loss = losses.BatchHardSoftMarginTripletLoss(model=model)
# return train_dataloader, train_loss
# else:
# for _, sub_df in train_data.iterrows():
# train_samples.append(InputExample(texts=[sub_df['entity'], sub_df['entry']], label=sub_df['label']))
# train_loss = losses.CosineSimilarityLoss(model=model)
#
# train_dataset = SentencesDataset(train_samples, model)
# train_dataloader = DataLoader(dataset=train_dataset, shuffle=True, batch_size=self.train_batch_size)
# train_obj = [(train_dataloader, train_loss)]
# return train_obj
#
# #
# #
# # def get_evaluator(self, dev_df, evaluator_func='EmbeddingSimilarityEvaluator', collection='t1', corpus=None,
# # top_k=100, encode_batch_size=128):
# # self.evaluator_func = evaluator_func
# # dev_df = resample(dev_df, replace=False)
# #
# # if evaluator_func == 'MyEvaluator':
# # from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator
# # from sentence_transformers import InputExample
# # dev_df = dev_df[dev_df['label'] != 0.0] # type:pd.DataFrame
# # dev_df = dev_df.groupby('entity').apply(lambda x: x['entry'].tolist())
# # scores = dev_df.index.tolist()
# # eval_examples = []
# # for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# # eval_examples.append(InputExample(texts=[t, r]))
# # evaluator = MyEvaluator.from_input_examples(eval_examples, name='sts-eval', collection=collection,
# # top_k=top_k, encode_batch_size=encode_batch_size)
# #
# # # elif evaluator_func == 'InformationRetrievalEvaluator':
# # # ir_evaluator = InformationRetrievalEvaluator(dev_queries, corpus, dev_rel_docs,
# # # show_progress_bar=True,
# # # corpus_chunk_size=100000,
# # # precision_recall_at_k=[10, 100],
# # # name="msmarco dev")
# # elif evaluator_func == 'recall_evaluator':
# # from pharm_ai.panel.entry_match.retrieval_eval import RecallEvaluator
# # # dev_df = dev_df[dev_df['label'] != 0.0] # type:pd.DataFrame
# # from sentence_transformers import InputExample
# # dev_df = dev_df.groupby('entity').apply(lambda x: x['entry'].tolist())
# #
# # scores = dev_df.index.tolist()
# # eval_examples = []
# # for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# # eval_examples.append(InputExample(texts=[t, r]))
# #
# # evaluator = RecallEvaluator.from_input_examples(examples=eval_examples, corpus=corpus, name='sts-eval',
# # top_k=top_k, encode_batch_size=encode_batch_size)
# # return evaluator
# #
# # elif evaluator_func == 'seq_evaluator':
# # from sentence_transformers import evaluation
# # from sentence_transformers import InputExample
# # from pharm_ai.panel.entry_match.revise_evaluator import MyEvaluator
# # evaluators = []
# #
# # sentences_1 = []
# # sentences_2 = []
# # scores_ = []
# # for _, sub_df in dev_df.iterrows():
# #
# # sentences_1.append(sub_df['entity'])
# # sentences_2.append(sub_df['entry'])
# # if sub_df['label'] > 0:
# # scores_.append(1)
# # else:
# # scores_.append(0)
# #
# # binary_acc_evaluator = evaluation.BinaryClassificationEvaluator(sentences_1, sentences_2, scores_)
# # evaluators.append(binary_acc_evaluator)
# #
# # dev_df = dev_df[dev_df['label'] != 0.0] # type:pd.DataFrame
# # dev_df = dev_df.groupby('entity').apply(lambda x: x['entry'].tolist())
# # # scores = dev_df.index.tolist()
# # eval_examples = []
# # for t, r in zip(dev_df.index.tolist(), dev_df.tolist()):
# # eval_examples.append(InputExample(texts=[t, r]))
# # my_evaluator = MyEvaluator.from_input_examples(eval_examples, name='sts-eval', collection=collection,
# # top_k=top_k, encode_batch_size=encode_batch_size)
# #
# # evaluators.append(my_evaluator)
# # seq_evaluator = evaluation.SequentialEvaluator(evaluators,
# # main_score_function=lambda scores: scores[-1])
# # return seq_evaluator
# #
# # elif evaluator_func == 'EmbeddingSimilarityEvaluator':
# # sentences_1 = []
# # sentences_2 = []
# # scores = []
# # for _, sub_df in dev_df.iterrows():
# # # if sub_df['label'] != 0.0:
# # sentences_1.append(sub_df['entity'])
# # sentences_2.append(sub_df['entry'])
# # scores.append(sub_df['label'])
# #
# # evaluator = EmbeddingSimilarityEvaluator(sentences_1, sentences_2, scores)
# # else:
# # sentences_1 = []
# # sentences_2 = []
# # scores = []
# # for _, sub_df in dev_df.iterrows():
# # if sub_df['label'] != 0.0:
# # sentences_1.append(sub_df['entity'])
# # sentences_2.append(sub_df['entry'])
# # scores.append(sub_df['label'])
# # evaluator = EmbeddingSimilarityEvaluator(sentences_1, sentences_2, scores)
# # print(f'dev_length:{len(scores)}')
# # self.dev_length = len(scores)
# # return evaluator
# #
# # @staticmethod
# # def save_parameters(para_obj, save_model='./test.json'):
# # """
# # 存储一个对象的参数,对象参数可以是模型参数或超参数
# # Args:
# # para_obj: 要存储的参数的对象
# # save_model: 保存路径
# #
# # Returns:
# #
# # """
# # para_list = para_obj.__dir__()
# # # save_para_list = ['best_score','device','max_seq_length','tokenizer']
# # para = {}
# # for p in para_list:
# # if not p.startswith('_'):
# # # if p in save_para_list:
# # r = getattr(para_obj, p)
# # if isinstance(r, int) or isinstance(r, str) or isinstance(r, float) or isinstance(r, list) \
# # or isinstance(r, bool):
# # para[p] = r
# #
# # with open(save_model, "w", encoding='utf-8') as f:
# # # indent 超级好用,格式化保存字典,默认为None,小于0为零个空格
# # # f.write(json.dumps(para,indent=4))
# # json.dump(para, f, indent=4) # 传入文件描述符,和dumps一样的结果
# #
# # para.pop("all_scores")
# # with open(log_file, "a", encoding='utf-8') as f:
# # json.dump(para, f, indent=4)
# # f.write('\n')
# #
# # def call_back(self, score, epoch, steps):
# # self.all_scores.append({str(epoch) + '-' + str(steps): score})
# # if score > self.best_score:
# # self.best_score = score
# # print(f'epoch:{epoch}: score:{score} ')
# #
# # def get_corpus(self):
# # self.corpus_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_v2_1217.xlsx"
# # corpus = pd.read_excel(self.corpus_file)
# # corpus = list(set(corpus['entry'].tolist()))
# # return corpus
# #
# # def run(self):
# # self.train_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # train_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # 'train')
# # self.dev_file = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5"
# # dev_df = pd.read_hdf("/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/data/v2/disease_retrieval_v2.h5",
# # 'eval')
# #
# # # self.model_path = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
# # # self.model_path = "sentence-transformers/distiluse-base-multilingual-cased-v1"
# # self.model_path = "/home/zyl/disk/PharmAI/pharm_ai/panel/entry_match/best_model/disease_v2.0/"
# #
# # self.use_st_model = True
# # self.model_version = 'di_retrieval_v2.1'
# #
# # from zyl_utils import get_best_cuda_device
# # self.cuda_device = get_best_cuda_device(gpu_num=1)
# # self.max_seqence_length = 128
# # self.output_dimension = 1024
# # self.train_batch_size = 256
# # self.data_top_k = 3
# # self.epoch = 5
# # self.learning_rate = 1e-5
# #
# # self.train_model(train_df, dev_df,
# # loss_func='MultipleNegativesRankingLoss2', # multi-task
# # evaluator_func="recall_evaluator",
# # encode_batch_size=32)
#
# if __name__ == '__main__':
# # get_auto_device()
# # FineTurn().run()
# # Trainer().run()
#
# TrainRetrieval().run()
#
# pass
# if __name__ == '__main__':
# class Re

# ---- file: zyl_utils/model_utils/models/retrieval_bi_encoder.py (end) ----
import logging
import math
import os
import random
from dataclasses import asdict
import pandas as pd
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from simpletransformers.t5.t5_model import T5Model
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm, trange
from transformers.optimization import AdamW, Adafactor
from transformers.optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
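
# Illustrative usage sketch (an assumption, not original code) for the DDP wrapper defined
# below: the model name, the toy DataFrame and output_dir are placeholders, and at least two
# visible GPUs are assumed since train_model() spawns one process per GPU.
def _example_ddp_t5_usage():
    train_df = pd.DataFrame(
        {
            'prefix': ['translate english to german'],
            'input_text': ['The house is wonderful.'],
            'target_text': ['Das Haus ist wunderbar.'],
        }
    )
    model = DDPT5Model(
        'mt5',
        'google/mt5-small',
        args={'n_gpu': 2, 'output_dir': './outputs/', 'overwrite_output_dir': True},
    )
    model.train_model(train_df, eval_data=train_df)
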
class DDPT5Model(T5Model):
"""The DDP version of T5Model"""
def __init__(
self,
model_type,
model_name,
args=None,
tokenizer=None,
use_cuda=True,
cuda_device=-1,
**kwargs,
):
"""
Initializes a DDP T5Model model. Turn off multi-processing settings.
Args:
model_type: The type of model (t5, mt5)
model_name: The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
super().__init__(model_type, model_name, args, tokenizer, use_cuda, cuda_device, **kwargs)
self.args.use_multiprocessing = False
self.args.use_multiprocessing_for_evaluation = False
if self.args.n_gpu == 1:
            raise ValueError("DDP training requires more than one GPU, but args.n_gpu == 1.")
def train_model(
self,
train_data,
output_dir=None,
show_running_loss=True,
args=None,
eval_data=None,
verbose=True,
**kwargs,
):
"""
Trains the model using 'train_data'
Args:
train_data: Pandas DataFrame containing the 3 columns - `prefix`, `input_text`, `target_text`.
- `prefix`: A string indicating the task to perform. (E.g. `"question"`, `"stsb"`)
- `input_text`: The input text sequence. `prefix` is automatically prepended to form the full input. (<prefix>: <input_text>)
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
            verbose (optional): whether to print progress and logging output.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.
Returns:
""" # noqa: ignore flake8"
if args:
self.args.update_from_dict(args)
if self.args.evaluate_during_training and eval_data is None:
raise ValueError(
"evaluate_during_training is enabled but eval_data is not specified."
" Pass eval_data to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args.overwrite_output_dir = True to overcome.".format(output_dir)
)
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
os.environ['MASTER_ADDR'] = 'localhost'
port = random.randint(10000, 20000)
os.environ['MASTER_PORT'] = str(port)
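        # one process per GPU: mp.spawn launches self.args.n_gpu copies of train_each_proc and
        # passes each copy its process index, which train() then uses as the DDP local rank /
        # CUDA device id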
mp.spawn(self.train_each_proc, nprocs=self.args.n_gpu,
args=(train_dataset, output_dir,
show_running_loss, eval_data, verbose, kwargs))
# self.save_model(model=self.model)
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args.model_name, output_dir))
def train_each_proc(self, process_index, train_dataset, *train_args):
"""
        A wrapper around train() for each DDP process.
        :param process_index: index of the spawned process; used as the DDP local rank.
        :param train_dataset: The training set.
        :param train_args: the remaining positional arguments passed to train(), with the keyword
            arguments packed into the last element.
:return: The same as train().
"""
self._local_rank = process_index
self._world_size = self.args.n_gpu
self.train(train_dataset, *train_args[:-1], **train_args[-1])
def train(
self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
args = self.args
self.device = torch.device(f"cuda:{self._local_rank}")
self._move_model_to_device()
torch.distributed.init_process_group(
backend='nccl',
init_method='env://',
world_size=self._world_size,
rank=self._local_rank
)
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self._local_rank])
model = self.model
if self._local_rank == 0:
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
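        # DistributedSampler shards the dataset across ranks, and the per-process batch size is
        # train_batch_size // world_size, so the effective global batch size stays at
        # args.train_batch_size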
train_sampler = DistributedSampler(
train_dataset,
num_replicas=self._world_size,
rank=self._local_rank
)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size // self._world_size,
pin_memory=True
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
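        # parameters named in custom_parameter_groups / custom_layer_parameters are peeled off
        # first; for everything else, weight decay is disabled on biases and LayerNorm weights
        # via the no_decay list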
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop("params")
custom_parameter_names.update(params)
param_group = {**group}
param_group["params"] = [p for n, p in model.named_parameters() if n in params]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop("layer")
layer = f"layer.{layer_number}."
group_d = {**group}
group_nd = {**group}
group_nd["weight_decay"] = 0.0
params_d = []
params_nd = []
for n, p in model.named_parameters():
if n not in custom_parameter_names and layer in n:
if any(nd in n for nd in no_decay):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d["params"] = params_d
group_nd["params"] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if not self.args.train_custom_parameters_only:
optimizer_grouped_parameters.extend(
[
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
)
warmup_steps = math.ceil(t_total * args.warmup_ratio)
args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps
if 0 < args.save_after < 1:
args.save_after = math.ceil(t_total * args.save_after)
if args.optimizer == "AdamW":
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
elif args.optimizer == "Adafactor":
optimizer = Adafactor(
optimizer_grouped_parameters,
lr=args.learning_rate,
eps=args.adafactor_eps,
clip_threshold=args.adafactor_clip_threshold,
decay_rate=args.adafactor_decay_rate,
beta1=args.adafactor_beta1,
weight_decay=args.weight_decay,
scale_parameter=args.adafactor_scale_parameter,
relative_step=args.adafactor_relative_step,
warmup_init=args.adafactor_warmup_init,
)
if self._local_rank == 0:
print("Using Adafactor for T5")
else:
raise ValueError(
"{} is not a valid optimizer class. Please use one of ('AdamW', 'Adafactor') instead.".format(
args.optimizer
)
)
if args.scheduler == "constant_schedule":
scheduler = get_constant_schedule(optimizer)
elif args.scheduler == "constant_schedule_with_warmup":
scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps)
elif args.scheduler == "linear_schedule_with_warmup":
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
elif args.scheduler == "cosine_schedule_with_warmup":
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "cosine_with_hard_restarts_schedule_with_warmup":
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "polynomial_decay_schedule_with_warmup":
scheduler = get_polynomial_decay_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
lr_end=args.polynomial_decay_schedule_lr_end,
power=args.polynomial_decay_schedule_power,
)
else:
raise ValueError("{} is not a valid scheduler.".format(args.scheduler))
if (
args.model_name
and os.path.isfile(os.path.join(args.model_name, "optimizer.pt"))
and os.path.isfile(os.path.join(args.model_name, "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name, "scheduler.pt")))
if self._local_rank == 0:
logger.info(" Training started")
global_step = 0
training_progress_scores = None
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch",
disable=args.silent or self._local_rank != 0, mininterval=0)
epoch_number = 0
best_eval_metric = None
current_loss = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
stop_training = False
if args.model_name and os.path.exists(args.model_name):
try:
# set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name.split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args.wandb_project and self._local_rank == 0:
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
wandb.watch(self.model)
if args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
for epoch in train_iterator:
model.train()
train_sampler.set_epoch(epoch)
if epochs_trained > 0:
epochs_trained -= 1
continue
if self._local_rank == 0:
train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")
batch_iterator = tqdm(
train_dataloader,
desc=f"Running Epoch {epoch_number} of {args.num_train_epochs} on process {self._local_rank}",
disable=args.silent or self._local_rank != 0,
mininterval=0,
)
for step, batch in enumerate(batch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs = self._get_inputs_dict(batch)
if args.fp16:
with amp.autocast():
loss = self.compute_loss(model, args, inputs)
else:
loss = self.compute_loss(model, args, inputs)
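                # reduce a copy of the loss to rank 0 and average it over the world size so the
                # progress bar and logs show a value aggregated across all processes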
loss_ = loss.clone()
torch.distributed.barrier()
torch.distributed.reduce(loss_, 0)
current_loss = loss_.item() / self._world_size
if show_running_loss and self._local_rank == 0:
batch_iterator.set_description(
f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
scaler.unscale_(optimizer)
if args.optimizer == "AdamW":
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0 and self._local_rank == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.wandb_project or self.is_sweeping:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_last_lr()[0]
},
step=global_step
)
if args.save_steps > 0 and global_step % args.save_steps == 0 and self._local_rank == 0:
# Save model checkpoint
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and (
args.evaluate_during_training_steps > 0
and global_step % args.evaluate_during_training_steps == 0
):
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent or self._local_rank != 0,
**kwargs,
)
if self._local_rank == 0:
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args.save_eval_checkpoints:
self.save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
stop_training, best_eval_metric, early_stopping_counter = self.logging_and_saving(
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter)
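                        # rank 0 decides whether early stopping should trigger; the flag is then
                        # broadcast so that every process leaves the training loop together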
torch.distributed.barrier()
stop_training_tensor = torch.tensor([stop_training], device=self.device)
torch.distributed.broadcast(stop_training_tensor, src=0)
stop_training = bool(stop_training_tensor.cpu()[0])
if stop_training:
break
model.train()
if stop_training:
break
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if (args.save_model_every_epoch or args.evaluate_during_training) and self._local_rank == 0:
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch and self._local_rank == 0:
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and args.evaluate_each_epoch:
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent or self._local_rank != 0,
**kwargs,
)
if self._local_rank == 0:
if args.save_eval_checkpoints:
self.save_model(output_dir_current, optimizer, scheduler, results=results)
stop_training, best_eval_metric, early_stopping_counter = self.logging_and_saving(
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter)
torch.distributed.barrier()
stop_training_tensor = torch.tensor([stop_training], device=self.device)
torch.distributed.broadcast(stop_training_tensor, src=0)
stop_training = bool(stop_training_tensor.cpu()[0])
if stop_training:
break
# close tensorboard writer to avoid EOFError.
if self._local_rank == 0:
tb_writer.close()
wandb.finish()
def eval_model(
self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs
):
"""
Evaluates the model on eval_data. Saves results to output_dir.
Args:
eval_data: Pandas DataFrame containing the 3 columns - `prefix`, `input_text`, `target_text`.
- `prefix`: A string indicating the task to perform. (E.g. `"question"`, `"stsb"`)
- `input_text`: The input text sequence. `prefix` is automatically prepended to form the full input. (<prefix>: <input_text>)
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
results: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args.output_dir
eval_dataset = self.load_and_cache_examples(
eval_data, evaluate=True, verbose=verbose, silent=silent
)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(
eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs
)
self.results.update(result)
if self.args.evaluate_generated_text:
if self.args.preprocess_inputs:
to_predict = [
prefix + ": " + input_text
for prefix, input_text in zip(
eval_data["prefix"], eval_data["input_text"]
)
]
else:
to_predict = [
prefix + input_text
for prefix, input_text in zip(
eval_data["prefix"], eval_data["input_text"]
)
]
preds = self.predict(to_predict)
result = self.compute_metrics(
eval_data["target_text"].tolist(), preds, **kwargs
)
self.results.update(result)
if verbose:
logger.info(self.results)
return self.results
def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_dataset.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
eval_sampler = DistributedSampler(
eval_dataset,
num_replicas=self._world_size,
rank=self._local_rank
)
eval_dataloader = DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=args.eval_batch_size // self._world_size,
pin_memory=True
)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
if self.args.fp16:
from torch.cuda import amp
for batch in tqdm(
eval_dataloader,
disable=args.silent or silent,
desc="Running Evaluation"
):
inputs = self._get_inputs_dict(batch)
with torch.no_grad():
if self.args.fp16:
with amp.autocast():
outputs = model(**inputs)
loss = outputs[0]
else:
outputs = model(**inputs)
loss = outputs[0]
torch.distributed.barrier()
torch.distributed.reduce(loss, 0)
eval_loss += loss.item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps / self._world_size
if self._local_rank == 0:
print(eval_loss)
results["eval_loss"] = eval_loss
if self._local_rank == 0:
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
return results
def logging_and_saving(
self,
args,
results,
global_step,
train_iterator,
optimizer,
scheduler,
model,
training_progress_scores,
current_loss,
best_eval_metric,
verbose,
early_stopping_counter):
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args.output_dir, "training_progress_scores.csv"), index=False,
)
if args.wandb_project or self.is_sweeping:
wandb.log(self._get_last_metrics(training_progress_scores), step=global_step)
stop_training = False
if global_step > args.save_after:
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if args.early_stopping_metric_minimize:
if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
stop_training, early_stopping_counter = \
self.check_early_stopping(early_stopping_counter, args, train_iterator, verbose)
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
stop_training, early_stopping_counter = \
self.check_early_stopping(early_stopping_counter, args, train_iterator, verbose)
return stop_training, best_eval_metric, early_stopping_counter
def check_early_stopping(self, early_stopping_counter, args, train_iterator, verbose):
stop_training = False
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
stop_training = True
return stop_training, early_stopping_counter
def compute_loss(self, model, args, inputs):
outputs = model(**inputs)
if args.r_drop:
outputs_ = model(**inputs)
loss = self.compute_r_drop_loss(
outputs['loss'],
outputs_['loss'],
outputs['logits'],
outputs_['logits'],
inputs['attention_mask'],
args.r_drop_alpha
)
else:
loss = outputs[0]
return loss
def compute_kl_loss(self, p, q, pad_mask=None, reduction='mean'):
p_loss = F.kl_div(F.log_softmax(p, dim=-1), F.softmax(q, dim=-1), reduction='none')
q_loss = F.kl_div(F.log_softmax(q, dim=-1), F.softmax(p, dim=-1), reduction='none')
if pad_mask is not None:
p_loss.masked_fill_(pad_mask.to(bool).unsqueeze(-1), 0.)
q_loss.masked_fill_(pad_mask.to(bool).unsqueeze(-1), 0.)
if reduction == 'mean':
p_loss = p_loss.mean()
q_loss = q_loss.mean()
elif reduction == 'sum':
p_loss = p_loss.sum()
q_loss = q_loss.sum()
else:
raise ValueError('Only mean or sum reduction is supported in computing KL Divergence!')
loss = (p_loss + q_loss) / 2
return loss
def compute_r_drop_loss(self, ce1, ce2, logit1, logit2, attention_mask, alpha, reduction='mean'):
kl_loss = self.compute_kl_loss(logit1, logit2, attention_mask, reduction=reduction)
ce_loss = 0.5 * (ce1 + ce2)
return ce_loss + alpha * kl_loss | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/DDPT5model.py | DDPT5model.py |
import time
import pandas as pd
import wandb
from loguru import logger
from simpletransformers.ner import NERModel
class ModelUtils:
def __init__(self):
pass
@staticmethod
def get_auto_cuda_device(gpu_num=1):
import pynvml
import numpy as np
pynvml.nvmlInit()
deviceCount = pynvml.nvmlDeviceGetCount()
deviceMemory = dict()
for i in range(deviceCount):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
deviceMemory.update({i:mem_info.free / 1024 / 1024}) # M
deviceMemory = sorted(deviceMemory.items(), key=lambda x: x[1], reverse=True)
deviceMemory = np.array(deviceMemory, dtype=np.int64).tolist()
deviceMemory_tuple = deviceMemory[0:gpu_num]
deviceMemory = ','.join([str(d[0]) for d in deviceMemory_tuple])
        logger.info(f'GPU {deviceMemory_tuple[-1][0]} has the least free memory of the selected devices: {deviceMemory_tuple[-1][-1]} MiB')
return deviceMemory
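    # Possible usage (an assumption, not taken from the original docs): the returned string is a
    # comma-separated list of device indices ordered by free memory, which can be used like
    # >>> os.environ['CUDA_VISIBLE_DEVICES'] = ModelUtils.get_auto_cuda_device(gpu_num=2)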
@staticmethod
def eval_decoration(eval_func):
# #############################################################
        # example: the instance using this decorator should define self.wandb_proj, self.model_version and self.model_args
# >>> @eval_decoration
# >>> def eval(eval_df,a,b):
# >>> eval_res = func... a,b
# >>> return eval_res
# ############################################################
def eval_method(self, eval_df, *args, **kwargs):
evel_size = eval_df.shape[0]
# wand_db
wandb.init(project=self.wandb_proj, config=self.model_args,
name=self.model_version + time.strftime("_%m%d_%H:%M:%S", time.localtime()),
tags=[self.model_version, 'eval'])
try:
start_time = time.time()
logger.info(f'start eval: model_version---{self.model_version},eval size---{evel_size}')
eval_res = eval_func(self, eval_df, *args, **kwargs) # type:dict
logger.info('eval finished!!!')
end_time = time.time()
need_time = round((end_time - start_time) / evel_size, 5)
eval_time = round(need_time * evel_size, 4)
print(f'eval results: {eval_res}')
logger.info(f'eval time: {need_time} s * {evel_size} = {eval_time} s')
assert isinstance(eval_res, dict) == True
eval_res.update({"evel_size": evel_size})
wandb.log(eval_res)
except Exception as error:
logger.error(f'eval failed!!! ERROR:{error}')
eval_res = dict()
finally:
wandb.finish()
return eval_res
return eval_method
if __name__ == '__main__':
ModelUtils.get_auto_cuda_device() | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/models/model_utils.py | model_utils.py |
from typing import List, Set
import pandas as pd
def entity_recognition_metrics(
y_true: List[Set],
y_pred: List[Set],
pos_neg_ratio: str = None,
self_metric=False
) -> pd.DataFrame:
"""
    The metric for entity recognition, version v2. Reference: https://docs.qq.com/doc/DYXRYQU1YbkVvT3V2
    Args:
        y_true: list[set], the list of true target entities; each element is a set
        y_pred: list[set], the list of predicted target entities; each element is a set
        pos_neg_ratio: the relative importance of positive vs. negative samples; defaults to the ratio of
            positive and negative sample sizes, and can be overridden with a string like "7:3"
        self_metric: whether to also print the self-defined metric
    Returns:
        Prints a report and returns the results as a DataFrame.
"""
neg_data = 0
neg_correct_dt = 0
neg_wrong_dt = 0
neg_redundant_entities = 0
pos_data = 0
pos_correct_dt = 0
pos_wrong_dt = 0
pos_correct_entities = 0
pos_wrong_entities = 0
pos_omitted_entities = 0
pos_redundant_entities = 0
for i, j in zip(y_true, y_pred):
if i == set():
neg_data += 1
if j == set():
neg_correct_dt += 1
else:
neg_wrong_dt += 1
neg_redundant_entities += len(j)
else:
pos_data += 1
true_pred = len(i & j)
pos_correct_entities += true_pred
if i == j:
pos_correct_dt += 1
elif len(i) > len(j):
pos_wrong_dt += 1
pos_wrong_entities += (len(j) - true_pred)
pos_omitted_entities += (len(i) - len(j))
else:
pos_wrong_dt += 1
pos_redundant_entities += (len(j) - len(i))
pos_wrong_entities += (len(i) - true_pred)
all_pos_entities = pos_correct_entities + pos_wrong_entities + pos_omitted_entities + pos_redundant_entities
pred_neg = sum([1 for j in y_pred if len(j) == 0])
true_neg = sum([1 for i in y_true if len(i) == 0])
pred_pos = sum([len(j) for j in y_pred])
true_pos = sum([len(i) for i in y_true])
if neg_data == 0:
neg_metric = neg_precision = neg_recall = neg_f1 = 0
else:
neg_metric = neg_correct_dt / (neg_correct_dt + neg_redundant_entities)
neg_precision = neg_correct_dt / pred_neg if pred_neg else 0
neg_recall = neg_correct_dt / true_neg if true_neg else 0
neg_f1 = 2 * neg_precision * neg_recall / (neg_precision + neg_recall + 1e-10)
if pos_data == 0:
pos_metric = pos_precision = pos_recall = pos_f1 = 0
else:
pos_metric = pos_correct_entities / all_pos_entities
pos_precision = pos_correct_entities / pred_pos if pred_pos else 0
pos_recall = pos_correct_entities / true_pos if true_pos else 0
pos_f1 = 2 * pos_precision * pos_recall / (pos_precision + pos_recall + 1e-10)
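    # micro score: correctly predicted entities plus correctly-empty negative samples,
    # over all positive entity slots and negative-sample outcomes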
sum_metric_micro = (pos_correct_entities + neg_correct_dt) / (
neg_correct_dt + neg_redundant_entities + all_pos_entities)
# sum_metric_macro = neg_metric * 0.5 + pos_metric * 0.5
precision = (neg_correct_dt + pos_correct_entities) / (pred_pos + pred_neg + 1e-10)
recall = (neg_correct_dt + pos_correct_entities) / (true_pos + true_neg + 1e-10)
f1 = 2 * precision * recall / (precision + recall + 1e-10)
if pos_neg_ratio:
pos_all = float(pos_neg_ratio.split(':')[0])
neg_all = float(pos_neg_ratio.split(':')[1])
pos_ratio = pos_all / (pos_all + neg_all)
neg_ratio = neg_all / (pos_all + neg_all)
else:
pos_ratio = pos_data / (pos_data + neg_data)
neg_ratio = neg_data / (pos_data + neg_data)
sum_metric_weighted = pos_ratio * pos_metric + neg_ratio * neg_metric
# pos_precision = pos_correct_dt / (neg_correct_dt + pos_correct_dt)
# recall = pos_correct_dt / pos_data
tp = pos_correct_dt
fn = pos_wrong_dt
fp = neg_wrong_dt
tn = neg_correct_dt
accuracy = (tp + tn) / (tp + fn + fp + tn)
# precision = tp / (tp + fp)
# recall = tp / (tp + fn)
# f1 = 2 / (1 / precision + 1 / recall)
r = {
'positive data': [str(pos_data), pos_correct_dt, pos_wrong_dt, pos_correct_entities,
pos_wrong_entities, pos_omitted_entities, pos_redundant_entities,
pos_precision, pos_recall, pos_f1, pos_metric],
'negative data': [neg_data, neg_correct_dt, neg_wrong_dt, '-', '-', '-', neg_redundant_entities,
neg_precision, neg_recall, neg_f1, neg_metric],
'all data ': [str(pos_data + neg_data), neg_correct_dt + pos_correct_dt, neg_wrong_dt + pos_wrong_dt,
pos_correct_entities, pos_wrong_entities, pos_omitted_entities,
pos_redundant_entities + neg_redundant_entities,
precision, recall, f1, sum_metric_micro],
'weighted score': ['', '', '', '', '', '', '', '', '', '', sum_metric_weighted],
}
index = ['| data_num', '| correct_data', '| wrong_data', '| correct_entities', '| wrong_entities',
'| omitted_entities', '| redundant_entities', '| precision', '| recall', '| f1', '| score']
res_df = pd.DataFrame(r, index=index).T
pd.set_option('precision', 4)
pd.set_option('display.width', None)
pd.set_option('display.max_columns', None)
pd.set_option("colheader_justify", "center")
print(res_df)
print(
f"正样本集得分为:{pos_correct_entities} / "
f"({pos_correct_entities}+{pos_wrong_entities}+{pos_omitted_entities}+"
f"{pos_redundant_entities}) = {round(pos_metric, 4)},负样本集得分为:{neg_correct_dt} / ({neg_correct_dt} + "
f"{neg_redundant_entities})={round(neg_metric, 4)},",
f"总体得分为: ({pos_correct_entities} + {neg_correct_dt}) / "
f"({all_pos_entities}+{neg_correct_dt + neg_redundant_entities})={round(sum_metric_micro, 4)}",
# f"准确率:{accuracy}",
)
print('\n')
if self_metric:
more_not_error_pos = (pos_correct_entities + pos_redundant_entities) / (
pos_correct_entities + pos_wrong_entities + pos_omitted_entities + pos_redundant_entities)
f"自定义-正样本集得分为:{pos_correct_entities + pos_redundant_entities} /"
f" ({pos_correct_entities}+{pos_wrong_entities}+{pos_omitted_entities}+"
f"{pos_redundant_entities}) = {round(more_not_error_pos, 4)},负样本集得分为:{round(1, 4)},"
print('\n')
return res_df
if __name__ == '__main__':
y_true = [{'a','b'},{'j','d'},{'c','k'}]
y_true.extend([set()]*27)
y_pred = [{'a','b'},{'j','d'},{'c','f'}]
y_pred.extend([set()] * 27)
# y_true = [{'a','b','j','d','c','k'}]
# y_pred = [{'a','b','j','d','c','f'}]
r = entity_recognition_metrics(y_true,y_pred)
# print(r.iloc[2,-3])
print('1') | zyl-utils | /zyl_utils-0.1.4.tar.gz/zyl_utils-0.1.4/zyl_utils/model_utils/metrics/ner_metric.py | ner_metric.py |
## Zylo-Admin
Zylo-Admin is a batteries-included starter for the newly updated Zylo v2.0.8.
## Available scripts
```bash
zylo-admin startproject -i {projectname}
```
```bash
zylo-admin manage engine
```
```bash
zylo-admin runserver {projectname}
```
- zylo-admin --> the main CLI module
- zylo-admin startproject --> creates a WSGI project for Zylo
- zylo-admin startproject -i {projectname} --> -i denotes the project cells, set to 100.55 m/s by default
- zylo-admin manage engine --> manage handles all the static and template files; engine denotes the default engine in settings.py
- zylo-admin runserver {projectname} --> runserver runs the server in debug mode by default; just pass the name of the WSGI project folder you created | zylo-admin | /zylo-admin-1.0.3.tar.gz/zylo-admin-1.0.3/README.md | README.md
import os
import sys
import pickle
import subprocess
import argparse
created_projects = []
def create_folder(project_name):
if not os.path.exists(project_name):
os.makedirs(project_name)
print(f"Folder '{project_name}' created.")
else:
print(f"Folder '{project_name}' already exists.")
def create_file(file_path, content):
with open(file_path, 'w') as f:
f.write(content)
print(f"File '{file_path}' created.")
def create_views_static(project_name):
views_folder = f"{project_name}/views"
static_folder = f"{project_name}/static"
os.makedirs(views_folder)
os.makedirs(static_folder)
print("Folders 'views' and 'static' created.")
index_html_code = '''<!DOCTYPE html>
<html>
<head>
<title>Welcome to Zylo Web Framework</title>
</head>
<body>
<div class="flex items-center justify-center h-screen bg-gray-100">
<h1 class="text-4xl font-bold text-indigo-700">Welcome to Zylo Web Framework</h1>
</div>
</body>
</html>
'''
create_file(f"{views_folder}/index.html", index_html_code)
create_file(f"{static_folder}/script.js", "")
create_file(f"{static_folder}/style.css", "")
def update_modules_json(project_name):
modules_json_file = f"{project_name}/modules.json"
import json
with open(modules_json_file, 'r') as f:
data = json.load(f)
data[0]["modules"].extend([
{"name": "viewengine", "url": "http://zylo.vvfin.in/jit/23116933/modules/viewengine?pypi=True&connected=True"},
{"name": "staticengine", "url": "http://zylo.vvfin.in/jit/23116933/modules/static?pypi=True&connected=True"},
{"name": "pubsec", "url": "http://zylo.vvfin.in/jit/23116933/modules/pubsec?pypi=True&connected=True"}
])
with open(modules_json_file, 'w') as f:
json.dump(data, f, indent=4)
print("modules.json updated.")
def load_created_projects():
if os.path.exists("project.pkl"):
with open("project.pkl", "rb") as f:
return pickle.load(f)
return []
def save_created_projects(projects):
with open("project.pkl", "wb") as f:
pickle.dump(projects, f)
def run_server(project_name):
app_file = f"{project_name}/app.py"
if os.path.exists(app_file):
print(f"Running server for project '{project_name}'...")
subprocess.run(["python", app_file])
else:
print(f"Error: 'app.py' file not found in project '{project_name}'.")
def main():
global created_projects # Declare the variable as global to modify it inside the function
parser = argparse.ArgumentParser(description="ZyloAdmin - A Python project management tool for Zylo Web Framework.")
subparsers = parser.add_subparsers(dest='command', help="Available commands")
# Subparser for the 'startproject' command
startproject_parser = subparsers.add_parser('startproject', help='Create a new project')
startproject_parser.add_argument('-i', '--projectname', required=True, help='Name of the project')
# Subparser for the 'runserver' command
runserver_parser = subparsers.add_parser('runserver', help='Run the server for a project')
runserver_parser.add_argument('projectname', help='Name of the project to run the server for')
# Subparser for the 'manage' command
manage_parser = subparsers.add_parser('manage', help='Manage startup engine')
manage_parser.add_argument('engine', choices=['engine'], help='Manage the startup engine')
args = parser.parse_args()
if args.command == 'startproject':
project_name = args.projectname
create_folder(project_name)
settings_code = '''HOST = 'localhost'
PORT = 8000
DEBUG = True
SECRET_KEY = "your_secret_key"
STATIC_FOLDER = "static"
TEMPLATES = [
{
'BACKEND': 'zylo.backends.ZyloTemplates',
'DIRS': ['views'],
}
]
DATABASES = {
'default': {
'ENGINE': 'zylo.db.backends.electrus',
'HOST': 'localhost',
'PORT': 37017,
'USER': 'root',
'PASSWORD': 'root'
}
}
MAILER = [
{
"SMTP": "VALUE",
"PORT": "VALUE",
"USERNAME": "VALUE",
"PASSWORD": "VALUE",
"SSL": True,
"DEFAULT_SENDER": "VALUE"
}
]
'''
create_file(f"{project_name}/settings.py", settings_code)
app_code = '''from zylo.core.branch import Zylo, Response
app = Zylo(__name__)
@app.route('/', methods=['GET', 'POST'])
def home(request):
return Response("Welcome to Zylo Web Framework")
if __name__ == "__main__":
app.runs()
'''
create_file(f"{project_name}/app.py", app_code)
modules_json_code = '''[
{
"config": [
{
"$host": "127.0.0.1",
"$port": 8000,
"$debug": "True",
"$http": "www.zylo.vvfin.in/conf/%connection%/devweb2?_uri=main&support=True&_ping=192.168.0.1"
}
],
"modules": [
{
"name": "zylo",
"url": "http://zylo.vvfin.in/jit/23116933/modules/zylo?pypi=True&connected=True"
},
{
"name": "mailer",
"url": "http://zylo.vvfin.in/jit/23116933/modules/mailer?pypi=True&connected=True"
},
{
"name": "JwT",
"url": "http://zylo.vvfin.in/jit/23116933/modules/JwT?pypi=True&connected=True"
},
{
"name": "blueprint",
"url": "http://zylo.vvfin.in/jit/23116933/modules/blueprint?pypi=True&connected=True"
},
{
"name": "chiper",
"url": "http://zylo.vvfin.in/jit/23116933/modules/chiper?pypi=True&connected=True"
},
{
"name": "session",
"url": "http://zylo.vvfin.in/jit/23116933/modules/session?pypi=True&connected=True"
},
{
"name": "limiter",
"url": "http://zylo.vvfin.in/jit/23116933/modules/limiter?pypi=True&connected=True"
},
{
"name": "BaseModals",
"url": "http://zylo.vvfin.in/jit/23116933/modules/BaseModals?pypi=True&connected=True"
}
],
"database": [
{"name": "Electrus", "$connection": "True"},
{"name": "MongoDB", "$connection": "False"}
],
"privilege": [
{
"role": "user",
"control": "+055 wbr++",
"$host": "127.0.0.1",
"$port": "8080"
}
]
}
]
'''
create_file(f"{project_name}/modules.json", modules_json_code)
created_projects.append(project_name)
save_created_projects(created_projects)
elif args.command == 'runserver':
project_name = args.projectname
run_server(project_name)
elif args.command == 'manage' and args.engine == 'engine':
created_projects = load_created_projects()
if len(created_projects) == 0:
print("No projects have been created yet.")
return
project_name = created_projects[-1]
create_views_static(project_name)
update_modules_json(project_name)
else:
parser.print_help()
if __name__ == "__main__":
main() | zylo-admin | /zylo-admin-1.0.3.tar.gz/zylo-admin-1.0.3/zyloadmin/main.py | main.py |
# Zylo
Zylo is a lightweight web framework made with love.
## Features
- Simple and intuitive routing
- Template rendering using Jinja2
- Session management with the sessions library
- Static file serving
## Installation
You can install Zylo using pip:
```bash
pip install zylo
```
## Usage
```python
from zylo import Zylo
app = Zylo()
@app.route('/')
def home(request):
return 'Hello, World!'
if __name__ == '__main__':
app.run()
```
## Changelogs
- Beta version 2.0.3
- Latest update of the beta
- Bug fixes with update 2.0.3
- Updated Usage Guide 1.2.1
- Added more functions & bug fixes
- Bug fixes in Zylo
- Mailer updated to --> 1.0.3
```python
from zylo import Zylo
from zylo.limiter import Limiter, render_template
app = Zylo(__name__)
limiter = Limiter(app)
@app.route('/', methods=['GET', 'POST'])
@limiter.limit('10/minutes')
def home(request):
    return render_template('index.html')
if __name__ == '__main__':
app.run()
```
## Blueprint
```python
from zylo import Zylo, Response
from zylo.blueprint import Blueprint
app = Zylo(__name__)
blueprint = Blueprint('auth', __name__, url_prefix='/auth')
@blueprint.route('/')
def home(request):
return Response("Welcome to ZYLO blueprint route")
app.register_blueprint(blueprint)
if __name__ == "__main__":
app.run()
```
## Sessions
```python
from zylo import Zylo, Response, render_template, redirect
app = Zylo(__name__)
@app.route('/')
def home(request):
session = request.session
session['id'] = 123
return redirect('/dashboard')
@app.route('/dashboard')
def dashboard(request):
session = request.session
id = session.get('id')
return render_template('dashboard.html', id=id)
@app.route('/logout')
def logout(request):
request.session.clear()
return Response("You have been successfully logged out")
if __name__ == "__main__":
app.run()
```
## JwT
```python
from zylo.JwT import JwT, error_handler
jwt = JwT()
try:
payload = {'user_id': 123, 'role': 'admin'}
access_token = jwt.create_payload(payload, algorithm="HS256", time_limit_hours=1)
decoded_payload = jwt.verify_payload(access_token)
id = decoded_payload['user_id']
print(f"id: {id}")
except Exception as e:
error_message = error_handler(e)
print('Error:', error_message)
```
## Limiter
```python
from zylo import Zylo, Response
from zylo.limiter import Limiter
app = Zylo(__name__)
limiter = Limiter(app)
@app.route('/')
@limiter.limit(limit=5, period=60)
def home(request):
return Response("Limited route")
if __name__ == "__main__":
app.run()
```
## Mailer
```python
from zylo import Zylo, Response
from zylo.mailer import Mailer
mailer = Mailer()
app = Zylo(__name__)
# Mailer config
mailer.config['SMTP'] = 'SMTP'
mailer.config['SMTP_PORT'] = 'SMTP_PORT'
mailer.config['SENDER_EMAIL'] = 'SENDER_EMAIL'
mailer.config['DEFAULT_SENDER'] = 'DEFAULT_SENDER'
mailer.config['SENDER_PASSWORD'] = 'SENDER_PASSWORD'
mailer.config['SSL'] = True
mailer.config['SSL_SECURITY'] = True
@app.route('/')
def home(request):
email = "[email protected]"
subject = "Welcome to ZYLO"
body = "A user-friendly python web framework made with love"
mail = mailer.send_email(email, subject, body)
if mail:
return Response(f"Mail sent successfully to {email}")
return Response("Something went wrong while sending email")
if __name__ == "__main__":
app.run()
```
## Chiper
```python
# Input sanitization
from zylo.chiper import sanitize_input
name = "'name1'"
san_name = sanitize_input(name)
print(san_name)  # output --> name1
# Generate ID
from zylo.chiper import generate_id
print(generate_id(11))  # length 11, output --> y-909716817
# Secure password validation
from zylo.chiper import is_secure_password
password = "123"
sec_password = "secpassword@0000"
print(is_secure_password(password))  # output --> False
print(is_secure_password(sec_password))  # output --> True
# Email validation
from zylo.chiper import validate_email
print(validate_email("demo@1"))  # output --> False
print(validate_email("[email protected]"))  # output --> True
# Hashing and verifying passwords
from zylo.chiper import hash_password, verify_password
pswd = "mypassword"
hashed_password = hash_password(pswd)
print(hashed_password)  # output --> $zylo.chiper@9e8b057a1f8e43c9e0d8d20769c8f516b5ba419998b5ed6fb877452db4c46049b2bd9560da6fef2c3afb047485cebfbab5cad85787b2be1de820ca5ee42ba3bcfb37c6395dcf4e27abf6a02d1926197a
print(verify_password(pswd, hashed_password))  # output --> True
``` | zylo | /zylo-2.0.3.tar.gz/zylo-2.0.3/README.md | README.md |
import datetime
import logging
import math
from _ssl import SSLWantReadError
from zymbit import settings
from zymbit.exceptions import NotConnected, Disconnect
from zymbit.upstream import registration
from zymbit.upstream.ws import get_websocket
from zymbit.util.client import get_auth_token
from zymbit.util.envelope import get_envelope
from zymbit.util.statemachine import StateMachine, NO_SLEEP
from zymbit.util.time import now
NO_DELTA = datetime.timedelta(seconds=0)
class PubSubStateMachine(StateMachine):
"""
State machine to keep connect to the pubsub engine
This state machine handles bootstrapping a system when it's not yet
registered and once registered, establish a persistent connection to
the pubsub engine
"""
def __init__(self, raise_exceptions=True, message_handler=None, subscriptions=None):
super(PubSubStateMachine, self).__init__(raise_exceptions=raise_exceptions)
self.message_handler = message_handler
self.registration_retries = 0
self.next_registration_attempt = None
self.registration_retry_max_sleep = 3600 # sleep up to an hour
self.subscriptions = subscriptions or []
self.websocket = None
# set last_read to instantiation time so that ping pong is played after
# the connection has been established
self.last_read = now()
self.last_ping = self.last_read
# play ping pong after a minute of silence
self.ping_interval = datetime.timedelta(seconds=settings.PUBSUB_PING_INTERVAL)
@property
def logger(self):
return logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))
def send(self, envelope):
if self.websocket is None:
raise NotConnected()
self.websocket.send(envelope)
##
# State machine methods
##
def init(self):
if self.message_handler:
self.message_handler(get_envelope('proxy', dict(routing_key='proxy.init')))
def check_last_read(self):
_now = now()
next_ping_check = self.last_read + self.ping_interval
if (next_ping_check - _now) < NO_DELTA:
# only send pings once per max_silence_time
next_ping = self.last_ping + self.ping_interval
if (next_ping - _now) < NO_DELTA:
self.logger.debug('sending ping')
self.websocket.send(get_envelope('ping', {}))
self.last_ping = _now
# check if a re-connect is in order
disconnect_time = self.last_read + (self.ping_interval * 3)
if (disconnect_time - _now) < NO_DELTA:
raise Disconnect()
def connect(self):
"""
Connects to the pubsub engine
"""
self.websocket = get_websocket()
# set last_read here so that we are not immediately disconnected by check_last_read()
self.last_read = now()
def disconnect(self):
"""
Disconnects from the pubsub engine
"""
if self.message_handler:
self.message_handler(get_envelope('connection', dict(routing_key='connection.disconnected')))
if self.websocket is None:
return
ws, self.websocket = self.websocket, None
ws.close()
def handle_message(self, buf):
if self.message_handler:
self.message_handler(buf)
else:
self.logger.info(repr(buf))
def has_auth_token(self):
"""
Checks whether this device has an auth token
"""
return get_auth_token() not in ('', None)
def listen(self):
"""
Listens for upstream messages and sends up local messages
"""
try:
buf = self.websocket.recv()
except SSLWantReadError: # seems to be raised when there is no data
buf = None
if buf:
self.last_read = now()
self.handle_message(buf)
return NO_SLEEP
self.check_last_read()
def register(self):
"""
Registers the system with zymbit services
"""
# check to see if a registration attempt should be made
if self.next_registration_attempt:
_now = now()
# when there is a positive delta between now and the next registration attempt
# simply return
if (self.next_registration_attempt - _now) > NO_DELTA:
return False
self.next_registration_attempt = None
registration.register()
self.registration_retries = 0
def registration_error(self):
self.logger.exception(self.last_exception)
self.registration_retries += 1
sleep_time = min(math.pow(2, self.registration_retries), self.registration_retry_max_sleep)
self.next_registration_attempt = now() + datetime.timedelta(seconds=sleep_time)
self.logger.error('Registration error; next retry at {}'.format(self.next_registration_attempt))
def subscribe(self):
"""
Subscribes to desired streams
"""
for subscription in self.subscriptions:
if isinstance(subscription, dict):
params = subscription
else:
params = dict(routing_key=subscription)
envelope = get_envelope('subscribe', params=params)
self.websocket.send(envelope)
transitions = {
StateMachine.start: {
True: init,
},
init: {
None: has_auth_token,
},
has_auth_token: {
False: register,
True: connect,
},
register: {
None: connect,
Exception: registration_error,
},
registration_error: {
None: StateMachine.start,
},
connect: {
None: subscribe,
Exception: disconnect,
},
disconnect: {
None: StateMachine.start,
Exception: StateMachine.start,
},
subscribe: {
None: listen,
Exception: disconnect,
},
listen: {
Exception: disconnect,
},
}
if __name__ == '__main__':
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
PubSubStateMachine(raise_exceptions=False).run() | zymbit-connect | /zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/connect/pubsub.py | pubsub.py |
import json
import logging
import random
import select
import socket
from zymbit import settings
from zymbit.exceptions import NotConnected
from zymbit.util.statemachine import StateMachine
from zymbit.util.time import interval
class LocalClient(StateMachine):
buffer_size = 4096
subscriptions = []
def __init__(self, raise_exceptions=False, loop_sleep_time=None, subscriptions=None):
super(LocalClient, self).__init__(raise_exceptions=raise_exceptions)
self.socket = None
self.loop_sleep_time = loop_sleep_time or self.loop_sleep_time
self.subscriptions = subscriptions or self.subscriptions
@property
def logger(self):
return logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))
def connect(self):
address = self.get_address()
self.logger.info('address={}'.format(address))
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect(address)
self.socket.setblocking(0)
self.logger.debug('connected to {}'.format(address))
def disconnect(self):
if self.socket is not None:
self.socket.close()
def get_address(self):
return (settings.CONSOLE_MESSENGER_HOST, settings.CONSOLE_MESSENGER_PORT)
def handle_buf(self, buf):
buf_utf8 = buf.decode('utf8')
try:
envelope = json.loads(buf_utf8)
if envelope.get('params', {}).get('routing_key') == 'connection.connected':
self.subscribe()
except ValueError:
pass
self.handle_message(buf_utf8)
def handle_message(self, buf):
self.logger.info(buf)
def listen(self):
r, _, _ = select.select([self.socket], [], [], 0.01)
if self.socket in r:
buf = self.socket.recv(self.buffer_size)
self.handle_buf(buf)
self.publish()
def publish(self):
pass
def send(self, buf):
if self.socket is None:
raise NotConnected()
if not buf.endswith('\n'):
buf = '{}\n'.format(buf)
self.socket.send(buf)
def subscribe(self):
for subscription in self.subscriptions:
self.send('action=subscribe,routing_key={}'.format(subscription))
transitions = {
StateMachine.start: {
True: connect,
},
connect: {
None: listen,
Exception: disconnect,
},
disconnect: {
None: StateMachine.start,
Exception: StateMachine.start,
},
listen: {
socket.error: disconnect,
Exception: disconnect,
},
}
class ExampleClient(LocalClient):
subscriptions = [
'#',
]
@interval(30.0)
def publish(self):
value = int(5 * random.random())
data = 'key=foo,value={}'.format(value)
self.send(data)
if __name__ == '__main__':
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
client = ExampleClient()
client.run() | zymbit-connect | /zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/connect/local.py | local.py |
import json
import logging
import time
from zymbit.connect.pubsub import PubSubStateMachine, NotConnected
from zymbit.connect.server import ConsoleMessengerServer
from zymbit.util.buffer import BufferIterator
from zymbit.util.envelope import parse_buf
from zymbit.util.statemachine import NO_SLEEP
from zymbit.util.time import get_sleep_time, now
class Proxy(object):
def __init__(self):
self.pubsub = PubSubStateMachine(raise_exceptions=False, message_handler=self.handle_pubsub_message)
self.messenger_server = ConsoleMessengerServer(self.handle_console_message)
# when set, this message sent to all messenger server clients
self.initial_message = None
self._run = True
self.console_buffer = BufferIterator()
@property
def logger(self):
return logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))
def handle_console_message(self, client, buf):
self.console_buffer.write(buf)
for item in self.console_buffer:
if not item:
continue
self.handle_buf(client, item)
def handle_buf(self, client, buf):
try:
envelope = parse_buf(buf)
except:
self.logger.warning('unable to parse buf={!r}'.format(buf))
return
self.handle_console_connection(client, envelope)
# connection notifications are not sent upstream
data = json.loads(envelope)
if data.get('action') == 'connection':
return
try:
self.pubsub.send(envelope)
except NotConnected as exc:
self.logger.exception(exc)
self.logger.error('unable to send pubsub buf={!r}, envelope={}'.format(buf, envelope))
def handle_console_connection(self, client, envelope):
data = json.loads(envelope)
# nothing to do for disconnects
if data['params'].get('routing_key') != 'connection.connected':
return
# nothing to do when there is no initial message
if self.initial_message is None:
return
self.messenger_server.send(client, self.initial_message)
return True
def handle_pubsub_message(self, buf):
if not buf.endswith('\n'):
buf = '{}\n'.format(buf)
buffer_iterator = BufferIterator(buf=buf)
for t_buf in buffer_iterator:
data = json.loads(t_buf)
if data.get('params', {}).get('routing_key') == 'connection.connected':
self.initial_message = t_buf
elif data.get('params', {}).get('routing_key') == 'connection.disconnected':
self.initial_message = None
try:
self.messenger_server.broadcast(buf)
except Exception as exc:
self.logger.exception(exc)
self.logger.error('unable to send messenger_server buf={!r}'.format(buf))
def run(self):
while self._run:
start = now()
pubsub_result = self.pubsub.loop()
messenger_result = self.messenger_server.loop(select_timeout=0.01)
if NO_SLEEP in (pubsub_result, messenger_result):
continue
time.sleep(get_sleep_time(1.0, start)) | zymbit-connect | /zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/connect/proxy.py | proxy.py |
import logging
import socket
from select import select
from zymbit import settings
from zymbit.util.envelope import get_envelope
BUFSIZE = 4096
# backwards compat with python2
try:
BlockingIOError
except NameError:
BlockingIOError = None.__class__
try:
ConnectionResetError
except NameError:
ConnectionResetError = None.__class__
class BaseServer(object):
def __init__(self, host, port, message_handler=None):
self.addr = (host, port)
self._tcp_sock = None
self._udp_sock = None
self.connections = {}
self.message_handler = message_handler
self._run = True
@property
def logger(self):
logger_name = '{}.{}'.format(__name__, self.__class__.__name__)
return logging.getLogger(logger_name)
@property
def tcp_sock(self):
if self._tcp_sock:
return self._tcp_sock
try:
self._tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._tcp_sock.setblocking(0)
self._tcp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._tcp_sock.bind(self.addr)
self._tcp_sock.listen(128) # max 128 clients
except socket.error:
self.logger.warning('Unable to bind TCP socket at addr={}'.format(self.addr))
else:
self.logger.info("Listening on TCP addr={}".format(self.addr))
return self._tcp_sock
@property
def udp_sock(self):
if self._udp_sock:
return self._udp_sock
try:
self._udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._udp_sock.setblocking(0)
self._udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._udp_sock.bind(self.addr)
except socket.error:
self.logger.warning('Unable to bind UDP socket at addr={}'.format(self.addr))
else:
self.logger.info("Listening on UDP addr={}".format(self.addr))
return self._udp_sock
def broadcast(self, message):
for connection in self.connections:
self.send(connection, message)
def close_tcp(self):
self._tcp_sock = None
def close_udp(self):
self._udp_sock = None
def connect(self, info):
message = get_envelope('connection', dict(routing_key='connection.connected'))
conn, addr = info
self.logger.info('%s, %s %s' % (conn, addr, message))
self.connections[conn] = addr
self.handle_message(conn, message)
def disconnect(self, connection):
message = get_envelope('connection', dict(routing_key='connection.disconnected'))
addr = self.connections.pop(connection)
self.logger.info('%s, %s %s' % (connection, addr, message))
self.handle_message(connection, message)
def fileno(self):
return self.tcp_sock.fileno()
def handle_message(self, client, buf):
if self.message_handler:
self.message_handler(client, buf)
else:
self.logger.info('client={}, buf={}'.format(client, buf))
def loop(self, select_timeout=1.0):
handled = None
# check UDP
try:
buf, client = self.udp_sock.recvfrom(1024)
except socket.error as exc:
if isinstance(exc, (BlockingIOError,)):
error_number = exc.errno
else:
error_number = exc[0]
# (11, 'Resource temporarily unavailable')
# [Errno 35] Resource temporarily unavailable
if error_number not in (11, 35):
self.logger.exception(exc)
self.logger.warning('got socket error_number={}'.format(error_number))
self.close_udp()
else:
if buf:
self.handle_message(client, buf)
handled = True
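        # accept any pending TCP connection; the non-blocking socket raises EAGAIN/EWOULDBLOCK
        # (errno 11/35) when no client is waiting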
try:
self.connect(self.tcp_sock.accept())
except socket.error as exc:
if isinstance(exc, (BlockingIOError,)):
error_number = exc.errno
else:
error_number = exc[0]
# (11, 'Resource temporarily unavailable')
# [Errno 35] Resource temporarily unavailable
if error_number not in (11, 35):
self.logger.exception(exc)
self.logger.warning('got socket error_number={}'.format(error_number))
self.close_tcp()
ready, _, _ = select(self.connections, [], [], select_timeout)
for client in ready:
try:
buf = client.recv(BUFSIZE)
except socket.error as exc:
if isinstance(exc, (ConnectionResetError,)):
error_number = exc.errno
else:
error_number = exc[0]
# [Errno 54] Connection reset by peer
# [Errno 104] Connection reset by peer -- raspbian
if error_number not in (54, 104):
self.logger.exception(exc)
self.logger.warning('got socket error_number={}'.format(error_number))
self.disconnect(client)
continue
else:
if not len(buf):
self.disconnect(client)
continue
self.handle_message(client, buf)
handled = True
return handled
def quit(self):
self.tcp_sock.close()
self.udp_sock.close()
# prevent getting exception where dictionary changes while looping
connections = list(self.connections.keys())
for connection in connections:
self.disconnect(connection)
def run(self):
while self._run:
self.loop()
def send(self, connection, buf):
try:
if not isinstance(buf, (bytes,)):
buf = buf.encode('utf8')
connection.send(buf)
except Exception as exc:
self.logger.exception(exc)
self.logger.error('error sending connection={}, buf={}'.format(connection, buf))
class ConsoleMessengerServer(BaseServer):
def __init__(self, message_handler):
super(ConsoleMessengerServer, self).__init__(
settings.CONSOLE_MESSENGER_HOST,
settings.CONSOLE_MESSENGER_PORT,
message_handler=message_handler
) | zymbit-connect | /zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/connect/server.py | server.py |
import datetime
import dateutil.parser
import functools
import logging
import pytz
# it's impossible that "now" is less than this datetime
# we know we are out of sync with real time if we ever
# get a time value less than this
MIN_DT = datetime.datetime(2014, 7, 25, 17, 00, 00) # Zymbit est date, UTC
utc = pytz.utc
EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=utc)
LONG_TIME_AGO = utc.localize(datetime.datetime(1, 1, 1)) # a really long time ago
# keys follow the same convention as InfluxDB
SECOND_PRECISIONS = {
's': 1,
'ms': 1000,
'u': 1e6,
'n': 1e9,
}
def now():
return utc.localize(datetime.datetime.utcnow())
def timestamp(dt=None):
if dt is None:
dt = now()
return dt.isoformat('T')
def get_sleep_time(seconds, start):
"""
Wait at most the given number of seconds from the initial time given
:param seconds: float - number of seconds to wait
:param start: datetime - the start time
:return: float - time to wait
"""
_now = now()
delta = _now - start
diff = delta.seconds + (1.0 * delta.microseconds / 1e6)
wait = max(0, seconds - diff)
# print 'start={}, _now={}, delta={}, diff={}, wait={}'.format(start, _now, delta, diff, wait)
return wait
def interval(interval_delay, default_return=None):
"""
Call a function every given interval
:param interval_delay: float - number of seconds
:param default_return: when the interval has not passed, what to return (default: None)
"""
interval_delta = datetime.timedelta(seconds=interval_delay)
def wrapper(fn):
@functools.wraps(fn)
def interval_handler(*args, **kwargs):
t0 = now()
last_call = getattr(fn, 'last_call', LONG_TIME_AGO)
if (t0 - last_call) > interval_delta:
fn.last_call = t0
return fn(*args, **kwargs)
else:
return default_return
return interval_handler
return wrapper
class MillisDatetime(object):
def __init__(self, millis):
self.last_millis = None
self.initial = None
self.set_initial(millis)
@property
def logger(self):
return logging.getLogger(__name__)
def get_time(self, millis):
if millis < self.last_millis:
self.logger.info(
'time rolled over, last_millis={}, millis={}'.format(
self.last_millis, millis))
self.set_initial(millis)
delta = datetime.timedelta(milliseconds=millis)
return self.initial + delta
def set_initial(self, millis):
delta = datetime.timedelta(milliseconds=millis)
self.initial = now() - delta
self.last_millis = millis
def get_seconds(iso_timestamp, precision='s'):
"""
Returns the number of seconds since EPOCH for the given ISO 8601 timestamp
"""
dt = dateutil.parser.parse(iso_timestamp)
return get_seconds_dt(dt, precision=precision)
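# Illustrative values (precision suffixes follow the InfluxDB convention defined above):
# >>> get_seconds('1970-01-01T00:00:01+00:00')        # 1.0
# >>> get_seconds('1970-01-01T00:00:01+00:00', 'ms')  # 1000.0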
def get_seconds_dt(dt=None, precision='s'):
"""
Returns the number of seconds since EPOCH for the given datetime object
"""
dt = dt or now()
return (dt - EPOCH).total_seconds() * SECOND_PRECISIONS[precision] | zymbit-connect | /zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/util/time.py | time.py |
import hashlib
import json
import os
import re
from zymbit.settings import AUTH_ROOT, AUTH_TOKEN, CLIENT_ID_VERSION, ZYMBIT_HOST_INFO_PATH
MAC_RE = re.compile(r'.*HWaddr (?P<hwaddr>[^ ]+)')
SDCARD_ATTRS_RE = re.compile(r'ATTRS{(?P<key>[^}]+)}=="(?P<value>[^"]+)"')
def get_auth_path():
client_id = get_client_id()
return os.path.join(AUTH_ROOT, client_id)
def get_auth_token():
auth_token = AUTH_TOKEN
if auth_token is not None:
return auth_token
auth_path = get_auth_path()
if os.path.exists(auth_path):
with open(auth_path, 'r') as fh:
auth_token = fh.read().strip()
return auth_token
def get_cpu_info():
"""
Returns CPU identification information
:return:
"""
info = {
'cpu_hardware': None,
'cpu_revision': None,
'cpu_serial': None,
}
with open(os.path.join(ZYMBIT_HOST_INFO_PATH, 'cpu')) as fh:
content = fh.read()
for line in content.splitlines():
line = line.strip()
if line == '':
continue
line_split = line.split(':', 1)
key = 'cpu_{}'.format(line_split[0].strip().replace(' ', '_').lower())
if key not in list(info.keys()):
continue
info[key] = line_split[1].strip()
return info
def get_eth0_info():
"""
Returns eth0 identification information
:return:
"""
info = {
'eth0_hwaddr': None
}
with open(os.path.join(ZYMBIT_HOST_INFO_PATH, 'eth0')) as fh:
content = fh.read()
for line in content.splitlines():
matches = MAC_RE.match(line)
if not matches:
continue
info['eth0_hwaddr'] = matches.group('hwaddr')
return info
def get_sdcard_info():
"""
Returns sdcard identification information
:return dict: sdcard information
"""
info = {
'sdcard_cid': None,
}
with open(os.path.join(ZYMBIT_HOST_INFO_PATH, 'sdcard')) as fh:
content = fh.read()
for line in content.splitlines():
matches = SDCARD_ATTRS_RE.match(line.strip())
if not matches:
continue
key = 'sdcard_{}'.format(matches.group('key'))
if key not in list(info.keys()):
continue
info[key] = matches.group('value')
return info
def get_client_id():
if CLIENT_ID_VERSION is None:
return get_client_id_latest()
return globals()['get_client_id_v{}'.format(CLIENT_ID_VERSION)]()
def get_client_id_v0():
info = get_eth0_info()
return info['eth0_hwaddr']
def get_client_id_v1():
info = get_client_info()
# the client_id is the hash of a JSON representation of an array of (key, value) 2-tuples
data = json.dumps(sorted(list(info.items()), key=lambda a: a[0])).encode('utf8')
sha = hashlib.sha1(data)
return sha.hexdigest()
# alias the default get_client_id to v1
get_client_id_latest = get_client_id_v1
def get_client_info():
info = {}
info.update(get_cpu_info())
info.update(get_eth0_info())
info.update(get_sdcard_info())
return info | zymbit-connect | /zymbit-connect-2.0.1rc1.tar.gz/zymbit-connect-2.0.1rc1/src/zymbit/util/client.py | client.py |
import json
import uuid
from zymbit.util.time import timestamp
# different ways a data stream buffer is parsed in order to ship it upstream
# NOTE: first one to send back an envelope wins, so order matters!
ENVELOPE_PARSERS = []
def get_parsed_envelope(params):
action = 'data'
if isinstance(params, dict):
_action = params.pop('action', None)
if _action is not None:
action = _action
# this looks like an envelope already, jsonify and return
if 'params' in params:
params['action'] = action
return jsonify(params)
if action == 'data' and 'key' not in params:
params['key'] = 'sensor'
return get_envelope(action, params)
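# For example (illustrative): {'temperature': 22.5} becomes a 'data' envelope
# with params {'temperature': 22.5, 'key': 'sensor'}, while a dict that already
# carries a 'params' key is treated as a ready-made envelope and is only
# re-serialized with its 'action' restored.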
def parse_json_envelope(buf):
try:
params = json.loads(buf)
except ValueError:
return None
else:
if isinstance(params, int):
params = {
'value': params,
}
return params
ENVELOPE_PARSERS.append(parse_json_envelope)
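# Note the special case above: a bare JSON integer such as "42" is wrapped as
# {'value': 42} so it can still travel as a 'data' envelope.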
def parse_comma_equals(buf):
"""
    Parse a string of comma-delimited tokens, each an equals-delimited key/value pair
    :param buf: string - buffer to be parsed
    :return: None when no equals sign is found, otherwise a dict of the parsed key/value pairs
"""
if '=' not in buf:
return None
parsed = {}
unparsed = []
# split at commas
for token in buf.split(','):
# get rid of outer spaces
token = token.strip()
if '=' not in token:
unparsed.append(token)
continue
        # split only on the first '=' so values may themselves contain '='
        key, value = token.split('=', 1)
key = key.strip()
if ' ' in key:
_unparsed, key = key.rsplit(' ', 1)
unparsed.append(_unparsed)
for conversion in (int, float):
try:
value = conversion(value)
except ValueError:
pass
else:
break
parsed[key] = value
if unparsed:
parsed['zb.unparsed'] = json.dumps(unparsed)
parsed['zb.unparsed.line'] = buf
return parsed
ENVELOPE_PARSERS.append(parse_comma_equals)
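# Rough example of the comma/equals parser feeding parse_buf(); message ids and
# timestamps in the resulting envelope are generated per call, values shown here
# are purely illustrative:
#     parse_comma_equals('temperature=22.5, humidity=40')
#     # -> {'temperature': 22.5, 'humidity': 40}
#     parse_buf('temperature=22.5, humidity=40')
#     # -> '{"message_id": "...", "timestamp": "...", "action": "data",
#     #      "params": {"temperature": 22.5, "humidity": 40, "key": "sensor"}}\r\n'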
# NOTE: this is the "if all else fails" parser; should be appended last!
def parse_log_envelope(buf):
params = {
'action': 'log',
'line': buf,
}
return params
ENVELOPE_PARSERS.append(parse_log_envelope)
def get_envelope(action, params, request_message_id=None, client_id=None, as_json=True):
data = {
'message_id': str(uuid.uuid4()),
'timestamp': timestamp(),
'action': action,
'params': params,
}
if request_message_id:
data.update({
'request_message_id': request_message_id,
})
if client_id:
data.update({
'client_id': client_id,
})
if as_json:
return jsonify(data)
else:
return data
def jsonify(data):
return '{}\r\n'.format(json.dumps(data))
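# A serialized envelope is a single JSON object terminated by CRLF; the optional
# request_message_id/client_id fields only appear when supplied, e.g.
# (illustrative):
#     get_envelope('log', {'line': 'hello'}, client_id='abc123')
#     # -> '{"message_id": "...", "timestamp": "...", "action": "log",
#     #      "params": {"line": "hello"}, "client_id": "abc123"}\r\n'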
def parse_buf(buf):
"""
parse the given buffer into an envelope
:param buf: string, may be in a parseable format
:return: envelope
"""
for parser in ENVELOPE_PARSERS:
params = parser(buf)
if params:
            return get_parsed_envelope(params)

# --- file boundary: zymbit-connect / src/zymbit/util/envelope.py ---
from __future__ import absolute_import
import datetime
import inspect
import logging
import time
from .time import LONG_TIME_AGO, now, get_sleep_time
NO_SLEEP = '-- NO SLEEP --'
class StateMachine(object):
transitions = {}
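    # Subclasses declare transitions with plain function objects; they are
    # rebound to instance methods in _setup_transitions(). A hypothetical
    # subclass might declare (sketch, state names invented):
    #
    #     transitions = {
    #         start: {True: connect},
    #         connect: {ConnectionError: start, True: run_loop},
    #     }
    #
    # loop() then switches to the state whose key equals the current state's
    # return value, or matches the class of a raised exception.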
def __init__(self, raise_exceptions=True):
self._run = True
self._state = self.start
self.raise_exceptions = raise_exceptions
self.loop_sleep_time = 1.0
self.last_exception = None
self._setup_transitions()
self.logger.debug('transitions={}'.format(self.transitions))
self.check_start = False
self.last_start = LONG_TIME_AGO
self.next_start = LONG_TIME_AGO
self.start_fail_count = 0
self.start_success_delta = datetime.timedelta(seconds=10)
def _setup_transitions(self):
# convert the transition functions into bound methods
_transitions = {}
for k, v in list(self.transitions.items()):
bound_method = getattr(self, k.__name__)
t_transitions = dict([(kk, getattr(self, vv.__name__)) for kk, vv in list(v.items())])
_transitions[bound_method] = t_transitions
self.transitions = _transitions
@property
def logger(self):
return logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))
def loop(self):
result = None
try:
result = self._state()
except Exception as exc: # global exception catcher here to use for state transitions
self.last_exception = exc
result = exc
if not inspect.isclass(exc):
result = exc.__class__
if self.raise_exceptions:
raise
else:
self.logger.exception(exc)
else:
self.last_exception = None
finally:
transitions = self.transitions.get(self._state, {})
for _result, _state in list(transitions.items()):
                if _result == result or (inspect.isclass(_result) and
                                         inspect.isclass(result) and
                                         issubclass(result, _result)):
self._state = _state
return result
def quit(self):
self._run = False
def run(self):
while self._run:
start = now()
current_state = self._state
result = self.loop()
# only sleep when there is no state transition
if current_state == self._state and result != NO_SLEEP:
sleep_time = get_sleep_time(self.loop_sleep_time, start)
# self.logger.debug('loop_sleep_time={}, sleep_time={}'.format(self.loop_sleep_time, sleep_time))
time.sleep(sleep_time)
def start(self):
_now = now()
if self.check_start:
self.check_start = False
if _now > self.last_start + self.start_success_delta:
# the current time is greater than the last start time + the
# success delta; reset the fail count
self.start_fail_count = 0
else:
# otherwise, increment the fail count and calculate an exponential
# backoff
self.start_fail_count += 1
seconds = min(300, 2 ** self.start_fail_count)
backoff = datetime.timedelta(seconds=seconds)
self.next_start = _now + backoff
self.logger.info('next start at {}'.format(self.next_start))
if _now < self.next_start:
# the current time is before the next start, hold off
return False
self.check_start = True
self.last_start = _now
        return True

# --- file boundary: zymbit-connect / src/zymbit/util/statemachine.py ---
trequests
=========
.. image:: https://travis-ci.org/1stvamp/trequests.png?branch=master
A Tornado async HTTP/HTTPS client adapter for python-requests.
The problem
-----------
You enjoy using `Tornado <http://www.tornadoweb.org/>`_ to build fast non-blocking web applications, and you want to use a library from PyPI that makes a few HTTP requests. Pretty much every dev and their dog uses `Requests <http://python-requests.org/>`_ to make HTTP requests (rightly so, because it's *awesome*), but Requests knows nothing about the event loop and cannot yield when a socket blocks, so any time you use such a library it blocks your request handling, along with grud-knows what other worlds of pain.
The solution
------------
Luckily there are solutions; one is to use the `greenlet <http://greenlet.readthedocs.org/>`_ module to wrap blocking operations and swap Tornado coroutines at the right time, and there is even the handy `tornalet <https://github.com/Gawen/tornalet>`_ module which handles this for you.
To make life even easier, you lucky lucky people, I've created ``trequests``, an async Requests adapter which uses greenlets (via tornalet) and Tornado's built-in non-blocking HTTP client methods to make calls into any library that uses Requests non-blocking.
Installation
------------
.. code-block:: bash
$ pip install trequests
Usage
-----
.. code-block:: python
# Assume bobs_big_data uses python-requests for HTTP requests
import bobs_big_data
from tornado.web import RequestHandler
from trequests import setup_session
from tornalet import tornalet
    # Tell requests to use our AsyncHTTPAdapter for the default
    # session instance; you can also pass your own through
setup_session()
class WebHandler(RequestHandler):
@tornalet
def get(self):
data = {'foo': 'bar'}
# This will now unblock the current coroutine, like magic
response = bobs_big_data.BigData(data).post()
return self.write(response)
Tests
-----
To run the basic test suite, hit up ``python setup.py test``.
Caveats
-------
``trequests`` has been used in production in a large scale metrics application, and is a very small and quite simple module.
**However** I've released it as ``0.9.x`` mainly because it isn't yet 100% compatible with the Requests adapter API, most notably *cookie jar* and *session* support, which I will improve (or please send me a pull request if you fancy adding support) and release as a ``1.x`` branch when I have the time.
Also at the moment the ``setup_session`` utility actually monkey patches the ``session`` utility functions in Requests, as this was the only way I could see to override the mounts on "default" session instances (e.g. those created for every call when a session isn't provided). I'm hoping to change this in the future.
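If you want to mount the adapter on a session you manage yourself, or only for certain protocols, something along these lines should work with the current ``setup_session`` signature (an untested sketch, not official documentation):

.. code-block:: python

    import requests
    from trequests import setup_session

    # Mount the async adapter only for HTTPS on an explicit session
    session = requests.session()
    setup_session(session=session, mounts=('https://',))

    # Requests-based libraries that accept a session object can now use it
    # inside a @tornalet-wrapped handler without blocking the IOLoop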
.. file boundary: zymbit-trequests / README.rst
import requests
from os import path
from tornalet import asyncify
from tornado.httpclient import AsyncHTTPClient
def get_version_string():
    with open(path.join(path.dirname(__file__),
                        'trequests_version.txt'), 'r') as version_file:
        return version_file.read().strip()
def get_version():
return get_version_string().split('.')
__version__ = get_version_string()
# Don't know how to handle this yet, so just mock it out for now
requests.adapters.extract_cookies_to_jar = lambda a, b, c: None
class AsyncHTTPAdapter(requests.adapters.HTTPAdapter):
"""A python-requests HTTP/HTTPS adapter that uses the Tornado
AsyncHTTPClient and greenlets (via the tornalet library) to perform a
non-blocking call inside the Tornado IOLoop whenever a
requests.[get/post/put/delete/request]() call is made. It then wraps the
tornado.httpclient.HTTPResponse as a requests.models.Response instance and
    returns it so that any library calling Requests gets what it expects (mostly).
"""
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
http_client = AsyncHTTPClient()
# This where the magic happens, tornalet.asyncify wraps the parent
# call in a greenlet that can be swapped out the same as any
# aync tornado IO handler call.
resp = asyncify(http_client.fetch)(request=request.url,
method=request.method,
body=request.body,
headers=request.headers,
validate_cert=verify)
# We probably don't get this from any of the tornado adaptors, so
# we stub it out as Unknown
resp.reason = 'Unknown'
resp.content = resp.body
r = self.build_response(request, resp)
# Reset the code and content as they're not parsed by build_response
r.status_code = resp.code
r._content = resp.content
return r
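# Once an AsyncHTTPAdapter is mounted (see setup_session below), code running
# under a @tornalet-wrapped handler can call plain Requests-based libraries,
# e.g. requests.get('https://example.com'), and the fetch is served by
# Tornado's AsyncHTTPClient without blocking the IOLoop.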
def setup_session(session=None, mounts=None):
    """Mount the AsyncHTTPAdapter for a given session instance,
    or for the default instance in python-requests, for a given set of mounts
    or just for the default HTTP/HTTPS protocols.
    """
    if mounts is None:
        mounts = ('http://', 'https://')

    def _session():
        # Reuse the session that was passed in, or build a fresh one to act
        # as the default session returned by requests.session()
        target = session if session is not None else requests.sessions.Session()
        for mount in mounts:
            target.mount(mount, AsyncHTTPAdapter())
        return target

    # NOTE: the default session must not be created before this check,
    # otherwise the monkey-patching branch below is unreachable
    if session is None:
        # No explicit session: replace requests' default session factory so
        # implicitly created sessions get the async adapter mounted
        requests.session = requests.sessions.session = _session
    else:
        # An explicit session was provided: mount the adapter on it now
        _session()

# --- file boundary: zymbit-trequests / trequests/__init__.py ---